Diffstat (limited to 'chromium/v8')
-rw-r--r--  chromium/v8/AUTHORS | 7
-rw-r--r--  chromium/v8/BUILD.gn | 626
-rw-r--r--  chromium/v8/ChangeLog | 3734
-rw-r--r--  chromium/v8/DEPS | 63
-rw-r--r--  chromium/v8/Makefile | 9
-rw-r--r--  chromium/v8/PRESUBMIT.py | 28
-rw-r--r--  chromium/v8/README.md | 6
-rw-r--r--  chromium/v8/WATCHLISTS | 22
-rw-r--r--  chromium/v8/build/all.gyp | 18
-rw-r--r--  chromium/v8/build/config/win/msvs_dependencies.isolate | 77
-rw-r--r--  chromium/v8/build/features.gypi | 11
-rwxr-xr-x  chromium/v8/build/get_landmines.py | 2
-rw-r--r--  chromium/v8/build/gyp_environment.py | 2
-rwxr-xr-x  chromium/v8/build/gyp_v8 | 29
-rw-r--r--  chromium/v8/build/isolate.gypi | 24
-rw-r--r--  chromium/v8/build/standalone.gypi | 96
-rw-r--r--  chromium/v8/build/toolchain.gypi | 127
-rw-r--r--  chromium/v8/build/vs_toolchain.py | 268
-rw-r--r--  chromium/v8/docs/README.md | 2
-rw-r--r--  chromium/v8/docs/arm_debugging_with_the_simulator.md | 205
-rw-r--r--  chromium/v8/docs/becoming_v8_committer.md | 38
-rw-r--r--  chromium/v8/docs/building_with_gyp.md | 260
-rw-r--r--  chromium/v8/docs/contributing.md | 32
-rw-r--r--  chromium/v8/docs/cross_compiling_for_arm.md | 151
-rw-r--r--  chromium/v8/docs/d8_on_android.md | 101
-rw-r--r--  chromium/v8/docs/debugger_protocol.md | 934
-rw-r--r--  chromium/v8/docs/gdb_jit_interface.md | 63
-rw-r--r--  chromium/v8/docs/handling_of_ports.md | 24
-rw-r--r--  chromium/v8/docs/i18n_support.md | 44
-rw-r--r--  chromium/v8/docs/javascript.md | 6
-rw-r--r--  chromium/v8/docs/javascript_stack_trace_api.md | 161
-rw-r--r--  chromium/v8/docs/merging_and_patching.md | 66
-rw-r--r--  chromium/v8/docs/profiling_chromium_with_v8.md | 34
-rw-r--r--  chromium/v8/docs/release_process.md | 57
-rw-r--r--  chromium/v8/docs/runtime_functions.md | 14
-rw-r--r--  chromium/v8/docs/source.md | 39
-rw-r--r--  chromium/v8/docs/testing.md | 58
-rw-r--r--  chromium/v8/docs/triaging_issues.md | 22
-rw-r--r--  chromium/v8/docs/using_git.md | 147
-rw-r--r--  chromium/v8/docs/v8_c_plus_plus_styleand_sops.md | 7
-rw-r--r--  chromium/v8/docs/v8_committers_responsibility.md | 41
-rw-r--r--  chromium/v8/docs/v8_profiler.md | 141
-rw-r--r--  chromium/v8/include/v8-debug.h | 28
-rw-r--r--  chromium/v8/include/v8-experimental.h | 53
-rw-r--r--  chromium/v8/include/v8-platform.h | 47
-rw-r--r--  chromium/v8/include/v8-testing.h | 2
-rw-r--r--  chromium/v8/include/v8-version.h | 6
-rw-r--r--  chromium/v8/include/v8.h | 582
-rw-r--r--  chromium/v8/include/v8config.h | 20
-rw-r--r--  chromium/v8/infra/config/cq.cfg | 37
-rw-r--r--  chromium/v8/samples/samples.gyp | 4
-rw-r--r--  chromium/v8/samples/shell.cc | 8
-rw-r--r--  chromium/v8/snapshot_toolchain.gni | 4
-rw-r--r--  chromium/v8/src/DEPS | 6
-rw-r--r--  chromium/v8/src/OWNERS | 2
-rw-r--r--  chromium/v8/src/accessors.cc | 71
-rw-r--r--  chromium/v8/src/accessors.h | 3
-rw-r--r--  chromium/v8/src/address-map.cc | 38
-rw-r--r--  chromium/v8/src/address-map.h | 184
-rw-r--r--  chromium/v8/src/allocation-site-scopes.h | 7
-rw-r--r--  chromium/v8/src/allocation.h | 3
-rw-r--r--  chromium/v8/src/api-experimental.cc | 126
-rw-r--r--  chromium/v8/src/api-experimental.h | 28
-rw-r--r--  chromium/v8/src/api-natives.cc | 144
-rw-r--r--  chromium/v8/src/api-natives.h | 9
-rw-r--r--  chromium/v8/src/api.cc | 1071
-rw-r--r--  chromium/v8/src/api.h | 25
-rw-r--r--  chromium/v8/src/arguments.h | 10
-rw-r--r--  chromium/v8/src/arm/assembler-arm-inl.h | 70
-rw-r--r--  chromium/v8/src/arm/assembler-arm.cc | 161
-rw-r--r--  chromium/v8/src/arm/assembler-arm.h | 280
-rw-r--r--  chromium/v8/src/arm/builtins-arm.cc | 1910
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.cc | 547
-rw-r--r--  chromium/v8/src/arm/code-stubs-arm.h | 8
-rw-r--r--  chromium/v8/src/arm/codegen-arm.cc | 65
-rw-r--r--  chromium/v8/src/arm/codegen-arm.h | 5
-rw-r--r--  chromium/v8/src/arm/constants-arm.cc | 11
-rw-r--r--  chromium/v8/src/arm/constants-arm.h | 3
-rw-r--r--  chromium/v8/src/arm/deoptimizer-arm.cc | 26
-rw-r--r--  chromium/v8/src/arm/disasm-arm.cc | 10
-rw-r--r--  chromium/v8/src/arm/frames-arm.h | 3
-rw-r--r--  chromium/v8/src/arm/interface-descriptors-arm.cc | 83
-rw-r--r--  chromium/v8/src/arm/interface-descriptors-arm.h | 4
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.cc | 526
-rw-r--r--  chromium/v8/src/arm/macro-assembler-arm.h | 163
-rw-r--r--  chromium/v8/src/arm/simulator-arm.cc | 127
-rw-r--r--  chromium/v8/src/arm/simulator-arm.h | 50
-rw-r--r--  chromium/v8/src/arm64/assembler-arm64-inl.h | 33
-rw-r--r--  chromium/v8/src/arm64/assembler-arm64.cc | 74
-rw-r--r--  chromium/v8/src/arm64/assembler-arm64.h | 313
-rw-r--r--  chromium/v8/src/arm64/builtins-arm64.cc | 1949
-rw-r--r--  chromium/v8/src/arm64/code-stubs-arm64.cc | 602
-rw-r--r--  chromium/v8/src/arm64/code-stubs-arm64.h | 4
-rw-r--r--  chromium/v8/src/arm64/codegen-arm64.cc | 32
-rw-r--r--  chromium/v8/src/arm64/codegen-arm64.h | 5
-rw-r--r--  chromium/v8/src/arm64/constants-arm64.h | 68
-rw-r--r--  chromium/v8/src/arm64/decoder-arm64-inl.h | 3
-rw-r--r--  chromium/v8/src/arm64/decoder-arm64.h | 3
-rw-r--r--  chromium/v8/src/arm64/deoptimizer-arm64.cc | 12
-rw-r--r--  chromium/v8/src/arm64/disasm-arm64.cc | 164
-rw-r--r--  chromium/v8/src/arm64/disasm-arm64.h | 13
-rw-r--r--  chromium/v8/src/arm64/frames-arm64.h | 3
-rw-r--r--  chromium/v8/src/arm64/instructions-arm64.cc | 15
-rw-r--r--  chromium/v8/src/arm64/instructions-arm64.h | 10
-rw-r--r--  chromium/v8/src/arm64/instrument-arm64.h | 3
-rw-r--r--  chromium/v8/src/arm64/interface-descriptors-arm64.cc | 92
-rw-r--r--  chromium/v8/src/arm64/interface-descriptors-arm64.h | 4
-rw-r--r--  chromium/v8/src/arm64/macro-assembler-arm64-inl.h | 37
-rw-r--r--  chromium/v8/src/arm64/macro-assembler-arm64.cc | 635
-rw-r--r--  chromium/v8/src/arm64/macro-assembler-arm64.h | 174
-rw-r--r--  chromium/v8/src/arm64/simulator-arm64.cc | 25
-rw-r--r--  chromium/v8/src/arm64/simulator-arm64.h | 55
-rw-r--r--  chromium/v8/src/arm64/utils-arm64.h | 9
-rw-r--r--  chromium/v8/src/assembler.cc | 114
-rw-r--r--  chromium/v8/src/assembler.h | 95
-rw-r--r--  chromium/v8/src/assert-scope.h | 3
-rw-r--r--  chromium/v8/src/ast/OWNERS | 8
-rw-r--r--  chromium/v8/src/ast/ast-expression-rewriter.cc | 409
-rw-r--r--  chromium/v8/src/ast/ast-expression-rewriter.h | 54
-rw-r--r--  chromium/v8/src/ast/ast-expression-visitor.cc (renamed from chromium/v8/src/ast-expression-visitor.cc) | 84
-rw-r--r--  chromium/v8/src/ast/ast-expression-visitor.h (renamed from chromium/v8/src/ast-expression-visitor.h) | 21
-rw-r--r--  chromium/v8/src/ast/ast-literal-reindexer.cc (renamed from chromium/v8/src/ast-literal-reindexer.cc) | 21
-rw-r--r--  chromium/v8/src/ast/ast-literal-reindexer.h (renamed from chromium/v8/src/ast-literal-reindexer.h) | 17
-rw-r--r--  chromium/v8/src/ast/ast-numbering.cc (renamed from chromium/v8/src/ast-numbering.cc) | 60
-rw-r--r--  chromium/v8/src/ast/ast-numbering.h (renamed from chromium/v8/src/ast-numbering.h) | 6
-rw-r--r--  chromium/v8/src/ast/ast-value-factory.cc (renamed from chromium/v8/src/ast-value-factory.cc) | 4
-rw-r--r--  chromium/v8/src/ast/ast-value-factory.h (renamed from chromium/v8/src/ast-value-factory.h) | 25
-rw-r--r--  chromium/v8/src/ast/ast.cc (renamed from chromium/v8/src/ast.cc) | 451
-rw-r--r--  chromium/v8/src/ast/ast.h (renamed from chromium/v8/src/ast.h) | 1012
-rw-r--r--  chromium/v8/src/ast/modules.cc (renamed from chromium/v8/src/modules.cc) | 4
-rw-r--r--  chromium/v8/src/ast/modules.h (renamed from chromium/v8/src/modules.h) | 9
-rw-r--r--  chromium/v8/src/ast/prettyprinter.cc (renamed from chromium/v8/src/prettyprinter.cc) | 246
-rw-r--r--  chromium/v8/src/ast/prettyprinter.h (renamed from chromium/v8/src/prettyprinter.h) | 22
-rw-r--r--  chromium/v8/src/ast/scopeinfo.cc (renamed from chromium/v8/src/scopeinfo.cc) | 11
-rw-r--r--  chromium/v8/src/ast/scopeinfo.h (renamed from chromium/v8/src/scopeinfo.h) | 13
-rw-r--r--  chromium/v8/src/ast/scopes.cc (renamed from chromium/v8/src/scopes.cc) | 210
-rw-r--r--  chromium/v8/src/ast/scopes.h (renamed from chromium/v8/src/scopes.h) | 107
-rw-r--r--  chromium/v8/src/ast/variables.cc (renamed from chromium/v8/src/variables.cc) | 7
-rw-r--r--  chromium/v8/src/ast/variables.h (renamed from chromium/v8/src/variables.h) | 30
-rw-r--r--  chromium/v8/src/atomic-utils.h | 7
-rw-r--r--  chromium/v8/src/background-parsing-task.h | 6
-rw-r--r--  chromium/v8/src/bailout-reason.h | 40
-rw-r--r--  chromium/v8/src/base.isolate | 64
-rw-r--r--  chromium/v8/src/base/atomicops.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_arm64_gcc.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_arm_gcc.h | 6
-rw-r--r--  chromium/v8/src/base/atomicops_internals_atomicword_compat.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_mac.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_mips64_gcc.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_mips_gcc.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_portable.h | 4
-rw-r--r--  chromium/v8/src/base/atomicops_internals_ppc_gcc.h | 4
-rw-r--r--  chromium/v8/src/base/atomicops_internals_x86_gcc.cc | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_x86_gcc.h | 3
-rw-r--r--  chromium/v8/src/base/atomicops_internals_x86_msvc.h | 3
-rw-r--r--  chromium/v8/src/base/bits.h | 20
-rw-r--r--  chromium/v8/src/base/build_config.h | 32
-rw-r--r--  chromium/v8/src/base/cpu.cc | 3
-rw-r--r--  chromium/v8/src/base/cpu.h | 3
-rw-r--r--  chromium/v8/src/base/flags.h | 23
-rw-r--r--  chromium/v8/src/base/lazy-instance.h | 3
-rw-r--r--  chromium/v8/src/base/logging.h | 7
-rw-r--r--  chromium/v8/src/base/macros.h | 95
-rw-r--r--  chromium/v8/src/base/once.cc | 3
-rw-r--r--  chromium/v8/src/base/once.h | 3
-rw-r--r--  chromium/v8/src/base/platform/condition-variable.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/condition-variable.h | 3
-rw-r--r--  chromium/v8/src/base/platform/elapsed-timer.h | 3
-rw-r--r--  chromium/v8/src/base/platform/mutex.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/mutex.h | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-aix.cc | 4
-rw-r--r--  chromium/v8/src/base/platform/platform-cygwin.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-freebsd.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-linux.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-macos.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-openbsd.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-qnx.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-solaris.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/platform-win32.cc | 23
-rw-r--r--  chromium/v8/src/base/platform/platform.h | 5
-rw-r--r--  chromium/v8/src/base/platform/semaphore.cc | 7
-rw-r--r--  chromium/v8/src/base/platform/semaphore.h | 3
-rw-r--r--  chromium/v8/src/base/platform/time.cc | 3
-rw-r--r--  chromium/v8/src/base/platform/time.h | 3
-rw-r--r--  chromium/v8/src/base/utils/random-number-generator.cc | 37
-rw-r--r--  chromium/v8/src/base/utils/random-number-generator.h | 43
-rw-r--r--  chromium/v8/src/bignum-dtoa.h | 3
-rw-r--r--  chromium/v8/src/bignum.cc | 4
-rw-r--r--  chromium/v8/src/bignum.h | 3
-rw-r--r--  chromium/v8/src/bit-vector.cc | 1
-rw-r--r--  chromium/v8/src/bootstrapper.cc | 1883
-rw-r--r--  chromium/v8/src/bootstrapper.h | 11
-rw-r--r--  chromium/v8/src/builtins.cc | 2281
-rw-r--r--  chromium/v8/src/builtins.h | 421
-rw-r--r--  chromium/v8/src/cached-powers.h | 3
-rw-r--r--  chromium/v8/src/cancelable-task.cc | 105
-rw-r--r--  chromium/v8/src/cancelable-task.h | 110
-rw-r--r--  chromium/v8/src/char-predicates-inl.h | 3
-rw-r--r--  chromium/v8/src/char-predicates.h | 3
-rw-r--r--  chromium/v8/src/checks.h | 3
-rw-r--r--  chromium/v8/src/code-factory.cc | 177
-rw-r--r--  chromium/v8/src/code-factory.h | 44
-rw-r--r--  chromium/v8/src/code-stubs-hydrogen.cc | 237
-rw-r--r--  chromium/v8/src/code-stubs.cc | 115
-rw-r--r--  chromium/v8/src/code-stubs.h | 355
-rw-r--r--  chromium/v8/src/code-stubs.js | 69
-rw-r--r--  chromium/v8/src/codegen.cc | 72
-rw-r--r--  chromium/v8/src/codegen.h | 22
-rw-r--r--  chromium/v8/src/compilation-cache.h | 3
-rw-r--r--  chromium/v8/src/compilation-dependencies.cc | 33
-rw-r--r--  chromium/v8/src/compilation-dependencies.h | 9
-rw-r--r--  chromium/v8/src/compiler.cc | 290
-rw-r--r--  chromium/v8/src/compiler.h | 115
-rw-r--r--  chromium/v8/src/compiler/OWNERS | 1
-rw-r--r--  chromium/v8/src/compiler/access-builder.cc | 295
-rw-r--r--  chromium/v8/src/compiler/access-builder.h | 68
-rw-r--r--  chromium/v8/src/compiler/access-info.cc | 488
-rw-r--r--  chromium/v8/src/compiler/access-info.h | 164
-rw-r--r--  chromium/v8/src/compiler/arm/code-generator-arm.cc | 167
-rw-r--r--  chromium/v8/src/compiler/arm/instruction-codes-arm.h | 8
-rw-r--r--  chromium/v8/src/compiler/arm/instruction-scheduler-arm.cc | 129
-rw-r--r--  chromium/v8/src/compiler/arm/instruction-selector-arm.cc | 375
-rw-r--r--  chromium/v8/src/compiler/arm64/code-generator-arm64.cc | 422
-rw-r--r--  chromium/v8/src/compiler/arm64/instruction-codes-arm64.h | 25
-rw-r--r--  chromium/v8/src/compiler/arm64/instruction-scheduler-arm64.cc | 224
-rw-r--r--  chromium/v8/src/compiler/arm64/instruction-selector-arm64.cc | 655
-rw-r--r--  chromium/v8/src/compiler/ast-graph-builder.cc | 1163
-rw-r--r--  chromium/v8/src/compiler/ast-graph-builder.h | 94
-rw-r--r--  chromium/v8/src/compiler/ast-loop-assignment-analyzer.cc | 28
-rw-r--r--  chromium/v8/src/compiler/ast-loop-assignment-analyzer.h | 13
-rw-r--r--  chromium/v8/src/compiler/basic-block-instrumentor.cc | 9
-rw-r--r--  chromium/v8/src/compiler/branch-elimination.cc | 269
-rw-r--r--  chromium/v8/src/compiler/branch-elimination.h | 97
-rw-r--r--  chromium/v8/src/compiler/bytecode-branch-analysis.cc | 125
-rw-r--r--  chromium/v8/src/compiler/bytecode-branch-analysis.h | 79
-rw-r--r--  chromium/v8/src/compiler/bytecode-graph-builder.cc | 1645
-rw-r--r--  chromium/v8/src/compiler/bytecode-graph-builder.h | 194
-rw-r--r--  chromium/v8/src/compiler/c-linkage.cc | 11
-rw-r--r--  chromium/v8/src/compiler/change-lowering.cc | 281
-rw-r--r--  chromium/v8/src/compiler/change-lowering.h | 8
-rw-r--r--  chromium/v8/src/compiler/coalesced-live-ranges.cc | 2
-rw-r--r--  chromium/v8/src/compiler/coalesced-live-ranges.h | 2
-rw-r--r--  chromium/v8/src/compiler/code-generator-impl.h | 15
-rw-r--r--  chromium/v8/src/compiler/code-generator.cc | 229
-rw-r--r--  chromium/v8/src/compiler/code-generator.h | 50
-rw-r--r--  chromium/v8/src/compiler/code-stub-assembler.cc | 176
-rw-r--r--  chromium/v8/src/compiler/code-stub-assembler.h | 96
-rw-r--r--  chromium/v8/src/compiler/common-node-cache.cc | 6
-rw-r--r--  chromium/v8/src/compiler/common-node-cache.h | 8
-rw-r--r--  chromium/v8/src/compiler/common-operator-reducer.cc | 14
-rw-r--r--  chromium/v8/src/compiler/common-operator-reducer.h | 1
-rw-r--r--  chromium/v8/src/compiler/common-operator.cc | 244
-rw-r--r--  chromium/v8/src/compiler/common-operator.h | 42
-rw-r--r--  chromium/v8/src/compiler/control-builders.cc | 10
-rw-r--r--  chromium/v8/src/compiler/control-builders.h | 31
-rw-r--r--  chromium/v8/src/compiler/control-equivalence.cc | 4
-rw-r--r--  chromium/v8/src/compiler/diamond.h | 4
-rw-r--r--  chromium/v8/src/compiler/escape-analysis-reducer.cc | 313
-rw-r--r--  chromium/v8/src/compiler/escape-analysis-reducer.h | 63
-rw-r--r--  chromium/v8/src/compiler/escape-analysis.cc | 1471
-rw-r--r--  chromium/v8/src/compiler/escape-analysis.h | 169
-rw-r--r--  chromium/v8/src/compiler/fast-accessor-assembler.cc | 220
-rw-r--r--  chromium/v8/src/compiler/fast-accessor-assembler.h | 106
-rw-r--r--  chromium/v8/src/compiler/frame-elider.cc | 3
-rw-r--r--  chromium/v8/src/compiler/frame-states.cc | 9
-rw-r--r--  chromium/v8/src/compiler/frame-states.h | 11
-rw-r--r--  chromium/v8/src/compiler/frame.cc | 40
-rw-r--r--  chromium/v8/src/compiler/frame.h | 196
-rw-r--r--  chromium/v8/src/compiler/gap-resolver.cc | 2
-rw-r--r--  chromium/v8/src/compiler/graph-reducer.cc | 9
-rw-r--r--  chromium/v8/src/compiler/graph-reducer.h | 10
-rw-r--r--  chromium/v8/src/compiler/graph-visualizer.cc | 221
-rw-r--r--  chromium/v8/src/compiler/graph-visualizer.h | 13
-rw-r--r--  chromium/v8/src/compiler/graph.h | 3
-rw-r--r--  chromium/v8/src/compiler/greedy-allocator.cc | 66
-rw-r--r--  chromium/v8/src/compiler/greedy-allocator.h | 8
-rw-r--r--  chromium/v8/src/compiler/ia32/code-generator-ia32.cc | 245
-rw-r--r--  chromium/v8/src/compiler/ia32/instruction-codes-ia32.h | 6
-rw-r--r--  chromium/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 135
-rw-r--r--  chromium/v8/src/compiler/ia32/instruction-selector-ia32.cc | 465
-rw-r--r--  chromium/v8/src/compiler/instruction-codes.h | 71
-rw-r--r--  chromium/v8/src/compiler/instruction-scheduler.cc | 280
-rw-r--r--  chromium/v8/src/compiler/instruction-scheduler.h | 162
-rw-r--r--  chromium/v8/src/compiler/instruction-selector-impl.h | 112
-rw-r--r--  chromium/v8/src/compiler/instruction-selector.cc | 774
-rw-r--r--  chromium/v8/src/compiler/instruction-selector.h | 89
-rw-r--r--  chromium/v8/src/compiler/instruction.cc | 225
-rw-r--r--  chromium/v8/src/compiler/instruction.h | 384
-rw-r--r--  chromium/v8/src/compiler/interpreter-assembler.cc | 470
-rw-r--r--  chromium/v8/src/compiler/interpreter-assembler.h | 77
-rw-r--r--  chromium/v8/src/compiler/js-builtin-reducer.cc | 16
-rw-r--r--  chromium/v8/src/compiler/js-builtin-reducer.h | 9
-rw-r--r--  chromium/v8/src/compiler/js-call-reducer.cc | 557
-rw-r--r--  chromium/v8/src/compiler/js-call-reducer.h | 67
-rw-r--r--  chromium/v8/src/compiler/js-context-specialization.cc | 20
-rw-r--r--  chromium/v8/src/compiler/js-frame-specialization.cc | 36
-rw-r--r--  chromium/v8/src/compiler/js-generic-lowering.cc | 355
-rw-r--r--  chromium/v8/src/compiler/js-global-object-specialization.cc | 320
-rw-r--r--  chromium/v8/src/compiler/js-global-object-specialization.h | 83
-rw-r--r--  chromium/v8/src/compiler/js-graph.cc | 53
-rw-r--r--  chromium/v8/src/compiler/js-graph.h | 14
-rw-r--r--  chromium/v8/src/compiler/js-inlining-heuristic.cc | 141
-rw-r--r--  chromium/v8/src/compiler/js-inlining-heuristic.h | 62
-rw-r--r--  chromium/v8/src/compiler/js-inlining.cc | 298
-rw-r--r--  chromium/v8/src/compiler/js-inlining.h | 28
-rw-r--r--  chromium/v8/src/compiler/js-intrinsic-lowering.cc | 471
-rw-r--r--  chromium/v8/src/compiler/js-intrinsic-lowering.h | 44
-rw-r--r--  chromium/v8/src/compiler/js-native-context-specialization.cc | 1033
-rw-r--r--  chromium/v8/src/compiler/js-native-context-specialization.h | 116
-rw-r--r--  chromium/v8/src/compiler/js-operator.cc | 738
-rw-r--r--  chromium/v8/src/compiler/js-operator.h | 461
-rw-r--r--  chromium/v8/src/compiler/js-type-feedback-lowering.cc | 119
-rw-r--r--  chromium/v8/src/compiler/js-type-feedback-lowering.h | 66
-rw-r--r--  chromium/v8/src/compiler/js-type-feedback.cc | 364
-rw-r--r--  chromium/v8/src/compiler/js-type-feedback.h | 119
-rw-r--r--  chromium/v8/src/compiler/js-typed-lowering.cc | 1820
-rw-r--r--  chromium/v8/src/compiler/js-typed-lowering.h | 55
-rw-r--r--  chromium/v8/src/compiler/linkage.cc | 285
-rw-r--r--  chromium/v8/src/compiler/linkage.h | 93
-rw-r--r--  chromium/v8/src/compiler/live-range-separator.cc | 169
-rw-r--r--  chromium/v8/src/compiler/live-range-separator.h | 5
-rw-r--r--  chromium/v8/src/compiler/load-elimination.cc | 15
-rw-r--r--  chromium/v8/src/compiler/loop-analysis.h | 2
-rw-r--r--  chromium/v8/src/compiler/loop-peeling.cc | 5
-rw-r--r--  chromium/v8/src/compiler/machine-operator-reducer.cc | 44
-rw-r--r--  chromium/v8/src/compiler/machine-operator.cc | 168
-rw-r--r--  chromium/v8/src/compiler/machine-operator.h | 91
-rw-r--r--  chromium/v8/src/compiler/machine-type.cc | 46
-rw-r--r--  chromium/v8/src/compiler/machine-type.h | 130
-rw-r--r--  chromium/v8/src/compiler/mips/OWNERS | 1
-rw-r--r--  chromium/v8/src/compiler/mips/code-generator-mips.cc | 536
-rw-r--r--  chromium/v8/src/compiler/mips/instruction-codes-mips.h | 23
-rw-r--r--  chromium/v8/src/compiler/mips/instruction-scheduler-mips.cc | 26
-rw-r--r--  chromium/v8/src/compiler/mips/instruction-selector-mips.cc | 667
-rw-r--r--  chromium/v8/src/compiler/mips64/OWNERS | 1
-rw-r--r--  chromium/v8/src/compiler/mips64/code-generator-mips64.cc | 648
-rw-r--r--  chromium/v8/src/compiler/mips64/instruction-codes-mips64.h | 35
-rw-r--r--  chromium/v8/src/compiler/mips64/instruction-scheduler-mips64.cc | 26
-rw-r--r--  chromium/v8/src/compiler/mips64/instruction-selector-mips64.cc | 971
-rw-r--r--  chromium/v8/src/compiler/move-optimizer.cc | 207
-rw-r--r--  chromium/v8/src/compiler/move-optimizer.h | 11
-rw-r--r--  chromium/v8/src/compiler/node-cache.h | 3
-rw-r--r--  chromium/v8/src/compiler/node-matchers.h | 26
-rw-r--r--  chromium/v8/src/compiler/node-properties.cc | 126
-rw-r--r--  chromium/v8/src/compiler/node-properties.h | 32
-rw-r--r--  chromium/v8/src/compiler/node.cc | 16
-rw-r--r--  chromium/v8/src/compiler/node.h | 1
-rw-r--r--  chromium/v8/src/compiler/opcodes.h | 77
-rw-r--r--  chromium/v8/src/compiler/operator-properties.cc | 19
-rw-r--r--  chromium/v8/src/compiler/operator.h | 61
-rw-r--r--  chromium/v8/src/compiler/osr.cc | 2
-rw-r--r--  chromium/v8/src/compiler/pipeline-statistics.cc | 6
-rw-r--r--  chromium/v8/src/compiler/pipeline-statistics.h | 4
-rw-r--r--  chromium/v8/src/compiler/pipeline.cc | 268
-rw-r--r--  chromium/v8/src/compiler/pipeline.h | 16
-rw-r--r--  chromium/v8/src/compiler/ppc/code-generator-ppc.cc | 372
-rw-r--r--  chromium/v8/src/compiler/ppc/instruction-codes-ppc.h | 12
-rw-r--r--  chromium/v8/src/compiler/ppc/instruction-scheduler-ppc.cc | 143
-rw-r--r--  chromium/v8/src/compiler/ppc/instruction-selector-ppc.cc | 555
-rw-r--r--  chromium/v8/src/compiler/raw-machine-assembler.cc | 202
-rw-r--r--  chromium/v8/src/compiler/raw-machine-assembler.h | 211
-rw-r--r--  chromium/v8/src/compiler/register-allocator-verifier.cc | 61
-rw-r--r--  chromium/v8/src/compiler/register-allocator-verifier.h | 5
-rw-r--r--  chromium/v8/src/compiler/register-allocator.cc | 1365
-rw-r--r--  chromium/v8/src/compiler/register-allocator.h | 169
-rw-r--r--  chromium/v8/src/compiler/register-configuration.cc | 76
-rw-r--r--  chromium/v8/src/compiler/register-configuration.h | 56
-rw-r--r--  chromium/v8/src/compiler/representation-change.cc | 537
-rw-r--r--  chromium/v8/src/compiler/representation-change.h | 498
-rw-r--r--  chromium/v8/src/compiler/schedule.cc | 14
-rw-r--r--  chromium/v8/src/compiler/schedule.h | 6
-rw-r--r--  chromium/v8/src/compiler/scheduler.cc | 126
-rw-r--r--  chromium/v8/src/compiler/select-lowering.cc | 2
-rw-r--r--  chromium/v8/src/compiler/simplified-lowering.cc | 1480
-rw-r--r--  chromium/v8/src/compiler/simplified-lowering.h | 26
-rw-r--r--  chromium/v8/src/compiler/simplified-operator-reducer.cc | 21
-rw-r--r--  chromium/v8/src/compiler/simplified-operator-reducer.h | 7
-rw-r--r--  chromium/v8/src/compiler/simplified-operator.cc | 23
-rw-r--r--  chromium/v8/src/compiler/simplified-operator.h | 9
-rw-r--r--  chromium/v8/src/compiler/state-values-utils.cc | 2
-rw-r--r--  chromium/v8/src/compiler/type-hint-analyzer.cc | 98
-rw-r--r--  chromium/v8/src/compiler/type-hint-analyzer.h | 51
-rw-r--r--  chromium/v8/src/compiler/type-hints.cc | 83
-rw-r--r--  chromium/v8/src/compiler/type-hints.h | 84
-rw-r--r--  chromium/v8/src/compiler/typer.cc | 564
-rw-r--r--  chromium/v8/src/compiler/typer.h | 23
-rw-r--r--  chromium/v8/src/compiler/verifier.cc | 79
-rw-r--r--  chromium/v8/src/compiler/verifier.h | 6
-rw-r--r--  chromium/v8/src/compiler/wasm-compiler.cc | 2031
-rw-r--r--  chromium/v8/src/compiler/wasm-compiler.h | 190
-rw-r--r--  chromium/v8/src/compiler/wasm-linkage.cc | 282
-rw-r--r--  chromium/v8/src/compiler/x64/code-generator-x64.cc | 513
-rw-r--r--  chromium/v8/src/compiler/x64/instruction-codes-x64.h | 15
-rw-r--r--  chromium/v8/src/compiler/x64/instruction-scheduler-x64.cc | 182
-rw-r--r--  chromium/v8/src/compiler/x64/instruction-selector-x64.cc | 577
-rw-r--r--  chromium/v8/src/compiler/x87/code-generator-x87.cc | 299
-rw-r--r--  chromium/v8/src/compiler/x87/instruction-codes-x87.h | 3
-rw-r--r--  chromium/v8/src/compiler/x87/instruction-scheduler-x87.cc | 26
-rw-r--r--  chromium/v8/src/compiler/x87/instruction-selector-x87.cc | 494
-rw-r--r--  chromium/v8/src/compiler/zone-pool.h | 9
-rw-r--r--  chromium/v8/src/context-measure.cc | 4
-rw-r--r--  chromium/v8/src/context-measure.h | 10
-rw-r--r--  chromium/v8/src/contexts-inl.h | 26
-rw-r--r--  chromium/v8/src/contexts.cc | 104
-rw-r--r--  chromium/v8/src/contexts.h | 129
-rw-r--r--  chromium/v8/src/conversions-inl.h | 8
-rw-r--r--  chromium/v8/src/conversions.cc | 5
-rw-r--r--  chromium/v8/src/counters.h | 20
-rw-r--r--  chromium/v8/src/crankshaft/OWNERS | 7
-rw-r--r--  chromium/v8/src/crankshaft/arm/OWNERS | 1
-rw-r--r--  chromium/v8/src/crankshaft/arm/lithium-arm.cc (renamed from chromium/v8/src/arm/lithium-arm.cc) | 99
-rw-r--r--  chromium/v8/src/crankshaft/arm/lithium-arm.h (renamed from chromium/v8/src/arm/lithium-arm.h) | 136
-rw-r--r--  chromium/v8/src/crankshaft/arm/lithium-codegen-arm.cc (renamed from chromium/v8/src/arm/lithium-codegen-arm.cc) | 378
-rw-r--r--  chromium/v8/src/crankshaft/arm/lithium-codegen-arm.h (renamed from chromium/v8/src/arm/lithium-codegen-arm.h) | 38
-rw-r--r--  chromium/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc (renamed from chromium/v8/src/arm/lithium-gap-resolver-arm.cc) | 4
-rw-r--r--  chromium/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h (renamed from chromium/v8/src/arm/lithium-gap-resolver-arm.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/arm64/OWNERS | 1
-rw-r--r--  chromium/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h (renamed from chromium/v8/src/arm64/delayed-masm-arm64-inl.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/arm64/delayed-masm-arm64.cc (renamed from chromium/v8/src/arm64/delayed-masm-arm64.cc) | 4
-rw-r--r--  chromium/v8/src/crankshaft/arm64/delayed-masm-arm64.h (renamed from chromium/v8/src/arm64/delayed-masm-arm64.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/arm64/lithium-arm64.cc (renamed from chromium/v8/src/arm64/lithium-arm64.cc) | 100
-rw-r--r--  chromium/v8/src/crankshaft/arm64/lithium-arm64.h (renamed from chromium/v8/src/arm64/lithium-arm64.h) | 175
-rw-r--r--  chromium/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc (renamed from chromium/v8/src/arm64/lithium-codegen-arm64.cc) | 383
-rw-r--r--  chromium/v8/src/crankshaft/arm64/lithium-codegen-arm64.h (renamed from chromium/v8/src/arm64/lithium-codegen-arm64.h) | 33
-rw-r--r--  chromium/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc (renamed from chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc) | 6
-rw-r--r--  chromium/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h (renamed from chromium/v8/src/arm64/lithium-gap-resolver-arm64.h) | 13
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-alias-analysis.h (renamed from chromium/v8/src/hydrogen-alias-analysis.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-bce.cc (renamed from chromium/v8/src/hydrogen-bce.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-bce.h (renamed from chromium/v8/src/hydrogen-bce.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-bch.cc (renamed from chromium/v8/src/hydrogen-bch.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-bch.h (renamed from chromium/v8/src/hydrogen-bch.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-canonicalize.cc (renamed from chromium/v8/src/hydrogen-canonicalize.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-canonicalize.h (renamed from chromium/v8/src/hydrogen-canonicalize.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-check-elimination.cc (renamed from chromium/v8/src/hydrogen-check-elimination.cc) | 6
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-check-elimination.h (renamed from chromium/v8/src/hydrogen-check-elimination.h) | 13
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-dce.cc (renamed from chromium/v8/src/hydrogen-dce.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-dce.h (renamed from chromium/v8/src/hydrogen-dce.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-dehoist.cc (renamed from chromium/v8/src/hydrogen-dehoist.cc) | 3
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-dehoist.h (renamed from chromium/v8/src/hydrogen-dehoist.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-environment-liveness.cc (renamed from chromium/v8/src/hydrogen-environment-liveness.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-environment-liveness.h (renamed from chromium/v8/src/hydrogen-environment-liveness.h) | 12
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-escape-analysis.cc (renamed from chromium/v8/src/hydrogen-escape-analysis.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-escape-analysis.h (renamed from chromium/v8/src/hydrogen-escape-analysis.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-flow-engine.h (renamed from chromium/v8/src/hydrogen-flow-engine.h) | 13
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-gvn.cc (renamed from chromium/v8/src/hydrogen-gvn.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-gvn.h (renamed from chromium/v8/src/hydrogen-gvn.h) | 13
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-infer-representation.cc (renamed from chromium/v8/src/hydrogen-infer-representation.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-infer-representation.h (renamed from chromium/v8/src/hydrogen-infer-representation.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-infer-types.cc (renamed from chromium/v8/src/hydrogen-infer-types.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-infer-types.h (renamed from chromium/v8/src/hydrogen-infer-types.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-instructions.cc (renamed from chromium/v8/src/hydrogen-instructions.cc) | 86
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-instructions.h (renamed from chromium/v8/src/hydrogen-instructions.h) | 442
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-load-elimination.cc (renamed from chromium/v8/src/hydrogen-load-elimination.cc) | 9
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-load-elimination.h (renamed from chromium/v8/src/hydrogen-load-elimination.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-mark-deoptimize.cc (renamed from chromium/v8/src/hydrogen-mark-deoptimize.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-mark-deoptimize.h (renamed from chromium/v8/src/hydrogen-mark-deoptimize.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-mark-unreachable.cc (renamed from chromium/v8/src/hydrogen-mark-unreachable.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-mark-unreachable.h (renamed from chromium/v8/src/hydrogen-mark-unreachable.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-osr.cc (renamed from chromium/v8/src/hydrogen-osr.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-osr.h (renamed from chromium/v8/src/hydrogen-osr.h) | 13
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-range-analysis.cc (renamed from chromium/v8/src/hydrogen-range-analysis.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-range-analysis.h (renamed from chromium/v8/src/hydrogen-range-analysis.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-redundant-phi.cc (renamed from chromium/v8/src/hydrogen-redundant-phi.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-redundant-phi.h (renamed from chromium/v8/src/hydrogen-redundant-phi.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-removable-simulates.cc (renamed from chromium/v8/src/hydrogen-removable-simulates.cc) | 7
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-removable-simulates.h (renamed from chromium/v8/src/hydrogen-removable-simulates.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-representation-changes.cc (renamed from chromium/v8/src/hydrogen-representation-changes.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-representation-changes.h (renamed from chromium/v8/src/hydrogen-representation-changes.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-sce.cc (renamed from chromium/v8/src/hydrogen-sce.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-sce.h (renamed from chromium/v8/src/hydrogen-sce.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-store-elimination.cc (renamed from chromium/v8/src/hydrogen-store-elimination.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-store-elimination.h (renamed from chromium/v8/src/hydrogen-store-elimination.h) | 13
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-types.cc (renamed from chromium/v8/src/hydrogen-types.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-types.h (renamed from chromium/v8/src/hydrogen-types.h) | 9
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-uint32-analysis.cc (renamed from chromium/v8/src/hydrogen-uint32-analysis.cc) | 2
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen-uint32-analysis.h (renamed from chromium/v8/src/hydrogen-uint32-analysis.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen.cc (renamed from chromium/v8/src/hydrogen.cc) | 963
-rw-r--r--  chromium/v8/src/crankshaft/hydrogen.h (renamed from chromium/v8/src/hydrogen.h) | 99
-rw-r--r--  chromium/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc (renamed from chromium/v8/src/ia32/lithium-codegen-ia32.cc) | 373
-rw-r--r--  chromium/v8/src/crankshaft/ia32/lithium-codegen-ia32.h (renamed from chromium/v8/src/ia32/lithium-codegen-ia32.h) | 38
-rw-r--r--  chromium/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc (renamed from chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc) | 52
-rw-r--r--  chromium/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h (renamed from chromium/v8/src/ia32/lithium-gap-resolver-ia32.h) | 15
-rw-r--r--  chromium/v8/src/crankshaft/ia32/lithium-ia32.cc (renamed from chromium/v8/src/ia32/lithium-ia32.cc) | 103
-rw-r--r--  chromium/v8/src/crankshaft/ia32/lithium-ia32.h (renamed from chromium/v8/src/ia32/lithium-ia32.h) | 138
-rw-r--r--  chromium/v8/src/crankshaft/lithium-allocator-inl.h (renamed from chromium/v8/src/lithium-allocator-inl.h) | 27
-rw-r--r--  chromium/v8/src/crankshaft/lithium-allocator.cc (renamed from chromium/v8/src/lithium-allocator.cc) | 105
-rw-r--r--  chromium/v8/src/crankshaft/lithium-allocator.h (renamed from chromium/v8/src/lithium-allocator.h) | 17
-rw-r--r--  chromium/v8/src/crankshaft/lithium-codegen.cc (renamed from chromium/v8/src/lithium-codegen.cc) | 102
-rw-r--r--  chromium/v8/src/crankshaft/lithium-codegen.h (renamed from chromium/v8/src/lithium-codegen.h) | 16
-rw-r--r--  chromium/v8/src/crankshaft/lithium-inl.h (renamed from chromium/v8/src/lithium-inl.h) | 28
-rw-r--r--  chromium/v8/src/crankshaft/lithium.cc (renamed from chromium/v8/src/lithium.cc) | 108
-rw-r--r--  chromium/v8/src/crankshaft/lithium.h (renamed from chromium/v8/src/lithium.h) | 12
-rw-r--r--  chromium/v8/src/crankshaft/mips/OWNERS | 6
-rw-r--r--  chromium/v8/src/crankshaft/mips/lithium-codegen-mips.cc (renamed from chromium/v8/src/mips/lithium-codegen-mips.cc) | 450
-rw-r--r--  chromium/v8/src/crankshaft/mips/lithium-codegen-mips.h (renamed from chromium/v8/src/mips/lithium-codegen-mips.h) | 37
-rw-r--r--  chromium/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc (renamed from chromium/v8/src/mips/lithium-gap-resolver-mips.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h (renamed from chromium/v8/src/mips/lithium-gap-resolver-mips.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/mips/lithium-mips.cc (renamed from chromium/v8/src/mips/lithium-mips.cc) | 99
-rw-r--r--  chromium/v8/src/crankshaft/mips/lithium-mips.h (renamed from chromium/v8/src/mips/lithium-mips.h) | 136
-rw-r--r--  chromium/v8/src/crankshaft/mips64/OWNERS | 6
-rw-r--r--  chromium/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc (renamed from chromium/v8/src/mips64/lithium-codegen-mips64.cc) | 388
-rw-r--r--  chromium/v8/src/crankshaft/mips64/lithium-codegen-mips64.h (renamed from chromium/v8/src/mips64/lithium-codegen-mips64.h) | 37
-rw-r--r--  chromium/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc (renamed from chromium/v8/src/mips64/lithium-gap-resolver-mips64.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h (renamed from chromium/v8/src/mips64/lithium-gap-resolver-mips64.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/mips64/lithium-mips64.cc (renamed from chromium/v8/src/mips64/lithium-mips64.cc) | 99
-rw-r--r--  chromium/v8/src/crankshaft/mips64/lithium-mips64.h (renamed from chromium/v8/src/mips64/lithium-mips64.h) | 136
-rw-r--r--  chromium/v8/src/crankshaft/ppc/OWNERS | 5
-rw-r--r--  chromium/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc (renamed from chromium/v8/src/ppc/lithium-codegen-ppc.cc) | 404
-rw-r--r--  chromium/v8/src/crankshaft/ppc/lithium-codegen-ppc.h (renamed from chromium/v8/src/ppc/lithium-codegen-ppc.h) | 41
-rw-r--r--  chromium/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc (renamed from chromium/v8/src/ppc/lithium-gap-resolver-ppc.cc) | 5
-rw-r--r--  chromium/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h (renamed from chromium/v8/src/ppc/lithium-gap-resolver-ppc.h) | 12
-rw-r--r--  chromium/v8/src/crankshaft/ppc/lithium-ppc.cc (renamed from chromium/v8/src/ppc/lithium-ppc.cc) | 99
-rw-r--r--  chromium/v8/src/crankshaft/ppc/lithium-ppc.h (renamed from chromium/v8/src/ppc/lithium-ppc.h) | 133
-rw-r--r--  chromium/v8/src/crankshaft/typing.cc (renamed from chromium/v8/src/typing.cc) | 79
-rw-r--r--  chromium/v8/src/crankshaft/typing.h (renamed from chromium/v8/src/typing.h) | 18
-rw-r--r--  chromium/v8/src/crankshaft/unique.h (renamed from chromium/v8/src/unique.h) | 10
-rw-r--r--  chromium/v8/src/crankshaft/x64/lithium-codegen-x64.cc (renamed from chromium/v8/src/x64/lithium-codegen-x64.cc) | 593
-rw-r--r--  chromium/v8/src/crankshaft/x64/lithium-codegen-x64.h (renamed from chromium/v8/src/x64/lithium-codegen-x64.h) | 37
-rw-r--r--  chromium/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc (renamed from chromium/v8/src/x64/lithium-gap-resolver-x64.cc) | 39
-rw-r--r--  chromium/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h (renamed from chromium/v8/src/x64/lithium-gap-resolver-x64.h) | 11
-rw-r--r--  chromium/v8/src/crankshaft/x64/lithium-x64.cc (renamed from chromium/v8/src/x64/lithium-x64.cc) | 98
-rw-r--r--  chromium/v8/src/crankshaft/x64/lithium-x64.h (renamed from chromium/v8/src/x64/lithium-x64.h) | 135
-rw-r--r--  chromium/v8/src/crankshaft/x87/OWNERS | 1
-rw-r--r--  chromium/v8/src/crankshaft/x87/lithium-codegen-x87.cc (renamed from chromium/v8/src/x87/lithium-codegen-x87.cc) | 433
-rw-r--r--  chromium/v8/src/crankshaft/x87/lithium-codegen-x87.h (renamed from chromium/v8/src/x87/lithium-codegen-x87.h) | 37
-rw-r--r--  chromium/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc (renamed from chromium/v8/src/x87/lithium-gap-resolver-x87.cc) | 49
-rw-r--r--  chromium/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h (renamed from chromium/v8/src/x87/lithium-gap-resolver-x87.h) | 15
-rw-r--r--  chromium/v8/src/crankshaft/x87/lithium-x87.cc (renamed from chromium/v8/src/x87/lithium-x87.cc) | 103
-rw-r--r--  chromium/v8/src/crankshaft/x87/lithium-x87.h (renamed from chromium/v8/src/x87/lithium-x87.h) | 138
-rw-r--r--  chromium/v8/src/d8.cc | 99
-rw-r--r--  chromium/v8/src/d8.gyp | 16
-rw-r--r--  chromium/v8/src/d8.js | 28
-rw-r--r--  chromium/v8/src/date.cc | 16
-rw-r--r--  chromium/v8/src/date.h | 11
-rw-r--r--  chromium/v8/src/date.js | 885
-rw-r--r--  chromium/v8/src/dateparser-inl.h | 19
-rw-r--r--  chromium/v8/src/dateparser.cc | 11
-rw-r--r--  chromium/v8/src/dateparser.h | 5
-rw-r--r--  chromium/v8/src/debug/arm/debug-arm.cc | 32
-rw-r--r--  chromium/v8/src/debug/arm64/debug-arm64.cc | 37
-rw-r--r--  chromium/v8/src/debug/debug-evaluate.cc | 208
-rw-r--r--  chromium/v8/src/debug/debug-evaluate.h | 28
-rw-r--r--  chromium/v8/src/debug/debug-frames.cc | 9
-rw-r--r--  chromium/v8/src/debug/debug-frames.h | 1
-rw-r--r--  chromium/v8/src/debug/debug-scopes.cc | 127
-rw-r--r--  chromium/v8/src/debug/debug-scopes.h | 27
-rw-r--r--  chromium/v8/src/debug/debug.cc | 669
-rw-r--r--  chromium/v8/src/debug/debug.h | 136
-rw-r--r--  chromium/v8/src/debug/debug.js | 101
-rw-r--r--  chromium/v8/src/debug/ia32/debug-ia32.cc | 38
-rw-r--r--  chromium/v8/src/debug/liveedit.cc | 192
-rw-r--r--  chromium/v8/src/debug/liveedit.h | 14
-rw-r--r--  chromium/v8/src/debug/liveedit.js | 45
-rw-r--r--  chromium/v8/src/debug/mips/OWNERS | 1
-rw-r--r--  chromium/v8/src/debug/mips/debug-mips.cc | 31
-rw-r--r--  chromium/v8/src/debug/mips64/OWNERS | 1
-rw-r--r--  chromium/v8/src/debug/mips64/debug-mips64.cc | 31
-rw-r--r--  chromium/v8/src/debug/mirrors.js | 218
-rw-r--r--  chromium/v8/src/debug/ppc/debug-ppc.cc | 32
-rw-r--r--  chromium/v8/src/debug/x64/debug-x64.cc | 39
-rw-r--r--  chromium/v8/src/debug/x87/debug-x87.cc | 38
-rw-r--r--  chromium/v8/src/deoptimizer.cc | 401
-rw-r--r--  chromium/v8/src/deoptimizer.h | 39
-rw-r--r--  chromium/v8/src/disassembler.cc | 10
-rw-r--r--  chromium/v8/src/disassembler.h | 3
-rw-r--r--  chromium/v8/src/diy-fp.h | 3
-rw-r--r--  chromium/v8/src/double.h | 3
-rw-r--r--  chromium/v8/src/dtoa.h | 3
-rw-r--r--  chromium/v8/src/effects.h | 3
-rw-r--r--  chromium/v8/src/elements-kind.h | 3
-rw-r--r--  chromium/v8/src/elements.cc | 385
-rw-r--r--  chromium/v8/src/elements.h | 62
-rw-r--r--  chromium/v8/src/execution.cc | 73
-rw-r--r--  chromium/v8/src/execution.h | 24
-rw-r--r--  chromium/v8/src/extensions/externalize-string-extension.h | 3
-rw-r--r--  chromium/v8/src/extensions/free-buffer-extension.h | 3
-rw-r--r--  chromium/v8/src/extensions/gc-extension.h | 3
-rw-r--r--  chromium/v8/src/extensions/statistics-extension.h | 3
-rw-r--r--  chromium/v8/src/extensions/trigger-failure-extension.h | 3
-rw-r--r--  chromium/v8/src/factory.cc | 375
-rw-r--r--  chromium/v8/src/factory.h | 71
-rw-r--r--  chromium/v8/src/fast-dtoa.h | 3
-rw-r--r--  chromium/v8/src/field-index-inl.h | 6
-rw-r--r--  chromium/v8/src/field-index.h | 5
-rw-r--r--  chromium/v8/src/fixed-dtoa.h | 3
-rw-r--r--  chromium/v8/src/flag-definitions.h | 147
-rw-r--r--  chromium/v8/src/flags.h | 3
-rw-r--r--  chromium/v8/src/frames-inl.h | 7
-rw-r--r--  chromium/v8/src/frames.cc | 106
-rw-r--r--  chromium/v8/src/frames.h | 168
-rw-r--r--  chromium/v8/src/full-codegen/arm/full-codegen-arm.cc | 990
-rw-r--r--  chromium/v8/src/full-codegen/arm64/full-codegen-arm64.cc | 996
-rw-r--r--  chromium/v8/src/full-codegen/full-codegen.cc | 212
-rw-r--r--  chromium/v8/src/full-codegen/full-codegen.h | 220
-rw-r--r--  chromium/v8/src/full-codegen/ia32/full-codegen-ia32.cc | 1023
-rw-r--r--  chromium/v8/src/full-codegen/mips/OWNERS | 1
-rw-r--r--  chromium/v8/src/full-codegen/mips/full-codegen-mips.cc | 1021
-rw-r--r--  chromium/v8/src/full-codegen/mips64/OWNERS | 1
-rw-r--r--  chromium/v8/src/full-codegen/mips64/full-codegen-mips64.cc | 1006
-rw-r--r--  chromium/v8/src/full-codegen/ppc/full-codegen-ppc.cc | 1000
-rw-r--r--  chromium/v8/src/full-codegen/x64/full-codegen-x64.cc | 1023
-rw-r--r--  chromium/v8/src/full-codegen/x87/full-codegen-x87.cc | 1024
-rw-r--r--  chromium/v8/src/futex-emulation.cc | 3
-rw-r--r--  chromium/v8/src/futex-emulation.h | 4
-rw-r--r--  chromium/v8/src/gdb-jit.cc | 4
-rw-r--r--  chromium/v8/src/global-handles.cc | 130
-rw-r--r--  chromium/v8/src/global-handles.h | 22
-rw-r--r--  chromium/v8/src/globals.h | 89
-rw-r--r--  chromium/v8/src/handles-inl.h | 54
-rw-r--r--  chromium/v8/src/handles.cc | 46
-rw-r--r--  chromium/v8/src/handles.h | 54
-rw-r--r--  chromium/v8/src/harmony-array-includes.js | 108
-rw-r--r--  chromium/v8/src/harmony-array.js | 300
-rw-r--r--  chromium/v8/src/harmony-concat-spreadable.js | 19
-rw-r--r--  chromium/v8/src/harmony-object-observe.js | 14
-rw-r--r--  chromium/v8/src/harmony-reflect.js | 20
-rw-r--r--  chromium/v8/src/harmony-regexp.js | 37
-rw-r--r--  chromium/v8/src/harmony-sharedarraybuffer.js | 57
-rw-r--r--  chromium/v8/src/harmony-tostring.js | 19
-rw-r--r--  chromium/v8/src/harmony-typedarray.js | 414
-rw-r--r--  chromium/v8/src/hashmap.h | 3
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker.h | 4
-rw-r--r--  chromium/v8/src/heap/gc-idle-time-handler.cc | 4
-rw-r--r--  chromium/v8/src/heap/gc-tracer.cc | 330
-rw-r--r--  chromium/v8/src/heap/gc-tracer.h | 102
-rw-r--r--  chromium/v8/src/heap/heap-inl.h | 62
-rw-r--r--  chromium/v8/src/heap/heap.cc | 760
-rw-r--r--  chromium/v8/src/heap/heap.h | 464
-rw-r--r--  chromium/v8/src/heap/incremental-marking-inl.h | 89
-rw-r--r--  chromium/v8/src/heap/incremental-marking-job.cc | 8
-rw-r--r--  chromium/v8/src/heap/incremental-marking-job.h | 4
-rw-r--r--  chromium/v8/src/heap/incremental-marking.cc | 311
-rw-r--r--  chromium/v8/src/heap/incremental-marking.h | 56
-rw-r--r--  chromium/v8/src/heap/mark-compact-inl.h | 71
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc | 2728
-rw-r--r--  chromium/v8/src/heap/mark-compact.h | 220
-rw-r--r--  chromium/v8/src/heap/memory-reducer.cc | 87
-rw-r--r--  chromium/v8/src/heap/memory-reducer.h | 24
-rw-r--r--  chromium/v8/src/heap/object-stats.cc | 6
-rw-r--r--  chromium/v8/src/heap/objects-visiting-inl.h | 370
-rw-r--r--  chromium/v8/src/heap/objects-visiting.cc | 185
-rw-r--r--  chromium/v8/src/heap/objects-visiting.h | 125
-rw-r--r--  chromium/v8/src/heap/scavenge-job.cc | 6
-rw-r--r--  chromium/v8/src/heap/scavenge-job.h | 4
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h | 6
-rw-r--r--  chromium/v8/src/heap/scavenger.cc | 54
-rw-r--r--  chromium/v8/src/heap/scavenger.h | 6
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h | 94
-rw-r--r--  chromium/v8/src/heap/spaces.cc | 656
-rw-r--r--  chromium/v8/src/heap/spaces.h | 722
-rw-r--r--  chromium/v8/src/heap/store-buffer-inl.h | 4
-rw-r--r--  chromium/v8/src/heap/store-buffer.cc | 100
-rw-r--r--  chromium/v8/src/heap/store-buffer.h | 19
-rw-r--r--  chromium/v8/src/i18n.cc | 15
-rw-r--r--  chromium/v8/src/i18n.h | 3
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32-inl.h | 31
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.cc | 56
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.h | 221
-rw-r--r--  chromium/v8/src/ia32/builtins-ia32.cc | 1977
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.cc | 657
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.h | 19
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.cc | 40
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.h | 5
-rw-r--r--  chromium/v8/src/ia32/deoptimizer-ia32.cc | 66
-rw-r--r--  chromium/v8/src/ia32/disasm-ia32.cc | 8
-rw-r--r--  chromium/v8/src/ia32/frames-ia32.h | 3
-rw-r--r--  chromium/v8/src/ia32/interface-descriptors-ia32.cc | 89
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.cc | 398
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.h | 451
-rw-r--r--  chromium/v8/src/ia32/simulator-ia32.h | 16
-rw-r--r--  chromium/v8/src/ic/access-compiler.cc | 6
-rw-r--r--  chromium/v8/src/ic/access-compiler.h | 6
-rw-r--r--  chromium/v8/src/ic/arm/access-compiler-arm.cc | 1
-rw-r--r--  chromium/v8/src/ic/arm/handler-compiler-arm.cc | 53
-rw-r--r--  chromium/v8/src/ic/arm/ic-arm.cc | 127
-rw-r--r--  chromium/v8/src/ic/arm/ic-compiler-arm.cc | 104
-rw-r--r--  chromium/v8/src/ic/arm64/access-compiler-arm64.cc | 1
-rw-r--r--  chromium/v8/src/ic/arm64/handler-compiler-arm64.cc | 53
-rw-r--r--  chromium/v8/src/ic/arm64/ic-arm64.cc | 64
-rw-r--r--  chromium/v8/src/ic/arm64/ic-compiler-arm64.cc | 108
-rw-r--r--  chromium/v8/src/ic/call-optimization.h | 4
-rw-r--r--  chromium/v8/src/ic/handler-compiler.cc | 39
-rw-r--r--  chromium/v8/src/ic/handler-compiler.h | 9
-rw-r--r--  chromium/v8/src/ic/ia32/access-compiler-ia32.cc | 2
-rw-r--r--  chromium/v8/src/ic/ia32/handler-compiler-ia32.cc | 81
-rw-r--r--  chromium/v8/src/ic/ia32/ic-compiler-ia32.cc | 99
-rw-r--r--  chromium/v8/src/ic/ia32/ic-ia32.cc | 108
-rw-r--r--  chromium/v8/src/ic/ia32/stub-cache-ia32.cc | 2
-rw-r--r--  chromium/v8/src/ic/ic-compiler.cc | 135
-rw-r--r--  chromium/v8/src/ic/ic-compiler.h | 30
-rw-r--r--  chromium/v8/src/ic/ic-inl.h | 36
-rw-r--r--  chromium/v8/src/ic/ic-state.cc | 76
-rw-r--r--  chromium/v8/src/ic/ic-state.h | 50
-rw-r--r--  chromium/v8/src/ic/ic.cc | 613
-rw-r--r--  chromium/v8/src/ic/ic.h | 50
-rw-r--r--  chromium/v8/src/ic/mips/OWNERS | 1
-rw-r--r--  chromium/v8/src/ic/mips/access-compiler-mips.cc | 1
-rw-r--r--  chromium/v8/src/ic/mips/handler-compiler-mips.cc | 53
-rw-r--r--  chromium/v8/src/ic/mips/ic-compiler-mips.cc | 110
-rw-r--r--  chromium/v8/src/ic/mips/ic-mips.cc | 170
-rw-r--r--  chromium/v8/src/ic/mips64/OWNERS | 1
-rw-r--r--  chromium/v8/src/ic/mips64/access-compiler-mips64.cc | 1
-rw-r--r--  chromium/v8/src/ic/mips64/handler-compiler-mips64.cc | 54
-rw-r--r--  chromium/v8/src/ic/mips64/ic-compiler-mips64.cc | 110
-rw-r--r--  chromium/v8/src/ic/mips64/ic-mips64.cc | 171
-rw-r--r--  chromium/v8/src/ic/ppc/access-compiler-ppc.cc | 1
-rw-r--r--  chromium/v8/src/ic/ppc/handler-compiler-ppc.cc | 53
-rw-r--r--  chromium/v8/src/ic/ppc/ic-compiler-ppc.cc | 107
-rw-r--r--  chromium/v8/src/ic/ppc/ic-ppc.cc | 135
-rw-r--r--  chromium/v8/src/ic/stub-cache.h | 4
-rw-r--r--  chromium/v8/src/ic/x64/access-compiler-x64.cc | 2
-rw-r--r--  chromium/v8/src/ic/x64/handler-compiler-x64.cc | 70
-rw-r--r--  chromium/v8/src/ic/x64/ic-compiler-x64.cc | 106
-rw-r--r--  chromium/v8/src/ic/x64/ic-x64.cc | 80
-rw-r--r--  chromium/v8/src/ic/x87/access-compiler-x87.cc | 2
-rw-r--r--  chromium/v8/src/ic/x87/handler-compiler-x87.cc | 81
-rw-r--r--  chromium/v8/src/ic/x87/ic-compiler-x87.cc | 99
-rw-r--r--  chromium/v8/src/ic/x87/ic-x87.cc | 108
-rw-r--r--  chromium/v8/src/ic/x87/stub-cache-x87.cc | 2
-rw-r--r--  chromium/v8/src/icu_util.h | 3
-rw-r--r--  chromium/v8/src/identity-map.cc | 47
-rw-r--r--  chromium/v8/src/identity-map.h | 30
-rw-r--r--  chromium/v8/src/interface-descriptors.cc | 187
-rw-r--r--  chromium/v8/src/interface-descriptors.h | 149
-rw-r--r--  chromium/v8/src/interpreter/OWNERS | 5
-rw-r--r--  chromium/v8/src/interpreter/bytecode-array-builder.cc | 1386
-rw-r--r--  chromium/v8/src/interpreter/bytecode-array-builder.h | 316
-rw-r--r--  chromium/v8/src/interpreter/bytecode-array-iterator.cc | 67
-rw-r--r--  chromium/v8/src/interpreter/bytecode-array-iterator.h | 11
-rw-r--r--  chromium/v8/src/interpreter/bytecode-generator.cc | 1989
-rw-r--r--  chromium/v8/src/interpreter/bytecode-generator.h | 114
-rw-r--r--  chromium/v8/src/interpreter/bytecode-register-allocator.cc | 72
-rw-r--r--  chromium/v8/src/interpreter/bytecode-register-allocator.h | 49
-rw-r--r--  chromium/v8/src/interpreter/bytecode-traits.h | 180
-rw-r--r--  chromium/v8/src/interpreter/bytecodes.cc | 328
-rw-r--r--  chromium/v8/src/interpreter/bytecodes.h | 381
-rw-r--r--  chromium/v8/src/interpreter/constant-array-builder.cc | 174
-rw-r--r--  chromium/v8/src/interpreter/constant-array-builder.h | 97
-rw-r--r--  chromium/v8/src/interpreter/control-flow-builders.cc | 142
-rw-r--r--  chromium/v8/src/interpreter/control-flow-builders.h | 151
-rw-r--r--  chromium/v8/src/interpreter/interpreter.cc | 1383
-rw-r--r--  chromium/v8/src/interpreter/interpreter.h | 47
-rw-r--r--  chromium/v8/src/isolate-inl.h | 4
-rw-r--r--  chromium/v8/src/isolate.cc | 205
-rw-r--r--  chromium/v8/src/isolate.h | 67
-rw-r--r--  chromium/v8/src/js/OWNERS | 11
-rw-r--r--  chromium/v8/src/js/array-iterator.js (renamed from chromium/v8/src/array-iterator.js) | 48
-rw-r--r--  chromium/v8/src/js/array.js (renamed from chromium/v8/src/array.js) | 532
-rw-r--r--  chromium/v8/src/js/arraybuffer.js (renamed from chromium/v8/src/arraybuffer.js) | 70
-rw-r--r--  chromium/v8/src/js/collection-iterator.js (renamed from chromium/v8/src/collection-iterator.js) | 31
-rw-r--r--  chromium/v8/src/js/collection.js (renamed from chromium/v8/src/collection.js) | 62
-rw-r--r--  chromium/v8/src/js/generator.js (renamed from chromium/v8/src/generator.js) | 20
-rw-r--r--  chromium/v8/src/js/harmony-atomics.js (renamed from chromium/v8/src/harmony-atomics.js) | 36
-rw-r--r--  chromium/v8/src/js/harmony-object-observe.js | 17
-rw-r--r--  chromium/v8/src/js/harmony-reflect.js | 37
-rw-r--r--  chromium/v8/src/js/harmony-regexp.js | 60
-rw-r--r--  chromium/v8/src/js/harmony-sharedarraybuffer.js | 31
-rw-r--r--  chromium/v8/src/js/harmony-simd.js (renamed from chromium/v8/src/harmony-simd.js) | 122
-rw-r--r--  chromium/v8/src/js/harmony-species.js | 60
-rw-r--r--  chromium/v8/src/js/harmony-unicode-regexps.js | 39
-rw-r--r--  chromium/v8/src/js/i18n.js (renamed from chromium/v8/src/i18n.js) | 327
-rw-r--r--  chromium/v8/src/js/iterator-prototype.js (renamed from chromium/v8/src/iterator-prototype.js) | 5
-rw-r--r--  chromium/v8/src/js/json.js (renamed from chromium/v8/src/json.js) | 130
-rw-r--r--  chromium/v8/src/js/macros.py (renamed from chromium/v8/src/macros.py) | 193
-rw-r--r--  chromium/v8/src/js/math.js (renamed from chromium/v8/src/math.js) | 90
-rw-r--r--  chromium/v8/src/js/messages.js (renamed from chromium/v8/src/messages.js) | 347
-rw-r--r--  chromium/v8/src/js/object-observe.js (renamed from chromium/v8/src/object-observe.js) | 77
-rw-r--r--  chromium/v8/src/js/prologue.js (renamed from chromium/v8/src/prologue.js) | 95
-rw-r--r--  chromium/v8/src/js/promise-extra.js | 26
-rw-r--r--  chromium/v8/src/js/promise.js (renamed from chromium/v8/src/promise.js) | 318
-rw-r--r--  chromium/v8/src/js/proxy.js | 69
-rw-r--r--  chromium/v8/src/js/regexp.js | 581
-rw-r--r--  chromium/v8/src/js/runtime.js | 191
-rw-r--r--  chromium/v8/src/js/spread.js (renamed from chromium/v8/src/harmony-spread.js) | 5
-rw-r--r--  chromium/v8/src/js/string-iterator.js (renamed from chromium/v8/src/string-iterator.js) | 10
-rw-r--r--  chromium/v8/src/js/string.js (renamed from chromium/v8/src/string.js) | 260
-rw-r--r--  chromium/v8/src/js/symbol.js (renamed from chromium/v8/src/symbol.js) | 15
-rw-r--r--  chromium/v8/src/js/templates.js (renamed from chromium/v8/src/templates.js) | 10
-rw-r--r--  chromium/v8/src/js/typedarray.js | 969
-rw-r--r--  chromium/v8/src/js/uri.js (renamed from chromium/v8/src/uri.js) | 6
-rw-r--r--  chromium/v8/src/js/v8natives.js (renamed from chromium/v8/src/v8natives.js) | 914
-rw-r--r--  chromium/v8/src/js/weak-collection.js (renamed from chromium/v8/src/weak-collection.js) | 50
-rw-r--r--  chromium/v8/src/json-stringifier.h | 20
-rw-r--r--  chromium/v8/src/key-accumulator.cc | 315
-rw-r--r--  chromium/v8/src/key-accumulator.h | 93
-rw-r--r--  chromium/v8/src/layout-descriptor-inl.h | 8
-rw-r--r--  chromium/v8/src/layout-descriptor.h | 4
-rw-r--r--  chromium/v8/src/libplatform/default-platform.cc | 28
-rw-r--r--  chromium/v8/src/libplatform/default-platform.h | 30
-rw-r--r--  chromium/v8/src/libplatform/task-queue.cc | 3
-rw-r--r--  chromium/v8/src/libplatform/task-queue.h | 3
-rw-r--r--  chromium/v8/src/libplatform/worker-thread.cc | 3
-rw-r--r--  chromium/v8/src/libplatform/worker-thread.h | 3
-rw-r--r--  chromium/v8/src/list-inl.h | 3
-rw-r--r--  chromium/v8/src/list.h | 3
-rw-r--r--  chromium/v8/src/locked-queue-inl.h | 91
-rw-r--r--  chromium/v8/src/locked-queue.h | 43
-rw-r--r--  chromium/v8/src/log-inl.h | 17
-rw-r--r--  chromium/v8/src/log-utils.h | 5
-rw-r--r--  chromium/v8/src/log.cc | 4
-rw-r--r--  chromium/v8/src/log.h | 3
-rw-r--r--  chromium/v8/src/lookup-inl.h | 142
-rw-r--r--  chromium/v8/src/lookup.cc | 241
-rw-r--r--  chromium/v8/src/lookup.h | 47
-rw-r--r--  chromium/v8/src/machine-type.cc | 75
-rw-r--r--  chromium/v8/src/machine-type.h | 204
-rw-r--r--  chromium/v8/src/macro-assembler.h | 3
-rw-r--r--  chromium/v8/src/messages.cc | 132
-rw-r--r--  chromium/v8/src/messages.h | 203
-rw-r--r--  chromium/v8/src/mips/OWNERS | 1
-rw-r--r--  chromium/v8/src/mips/assembler-mips-inl.h | 99
-rw-r--r--  chromium/v8/src/mips/assembler-mips.cc | 791
-rw-r--r--  chromium/v8/src/mips/assembler-mips.h | 590
-rw-r--r--  chromium/v8/src/mips/builtins-mips.cc | 1988
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.cc | 566
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.h | 12
-rw-r--r--  chromium/v8/src/mips/codegen-mips.cc | 57
-rw-r--r--  chromium/v8/src/mips/codegen-mips.h | 5
-rw-r--r--  chromium/v8/src/mips/constants-mips.cc | 36
-rw-r--r--  chromium/v8/src/mips/constants-mips.h | 615
-rw-r--r--  chromium/v8/src/mips/cpu-mips.cc | 11
-rw-r--r--  chromium/v8/src/mips/deoptimizer-mips.cc | 36
-rw-r--r--  chromium/v8/src/mips/disasm-mips.cc | 133
-rw-r--r--  chromium/v8/src/mips/frames-mips.h | 3
-rw-r--r--  chromium/v8/src/mips/interface-descriptors-mips.cc | 83
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.cc | 2454
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.h | 277
-rw-r--r--  chromium/v8/src/mips/simulator-mips.cc | 549
-rw-r--r--  chromium/v8/src/mips/simulator-mips.h | 64
-rw-r--r--  chromium/v8/src/mips64/OWNERS | 1
-rw-r--r--  chromium/v8/src/mips64/assembler-mips64-inl.h | 95
-rw-r--r--  chromium/v8/src/mips64/assembler-mips64.cc | 678
-rw-r--r--  chromium/v8/src/mips64/assembler-mips64.h | 602
-rw-r--r--  chromium/v8/src/mips64/builtins-mips64.cc | 1987
-rw-r--r--  chromium/v8/src/mips64/code-stubs-mips64.cc | 574
-rw-r--r--  chromium/v8/src/mips64/code-stubs-mips64.h | 12
-rw-r--r--  chromium/v8/src/mips64/codegen-mips64.cc | 57
-rw-r--r--  chromium/v8/src/mips64/codegen-mips64.h | 5
-rw-r--r--  chromium/v8/src/mips64/constants-mips64.cc | 36
-rw-r--r--  chromium/v8/src/mips64/constants-mips64.h | 684
-rw-r--r--  chromium/v8/src/mips64/cpu-mips64.cc | 13
-rw-r--r--  chromium/v8/src/mips64/deoptimizer-mips64.cc | 36
-rw-r--r--  chromium/v8/src/mips64/disasm-mips64.cc | 178
-rw-r--r--  chromium/v8/src/mips64/frames-mips64.h | 3
-rw-r--r--  chromium/v8/src/mips64/interface-descriptors-mips64.cc | 83
-rw-r--r--  chromium/v8/src/mips64/macro-assembler-mips64.cc | 2442
-rw-r--r--  chromium/v8/src/mips64/macro-assembler-mips64.h | 282
-rw-r--r--  chromium/v8/src/mips64/simulator-mips64.cc | 706
-rw-r--r--  chromium/v8/src/mips64/simulator-mips64.h | 87
-rw-r--r--  chromium/v8/src/objects-body-descriptors-inl.h | 565
-rw-r--r--  chromium/v8/src/objects-body-descriptors.h | 141
-rw-r--r--  chromium/v8/src/objects-debug.cc | 93
-rw-r--r--  chromium/v8/src/objects-inl.h | 830
-rw-r--r--  chromium/v8/src/objects-printer.cc | 446
-rw-r--r--  chromium/v8/src/objects.cc | 6160
-rw-r--r--  chromium/v8/src/objects.h | 1603
-rw-r--r--  chromium/v8/src/optimizing-compile-dispatcher.cc | 1
-rw-r--r--  chromium/v8/src/optimizing-compile-dispatcher.h | 4
-rw-r--r--  chromium/v8/src/ostreams.cc | 1
-rw-r--r--  chromium/v8/src/ostreams.h | 2
-rw-r--r--  chromium/v8/src/parsing/OWNERS | 7
-rw-r--r--  chromium/v8/src/parsing/expression-classifier.h (renamed from chromium/v8/src/expression-classifier.h) | 95
-rw-r--r--  chromium/v8/src/parsing/func-name-inferrer.cc (renamed from chromium/v8/src/func-name-inferrer.cc) | 6
-rw-r--r--  chromium/v8/src/parsing/func-name-inferrer.h (renamed from chromium/v8/src/func-name-inferrer.h) | 9
-rw-r--r--  chromium/v8/src/parsing/json-parser.h (renamed from chromium/v8/src/json-parser.h) | 26
-rw-r--r--  chromium/v8/src/parsing/parameter-initializer-rewriter.cc | 88
-rw-r--r--  chromium/v8/src/parsing/parameter-initializer-rewriter.h | 22
-rw-r--r--  chromium/v8/src/parsing/parser-base.h (renamed from chromium/v8/src/preparser.h) | 1835
-rw-r--r--  chromium/v8/src/parsing/parser.cc (renamed from chromium/v8/src/parser.cc) | 2213
-rw-r--r--  chromium/v8/src/parsing/parser.h (renamed from chromium/v8/src/parser.h) | 446
-rw-r--r--  chromium/v8/src/parsing/pattern-rewriter.cc (renamed from chromium/v8/src/pattern-rewriter.cc) | 328
-rw-r--r--  chromium/v8/src/parsing/preparse-data-format.h (renamed from chromium/v8/src/preparse-data-format.h) | 9
-rw-r--r--  chromium/v8/src/parsing/preparse-data.cc (renamed from chromium/v8/src/preparse-data.cc) | 6
-rw-r--r--  chromium/v8/src/parsing/preparse-data.h (renamed from chromium/v8/src/preparse-data.h) | 11
-rw-r--r--  chromium/v8/src/parsing/preparser.cc (renamed from chromium/v8/src/preparser.cc) | 146
-rw-r--r--  chromium/v8/src/parsing/preparser.h | 1175
-rw-r--r--  chromium/v8/src/parsing/rewriter.cc | 403
-rw-r--r--  chromium/v8/src/parsing/rewriter.h (renamed from chromium/v8/src/rewriter.h) | 17
-rw-r--r--  chromium/v8/src/parsing/scanner-character-streams.cc (renamed from chromium/v8/src/scanner-character-streams.cc) | 5
-rw-r--r--  chromium/v8/src/parsing/scanner-character-streams.h (renamed from chromium/v8/src/scanner-character-streams.h) | 53
-rw-r--r--  chromium/v8/src/parsing/scanner.cc (renamed from chromium/v8/src/scanner.cc) | 109
-rw-r--r--  chromium/v8/src/parsing/scanner.h (renamed from chromium/v8/src/scanner.h) | 56
-rw-r--r--  chromium/v8/src/parsing/token.cc (renamed from chromium/v8/src/token.cc) | 3
-rw-r--r--  chromium/v8/src/parsing/token.h (renamed from chromium/v8/src/token.h) | 19
-rw-r--r--  chromium/v8/src/ppc/assembler-ppc-inl.h | 39
-rw-r--r--  chromium/v8/src/ppc/assembler-ppc.cc | 87
-rw-r--r--  chromium/v8/src/ppc/assembler-ppc.h | 429
-rw-r--r--  chromium/v8/src/ppc/builtins-ppc.cc | 1957
-rw-r--r--  chromium/v8/src/ppc/code-stubs-ppc.cc | 620
-rw-r--r--  chromium/v8/src/ppc/code-stubs-ppc.h | 8
-rw-r--r--  chromium/v8/src/ppc/codegen-ppc.cc | 44
-rw-r--r--  chromium/v8/src/ppc/codegen-ppc.h | 6
-rw-r--r--  chromium/v8/src/ppc/constants-ppc.cc | 44
-rw-r--r--  chromium/v8/src/ppc/constants-ppc.h | 73
-rw-r--r--  chromium/v8/src/ppc/deoptimizer-ppc.cc | 46
-rw-r--r--  chromium/v8/src/ppc/disasm-ppc.cc | 63
-rw-r--r--  chromium/v8/src/ppc/frames-ppc.h | 6
-rw-r--r--  chromium/v8/src/ppc/interface-descriptors-ppc.cc | 83
-rw-r--r--  chromium/v8/src/ppc/macro-assembler-ppc.cc | 627
-rw-r--r--  chromium/v8/src/ppc/macro-assembler-ppc.h | 177
-rw-r--r--  chromium/v8/src/ppc/simulator-ppc.cc | 303
-rw-r--r--  chromium/v8/src/ppc/simulator-ppc.h | 56
-rw-r--r--  chromium/v8/src/profiler/allocation-tracker.h | 3
-rw-r--r--  chromium/v8/src/profiler/circular-queue-inl.h | 3
-rw-r--r--  chromium/v8/src/profiler/circular-queue.h | 3
-rw-r--r--  chromium/v8/src/profiler/cpu-profiler-inl.h | 5
-rw-r--r--  chromium/v8/src/profiler/cpu-profiler.cc | 19
-rw-r--r--  chromium/v8/src/profiler/cpu-profiler.h | 12
-rw-r--r--  chromium/v8/src/profiler/heap-profiler.cc | 7
-rw-r--r--  chromium/v8/src/profiler/heap-profiler.h | 5
-rw-r--r--  chromium/v8/src/profiler/heap-snapshot-generator-inl.h | 10
-rw-r--r--  chromium/v8/src/profiler/heap-snapshot-generator.cc | 207
-rw-r--r--  chromium/v8/src/profiler/heap-snapshot-generator.h | 13
-rw-r--r--  chromium/v8/src/profiler/profile-generator-inl.h | 8
-rw-r--r--  chromium/v8/src/profiler/profile-generator.cc | 39
-rw-r--r--  chromium/v8/src/profiler/profile-generator.h | 22
-rw-r--r--  chromium/v8/src/profiler/sampler.h | 3
-rw-r--r--  chromium/v8/src/profiler/strings-storage.cc (renamed from chromium/v8/src/strings-storage.cc) | 2
-rw-r--r--  chromium/v8/src/profiler/strings-storage.h (renamed from chromium/v8/src/strings-storage.h) | 12
-rw-r--r--  chromium/v8/src/profiler/unbound-queue-inl.h | 3
-rw-r--r--  chromium/v8/src/profiler/unbound-queue.h | 3
-rw-r--r--  chromium/v8/src/property-descriptor.cc | 300
-rw-r--r--  chromium/v8/src/property-descriptor.h | 123
-rw-r--r--  chromium/v8/src/property-details.h | 52
-rw-r--r--  chromium/v8/src/property.h | 3
-rw-r--r--  chromium/v8/src/prototype.h | 48
-rw-r--r--  chromium/v8/src/proxy.js | 210
-rw-r--r--  chromium/v8/src/regexp.js | 463
-rw-r--r--  chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 145
-rw-r--r--  chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h | 13
-rw-r--r--  chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 129
-rw-r--r--  chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 11
-rw-r--r--  chromium/v8/src/regexp/bytecodes-irregexp.h | 24
-rw-r--r--  chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 135
-rw-r--r--  chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h | 13
-rw-r--r--  chromium/v8/src/regexp/interpreter-irregexp.cc | 77
-rw-r--r--  chromium/v8/src/regexp/interpreter-irregexp.h | 3
-rw-r--r--  chromium/v8/src/regexp/jsregexp-inl.h | 3
-rw-r--r--  chromium/v8/src/regexp/jsregexp.cc | 299
-rw-r--r--  chromium/v8/src/regexp/jsregexp.h | 197
-rw-r--r--  chromium/v8/src/regexp/mips/OWNERS | 1
-rw-r--r--  chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 137
-rw-r--r--  chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 13
-rw-r--r--  chromium/v8/src/regexp/mips64/OWNERS | 1
-rw-r--r--  chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 127
-rw-r--r--  chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 17
-rw-r--r--  chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 130
-rw-r--r--  chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h | 14
-rw-r--r--  chromium/v8/src/regexp/regexp-ast.cc | 337
-rw-r--r--  chromium/v8/src/regexp/regexp-ast.h | 496
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h | 5
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-irregexp.cc | 18
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-irregexp.h | 9
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc | 42
-rw-r--r--  chromium/v8/src/regexp/regexp-macro-assembler-tracer.h | 10
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler.cc14
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler.h21
-rw-r--r--chromium/v8/src/regexp/regexp-parser.cc1180
-rw-r--r--chromium/v8/src/regexp/regexp-parser.h277
-rw-r--r--chromium/v8/src/regexp/regexp-stack.h3
-rw-r--r--chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc137
-rw-r--r--chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h13
-rw-r--r--chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.cc135
-rw-r--r--chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.h13
-rw-r--r--chromium/v8/src/register-configuration.cc162
-rw-r--r--chromium/v8/src/register-configuration.h95
-rw-r--r--chromium/v8/src/rewriter.cc257
-rw-r--r--chromium/v8/src/runtime-profiler.cc10
-rw-r--r--chromium/v8/src/runtime-profiler.h3
-rw-r--r--chromium/v8/src/runtime.js356
-rw-r--r--chromium/v8/src/runtime/runtime-array.cc157
-rw-r--r--chromium/v8/src/runtime/runtime-atomics.cc263
-rw-r--r--chromium/v8/src/runtime/runtime-classes.cc124
-rw-r--r--chromium/v8/src/runtime/runtime-compiler.cc67
-rw-r--r--chromium/v8/src/runtime/runtime-date.cc165
-rw-r--r--chromium/v8/src/runtime/runtime-debug.cc226
-rw-r--r--chromium/v8/src/runtime/runtime-function.cc341
-rw-r--r--chromium/v8/src/runtime/runtime-futex.cc14
-rw-r--r--chromium/v8/src/runtime/runtime-generator.cc8
-rw-r--r--chromium/v8/src/runtime/runtime-i18n.cc9
-rw-r--r--chromium/v8/src/runtime/runtime-internal.cc148
-rw-r--r--chromium/v8/src/runtime/runtime-interpreter.cc83
-rw-r--r--chromium/v8/src/runtime/runtime-json.cc2
-rw-r--r--chromium/v8/src/runtime/runtime-literals.cc46
-rw-r--r--chromium/v8/src/runtime/runtime-liveedit.cc43
-rw-r--r--chromium/v8/src/runtime/runtime-maths.cc62
-rw-r--r--chromium/v8/src/runtime/runtime-numbers.cc27
-rw-r--r--chromium/v8/src/runtime/runtime-object.cc607
-rw-r--r--chromium/v8/src/runtime/runtime-observe.cc25
-rw-r--r--chromium/v8/src/runtime/runtime-proxy.cc167
-rw-r--r--chromium/v8/src/runtime/runtime-regexp.cc263
-rw-r--r--chromium/v8/src/runtime/runtime-scopes.cc263
-rw-r--r--chromium/v8/src/runtime/runtime-simd.cc57
-rw-r--r--chromium/v8/src/runtime/runtime-strings.cc31
-rw-r--r--chromium/v8/src/runtime/runtime-symbol.cc4
-rw-r--r--chromium/v8/src/runtime/runtime-test.cc3
-rw-r--r--chromium/v8/src/runtime/runtime-typedarray.cc67
-rw-r--r--chromium/v8/src/runtime/runtime-utils.h4
-rw-r--r--chromium/v8/src/runtime/runtime.cc26
-rw-r--r--chromium/v8/src/runtime/runtime.h415
-rw-r--r--chromium/v8/src/safepoint-table.h3
-rw-r--r--chromium/v8/src/small-pointer-list.h3
-rw-r--r--chromium/v8/src/snapshot/mksnapshot.cc2
-rw-r--r--chromium/v8/src/snapshot/natives-common.cc7
-rw-r--r--chromium/v8/src/snapshot/natives-external.cc31
-rw-r--r--chromium/v8/src/snapshot/natives.h5
-rw-r--r--chromium/v8/src/snapshot/serialize.cc171
-rw-r--r--chromium/v8/src/snapshot/serialize.h226
-rw-r--r--chromium/v8/src/snapshot/snapshot-common.cc11
-rw-r--r--chromium/v8/src/snapshot/snapshot-source-sink.cc25
-rw-r--r--chromium/v8/src/snapshot/snapshot-source-sink.h7
-rw-r--r--chromium/v8/src/snapshot/snapshot.h6
-rw-r--r--chromium/v8/src/splay-tree-inl.h3
-rw-r--r--chromium/v8/src/splay-tree.h3
-rw-r--r--chromium/v8/src/startup-data-util.cc14
-rw-r--r--chromium/v8/src/string-builder.cc2
-rw-r--r--chromium/v8/src/string-builder.h10
-rw-r--r--chromium/v8/src/string-search.h3
-rw-r--r--chromium/v8/src/string-stream.h3
-rw-r--r--chromium/v8/src/strtod.h3
-rw-r--r--chromium/v8/src/third_party/fdlibm/fdlibm.cc69
-rw-r--r--chromium/v8/src/third_party/fdlibm/fdlibm.h8
-rw-r--r--chromium/v8/src/third_party/fdlibm/fdlibm.js204
-rw-r--r--chromium/v8/src/third_party/vtune/v8vtune.gyp4
-rw-r--r--chromium/v8/src/tracing/trace-event.cc19
-rw-r--r--chromium/v8/src/tracing/trace-event.h535
-rw-r--r--chromium/v8/src/transitions-inl.h28
-rw-r--r--chromium/v8/src/transitions.cc131
-rw-r--r--chromium/v8/src/transitions.h53
-rw-r--r--chromium/v8/src/type-cache.cc24
-rw-r--r--chromium/v8/src/type-cache.h154
-rw-r--r--chromium/v8/src/type-feedback-vector-inl.h146
-rw-r--r--chromium/v8/src/type-feedback-vector.cc334
-rw-r--r--chromium/v8/src/type-feedback-vector.h384
-rw-r--r--chromium/v8/src/type-info.cc154
-rw-r--r--chromium/v8/src/type-info.h54
-rw-r--r--chromium/v8/src/typedarray.js514
-rw-r--r--chromium/v8/src/types-inl.h3
-rw-r--r--chromium/v8/src/types.cc12
-rw-r--r--chromium/v8/src/types.h96
-rw-r--r--chromium/v8/src/typing-asm.cc910
-rw-r--r--chromium/v8/src/typing-asm.h104
-rw-r--r--chromium/v8/src/typing-reset.cc13
-rw-r--r--chromium/v8/src/typing-reset.h8
-rw-r--r--chromium/v8/src/utils.cc32
-rw-r--r--chromium/v8/src/utils.h100
-rw-r--r--chromium/v8/src/v8.cc10
-rw-r--r--chromium/v8/src/v8.h3
-rw-r--r--chromium/v8/src/v8memory.h3
-rw-r--r--chromium/v8/src/v8threads.h3
-rw-r--r--chromium/v8/src/vector.h3
-rw-r--r--chromium/v8/src/version.h3
-rw-r--r--chromium/v8/src/vm-state-inl.h3
-rw-r--r--chromium/v8/src/vm-state.h12
-rw-r--r--chromium/v8/src/wasm/OWNERS5
-rw-r--r--chromium/v8/src/wasm/asm-wasm-builder.cc1045
-rw-r--r--chromium/v8/src/wasm/asm-wasm-builder.h33
-rw-r--r--chromium/v8/src/wasm/ast-decoder.cc1583
-rw-r--r--chromium/v8/src/wasm/ast-decoder.h116
-rw-r--r--chromium/v8/src/wasm/decoder.h233
-rw-r--r--chromium/v8/src/wasm/encoder.cc592
-rw-r--r--chromium/v8/src/wasm/encoder.h157
-rw-r--r--chromium/v8/src/wasm/module-decoder.cc547
-rw-r--r--chromium/v8/src/wasm/module-decoder.h33
-rw-r--r--chromium/v8/src/wasm/wasm-js.cc345
-rw-r--r--chromium/v8/src/wasm/wasm-js.h27
-rw-r--r--chromium/v8/src/wasm/wasm-macro-gen.h265
-rw-r--r--chromium/v8/src/wasm/wasm-module.cc511
-rw-r--r--chromium/v8/src/wasm/wasm-module.h192
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes.cc133
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes.h476
-rw-r--r--chromium/v8/src/wasm/wasm-result.cc53
-rw-r--r--chromium/v8/src/wasm/wasm-result.h116
-rw-r--r--chromium/v8/src/x64/assembler-x64-inl.h32
-rw-r--r--chromium/v8/src/x64/assembler-x64.cc247
-rw-r--r--chromium/v8/src/x64/assembler-x64.h591
-rw-r--r--chromium/v8/src/x64/builtins-x64.cc1963
-rw-r--r--chromium/v8/src/x64/code-stubs-x64.cc668
-rw-r--r--chromium/v8/src/x64/code-stubs-x64.h19
-rw-r--r--chromium/v8/src/x64/codegen-x64.cc174
-rw-r--r--chromium/v8/src/x64/codegen-x64.h5
-rw-r--r--chromium/v8/src/x64/deoptimizer-x64.cc37
-rw-r--r--chromium/v8/src/x64/disasm-x64.cc143
-rw-r--r--chromium/v8/src/x64/frames-x64.h3
-rw-r--r--chromium/v8/src/x64/interface-descriptors-x64.cc84
-rw-r--r--chromium/v8/src/x64/macro-assembler-x64.cc1135
-rw-r--r--chromium/v8/src/x64/macro-assembler-x64.h285
-rw-r--r--chromium/v8/src/x64/simulator-x64.h14
-rw-r--r--chromium/v8/src/x87/assembler-x87-inl.h33
-rw-r--r--chromium/v8/src/x87/assembler-x87.cc43
-rw-r--r--chromium/v8/src/x87/assembler-x87.h214
-rw-r--r--chromium/v8/src/x87/builtins-x87.cc1984
-rw-r--r--chromium/v8/src/x87/code-stubs-x87.cc651
-rw-r--r--chromium/v8/src/x87/code-stubs-x87.h19
-rw-r--r--chromium/v8/src/x87/codegen-x87.cc49
-rw-r--r--chromium/v8/src/x87/codegen-x87.h5
-rw-r--r--chromium/v8/src/x87/deoptimizer-x87.cc50
-rw-r--r--chromium/v8/src/x87/frames-x87.h3
-rw-r--r--chromium/v8/src/x87/interface-descriptors-x87.cc89
-rw-r--r--chromium/v8/src/x87/macro-assembler-x87.cc396
-rw-r--r--chromium/v8/src/x87/macro-assembler-x87.h401
-rw-r--r--chromium/v8/src/x87/simulator-x87.h14
-rw-r--r--chromium/v8/src/zone-allocator.h3
-rw-r--r--chromium/v8/src/zone-containers.h6
-rw-r--r--chromium/v8/src/zone-type-cache.h98
-rw-r--r--chromium/v8/test/benchmarks/benchmarks.gyp26
-rw-r--r--chromium/v8/test/benchmarks/benchmarks.isolate14
-rw-r--r--chromium/v8/test/bot_default.gyp32
-rw-r--r--chromium/v8/test/bot_default.isolate14
-rw-r--r--chromium/v8/test/cctest/cctest.gyp58
-rw-r--r--chromium/v8/test/cctest/cctest.isolate16
-rw-r--r--chromium/v8/test/default.gyp31
-rw-r--r--chromium/v8/test/default.isolate13
-rw-r--r--chromium/v8/test/ignition.gyp27
-rw-r--r--chromium/v8/test/ignition.isolate9
-rw-r--r--chromium/v8/test/intl/intl.gyp26
-rw-r--r--chromium/v8/test/intl/intl.isolate14
-rw-r--r--chromium/v8/test/message/message.gyp26
-rw-r--r--chromium/v8/test/message/message.isolate14
-rw-r--r--chromium/v8/test/mjsunit/mjsunit.gyp26
-rw-r--r--chromium/v8/test/mjsunit/mjsunit.isolate23
-rw-r--r--chromium/v8/test/mozilla/mozilla.gyp26
-rw-r--r--chromium/v8/test/mozilla/mozilla.isolate14
-rw-r--r--chromium/v8/test/optimize_for_size.gyp29
-rw-r--r--chromium/v8/test/optimize_for_size.isolate11
-rw-r--r--chromium/v8/test/preparser/preparser.gyp26
-rw-r--r--chromium/v8/test/preparser/preparser.isolate14
-rw-r--r--chromium/v8/test/simdjs/simdjs.gyp26
-rw-r--r--chromium/v8/test/simdjs/simdjs.isolate14
-rw-r--r--chromium/v8/test/test262/test262.gyp26
-rw-r--r--chromium/v8/test/test262/test262.isolate14
-rw-r--r--chromium/v8/test/unittests/unittests.gyp38
-rw-r--r--chromium/v8/test/unittests/unittests.isolate15
-rw-r--r--chromium/v8/test/webkit/webkit.gyp26
-rw-r--r--chromium/v8/test/webkit/webkit.isolate14
-rwxr-xr-xchromium/v8/tools/bash-completion.sh8
-rw-r--r--chromium/v8/tools/check-static-initializers.gyp26
-rw-r--r--chromium/v8/tools/check-static-initializers.isolate16
-rwxr-xr-xchromium/v8/tools/cpu.sh39
-rw-r--r--chromium/v8/tools/disasm.py4
-rwxr-xr-xchromium/v8/tools/eval_gc_nvp.py25
-rwxr-xr-xchromium/v8/tools/eval_gc_time.sh107
-rw-r--r--chromium/v8/tools/gc_nvp_common.py2
-rw-r--r--chromium/v8/tools/gen-postmortem-metadata.py72
-rw-r--r--chromium/v8/tools/gyp/v8.gyp566
-rw-r--r--chromium/v8/tools/isolate_driver.py65
-rwxr-xr-xchromium/v8/tools/js2c.py6
-rwxr-xr-xchromium/v8/tools/ll_prof.py34
-rw-r--r--chromium/v8/tools/luci-go/linux64/isolate.sha11
-rw-r--r--chromium/v8/tools/luci-go/mac64/isolate.sha11
-rw-r--r--chromium/v8/tools/luci-go/win64/isolate.exe.sha11
-rw-r--r--chromium/v8/tools/parser-shell.cc10
-rwxr-xr-xchromium/v8/tools/presubmit.py71
-rwxr-xr-xchromium/v8/tools/release/auto_push.py6
-rwxr-xr-xchromium/v8/tools/release/auto_roll.py222
-rwxr-xr-xchromium/v8/tools/release/chromium_roll.py160
-rw-r--r--chromium/v8/tools/release/common_includes.py86
-rwxr-xr-xchromium/v8/tools/release/merge_to_branch.py45
-rwxr-xr-xchromium/v8/tools/release/mergeinfo.py112
-rwxr-xr-xchromium/v8/tools/release/releases.py9
-rwxr-xr-xchromium/v8/tools/release/search_related_commits.py16
-rwxr-xr-xchromium/v8/tools/release/test_mergeinfo.py180
-rw-r--r--chromium/v8/tools/release/test_scripts.py177
-rwxr-xr-xchromium/v8/tools/run-deopt-fuzzer.py6
-rwxr-xr-xchromium/v8/tools/run-tests.py123
-rwxr-xr-xchromium/v8/tools/run-valgrind.py35
-rw-r--r--chromium/v8/tools/shell-utils.h3
-rw-r--r--chromium/v8/tools/testrunner/local/execution.py185
-rw-r--r--chromium/v8/tools/testrunner/local/perfdata.py26
-rw-r--r--chromium/v8/tools/testrunner/local/pool.py29
-rw-r--r--chromium/v8/tools/testrunner/local/progress.py37
-rw-r--r--chromium/v8/tools/testrunner/local/statusfile.py36
-rw-r--r--chromium/v8/tools/testrunner/local/testsuite.py23
-rw-r--r--chromium/v8/tools/testrunner/network/endpoint.py1
-rw-r--r--chromium/v8/tools/testrunner/objects/context.py8
-rw-r--r--chromium/v8/tools/testrunner/objects/testcase.py8
-rw-r--r--chromium/v8/tools/testrunner/testrunner.isolate14
-rwxr-xr-xchromium/v8/tools/try_perf.py30
-rw-r--r--chromium/v8/tools/v8heapconst.py464
-rw-r--r--chromium/v8/tools/whitespace.txt2
1180 files changed, 119003 insertions, 75197 deletions
diff --git a/chromium/v8/AUTHORS b/chromium/v8/AUTHORS
index 9dfb07328e0..c9be8bbcda7 100644
--- a/chromium/v8/AUTHORS
+++ b/chromium/v8/AUTHORS
@@ -32,6 +32,7 @@ StrongLoop, Inc. <*@strongloop.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
Akinori MUSHA <knu@FreeBSD.org>
+Alex Kodat <akodat@rocketsoftware.com>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
@@ -51,9 +52,11 @@ Daniel James <dnljms@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
Erich Ocean <erich.ocean@me.com>
+Evan Lucas <evan.lucas@help.com>
Fedor Indutny <fedor@indutny.com>
Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
+Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
Han Choongwoo <cwhan.tunz@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
@@ -81,6 +84,7 @@ Michael Lutz <michi@icosahedron.de>
Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
+Milton Chiang <milton.chiang@mediatek.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
@@ -100,7 +104,8 @@ Stefan Penner <stefan.penner@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
+Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Yu Yin <xwafish@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
-柳荣一 <admin@web-tinker.com>
+柳荣一 <admin@web-tinker.com>
\ No newline at end of file
diff --git a/chromium/v8/BUILD.gn b/chromium/v8/BUILD.gn
index 9dfdaa3cdaa..88097133c43 100644
--- a/chromium/v8/BUILD.gn
+++ b/chromium/v8/BUILD.gn
@@ -7,12 +7,22 @@ import("//build/config/arm.gni")
import("//build/config/mips.gni")
import("//build/config/sanitizers/sanitizers.gni")
+if (is_android) {
+ import("//build/config/android/rules.gni")
+}
+
# Because standalone V8 builds are not supported, assume this is part of a
# Chromium build.
-import("//build/module_args/v8.gni")
+import("//build_overrides/v8.gni")
import("snapshot_toolchain.gni")
+declare_args() {
+ # Enable the snapshot feature, for fast context creation.
+ # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
+ v8_use_snapshot = true
+}
+
# TODO(jochen): These will need to be user-settable to support standalone V8
# builds.
v8_deprecation_warnings = false
@@ -24,7 +34,6 @@ v8_enable_verify_heap = false
v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
-v8_use_snapshot = true
v8_random_seed = "314159265"
v8_toolset_for_d8 = "host"
@@ -71,6 +80,14 @@ config("external_config") {
include_dirs = [ "include" ]
}
+# This config should only be applied to code that needs to be explicitly
+# aware of whether we are using startup data or not.
+config("external_startup_data") {
+ if (v8_use_external_startup_data) {
+ defines = [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
+ }
+}
+
config("features") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -154,12 +171,22 @@ config("toolchain") {
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
}
+ if (v8_target_arch == "s390") {
+ defines += [ "V8_TARGET_ARCH_S390" ]
+ }
+ if (v8_target_arch == "s390x") {
+ defines += [
+ "V8_TARGET_ARCH_S390",
+ "V8_TARGET_ARCH_S390X",
+ ]
+ }
if (v8_target_arch == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
}
if (v8_target_arch == "x64") {
defines += [ "V8_TARGET_ARCH_X64" ]
}
+
if (is_win) {
defines += [ "WIN32" ]
# TODO(jochen): Support v8_enable_prof.
@@ -194,35 +221,33 @@ action("js2c") {
inputs = [ "tools/jsmin.py" ]
sources = [
- "src/macros.py",
+ "src/js/macros.py",
"src/messages.h",
- "src/prologue.js",
- "src/runtime.js",
- "src/v8natives.js",
- "src/symbol.js",
- "src/array.js",
- "src/string.js",
- "src/uri.js",
- "src/math.js",
+ "src/js/prologue.js",
+ "src/js/runtime.js",
+ "src/js/v8natives.js",
+ "src/js/symbol.js",
+ "src/js/array.js",
+ "src/js/string.js",
+ "src/js/uri.js",
+ "src/js/math.js",
"src/third_party/fdlibm/fdlibm.js",
- "src/date.js",
- "src/regexp.js",
- "src/arraybuffer.js",
- "src/typedarray.js",
- "src/iterator-prototype.js",
- "src/generator.js",
- "src/object-observe.js",
- "src/collection.js",
- "src/weak-collection.js",
- "src/collection-iterator.js",
- "src/promise.js",
- "src/messages.js",
- "src/json.js",
- "src/array-iterator.js",
- "src/string-iterator.js",
- "src/templates.js",
- "src/harmony-array.js",
- "src/harmony-typedarray.js",
+ "src/js/regexp.js",
+ "src/js/arraybuffer.js",
+ "src/js/typedarray.js",
+ "src/js/iterator-prototype.js",
+ "src/js/generator.js",
+ "src/js/object-observe.js",
+ "src/js/collection.js",
+ "src/js/weak-collection.js",
+ "src/js/collection-iterator.js",
+ "src/js/promise.js",
+ "src/js/messages.js",
+ "src/js/json.js",
+ "src/js/array-iterator.js",
+ "src/js/string-iterator.js",
+ "src/js/templates.js",
+ "src/js/spread.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@@ -233,7 +258,7 @@ action("js2c") {
]
if (v8_enable_i18n_support) {
- sources += [ "src/i18n.js" ]
+ sources += [ "src/js/i18n.js" ]
}
args = [
@@ -250,40 +275,6 @@ action("js2c") {
}
}
-action("js2c_code_stubs") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- script = "tools/js2c.py"
-
- # The script depends on this other script, this rule causes a rebuild if it
- # changes.
- inputs = [ "tools/jsmin.py" ]
-
- sources = [
- "src/macros.py",
- "src/messages.h",
- "src/code-stubs.js"
- ]
-
- outputs = [
- "$target_gen_dir/code-stub-libraries.cc",
- ]
-
- args = [
- rebase_path("$target_gen_dir/code-stub-libraries.cc",
- root_build_dir),
- "CODE_STUB",
- ] + rebase_path(sources, root_build_dir)
-
- if (v8_use_external_startup_data) {
- outputs += [ "$target_gen_dir/libraries_code_stub.bin" ]
- args += [
- "--startup_blob",
- rebase_path("$target_gen_dir/libraries_code_stub.bin", root_build_dir),
- ]
- }
-}
-
action("js2c_experimental") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -294,20 +285,19 @@ action("js2c_experimental") {
inputs = [ "tools/jsmin.py" ]
sources = [
- "src/macros.py",
+ "src/js/macros.py",
"src/messages.h",
- "src/proxy.js",
- "src/generator.js",
- "src/harmony-atomics.js",
- "src/harmony-array-includes.js",
- "src/harmony-concat-spreadable.js",
- "src/harmony-tostring.js",
- "src/harmony-regexp.js",
- "src/harmony-reflect.js",
- "src/harmony-spread.js",
- "src/harmony-object-observe.js",
- "src/harmony-sharedarraybuffer.js",
- "src/harmony-simd.js"
+ "src/js/proxy.js",
+ "src/js/generator.js",
+ "src/js/harmony-atomics.js",
+ "src/js/harmony-regexp.js",
+ "src/js/harmony-reflect.js",
+ "src/js/harmony-object-observe.js",
+ "src/js/harmony-sharedarraybuffer.js",
+ "src/js/harmony-simd.js",
+ "src/js/harmony-species.js",
+ "src/js/harmony-unicode-regexps.js",
+ "src/js/promise-extra.js"
]
outputs = [
@@ -396,7 +386,7 @@ action("d8_js2c") {
inputs = [
"src/d8.js",
- "src/macros.py",
+ "src/js/macros.py",
]
outputs = [
@@ -407,13 +397,25 @@ action("d8_js2c") {
rebase_path(inputs, root_build_dir)
}
+if (is_android) {
+ android_assets("v8_external_startup_data_assets") {
+ if (v8_use_external_startup_data) {
+ deps = [
+ "//v8",
+ ]
+ renaming_sources = v8_external_startup_data_renaming_sources
+ renaming_destinations = v8_external_startup_data_renaming_destinations
+ disable_compression = true
+ }
+ }
+}
+
if (v8_use_external_startup_data) {
action("natives_blob") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -421,7 +423,6 @@ if (v8_use_external_startup_data) {
sources = [
"$target_gen_dir/libraries.bin",
- "$target_gen_dir/libraries_code_stub.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
"$target_gen_dir/libraries_experimental_extras.bin",
@@ -509,7 +510,6 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -518,7 +518,6 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
- "$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
@@ -544,7 +543,6 @@ source_set("v8_snapshot") {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -558,7 +556,6 @@ source_set("v8_snapshot") {
sources = [
"$target_gen_dir/libraries.cc",
- "$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
@@ -580,7 +577,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -610,7 +606,10 @@ source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ # TODO(fmeawad): This needs to be updated to support standalone V8 builds.
+ "../base/trace_event/common/trace_event_common.h",
"include/v8-debug.h",
+ "include/v8-experimental.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
@@ -620,12 +619,16 @@ source_set("v8_base") {
"include/v8config.h",
"src/accessors.cc",
"src/accessors.h",
+ "src/address-map.cc",
+ "src/address-map.h",
"src/allocation.cc",
"src/allocation.h",
"src/allocation-site-scopes.cc",
"src/allocation-site-scopes.h",
"src/api.cc",
"src/api.h",
+ "src/api-experimental.cc",
+ "src/api-experimental.h",
"src/api-natives.cc",
"src/api-natives.h",
"src/arguments.cc",
@@ -634,16 +637,28 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
- "src/ast-expression-visitor.cc",
- "src/ast-expression-visitor.h",
- "src/ast-literal-reindexer.cc",
- "src/ast-literal-reindexer.h",
- "src/ast-numbering.cc",
- "src/ast-numbering.h",
- "src/ast-value-factory.cc",
- "src/ast-value-factory.h",
- "src/ast.cc",
- "src/ast.h",
+ "src/ast/ast-expression-rewriter.cc",
+ "src/ast/ast-expression-rewriter.h",
+ "src/ast/ast-expression-visitor.cc",
+ "src/ast/ast-expression-visitor.h",
+ "src/ast/ast-literal-reindexer.cc",
+ "src/ast/ast-literal-reindexer.h",
+ "src/ast/ast-numbering.cc",
+ "src/ast/ast-numbering.h",
+ "src/ast/ast-value-factory.cc",
+ "src/ast/ast-value-factory.h",
+ "src/ast/ast.cc",
+ "src/ast/ast.h",
+ "src/ast/modules.cc",
+ "src/ast/modules.h",
+ "src/ast/prettyprinter.cc",
+ "src/ast/prettyprinter.h",
+ "src/ast/scopeinfo.cc",
+ "src/ast/scopeinfo.h",
+ "src/ast/scopes.cc",
+ "src/ast/scopes.h",
+ "src/ast/variables.cc",
+ "src/ast/variables.h",
"src/atomic-utils.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
@@ -684,6 +699,8 @@ source_set("v8_base") {
"src/compilation-statistics.h",
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
+ "src/compiler/access-info.cc",
+ "src/compiler/access-info.h",
"src/compiler/all-nodes.cc",
"src/compiler/all-nodes.h",
"src/compiler/ast-graph-builder.cc",
@@ -692,6 +709,10 @@ source_set("v8_base") {
"src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
+ "src/compiler/branch-elimination.cc",
+ "src/compiler/branch-elimination.h",
+ "src/compiler/bytecode-branch-analysis.cc",
+ "src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
"src/compiler/change-lowering.cc",
@@ -702,6 +723,8 @@ source_set("v8_base") {
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
+ "src/compiler/code-stub-assembler.cc",
+ "src/compiler/code-stub-assembler.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",
@@ -717,6 +740,12 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
+ "src/compiler/escape-analysis.cc",
+ "src/compiler/escape-analysis.h",
+ "src/compiler/escape-analysis-reducer.cc",
+ "src/compiler/escape-analysis-reducer.h",
+ "src/compiler/fast-accessor-assembler.cc",
+ "src/compiler/fast-accessor-assembler.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
@@ -738,6 +767,8 @@ source_set("v8_base") {
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
+ "src/compiler/instruction-scheduler.cc",
+ "src/compiler/instruction-scheduler.h",
"src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc",
"src/compiler/instruction-selector.h",
@@ -747,6 +778,8 @@ source_set("v8_base") {
"src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
+ "src/compiler/js-call-reducer.cc",
+ "src/compiler/js-call-reducer.h",
"src/compiler/js-context-relaxation.cc",
"src/compiler/js-context-relaxation.h",
"src/compiler/js-context-specialization.cc",
@@ -755,18 +788,20 @@ source_set("v8_base") {
"src/compiler/js-frame-specialization.h",
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h",
+ "src/compiler/js-global-object-specialization.cc",
+ "src/compiler/js-global-object-specialization.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
+ "src/compiler/js-inlining-heuristic.cc",
+ "src/compiler/js-inlining-heuristic.h",
"src/compiler/js-intrinsic-lowering.cc",
"src/compiler/js-intrinsic-lowering.h",
+ "src/compiler/js-native-context-specialization.cc",
+ "src/compiler/js-native-context-specialization.h",
"src/compiler/js-operator.cc",
"src/compiler/js-operator.h",
- "src/compiler/js-type-feedback.cc",
- "src/compiler/js-type-feedback.h",
- "src/compiler/js-type-feedback-lowering.cc",
- "src/compiler/js-type-feedback-lowering.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
@@ -786,8 +821,6 @@ source_set("v8_base") {
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
- "src/compiler/machine-type.cc",
- "src/compiler/machine-type.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -819,8 +852,7 @@ source_set("v8_base") {
"src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
- "src/compiler/register-configuration.cc",
- "src/compiler/register-configuration.h",
+ "src/compiler/representation-change.cc",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
"src/compiler/schedule.h",
@@ -840,12 +872,19 @@ source_set("v8_base") {
"src/compiler/state-values-utils.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
+ "src/compiler/type-hint-analyzer.cc",
+ "src/compiler/type-hint-analyzer.h",
+ "src/compiler/type-hints.cc",
+ "src/compiler/type-hints.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
+ "src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-compiler.h",
+ "src/compiler/wasm-linkage.cc",
"src/compiler/zone-pool.cc",
"src/compiler/zone-pool.h",
"src/compiler.cc",
@@ -860,6 +899,68 @@ source_set("v8_base") {
"src/conversions.h",
"src/counters.cc",
"src/counters.h",
+ "src/crankshaft/hydrogen-alias-analysis.h",
+ "src/crankshaft/hydrogen-bce.cc",
+ "src/crankshaft/hydrogen-bce.h",
+ "src/crankshaft/hydrogen-bch.cc",
+ "src/crankshaft/hydrogen-bch.h",
+ "src/crankshaft/hydrogen-canonicalize.cc",
+ "src/crankshaft/hydrogen-canonicalize.h",
+ "src/crankshaft/hydrogen-check-elimination.cc",
+ "src/crankshaft/hydrogen-check-elimination.h",
+ "src/crankshaft/hydrogen-dce.cc",
+ "src/crankshaft/hydrogen-dce.h",
+ "src/crankshaft/hydrogen-dehoist.cc",
+ "src/crankshaft/hydrogen-dehoist.h",
+ "src/crankshaft/hydrogen-environment-liveness.cc",
+ "src/crankshaft/hydrogen-environment-liveness.h",
+ "src/crankshaft/hydrogen-escape-analysis.cc",
+ "src/crankshaft/hydrogen-escape-analysis.h",
+ "src/crankshaft/hydrogen-flow-engine.h",
+ "src/crankshaft/hydrogen-gvn.cc",
+ "src/crankshaft/hydrogen-gvn.h",
+ "src/crankshaft/hydrogen-infer-representation.cc",
+ "src/crankshaft/hydrogen-infer-representation.h",
+ "src/crankshaft/hydrogen-infer-types.cc",
+ "src/crankshaft/hydrogen-infer-types.h",
+ "src/crankshaft/hydrogen-instructions.cc",
+ "src/crankshaft/hydrogen-instructions.h",
+ "src/crankshaft/hydrogen-load-elimination.cc",
+ "src/crankshaft/hydrogen-load-elimination.h",
+ "src/crankshaft/hydrogen-mark-deoptimize.cc",
+ "src/crankshaft/hydrogen-mark-deoptimize.h",
+ "src/crankshaft/hydrogen-mark-unreachable.cc",
+ "src/crankshaft/hydrogen-mark-unreachable.h",
+ "src/crankshaft/hydrogen-osr.cc",
+ "src/crankshaft/hydrogen-osr.h",
+ "src/crankshaft/hydrogen-range-analysis.cc",
+ "src/crankshaft/hydrogen-range-analysis.h",
+ "src/crankshaft/hydrogen-redundant-phi.cc",
+ "src/crankshaft/hydrogen-redundant-phi.h",
+ "src/crankshaft/hydrogen-removable-simulates.cc",
+ "src/crankshaft/hydrogen-removable-simulates.h",
+ "src/crankshaft/hydrogen-representation-changes.cc",
+ "src/crankshaft/hydrogen-representation-changes.h",
+ "src/crankshaft/hydrogen-sce.cc",
+ "src/crankshaft/hydrogen-sce.h",
+ "src/crankshaft/hydrogen-store-elimination.cc",
+ "src/crankshaft/hydrogen-store-elimination.h",
+ "src/crankshaft/hydrogen-types.cc",
+ "src/crankshaft/hydrogen-types.h",
+ "src/crankshaft/hydrogen-uint32-analysis.cc",
+ "src/crankshaft/hydrogen-uint32-analysis.h",
+ "src/crankshaft/hydrogen.cc",
+ "src/crankshaft/hydrogen.h",
+ "src/crankshaft/lithium-allocator-inl.h",
+ "src/crankshaft/lithium-allocator.cc",
+ "src/crankshaft/lithium-allocator.h",
+ "src/crankshaft/lithium-codegen.cc",
+ "src/crankshaft/lithium-codegen.h",
+ "src/crankshaft/lithium.cc",
+ "src/crankshaft/lithium.h",
+ "src/crankshaft/typing.cc",
+ "src/crankshaft/typing.h",
+ "src/crankshaft/unique.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
@@ -892,7 +993,6 @@ source_set("v8_base") {
"src/elements.h",
"src/execution.cc",
"src/execution.h",
- "src/expression-classifier.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -919,8 +1019,6 @@ source_set("v8_base") {
"src/frames.h",
"src/full-codegen/full-codegen.cc",
"src/full-codegen/full-codegen.h",
- "src/func-name-inferrer.cc",
- "src/func-name-inferrer.h",
"src/futex-emulation.cc",
"src/futex-emulation.h",
"src/gdb-jit.cc",
@@ -968,58 +1066,6 @@ source_set("v8_base") {
"src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
- "src/hydrogen-alias-analysis.h",
- "src/hydrogen-bce.cc",
- "src/hydrogen-bce.h",
- "src/hydrogen-bch.cc",
- "src/hydrogen-bch.h",
- "src/hydrogen-canonicalize.cc",
- "src/hydrogen-canonicalize.h",
- "src/hydrogen-check-elimination.cc",
- "src/hydrogen-check-elimination.h",
- "src/hydrogen-dce.cc",
- "src/hydrogen-dce.h",
- "src/hydrogen-dehoist.cc",
- "src/hydrogen-dehoist.h",
- "src/hydrogen-environment-liveness.cc",
- "src/hydrogen-environment-liveness.h",
- "src/hydrogen-escape-analysis.cc",
- "src/hydrogen-escape-analysis.h",
- "src/hydrogen-flow-engine.h",
- "src/hydrogen-instructions.cc",
- "src/hydrogen-instructions.h",
- "src/hydrogen.cc",
- "src/hydrogen.h",
- "src/hydrogen-gvn.cc",
- "src/hydrogen-gvn.h",
- "src/hydrogen-infer-representation.cc",
- "src/hydrogen-infer-representation.h",
- "src/hydrogen-infer-types.cc",
- "src/hydrogen-infer-types.h",
- "src/hydrogen-load-elimination.cc",
- "src/hydrogen-load-elimination.h",
- "src/hydrogen-mark-deoptimize.cc",
- "src/hydrogen-mark-deoptimize.h",
- "src/hydrogen-mark-unreachable.cc",
- "src/hydrogen-mark-unreachable.h",
- "src/hydrogen-osr.cc",
- "src/hydrogen-osr.h",
- "src/hydrogen-range-analysis.cc",
- "src/hydrogen-range-analysis.h",
- "src/hydrogen-redundant-phi.cc",
- "src/hydrogen-redundant-phi.h",
- "src/hydrogen-removable-simulates.cc",
- "src/hydrogen-removable-simulates.h",
- "src/hydrogen-representation-changes.cc",
- "src/hydrogen-representation-changes.h",
- "src/hydrogen-sce.cc",
- "src/hydrogen-sce.h",
- "src/hydrogen-store-elimination.cc",
- "src/hydrogen-store-elimination.h",
- "src/hydrogen-types.cc",
- "src/hydrogen-types.h",
- "src/hydrogen-uint32-analysis.cc",
- "src/hydrogen-uint32-analysis.h",
"src/i18n.cc",
"src/i18n.h",
"src/icu_util.cc",
@@ -1051,39 +1097,41 @@ source_set("v8_base") {
"src/interpreter/bytecode-array-iterator.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
+ "src/interpreter/bytecode-register-allocator.cc",
+ "src/interpreter/bytecode-register-allocator.h",
+ "src/interpreter/bytecode-traits.h",
+ "src/interpreter/constant-array-builder.cc",
+ "src/interpreter/constant-array-builder.h",
+ "src/interpreter/control-flow-builders.cc",
+ "src/interpreter/control-flow-builders.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
- "src/json-parser.h",
"src/json-stringifier.h",
+ "src/key-accumulator.h",
+ "src/key-accumulator.cc",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
"src/list-inl.h",
"src/list.h",
- "src/lithium-allocator-inl.h",
- "src/lithium-allocator.cc",
- "src/lithium-allocator.h",
- "src/lithium-codegen.cc",
- "src/lithium-codegen.h",
- "src/lithium.cc",
- "src/lithium.h",
"src/log-inl.h",
"src/log-utils.cc",
"src/log-utils.h",
"src/log.cc",
"src/log.h",
- "src/lookup-inl.h",
"src/lookup.cc",
"src/lookup.h",
"src/macro-assembler.h",
+ "src/machine-type.cc",
+ "src/machine-type.h",
"src/messages.cc",
"src/messages.h",
- "src/modules.cc",
- "src/modules.h",
"src/msan.h",
+ "src/objects-body-descriptors-inl.h",
+ "src/objects-body-descriptors.h",
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
@@ -1093,18 +1141,31 @@ source_set("v8_base") {
"src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
- "src/pattern-rewriter.cc",
- "src/parser.cc",
- "src/parser.h",
+ "src/parsing/expression-classifier.h",
+ "src/parsing/func-name-inferrer.cc",
+ "src/parsing/func-name-inferrer.h",
+ "src/parsing/json-parser.h",
+ "src/parsing/parameter-initializer-rewriter.cc",
+ "src/parsing/parameter-initializer-rewriter.h",
+ "src/parsing/parser-base.h",
+ "src/parsing/parser.cc",
+ "src/parsing/parser.h",
+ "src/parsing/pattern-rewriter.cc",
+ "src/parsing/preparse-data-format.h",
+ "src/parsing/preparse-data.cc",
+ "src/parsing/preparse-data.h",
+ "src/parsing/preparser.cc",
+ "src/parsing/preparser.h",
+ "src/parsing/rewriter.cc",
+ "src/parsing/rewriter.h",
+ "src/parsing/scanner-character-streams.cc",
+ "src/parsing/scanner-character-streams.h",
+ "src/parsing/scanner.cc",
+ "src/parsing/scanner.h",
+ "src/parsing/token.cc",
+ "src/parsing/token.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
- "src/preparse-data-format.h",
- "src/preparse-data.cc",
- "src/preparse-data.h",
- "src/preparser.cc",
- "src/preparser.h",
- "src/prettyprinter.cc",
- "src/prettyprinter.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@@ -1122,20 +1183,24 @@ source_set("v8_base") {
"src/profiler/profile-generator.h",
"src/profiler/sampler.cc",
"src/profiler/sampler.h",
+ "src/profiler/strings-storage.cc",
+ "src/profiler/strings-storage.h",
"src/profiler/unbound-queue-inl.h",
"src/profiler/unbound-queue.h",
+ "src/property-descriptor.cc",
+ "src/property-descriptor.h",
"src/property-details.h",
"src/property.cc",
"src/property.h",
"src/prototype.h",
- "src/rewriter.cc",
- "src/rewriter.h",
"src/regexp/bytecodes-irregexp.h",
"src/regexp/interpreter-irregexp.cc",
"src/regexp/interpreter-irregexp.h",
"src/regexp/jsregexp-inl.h",
"src/regexp/jsregexp.cc",
"src/regexp/jsregexp.h",
+ "src/regexp/regexp-ast.cc",
+ "src/regexp/regexp-ast.h",
"src/regexp/regexp-macro-assembler-irregexp-inl.h",
"src/regexp/regexp-macro-assembler-irregexp.cc",
"src/regexp/regexp-macro-assembler-irregexp.h",
@@ -1143,8 +1208,12 @@ source_set("v8_base") {
"src/regexp/regexp-macro-assembler-tracer.h",
"src/regexp/regexp-macro-assembler.cc",
"src/regexp/regexp-macro-assembler.h",
+ "src/regexp/regexp-parser.cc",
+ "src/regexp/regexp-parser.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
+ "src/register-configuration.cc",
+ "src/register-configuration.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
@@ -1183,14 +1252,6 @@ source_set("v8_base") {
"src/runtime/runtime.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
- "src/scanner-character-streams.cc",
- "src/scanner-character-streams.h",
- "src/scanner.cc",
- "src/scanner.h",
- "src/scopeinfo.cc",
- "src/scopeinfo.h",
- "src/scopes.cc",
- "src/scopes.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
@@ -1211,15 +1272,15 @@ source_set("v8_base") {
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
- "src/strings-storage.cc",
- "src/strings-storage.h",
"src/strtod.cc",
"src/strtod.h",
- "src/token.cc",
- "src/token.h",
+ "src/tracing/trace-event.cc",
+ "src/tracing/trace-event.h",
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
+ "src/type-cache.cc",
+ "src/type-cache.h",
"src/type-feedback-vector-inl.h",
"src/type-feedback-vector.cc",
"src/type-feedback-vector.h",
@@ -1232,8 +1293,6 @@ source_set("v8_base") {
"src/typing-asm.h",
"src/typing-reset.cc",
"src/typing-reset.h",
- "src/typing.cc",
- "src/typing.h",
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
@@ -1241,7 +1300,6 @@ source_set("v8_base") {
"src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
- "src/unique.h",
"src/utils.cc",
"src/utils.h",
"src/v8.cc",
@@ -1249,13 +1307,28 @@ source_set("v8_base") {
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
- "src/variables.cc",
- "src/variables.h",
"src/version.cc",
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
- "src/zone-type-cache.h",
+ "src/wasm/asm-wasm-builder.cc",
+ "src/wasm/asm-wasm-builder.h",
+ "src/wasm/ast-decoder.cc",
+ "src/wasm/ast-decoder.h",
+ "src/wasm/decoder.h",
+ "src/wasm/encoder.cc",
+ "src/wasm/encoder.h",
+ "src/wasm/module-decoder.cc",
+ "src/wasm/module-decoder.h",
+ "src/wasm/wasm-js.cc",
+ "src/wasm/wasm-js.h",
+ "src/wasm/wasm-macro-gen.h",
+ "src/wasm/wasm-module.cc",
+ "src/wasm/wasm-module.h",
+ "src/wasm/wasm-opcodes.cc",
+ "src/wasm/wasm-opcodes.h",
+ "src/wasm/wasm-result.cc",
+ "src/wasm/wasm-result.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
@@ -1266,6 +1339,18 @@ source_set("v8_base") {
if (v8_target_arch == "x86") {
sources += [
+ "src/crankshaft/ia32/lithium-codegen-ia32.cc",
+ "src/crankshaft/ia32/lithium-codegen-ia32.h",
+ "src/crankshaft/ia32/lithium-gap-resolver-ia32.cc",
+ "src/crankshaft/ia32/lithium-gap-resolver-ia32.h",
+ "src/crankshaft/ia32/lithium-ia32.cc",
+ "src/crankshaft/ia32/lithium-ia32.h",
+ "src/compiler/ia32/code-generator-ia32.cc",
+ "src/compiler/ia32/instruction-codes-ia32.h",
+ "src/compiler/ia32/instruction-scheduler-ia32.cc",
+ "src/compiler/ia32/instruction-selector-ia32.cc",
+ "src/debug/ia32/debug-ia32.cc",
+ "src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
@@ -1280,19 +1365,8 @@ source_set("v8_base") {
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
"src/ia32/interface-descriptors-ia32.cc",
- "src/ia32/lithium-codegen-ia32.cc",
- "src/ia32/lithium-codegen-ia32.h",
- "src/ia32/lithium-gap-resolver-ia32.cc",
- "src/ia32/lithium-gap-resolver-ia32.h",
- "src/ia32/lithium-ia32.cc",
- "src/ia32/lithium-ia32.h",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
- "src/compiler/ia32/code-generator-ia32.cc",
- "src/compiler/ia32/instruction-codes-ia32.h",
- "src/compiler/ia32/instruction-selector-ia32.cc",
- "src/debug/ia32/debug-ia32.cc",
- "src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
@@ -1303,6 +1377,25 @@ source_set("v8_base") {
]
} else if (v8_target_arch == "x64") {
sources += [
+ "src/compiler/x64/code-generator-x64.cc",
+ "src/compiler/x64/instruction-codes-x64.h",
+ "src/compiler/x64/instruction-scheduler-x64.cc",
+ "src/compiler/x64/instruction-selector-x64.cc",
+ "src/crankshaft/x64/lithium-codegen-x64.cc",
+ "src/crankshaft/x64/lithium-codegen-x64.h",
+ "src/crankshaft/x64/lithium-gap-resolver-x64.cc",
+ "src/crankshaft/x64/lithium-gap-resolver-x64.h",
+ "src/crankshaft/x64/lithium-x64.cc",
+ "src/crankshaft/x64/lithium-x64.h",
+ "src/debug/x64/debug-x64.cc",
+ "src/full-codegen/x64/full-codegen-x64.cc",
+ "src/ic/x64/access-compiler-x64.cc",
+ "src/ic/x64/handler-compiler-x64.cc",
+ "src/ic/x64/ic-x64.cc",
+ "src/ic/x64/ic-compiler-x64.cc",
+ "src/ic/x64/stub-cache-x64.cc",
+ "src/regexp/x64/regexp-macro-assembler-x64.cc",
+ "src/regexp/x64/regexp-macro-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
@@ -1317,26 +1410,8 @@ source_set("v8_base") {
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/interface-descriptors-x64.cc",
- "src/x64/lithium-codegen-x64.cc",
- "src/x64/lithium-codegen-x64.h",
- "src/x64/lithium-gap-resolver-x64.cc",
- "src/x64/lithium-gap-resolver-x64.h",
- "src/x64/lithium-x64.cc",
- "src/x64/lithium-x64.h",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
- "src/compiler/x64/code-generator-x64.cc",
- "src/compiler/x64/instruction-codes-x64.h",
- "src/compiler/x64/instruction-selector-x64.cc",
- "src/debug/x64/debug-x64.cc",
- "src/full-codegen/x64/full-codegen-x64.cc",
- "src/ic/x64/access-compiler-x64.cc",
- "src/ic/x64/handler-compiler-x64.cc",
- "src/ic/x64/ic-x64.cc",
- "src/ic/x64/ic-compiler-x64.cc",
- "src/ic/x64/stub-cache-x64.cc",
- "src/regexp/x64/regexp-macro-assembler-x64.cc",
- "src/regexp/x64/regexp-macro-assembler-x64.h",
]
} else if (v8_target_arch == "arm") {
sources += [
@@ -1357,19 +1432,20 @@ source_set("v8_base") {
"src/arm/frames-arm.h",
"src/arm/interface-descriptors-arm.cc",
"src/arm/interface-descriptors-arm.h",
- "src/arm/lithium-arm.cc",
- "src/arm/lithium-arm.h",
- "src/arm/lithium-codegen-arm.cc",
- "src/arm/lithium-codegen-arm.h",
- "src/arm/lithium-gap-resolver-arm.cc",
- "src/arm/lithium-gap-resolver-arm.h",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
+ "src/compiler/arm/instruction-scheduler-arm.cc",
"src/compiler/arm/instruction-selector-arm.cc",
+ "src/crankshaft/arm/lithium-arm.cc",
+ "src/crankshaft/arm/lithium-arm.h",
+ "src/crankshaft/arm/lithium-codegen-arm.cc",
+ "src/crankshaft/arm/lithium-codegen-arm.h",
+ "src/crankshaft/arm/lithium-gap-resolver-arm.cc",
+ "src/crankshaft/arm/lithium-gap-resolver-arm.h",
"src/debug/arm/debug-arm.cc",
"src/full-codegen/arm/full-codegen-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
@@ -1395,9 +1471,6 @@ source_set("v8_base") {
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/decoder-arm64-inl.h",
- "src/arm64/delayed-masm-arm64.cc",
- "src/arm64/delayed-masm-arm64.h",
- "src/arm64/delayed-masm-arm64-inl.h",
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
@@ -1409,12 +1482,6 @@ source_set("v8_base") {
"src/arm64/instrument-arm64.h",
"src/arm64/interface-descriptors-arm64.cc",
"src/arm64/interface-descriptors-arm64.h",
- "src/arm64/lithium-arm64.cc",
- "src/arm64/lithium-arm64.h",
- "src/arm64/lithium-codegen-arm64.cc",
- "src/arm64/lithium-codegen-arm64.h",
- "src/arm64/lithium-gap-resolver-arm64.cc",
- "src/arm64/lithium-gap-resolver-arm64.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
@@ -1424,7 +1491,17 @@ source_set("v8_base") {
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
+ "src/compiler/arm64/instruction-scheduler-arm64.cc",
"src/compiler/arm64/instruction-selector-arm64.cc",
+ "src/crankshaft/arm64/delayed-masm-arm64.cc",
+ "src/crankshaft/arm64/delayed-masm-arm64.h",
+ "src/crankshaft/arm64/delayed-masm-arm64-inl.h",
+ "src/crankshaft/arm64/lithium-arm64.cc",
+ "src/crankshaft/arm64/lithium-arm64.h",
+ "src/crankshaft/arm64/lithium-codegen-arm64.cc",
+ "src/crankshaft/arm64/lithium-codegen-arm64.h",
+ "src/crankshaft/arm64/lithium-gap-resolver-arm64.cc",
+ "src/crankshaft/arm64/lithium-gap-resolver-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/full-codegen/arm64/full-codegen-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
@@ -1437,6 +1514,23 @@ source_set("v8_base") {
]
} else if (v8_target_arch == "mipsel") {
sources += [
+ "src/compiler/mips/code-generator-mips.cc",
+ "src/compiler/mips/instruction-codes-mips.h",
+ "src/compiler/mips/instruction-scheduler-mips.cc",
+ "src/compiler/mips/instruction-selector-mips.cc",
+ "src/crankshaft/mips/lithium-codegen-mips.cc",
+ "src/crankshaft/mips/lithium-codegen-mips.h",
+ "src/crankshaft/mips/lithium-gap-resolver-mips.cc",
+ "src/crankshaft/mips/lithium-gap-resolver-mips.h",
+ "src/crankshaft/mips/lithium-mips.cc",
+ "src/crankshaft/mips/lithium-mips.h",
+ "src/debug/mips/debug-mips.cc",
+ "src/full-codegen/mips/full-codegen-mips.cc",
+ "src/ic/mips/access-compiler-mips.cc",
+ "src/ic/mips/handler-compiler-mips.cc",
+ "src/ic/mips/ic-mips.cc",
+ "src/ic/mips/ic-compiler-mips.cc",
+ "src/ic/mips/stub-cache-mips.cc",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/assembler-mips-inl.h",
@@ -1453,31 +1547,32 @@ source_set("v8_base") {
"src/mips/frames-mips.cc",
"src/mips/frames-mips.h",
"src/mips/interface-descriptors-mips.cc",
- "src/mips/lithium-codegen-mips.cc",
- "src/mips/lithium-codegen-mips.h",
- "src/mips/lithium-gap-resolver-mips.cc",
- "src/mips/lithium-gap-resolver-mips.h",
- "src/mips/lithium-mips.cc",
- "src/mips/lithium-mips.h",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
- "src/compiler/mips/code-generator-mips.cc",
- "src/compiler/mips/instruction-codes-mips.h",
- "src/compiler/mips/instruction-selector-mips.cc",
- "src/debug/mips/debug-mips.cc",
- "src/full-codegen/mips/full-codegen-mips.cc",
- "src/ic/mips/access-compiler-mips.cc",
- "src/ic/mips/handler-compiler-mips.cc",
- "src/ic/mips/ic-mips.cc",
- "src/ic/mips/ic-compiler-mips.cc",
- "src/ic/mips/stub-cache-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
]
} else if (v8_target_arch == "mips64el") {
sources += [
+      "src/compiler/mips64/code-generator-mips64.cc",
+      "src/compiler/mips64/instruction-codes-mips64.h",
+      "src/compiler/mips64/instruction-scheduler-mips64.cc",
+      "src/compiler/mips64/instruction-selector-mips64.cc",
+ "src/crankshaft/mips64/lithium-codegen-mips64.cc",
+ "src/crankshaft/mips64/lithium-codegen-mips64.h",
+ "src/crankshaft/mips64/lithium-gap-resolver-mips64.cc",
+ "src/crankshaft/mips64/lithium-gap-resolver-mips64.h",
+ "src/crankshaft/mips64/lithium-mips64.cc",
+ "src/crankshaft/mips64/lithium-mips64.h",
+ "src/debug/mips64/debug-mips64.cc",
+ "src/full-codegen/mips64/full-codegen-mips64.cc",
+ "src/ic/mips64/access-compiler-mips64.cc",
+ "src/ic/mips64/handler-compiler-mips64.cc",
+ "src/ic/mips64/ic-mips64.cc",
+ "src/ic/mips64/ic-compiler-mips64.cc",
+ "src/ic/mips64/stub-cache-mips64.cc",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/assembler-mips64-inl.h",
@@ -1494,23 +1589,10 @@ source_set("v8_base") {
"src/mips64/frames-mips64.cc",
"src/mips64/frames-mips64.h",
"src/mips64/interface-descriptors-mips64.cc",
- "src/mips64/lithium-codegen-mips64.cc",
- "src/mips64/lithium-codegen-mips64.h",
- "src/mips64/lithium-gap-resolver-mips64.cc",
- "src/mips64/lithium-gap-resolver-mips64.h",
- "src/mips64/lithium-mips64.cc",
- "src/mips64/lithium-mips64.h",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
- "src/debug/mips64/debug-mips64.cc",
- "src/full-codegen/mips64/full-codegen-mips64.cc",
- "src/ic/mips64/access-compiler-mips64.cc",
- "src/ic/mips64/handler-compiler-mips64.cc",
- "src/ic/mips64/ic-mips64.cc",
- "src/ic/mips64/ic-compiler-mips64.cc",
- "src/ic/mips64/stub-cache-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
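As a brief aside on how the build switches introduced in the BUILD.gn hunks above are meant to be consumed: the new declare_args() block makes v8_use_snapshot user-settable, and the external_startup_data config guards the V8_USE_EXTERNAL_STARTUP_DATA define so only targets that opt in see it. The following is a minimal GN sketch, assuming this BUILD.gn lives at //v8 in a Chromium checkout; the "embedder" target and embedder.cc file are hypothetical and not part of this commit.

    # args.gn (or gn gen out/Release --args=...): override the new
    # declare_args() switch so contexts are bootstrapped from source
    # instead of the startup snapshot.
    v8_use_snapshot = false

    # Hypothetical embedder target: applying the config added above means
    # V8_USE_EXTERNAL_STARTUP_DATA is defined for this target only when
    # v8_use_external_startup_data is true, keeping other code unaware of
    # how the startup data is shipped.
    source_set("embedder") {
      sources = [ "embedder.cc" ]
      deps = [ "//v8" ]
      configs += [ "//v8:external_startup_data" ]
    }

The design choice mirrored here is the one stated in the diff's own comment: rather than defining the macro globally, only code that must behave differently with external startup data applies the config.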
diff --git a/chromium/v8/ChangeLog b/chromium/v8/ChangeLog
index 3c6e5d8850b..54bcbe4275a 100644
--- a/chromium/v8/ChangeLog
+++ b/chromium/v8/ChangeLog
@@ -1,3 +1,3737 @@
+2016-01-14: Version 4.9.385
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.384
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.383
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.382
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.381
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.380
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.379
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.378
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.377
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.376
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.375
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.374
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.373
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.372
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.371
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.370
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.369
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.368
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.367
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.366
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.365
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.364
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.363
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.362
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.361
+
+        Disable concurrent OSR (issue 4650).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.360
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.359
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.358
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.357
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.356
+
+ [wasm] Rename the WASM object to _WASMEXP_ (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.355
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.354
+
+ Reland of "[Proxies] Ship Proxies + Reflect." (issues 1543, 3931).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.353
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.352
+
+ Gracefully handle proxies in AllCanWrite() (issue 1543, Chromium issue
+ 576662).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.351
+
+ [wasm] Fix double to int conversions (Chromium issue 576560).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.350
+
+ [Proxies] Ship Proxies + Reflect (issues 1543, 3931).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.349
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.348
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.347
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.346
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.345
+
+ Add @@species/better subclassing support to Promises (issue 4633,
+ Chromium issue 575314).
+
+ TypedArray and ArrayBuffer support for @@species (issue 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.344
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.343
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.342
+
+ Ship ES2015 sloppy-mode const semantics (issue 3305).
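+
+        For illustration, the standard ES2015 const semantics that now
+        apply in sloppy mode as well (a sketch, not code from the change):
+
+            // No "use strict" needed: const is block-scoped.
+            { const x = 1; }
+            // x is not visible here, and reassignment throws:
+            const y = 2;
+            // y = 3;  // TypeError: Assignment to constant variable.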
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.341
+
+ Partial rollback of Promise error checking (issue 4633).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.340
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.339
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.338
+
+ [wasm] Fix set_local appearing in unreachable code (Chromium issue
+ 575861).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.337
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.336
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.335
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.334
+
+ [wasm] Avoid crashing if parsing fails in asm -> wasm (Chromium issue
+ 575369).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.333
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.332
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.331
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-09: Version 4.9.330
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.329
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.328
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.327
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.326
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.325
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.324
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.323
+
+ Fix sloppy block-scoped function hoisting with nested zones (Chromium
+ issue 537816).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.322
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.321
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.320
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.319
+
+ [wasm] Fix validation error for missing return statement in asm.js
+ module (Chromium issue 575364).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.318
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.317
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.316
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.315
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.314
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.313
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.312
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.311
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.310
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.309
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.308
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.307
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.306
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.305
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.304
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.303
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.302
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.301
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.300
+
+ Add Array support for @@species and subclassing (issue 4093).
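+
+        For illustration, a sketch of the standard @@species behavior for
+        Array subclasses (the subclass name is hypothetical):
+
+            class MyArray extends Array {
+              // Redirect derived-array creation back to plain Array.
+              static get [Symbol.species]() { return Array; }
+            }
+            const doubled = new MyArray(1, 2, 3).map(x => x * 2);
+            doubled instanceof MyArray;  // false
+            doubled instanceof Array;    // true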
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-06: Version 4.9.299
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-06: Version 4.9.298
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-06: Version 4.9.297
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.296
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.295
+
+ Ship ES2015 sloppy-mode function hoisting, let, class (issues 3305,
+ 4285).
+
+ Ship destructuring assignment (issue 811).
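+
+        For illustration, standard ES2015 destructuring assignment (a
+        sketch, not code from the change):
+
+            let a, b, rest;
+            [a, b] = [1, 2];              // array pattern
+            ({ a, b } = { a: 3, b: 4 });  // object pattern needs parens
+            [a, ...rest] = [1, 2, 3];     // rest element: rest is [2, 3]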
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.294
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.293
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.292
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.291
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.290
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.289
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.288
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.287
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.286
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.285
+
+ Accept time zones like GMT-8 in the legacy date parser (Chromium issue
+ 422858).
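+
+        For illustration, the kind of input this targets (a hypothetical
+        example; exact accepted formats are implementation-defined):
+
+            // Previously rejected, now parsed as a UTC-08:00 time:
+            new Date("Jan 1 2016 12:00 GMT-8");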
+
+ Timezone name check fix (Chromium issue 364374).
+
+ Add a --harmony-species flag, defining @@species on constructors (issue
+ 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.284
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.283
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.282
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.281
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.280
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-01: Version 4.9.279
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-31: Version 4.9.278
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-31: Version 4.9.277
+
+ Fix 'illegal access' in Date constructor edge case (issue 4640).
+
+ Reland of Use ES2015-style TypedArray prototype chain (patchset #1 id:1
+ of https://codereview.chromium.org/1554523002/ ) (issue 4085).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.276
+
+ Reland "Clean up promises and fix an edge case bug (patchset #4 id:60001
+ of https://codereview.chromium.org/1488783002/ )" (issue 3641).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.275
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.274
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.273
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.272
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.271
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.270
+
+ [crankshaft] Don't inline array resize operations if receiver's proto is
+ not a JSObject (Chromium issue 571064).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.269
+
+ [ic] Fixed receiver_map register trashing in KeyedStoreIC megamorphic
+ (Chromium issue 571370).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.268
+
+ Use ES2015-style TypedArray prototype chain (issue 4085).
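+
+        For illustration, one observable effect of the ES2015 prototype
+        chain (a sketch):
+
+            // All typed arrays share a single %TypedArray%.prototype:
+            Object.getPrototypeOf(Uint8Array.prototype) ===
+                Object.getPrototypeOf(Float64Array.prototype);  // true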
+
+ Guard the property RegExp.prototype.unicode behind --harmony-regexp-
+ unicode (issue 4644).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-28: Version 4.9.267
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-28: Version 4.9.266
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-26: Version 4.9.265
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-26: Version 4.9.264
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-25: Version 4.9.263
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-24: Version 4.9.262
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-24: Version 4.9.261
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.260
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.259
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.258
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.257
+
+ [elements] Enable left-trimming again (issue 4606).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.256
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.255
+
+ Reland of Add web compat workarounds for ES2015 RegExp semantics
+ (patchset #3 id:40001 of https://codereview.chromium.org/1543723002/ )
+ (issues 4617, 4637).
+
+ Add web compat workarounds for ES2015 RegExp semantics (issues 4617,
+ 4637).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-22: Version 4.9.254
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-22: Version 4.9.253
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.252
+
+ [ES6] Stage sloppy function block scoping (issue 3305).
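+
+        For illustration, the Annex B web-compat semantics being staged
+        (sloppy mode only; a sketch):
+
+            { function f() { return 1; } }
+            // f is block-scoped inside the braces, but also hoisted to a
+            // var-like binding in the enclosing scope:
+            f();  // 1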
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.251
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.250
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.249
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.248
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.247
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.246
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.245
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-20: Version 4.9.244
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-20: Version 4.9.243
+
+ Mark all APIs without callers in Blink as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-19: Version 4.9.242
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-19: Version 4.9.241
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.240
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.239
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.238
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.237
+
+        Stage Proxies and Reflect behind the --harmony flag (issues 1543,
+        3931).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.236
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.235
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.234
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.233
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.232
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.231
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.230
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.229
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.228
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.227
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.226
+
+ [IC] Fix "compatible receiver" checks hidden behind interceptors
+ (Chromium issue 497632).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.225
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.224
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.223
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.222
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.221
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.220
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.219
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.218
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.217
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.216
+
+ Stage destructuring assignment (issue 811).
+
+ Update DEPS entry for tracing to point at correct location.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.215
+
+        [harmony] Unstage regexp lookbehind assertions (issue 4545).
+
+ Move Object.observe back to shipping temporarily (Chromium issues
+ 552100, 569417, 569647).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.214
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.213
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.212
+
+        [harmony] Stage regexp lookbehind assertions (issue 4545).
+
+        [es6] Ship regexp sticky flag (issue 4342).
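+
+        For illustration, the standard sticky-flag behavior (a sketch):
+
+            const re = /foo/y;    // sticky: match only at lastIndex
+            re.lastIndex = 3;
+            re.test("barfoo");    // true
+            re.lastIndex = 0;
+            re.test("barfoo");    // false: no match exactly at index 0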
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.211
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.210
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.209
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.208
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.207
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.206
+
+ [es6] Support Function name inference in variable declarations (issue
+ 3699).
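+
+        For illustration, the standard ES2015 name inference (a sketch):
+
+            const f = function () {};
+            f.name;   // "f" -- inferred from the variable declaration
+            const g = () => {};
+            g.name;   // "g"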
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.205
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.204
+
+ Disable --harmony-object-observe (Chromium issue 552100).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.203
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.202
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.201
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.200
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.199
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.198
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.197
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.196
+
+ Re-re-land FastAccessorBuilder (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.195
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.194
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.193
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.192
+
+ Unstage non-standard Promise functions (issue 3237).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.191
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.190
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.189
+
+ Allow ICU to normalize time zones (Chromium issue 487322).
+
+ Fix FuncNameInferrer usage in ParseAssignmentExpression (issue 4595).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.188
+
+ Fix Function subclassing (issues 3101, 3330, 4597).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.187
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.186
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.185
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.184
+
+ Re-land FastAccessorBuilder (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.183
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.182
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.181
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.180
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.179
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.178
+
+ Implement FastAccessorBuilder (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.177
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-09: Version 4.9.176
+
+        Updated the check for unmodified objects to handle Smi objects
+        (Chromium issue 553287).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-09: Version 4.9.175
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-09: Version 4.9.174
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-08: Version 4.9.173
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-08: Version 4.9.172
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-08: Version 4.9.171
+
+ Remove usage of deprecated APIs from api interceptor tests (issue 4341).
+
+ Deprecate Promise::Chain from V8 APIs (issue 3237).
+
+ Set the Gregorian changeover date to the beginning of time in Intl
+ (Chromium issue 537382).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.170
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.169
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.168
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.167
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.166
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.165
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.164
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.163
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.162
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.161
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.160
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.159
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.158
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-06: Version 4.9.157
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-06: Version 4.9.156
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-06: Version 4.9.155
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-05: Version 4.9.154
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-05: Version 4.9.153
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-05: Version 4.9.152
+
+ Clean up promises and fix an edge case bug (issue 3641).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.151
+
+        [es6] Implement destructuring assignment (issue 811).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.150
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.149
+
+ Mark deprecated debugger APIs as such.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.148
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.147
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.146
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.145
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.144
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.143
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.142
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.141
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.140
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.139
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.138
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.137
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.136
+
+ Mark BooleanObject::New() as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.135
+
+ For non-prototype objects constructed using base==new.target, use the
+ cached constructor to render the name (Chromium issue 563791).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.134
+
+        Deprecate non-standard Array methods and clarify Object::IsArray.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.133
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.132
+
+ Fix inobject slack tracking for both subclassing and non-subclassing
+ cases (Chromium issue 563339).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.131
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.130
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.129
+
+        Removed support for the deprecated (//@|/*@) source(URL|MappingURL)=
+        syntax (Chromium issue 558998).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.128
+
+ Improve rendering of callsite with non-function target (issue 3953).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.127
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.126
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.125
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.124
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.123
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.122
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.121
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.120
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.119
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.118
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.117
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.116
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.115
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.114
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.113
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.112
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.111
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.110
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.109
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.108
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.107
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.106
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.105
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.104
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.103
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.102
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.101
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.100
+
+ Move RMA::Label out of the class, so it can be forward declared
+ (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.99
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.98
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.97
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.96
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.95
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-29: Version 4.9.94
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-29: Version 4.9.93
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-28: Version 4.9.92
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-28: Version 4.9.91
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.90
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.89
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.88
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.87
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.86
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.85
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.84
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.83
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.82
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.81
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.80
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.79
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.78
+
+ Mark PromiseRejectMessage::GetStackTrace as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.77
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.76
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.75
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.74
+
+ Add explicit Isolate parameter to Exception::CreateMessage() (Chromium
+ issue 495801).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.73
+
+ Allow in-object properties in JSArrayBuffer (issue 4531).
+
+ Allow in-object properties in JSTypedArray and JSDataView (issue 4531).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.72
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.71
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.70
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.69
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-25: Version 4.9.68
+
+ Reland shipping of --harmony-destructuring-bind (issue 811).
+
+ Fix promotion of JSFunctions with in-object properties (issue 4572,
+ Chromium issue 561481).
+
+ Allow in-object properties in JSCollections, JSWeakCollections and
+ JSRegExp (issue 4531).
+
+ Fix JSFunction's in-object properties initialization (issue 4572).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-25: Version 4.9.67
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-25: Version 4.9.66
+
+        Removed support for the deprecated (//@|/*@) source(URL|MappingURL)=
+        syntax (Chromium issue 558998).
+
+        PPC: Reshuffle registers in JSConstructStub to avoid trashing the
+        constructor and new.target on the fast path (so we don't need to
+        push/pop them) (Chromium issue 560239).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.65
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.64
+
+ Move --harmony-destructuring-bind to shipping (issue 811).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.63
+
+        Reshuffle registers in JSConstructStub to avoid trashing the
+        constructor and new.target on the fast path (so we don't need to
+        push/pop them) (Chromium issue 560239).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.62
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.61
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.60
+
+ Allow in-object properties in JSFunctions (issue 4531).
+
+ Disable non-standard Promise functions in staging (issue 3237).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.59
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.58
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.57
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.56
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.55
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.54
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.53
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.52
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.51
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.50
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.49
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.48
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.47
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.46
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-22: Version 4.9.45
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-22: Version 4.9.44
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-22: Version 4.9.43
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-21: Version 4.9.42
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.41
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.40
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.39
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.38
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.37
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.36
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.35
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.34
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.33
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.32
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.31
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.30
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.29
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.28
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.27
+
+        [V8] Unify getting the function name for debugging purposes
+        (Chromium issue 17356).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.26
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.25
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.24
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.23
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.22
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.21
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.20
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.19
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.18
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.17
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.16
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.15
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.14
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.13
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.12
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.11
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.10
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.9
+
+ Map v8::Object to v8::internal::JSReceiver.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.8
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.7
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.5
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.4
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-15: Version 4.9.3
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-15: Version 4.9.2
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-15: Version 4.9.1
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-13: Version 4.8.294
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-13: Version 4.8.293
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.292
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.291
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.290
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.289
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.288
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.287
+
+ [JSON stringifier] Correctly load array elements (Chromium issue
+ 554946).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.286
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.285
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.284
+
+ Ship --harmony-default-parameters (issue 2160).
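+
+        For illustration, standard ES2015 default parameters (a sketch):
+
+            function greet(name = "world") {
+              return "Hello, " + name;
+            }
+            greet();      // "Hello, world"
+            greet("V8");  // "Hello, V8"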
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.283
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.282
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.281
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.280
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.279
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.278
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.277
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.276
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.275
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.274
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.273
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.272
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.271
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.270
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.269
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-10: Version 4.8.268
+
+        Make JSFunction::BodyDescriptor the single place that knows how to
+        iterate JSFunction's body (issue 4531).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-10: Version 4.8.267
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.266
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.265
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.264
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.263
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.262
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.261
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.260
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.259
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-09: Version 4.8.258
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-07: Version 4.8.257
+
+ Update v8_external_startup_data_assets for
+ https://codereview.chromium.org/1422793004/ (Chromium issue 547162).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-07: Version 4.8.256
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-07: Version 4.8.255
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.254
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.253
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.252
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.251
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.250
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.249
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.248
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-06: Version 4.8.247
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.246
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.245
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.244
+
+ Re-ship @@toStringTag (issue 3502).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.243
+
+ Stage --harmony-destructuring and --harmony-default-parameters (issues
+ 811, 2160).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.242
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.241
+
+ Re-re-land "[es6] Implement destructuring binding in try/catch" (issue
+ 811).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.240
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.239
+
+ Add v8_external_startup_data GN rule (Chromium issue 547162).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.238
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.237
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.236
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.235
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.234
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.233
+
+ Implement flag and source getters on RegExp.prototype (issues 3715,
+ 4528).
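+
+        For illustration, the accessors this adds per the ES2015 spec (a
+        sketch):
+
+            const re = /ab+c/gi;
+            re.flags;   // "gi" -- accessor on RegExp.prototype
+            re.source;  // "ab+c"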
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.232
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-05: Version 4.8.231
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.230
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.229
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.228
+
+ Ship Harmony ToLength (issue 3087).
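+
+        For illustration, one observable ToLength effect (a sketch;
+        ToLength clamps lengths to [0, 2^53 - 1]):
+
+            Array.prototype.map.call({ length: -1 }, x => x).length;  // 0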
+
+ Avoid creating indexed elements at index maxUint32 (issue 4516).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.227
+
+ Implement flag and source getters on RegExp.prototype (issues 3715,
+ 4528).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.226
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.225
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.224
+
+ [es6] Implement destructuring binding in try/catch (issue 811).
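+
+        For illustration, a catch parameter using a binding pattern (a
+        sketch with hypothetical property names):
+
+            try {
+              throw { code: 42, message: "boom" };
+            } catch ({ code, message }) {
+              // code === 42, message === "boom"
+            }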
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.223
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.222
+
+ Implement flag and source getters on RegExp.prototype (issues 3715,
+ 4528).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.221
+
+ Implement flag and source getters on RegExp.prototype (issues 3715,
+ 4528).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.220
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.219
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-04: Version 4.8.218
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.217
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.216
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.215
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.214
+
+ [es6] Fix Object built-in subclassing (issue 3886).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.213
+
+        [es6] Fix WeakMap/Set built-ins subclassing (issues 3101, 3330).
+
+ Reland "[es6] Fix Function and GeneratorFunction built-ins subclassing."
+ (issues 3101, 3330).
+
+ Implement flag and source getters on RegExp.prototype (issues 3715,
+ 4528).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.212
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.211
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.210
+
+ For now, don't assume failed-access-check callback to throw (Chromium
+ issue 548194).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-03: Version 4.8.209
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.208
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.207
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.206
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.205
+
+ Mark GetCallingContext as soon-to-be deprecated (Chromium issue 541703).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.204
+
+ Add boolean to tell if V8 zaps allocated memory (Chromium issue 546492).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.203
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.202
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.201
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.200
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.199
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.198
+
+ [es6] Fix Function and GeneratorFunction built-ins subclassing (issues
+ 3101, 3330).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.197
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.196
+
+ Reland "[es6] Better support for built-ins subclassing." (issues 3101,
+ 3330, 4419).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-02: Version 4.8.195
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.194
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.193
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.192
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.191
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.190
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.189
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.188
+
+ [es6] Better support for built-ins subclassing (issues 3101, 3330).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.187
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.186
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-30: Version 4.8.185
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.184
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.183
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.182
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.181
+
+ Reland v8::Private and related APIs.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.180
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.179
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.178
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.177
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-29: Version 4.8.176
+
+ Stage ES2015 ToLength semantics (issues 3087, 4244).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.175
+
+ Avoid calling %AddElement with a number out of array index range (issue
+ 4516).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.174
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.173
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.172
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.171
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.170
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.169
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.168
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.167
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.166
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-28: Version 4.8.165
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.164
+
+ Update to ES2015 == semantics for Symbol/SIMD wrappers (issue 3593).
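+
+        For illustration, the ES2015 abstract-equality behavior for
+        wrappers (a sketch):
+
+            const s = Symbol("tag");
+            s == Object(s);   // true: the wrapper unwraps via ToPrimitive
+            s === Object(s);  // false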
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.163
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.162
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.161
+
+ Fix deoptimization at ForInStatement::BodyId() (issue 4381).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.160
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.159
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.158
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.157
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-27: Version 4.8.156
+
+ Check that array length stays a safe integer in Array.prototype.push
+ (issue 3087).
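+
+        For illustration, the spec behavior being enforced (a sketch):
+
+            const arrayLike = { length: Number.MAX_SAFE_INTEGER };
+            // Growing length past 2^53 - 1 must throw a TypeError:
+            Array.prototype.push.call(arrayLike, "x");  // throws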
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.155
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.154
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.153
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.152
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.151
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.150
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.149
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-26: Version 4.8.148
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-25: Version 4.8.147
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-25: Version 4.8.146
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-24: Version 4.8.145
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-24: Version 4.8.144
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-24: Version 4.8.143
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-24: Version 4.8.142
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-23: Version 4.8.141
+
+ Stage ES2015-style const in sloppy mode (issue 3739).
+
+ Map v8::Function to JSReceiver + IsCallable.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-23: Version 4.8.140
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.139
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.138
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.137
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.136
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.135
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.134
+
+        [es6] Stage sticky regexps and RegExp.prototype.flags (issue 4342).
+
+ [Crankshaft] Don't do HMathFloorOfDiv optimization for kUint32 values
+ (issue 4507).
+
+ Fix user options for fractional digits in Intl.NumberFormatter (Chromium
+ issue 544122).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-22: Version 4.8.133
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.132
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.131
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.130
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.129
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.128
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.127
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.126
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.125
+
+ [es6] Handle super properly when rewriting arrow parameter initializers
+ (issue 4395).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.124
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.123
+
+ [es6] Fix scoping for default parameters in arrow functions (issue
+ 4395).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.122
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.121
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-21: Version 4.8.120
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.119
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.118
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.117
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.116
+
+ Refactor array construction for map, filter (Chromium issue 544991).
+
+ [es6] Fix scoping for default parameters in arrow functions (issue
+ 4395).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.115
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.114
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.113
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.112
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-20: Version 4.8.111
+
+ [es6] stage sticky regexps and RegExp.prototype.flags (issue 4342).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.110
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.109
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.108
+
+ ll_prof: Do not use the deprecated tempfile.mktemp() (issue 1306).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.107
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.106
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.105
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.104
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.103
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.102
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.101
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.100
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.99
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.98
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.97
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-19: Version 4.8.96
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-18: Version 4.8.95
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-18: Version 4.8.94
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-18: Version 4.8.93
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-17: Version 4.8.92
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.91
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.90
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.89
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.88
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.87
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.86
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.85
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.84
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.83
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.82
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.81
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-16: Version 4.8.80
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.79
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.78
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.77
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.76
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.75
+
+ Make RegExp use ToLength on lastIndex when flag is turned on (issue
+ 4244).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.74
+
+ Take Symbol-keyed properties into account in Object.freeze and friends
+ (Chromium issue 539875).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.73
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.72
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.71
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.70
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.69
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.68
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.67
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-15: Version 4.8.66
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.65
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.64
+
+ Add methods to extras' InternalPackedArray (issue 4276).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.63
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.62
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.61
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.60
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.59
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.58
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.57
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.56
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-14: Version 4.8.55
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.54
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.53
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.52
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.51
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.50
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.49
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.48
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.47
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.46
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.45
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-13: Version 4.8.44
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.43
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.42
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.41
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.40
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.39
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.38
+
+ Test for var declarations in eval which conflict with let (issue 4454).
+
+ Don't compile functions in a context the caller doesn't have access to
+ (Chromium issue 541703).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.37
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.36
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-12: Version 4.8.35
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-11: Version 4.8.34
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.33
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.32
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.31
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.30
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.29
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.28
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.27
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.26
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-09: Version 4.8.25
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.24
+
+ Stage --harmony_sloppy_function (issue 4285).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.23
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.22
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.21
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.20
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.19
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.18
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.17
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-08: Version 4.8.16
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.15
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.14
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.13
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.12
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.11
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.10
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.9
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.8
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.7
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.5
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.4
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-07: Version 4.8.3
+
+ Destructuring array without initializer throws an exception (issue
+ 4462).
+
+ Disable VirtualAlloc randomization on 32-bit Windows hosts (Chromium
+ issue 394591).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-06: Version 4.8.2
+
+ Fix legacy const for-of/in destructuring (issue 4461).
+
+ [es6] Align Promise.resolve with the spec (issues 4161, 4341).
+
+ Prohibit let in lexical bindings (issue 4403).
+
+ Ensure scopes are backed by blocks in the body of for loops (Chromium
+ issues 536750, 536751).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-10-04: Version 4.8.1
+
+ [cross-context] create new function prototypes in the context of the
+ function (issue 4217).
+
+ Fix completion of try..finally (issue 2529).
+
+ Extend Annex B 3.3 sloppy-mode block-scoped hoisting to scripts, eval
+ (issue 4441).
+
+ [V8] Add name of function for function's closure scope (Chromium issue
+ 493156).
+
+ Performance and stability improvements on all platforms.
+
+
2015-05-17: Sentinel
The ChangeLog file is no longer maintained on bleeding_edge. This
diff --git a/chromium/v8/DEPS b/chromium/v8/DEPS
index 85d14c75fd3..07b11a4ae38 100644
--- a/chromium/v8/DEPS
+++ b/chromium/v8/DEPS
@@ -8,25 +8,34 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "01528c7244837168a1c80f06ff60fa5a9793c824",
+ Var("git_url") + "/external/gyp.git" + "@" + "b85ad3e578da830377dbc1843aa4fbc5af17a192",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "423fc7e1107fb08ccf007c4aeb76dcab8b2747c1",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "8d342a405be5ae8aacb1e16f0bc31c3a4fbf26a2",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "e7111440c07a883b82ffbbe6d26c744dfc6c9673",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "0f8e6e4b126ee88137930a0ae4776c4741808740",
+ "v8/base/trace_event/common":
+ Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "d83d44b13d07c2fd0a40101a7deef9b93b841732",
"v8/tools/swarming_client":
- Var('git_url') + '/external/swarming.client.git' + '@' + "6e5d2b21f0ac98396cd736097a985346feed1328",
+ Var('git_url') + '/external/swarming.client.git' + '@' + "9cdd76171e517a430a72dcd7d66ade67e109aa00",
"v8/testing/gtest":
- Var("git_url") + "/external/googletest.git" + "@" + "9855a87157778d39b95eccfb201a9dc90f6d61c6",
+ Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
+ "v8/test/benchmarks/data":
+ Var("git_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
+ "v8/test/mozilla/data":
+ Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
+ "v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
+ "v8/test/test262/data":
+ Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "67ba34b03a46bac4254223ae25f42c7b959540f0",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "0150e39a3112dbc7e4c7a3ab25276b8d7781f3b6",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "24e8c1c92fe54ef8ed7651b5850c056983354a4a",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "4238a28593b7e6178c95431f91ca8c24e45fa7eb",
+ Var("git_url") + "/android_tools.git" + "@" + "f4c36ad89b2696b37d9cd7ca7d984b691888b188",
},
"win": {
"v8/third_party/cygwin":
@@ -93,6 +102,46 @@ hooks = [
"-s", "v8/buildtools/linux64/clang-format.sha1",
],
},
+ # Pull luci-go binaries (isolate, swarming) using checked-in hashes.
+ {
+ 'name': 'luci-go_win',
+ 'pattern': '.',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=win32',
+ '--no_auth',
+ '--bucket', 'chromium-luci',
+ '-d', 'v8/tools/luci-go/win64',
+ ],
+ },
+ {
+ 'name': 'luci-go_mac',
+ 'pattern': '.',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=darwin',
+ '--no_auth',
+ '--bucket', 'chromium-luci',
+ '-d', 'v8/tools/luci-go/mac64',
+ ],
+ },
+ {
+ 'name': 'luci-go_linux',
+ 'pattern': '.',
+ 'action': [ 'download_from_google_storage',
+ '--no_resume',
+ '--platform=linux*',
+ '--no_auth',
+ '--bucket', 'chromium-luci',
+ '-d', 'v8/tools/luci-go/linux64',
+ ],
+ },
+ {
+ # Update the Windows toolchain if necessary.
+ 'name': 'win_toolchain',
+ 'pattern': '.',
+ 'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
+ },
# Pull binutils for linux, enabled debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.
# https://code.google.com/p/chromium/issues/detail?id=352046
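The three luci-go hooks above differ only in platform glob and target directory. A hedged Python sketch of the pattern they share (the helper `luci_go_hook` is invented for illustration; the flags are copied verbatim from the DEPS entries):

```python
# Hypothetical helper mirroring the three luci-go hooks; only the function
# itself is an invention, the flag set comes straight from the DEPS entries.
def luci_go_hook(platform_glob, subdir):
    return {
        'name': 'luci-go_' + subdir.rstrip('64'),   # 'win64' -> 'luci-go_win'
        'pattern': '.',
        'action': ['download_from_google_storage',
                   '--no_resume',
                   '--platform=' + platform_glob,
                   '--no_auth',
                   '--bucket', 'chromium-luci',
                   '-d', 'v8/tools/luci-go/' + subdir],
    }

hooks = [luci_go_hook('win32', 'win64'),
         luci_go_hook('darwin', 'mac64'),
         luci_go_hook('linux*', 'linux64')]
```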
diff --git a/chromium/v8/Makefile b/chromium/v8/Makefile
index 28c1af2e07f..6ae9b245768 100644
--- a/chromium/v8/Makefile
+++ b/chromium/v8/Makefile
@@ -220,12 +220,6 @@ ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
-# Optionally enable wasm prototype.
-# Assume you've placed a link to v8-native-prototype in third_party/wasm.
-ifeq ($(wasm), on)
- GYPFLAGS += -Dv8_wasm=1
-endif
-
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
@@ -244,7 +238,8 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 \
+ s390 s390x
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
diff --git a/chromium/v8/PRESUBMIT.py b/chromium/v8/PRESUBMIT.py
index ab9bba88455..f8516afc44e 100644
--- a/chromium/v8/PRESUBMIT.py
+++ b/chromium/v8/PRESUBMIT.py
@@ -69,6 +69,7 @@ def _V8PresubmitChecks(input_api, output_api):
from presubmit import SourceProcessor
from presubmit import CheckExternalReferenceRegistration
from presubmit import CheckAuthorizedAuthor
+ from presubmit import CheckStatusFiles
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
@@ -80,6 +81,8 @@ def _V8PresubmitChecks(input_api, output_api):
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
+ if not CheckStatusFiles(input_api.PresubmitLocalPath()):
+ results.append(output_api.PresubmitError("Status file check failed"))
results.extend(CheckAuthorizedAuthor(input_api, output_api))
return results
@@ -272,28 +275,3 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
-
-
-def GetPreferredTryMasters(project, change):
- return {
- 'tryserver.v8': {
- 'v8_linux_rel': set(['defaulttests']),
- 'v8_linux_dbg': set(['defaulttests']),
- 'v8_linux_nodcheck_rel': set(['defaulttests']),
- 'v8_linux_gcc_compile_rel': set(['defaulttests']),
- 'v8_linux64_rel': set(['defaulttests']),
- 'v8_linux64_asan_rel': set(['defaulttests']),
- 'v8_linux64_avx2_rel': set(['defaulttests']),
- 'v8_win_rel': set(['defaulttests']),
- 'v8_win_compile_dbg': set(['defaulttests']),
- 'v8_win_nosnap_shared_compile_rel': set(['defaulttests']),
- 'v8_win64_rel': set(['defaulttests']),
- 'v8_mac_rel': set(['defaulttests']),
- 'v8_linux_arm_rel': set(['defaulttests']),
- 'v8_linux_arm64_rel': set(['defaulttests']),
- 'v8_linux_mipsel_compile_rel': set(['defaulttests']),
- 'v8_linux_mips64el_compile_rel': set(['defaulttests']),
- 'v8_android_arm_compile_rel': set(['defaulttests']),
- 'v8_linux_chromium_gn_rel': set(['defaulttests']),
- },
- }
diff --git a/chromium/v8/README.md b/chromium/v8/README.md
index 804df5edb47..840c4971f91 100644
--- a/chromium/v8/README.md
+++ b/chromium/v8/README.md
@@ -10,7 +10,7 @@ browser from Google.
V8 can run standalone, or can be embedded into any C++ application.
-V8 Project page: https://code.google.com/p/v8/
+V8 Project page: https://github.com/v8/v8/wiki
Getting the Code
@@ -36,5 +36,5 @@ configuration in `.git/config`:
Contributing
=============
-Please follow the instructions mentioned on the
-[V8 wiki](https://code.google.com/p/v8-wiki/wiki/Contributing).
+Please follow the instructions mentioned on the
+[V8 wiki](https://github.com/v8/v8/wiki/Contributing).
diff --git a/chromium/v8/WATCHLISTS b/chromium/v8/WATCHLISTS
index f57dfa14025..845de7e3d74 100644
--- a/chromium/v8/WATCHLISTS
+++ b/chromium/v8/WATCHLISTS
@@ -42,6 +42,17 @@
'debugger': {
'filepath': 'src/debug/',
},
+ 'interpreter': {
+ 'filepath': 'src/interpreter/',
+ 'filepath': 'test/cctest/interpreter/',
+ 'filepath': 'test/unittests/interpreter/',
+ },
+ 'feature_shipping_status': {
+ 'filepath': 'src/flag-definitions.h',
+ },
+ 'gc_changes': {
+ 'filepath': 'src/heap/',
+ },
},
'WATCHLISTS': {
@@ -54,5 +65,16 @@
'debugger': [
'yangguo@chromium.org',
],
+ 'interpreter': [
+ 'rmcilroy@chromium.org',
+ 'oth@chromium.org',
+ ],
+ 'feature_shipping_status': [
+ 'hablich@chromium.org',
+ ],
+ 'gc_changes': [
+ 'hpayer@chromium.org',
+ 'ulan@chromium.org',
+ ],
},
}
diff --git a/chromium/v8/build/all.gyp b/chromium/v8/build/all.gyp
index 4aeb507dcab..0a05a2f02fd 100644
--- a/chromium/v8/build/all.gyp
+++ b/chromium/v8/build/all.gyp
@@ -19,6 +19,24 @@
'../tools/parser-shell.gyp:parser-shell',
],
}],
+ ['test_isolation_mode != "noop"', {
+ 'dependencies': [
+ '../test/bot_default.gyp:*',
+ '../test/benchmarks/benchmarks.gyp:*',
+ '../test/default.gyp:*',
+ '../test/ignition.gyp:*',
+ '../test/intl/intl.gyp:*',
+ '../test/message/message.gyp:*',
+ '../test/mjsunit/mjsunit.gyp:*',
+ '../test/mozilla/mozilla.gyp:*',
+ '../test/optimize_for_size.gyp:*',
+ '../test/preparser/preparser.gyp:*',
+ '../test/simdjs/simdjs.gyp:*',
+ '../test/test262/test262.gyp:*',
+ '../test/webkit/webkit.gyp:*',
+ '../tools/check-static-initializers.gyp:*',
+ ],
+ }],
]
}
]
diff --git a/chromium/v8/build/config/win/msvs_dependencies.isolate b/chromium/v8/build/config/win/msvs_dependencies.isolate
new file mode 100644
index 00000000000..ff922273634
--- /dev/null
+++ b/chromium/v8/build/config/win/msvs_dependencies.isolate
@@ -0,0 +1,77 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ # Copy the VS runtime DLLs into the isolate so that they
+ # don't have to be preinstalled on the target machine.
+ #
+ # VS2013 runtimes
+ ['OS=="win" and msvs_version==2013 and component=="shared_library" and CONFIGURATION_NAME=="Debug"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/x64/msvcp120d.dll',
+ '<(PRODUCT_DIR)/x64/msvcr120d.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and msvs_version==2013 and component=="shared_library" and CONFIGURATION_NAME=="Release"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/x64/msvcp120.dll',
+ '<(PRODUCT_DIR)/x64/msvcr120.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/msvcp120d.dll',
+ '<(PRODUCT_DIR)/msvcr120d.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/msvcp120.dll',
+ '<(PRODUCT_DIR)/msvcr120.dll',
+ ],
+ },
+ }],
+ # VS2015 runtimes
+ ['OS=="win" and msvs_version==2015 and component=="shared_library" and CONFIGURATION_NAME=="Debug"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/x64/msvcp140d.dll',
+ '<(PRODUCT_DIR)/x64/vccorlib140d.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and msvs_version==2015 and component=="shared_library" and CONFIGURATION_NAME=="Release"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/x64/msvcp140.dll',
+ '<(PRODUCT_DIR)/x64/vccorlib140.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/msvcp140d.dll',
+ '<(PRODUCT_DIR)/vccorlib140d.dll',
+ ],
+ },
+ }],
+ ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/msvcp140.dll',
+ '<(PRODUCT_DIR)/vccorlib140.dll',
+ ],
+ },
+ }],
+ ],
+} \ No newline at end of file
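The isolate conditions above amount to a small lookup table: CRT DLL basenames per MSVS version, a `d` suffix for Debug, and extra `x64/` copies for the plain Debug/Release configuration names. A sketch in Python, assuming only these two axes matter (`runtime_dlls` is a hypothetical helper, not part of the .isolate format):

```python
# Illustrative sketch of the DLL selection encoded by the conditions above.
def runtime_dlls(msvs_version, configuration):
    base = {2013: ['msvcp120', 'msvcr120'],
            2015: ['msvcp140', 'vccorlib140']}[msvs_version]
    suffix = 'd.dll' if configuration.startswith('Debug') else '.dll'
    files = [name + suffix for name in base]       # <(PRODUCT_DIR)/ copies
    if configuration in ('Debug', 'Release'):      # plain configs also get x64/
        files += ['x64/' + name + suffix for name in base]
    return files

assert runtime_dlls(2013, 'Release_x64') == ['msvcp120.dll', 'msvcr120.dll']
assert runtime_dlls(2015, 'Debug') == ['msvcp140d.dll', 'vccorlib140d.dll',
                                       'x64/msvcp140d.dll',
                                       'x64/vccorlib140d.dll']
```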
diff --git a/chromium/v8/build/features.gypi b/chromium/v8/build/features.gypi
index 741ba75eb6d..5a21a63e324 100644
--- a/chromium/v8/build/features.gypi
+++ b/chromium/v8/build/features.gypi
@@ -39,6 +39,8 @@
'v8_trace_maps%': 0,
+ # Enable the snapshot feature, for fast context creation.
+ # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
'v8_use_snapshot%': 'true',
'v8_enable_verify_predictable%': 0,
@@ -65,9 +67,6 @@
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
- # Set to 1 to enable building with wasm prototype.
- 'v8_wasm%': 0,
-
# Enable/disable JavaScript API accessors.
'v8_js_accessors%': 0,
},
@@ -109,12 +108,6 @@
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
- ['v8_wasm!=0', {
- 'defines': ['V8_WASM',],
- }],
- ['v8_js_accessors!=0', {
- 'defines': ['V8_JS_ACCESSORS'],
- }],
], # conditions
'configurations': {
'DebugBaseCommon': {
diff --git a/chromium/v8/build/get_landmines.py b/chromium/v8/build/get_landmines.py
index 434b980c6db..ea0ae0d4152 100755
--- a/chromium/v8/build/get_landmines.py
+++ b/chromium/v8/build/get_landmines.py
@@ -24,6 +24,8 @@ def main():
print 'Moar clobbering...'
print 'Remove build/android.gypi'
print 'Cleanup after windows ninja switch attempt.'
+ print 'Switching to pinned msvs toolchain.'
+ print 'Clobbering to hopefully resolve problem with mksnapshot'
return 0
diff --git a/chromium/v8/build/gyp_environment.py b/chromium/v8/build/gyp_environment.py
index f1cee6ef8e0..7a4e6221482 100644
--- a/chromium/v8/build/gyp_environment.py
+++ b/chromium/v8/build/gyp_environment.py
@@ -10,6 +10,7 @@ make sure settings are consistent between them, all setup should happen here.
import os
import sys
+import vs_toolchain
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
V8_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
@@ -50,3 +51,4 @@ def set_environment():
# Update the environment based on v8.gyp_env
gyp_env_path = os.path.join(os.path.dirname(V8_ROOT), 'v8.gyp_env')
apply_gyp_environment(gyp_env_path)
+ vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
diff --git a/chromium/v8/build/gyp_v8 b/chromium/v8/build/gyp_v8
index 8a8ffa1bdda..8813f2c1216 100755
--- a/chromium/v8/build/gyp_v8
+++ b/chromium/v8/build/gyp_v8
@@ -30,6 +30,7 @@
# This script is wrapper for V8 that adds some support for how GYP
# is invoked by V8 beyond what can be done in the gclient hooks.
+import argparse
import glob
import gyp_environment
import os
@@ -37,6 +38,7 @@ import platform
import shlex
import subprocess
import sys
+import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
v8_root = os.path.abspath(os.path.join(script_dir, os.pardir))
@@ -49,6 +51,25 @@ sys.path.insert(
1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
+def GetOutputDirectory():
+ """Returns the output directory that GYP will use."""
+
+ # Handle command line generator flags.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-G', dest='genflags', default=[], action='append')
+ genflags = parser.parse_known_args()[0].genflags
+
+ # Handle generator flags from the environment.
+ genflags += shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', ''))
+
+ needle = 'output_dir='
+ for item in genflags:
+ if item.startswith(needle):
+ return item[len(needle):]
+
+ return 'out'
+
+
def additional_include_files(args=[]):
"""
Returns a list of additional (.gypi) files to include, without
@@ -82,6 +103,13 @@ def additional_include_files(args=[]):
def run_gyp(args):
rc = gyp.main(args)
+ vs2013_runtime_dll_dirs = vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
+ if vs2013_runtime_dll_dirs:
+ x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
+ vs_toolchain.CopyVsRuntimeDlls(
+ os.path.join(v8_root, GetOutputDirectory()),
+ (x86_runtime, x64_runtime))
+
if rc != 0:
print 'Error running GYP'
sys.exit(rc)
@@ -130,6 +158,7 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
+ gyp_args.extend(['-D', 'gyp_output_dir=' + GetOutputDirectory()])
gyp_generators = os.environ.get('GYP_GENERATORS', '')
if platform.system() == 'Linux' and gyp_generators != 'ninja':
# Work around for crbug.com/331475.
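The new GetOutputDirectory() gives command-line `-G` flags precedence over `GYP_GENERATOR_FLAGS`, because the first match in the combined list wins. A self-contained restatement of that lookup (the name `output_directory` is illustrative only):

```python
# Standalone restatement of GetOutputDirectory()'s lookup order.
import argparse
import shlex

def output_directory(argv, environ):
    parser = argparse.ArgumentParser()
    parser.add_argument('-G', dest='genflags', default=[], action='append')
    genflags = parser.parse_known_args(argv)[0].genflags
    genflags += shlex.split(environ.get('GYP_GENERATOR_FLAGS', ''))
    for item in genflags:                  # first hit wins: CLI beats environment
        if item.startswith('output_dir='):
            return item[len('output_dir='):]
    return 'out'

assert output_directory(['-G', 'output_dir=gypfiles'],
                        {'GYP_GENERATOR_FLAGS': 'output_dir=ignored'}) == 'gypfiles'
assert output_directory([], {}) == 'out'
```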
diff --git a/chromium/v8/build/isolate.gypi b/chromium/v8/build/isolate.gypi
index 9e2a3bfee39..5d30005e74a 100644
--- a/chromium/v8/build/isolate.gypi
+++ b/chromium/v8/build/isolate.gypi
@@ -44,6 +44,7 @@
'extension': 'isolate',
'inputs': [
# Files that are known to be involved in this step.
+ '<(DEPTH)/tools/isolate_driver.py',
'<(DEPTH)/tools/swarming_client/isolate.py',
'<(DEPTH)/tools/swarming_client/run_isolated.py',
],
@@ -52,7 +53,7 @@
],
'action': [
'python',
- '<(DEPTH)/tools/swarming_client/isolate.py',
+ '<(DEPTH)/tools/isolate_driver.py',
'<(test_isolation_mode)',
'--isolated', '<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
'--isolate', '<(RULE_INPUT_PATH)',
@@ -66,8 +67,29 @@
'--path-variable', 'DEPTH', '<(DEPTH)',
'--path-variable', 'PRODUCT_DIR', '<(PRODUCT_DIR)',
+ '--config-variable', 'CONFIGURATION_NAME=<(CONFIGURATION_NAME)',
'--config-variable', 'OS=<(OS)',
+ '--config-variable', 'asan=<(asan)',
+ '--config-variable', 'cfi_vptr=<(cfi_vptr)',
+ '--config-variable', 'icu_use_data_file_flag=0',
+ '--config-variable', 'msan=<(msan)',
+ '--config-variable', 'tsan=<(tsan)',
+ '--config-variable', 'component=<(component)',
+ '--config-variable', 'target_arch=<(target_arch)',
+ '--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
+ '--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'action': [
+ '--config-variable', 'msvs_version=2013',
+ ],
+ }, {
+ 'action': [
+ '--config-variable', 'msvs_version=0',
+ ],
+ }],
],
},
],
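The only OS-dependent entry in the new config-variable list is `msvs_version`: 2013 on Windows to match the pinned toolchain, a dummy 0 everywhere else. A minimal sketch of that branch (`msvs_version_flag` is a hypothetical name):

```python
# Sketch of the per-OS msvs_version config variable chosen above.
def msvs_version_flag(os_name):
    version = '2013' if os_name == 'win' else '0'
    return ['--config-variable', 'msvs_version=' + version]

assert msvs_version_flag('win') == ['--config-variable', 'msvs_version=2013']
assert msvs_version_flag('mac') == ['--config-variable', 'msvs_version=0']
```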
diff --git a/chromium/v8/build/standalone.gypi b/chromium/v8/build/standalone.gypi
index 7250579d27c..273d72b744e 100644
--- a/chromium/v8/build/standalone.gypi
+++ b/chromium/v8/build/standalone.gypi
@@ -42,8 +42,7 @@
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
- # TODO(jochen): Turn this on.
- 'v8_imminent_deprecation_warnings%': 0,
+ 'v8_imminent_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'release_extra_cflags%': '',
@@ -68,11 +67,15 @@
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
+
+ # Instrument for code coverage with gcov.
+ 'coverage%': 0,
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
+ 'coverage%': '<(coverage)',
'asan%': 0,
'lsan%': 0,
'msan%': 0,
@@ -97,11 +100,16 @@
'cfi_blacklist%': '<(base_dir)/tools/cfi/blacklist.txt',
+ # Set to 1 to enable fast builds.
+ # TODO(machenbach): Only configured for windows.
+ 'fastbuild%': 0,
+
# goma settings.
# 1 to use goma.
# If no gomadir is set, it uses the default gomadir.
'use_goma%': 0,
'gomadir%': '',
+
'conditions': [
# Set default gomadir.
['OS=="win"', {
@@ -109,10 +117,11 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
- ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
- 'host_clang%': '1',
+ ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x" and \
+ coverage==0', {
+ 'host_clang%': 1,
}, {
- 'host_clang%': '0',
+ 'host_clang%': 0,
}],
# linux_use_bundled_gold: whether to use the gold linker binary checked
# into third_party/binutils. Force this off via GYP_DEFINES when you
@@ -127,7 +136,7 @@
# TODO(machenbach): Remove the conditions as more configurations are
# supported.
- ['OS=="linux"', {
+ ['OS=="linux" or OS=="win"', {
'test_isolation_mode%': 'check',
}, {
'test_isolation_mode%': 'noop',
@@ -155,6 +164,8 @@
'cfi_diag%': '<(cfi_diag)',
'cfi_blacklist%': '<(cfi_blacklist)',
'test_isolation_mode%': '<(test_isolation_mode)',
+ 'fastbuild%': '<(fastbuild)',
+ 'coverage%': '<(coverage)',
# Add a simple extras solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -206,12 +217,8 @@
['OS=="win" and use_goma==1', {
# goma doesn't support pch yet.
'chromium_win_pch': 0,
- # goma doesn't support PDB yet, so win_z7=1 or fastbuild=1.
- 'conditions': [
- ['win_z7==0 and fastbuild==0', {
- 'fastbuild': 1,
- }],
- ],
+ # goma doesn't support PDB yet.
+ 'fastbuild%': 1,
}],
['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
(OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', {
@@ -220,7 +227,7 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
- (v8_target_arch!="x87" and v8_target_arch!="x32")', {
+ (v8_target_arch!="x87" and v8_target_arch!="x32") and coverage==0', {
'clang%': 1,
}, {
'clang%': 0,
@@ -405,13 +412,16 @@
],
},
'conditions':[
- ['(clang==1 or host_clang==1) and OS!="win"', {
+ ['clang==0', {
+ 'cflags+': ['-Wno-sign-compare',],
+ }],
+ ['clang==1 or host_clang==1', {
# This is here so that all files get recompiled after a clang roll and
# when turning clang on or off.
# (defines are passed via the command line, and build systems rebuild
# things when their commandline changes). Nothing should ever read this
# define.
- 'defines': ['CR_CLANG_REVISION=<!(<(DEPTH)/tools/clang/scripts/update.sh --print-revision)'],
+ 'defines': ['CR_CLANG_REVISION=<!(python <(DEPTH)/tools/clang/scripts/update.py --print-revision)'],
'conditions': [
['host_clang==1', {
'target_conditions': [
@@ -433,6 +443,23 @@
}],
],
}],
+ ['fastbuild!=0', {
+ 'conditions': [
+ ['OS=="win" and fastbuild==1', {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ # This tells the linker to generate .pdbs, so that
+ # we can get meaningful stack traces.
+ 'GenerateDebugInformation': 'true',
+ },
+ 'VCCLCompilerTool': {
+ # No debug info to be generated by compiler.
+ 'DebugInformationFormat': '0',
+ },
+ },
+ }],
+ ],
+ }], # fastbuild!=0
],
'target_conditions': [
['v8_code == 0', {
@@ -492,8 +519,8 @@
'target_defaults': {
'conditions': [
# Common options for AddressSanitizer, LeakSanitizer,
- # ThreadSanitizer and MemorySanitizer.
- ['asan==1 or lsan==1 or tsan==1 or msan==1', {
+ # ThreadSanitizer, MemorySanitizer and CFI builds.
+ ['asan==1 or lsan==1 or tsan==1 or msan==1 or cfi_vptr==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
@@ -557,9 +584,11 @@
'cflags': [
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
+ '-fPIC',
],
'ldflags': [
'-fsanitize=memory',
+ '-pie',
],
'defines': [
'MEMORY_SANITIZER',
@@ -657,6 +686,7 @@
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
+ '-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
'-Wnon-virtual-dtor',
@@ -666,6 +696,16 @@
],
'ldflags': [ '-pthread', ],
'conditions': [
+ # Don't warn about TRACE_EVENT_* macros with zero arguments passed to
+ # ##__VA_ARGS__. C99 strict mode prohibits having zero variadic macro
+ # arguments in gcc.
+ [ 'clang==0', {
+ 'cflags!' : [
+ '-pedantic' ,
+ # Don't warn about unrecognized command line option.
+ '-Wno-gnu-zero-variadic-macro-arguments',
+ ],
+ }],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
'cflags': [ '-Wshorten-64-to-32' ],
@@ -679,6 +719,11 @@
[ 'component=="shared_library"', {
'cflags': [ '-fPIC', ],
}],
+ [ 'coverage==1', {
+ 'cflags!': [ '-O3', '-O2', '-O1', ],
+ 'cflags': [ '-fprofile-arcs', '-ftest-coverage', '-O0'],
+ 'ldflags': [ '-fprofile-arcs'],
+ }],
],
},
}],
@@ -692,6 +737,7 @@
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
+ '-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
'-Wnon-virtual-dtor',
@@ -799,7 +845,6 @@
4309, # Truncation of constant value
4311, # Pointer truncation from 'type' to 'type'
4312, # Conversion from 'type1' to 'type2' of greater size
- 4481, # Nonstandard extension used: override specifier 'keyword'
4505, # Unreferenced local function has been removed
4510, # Default constructor could not be generated
4512, # Assignment operator could not be generated
@@ -916,6 +961,7 @@
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
+ '-Wno-gnu-zero-variadic-macro-arguments',
],
},
'conditions': [
@@ -1197,6 +1243,16 @@
['CC', '<(clang_dir)/bin/clang-cl'],
],
}],
+ ['OS=="linux" and target_arch=="arm" and host_arch!="arm" and clang==0 and "<(GENERATOR)"=="ninja"', {
+ # Set default ARM cross tools on linux. These can be overridden
+ # using CC,CXX,CC.host and CXX.host environment variables.
+ 'make_global_settings': [
+ ['CC', '<!(which arm-linux-gnueabihf-gcc)'],
+ ['CXX', '<!(which arm-linux-gnueabihf-g++)'],
+ ['CC.host', '<(host_cc)'],
+ ['CXX.host', '<(host_cxx)'],
+ ],
+ }],
# TODO(yyanagisawa): supports GENERATOR==make
# make generator doesn't support CC_wrapper without CC
# in make_global_settings yet.
@@ -1275,7 +1331,7 @@
['_toolset=="target"', {
'cflags': [
'-fno-sanitize-trap=cfi',
- '-fsanitize-recover=cfi',
+ '-fno-sanitize-recover=cfi',
],
'cflags_cc!': [
'-fno-rtti',
@@ -1285,7 +1341,7 @@
],
'ldflags': [
'-fno-sanitize-trap=cfi',
- '-fsanitize-recover=cfi',
+ '-fno-sanitize-recover=cfi',
],
}],
],
diff --git a/chromium/v8/build/toolchain.gypi b/chromium/v8/build/toolchain.gypi
index bd081e17910..e1cd791490a 100644
--- a/chromium/v8/build/toolchain.gypi
+++ b/chromium/v8/build/toolchain.gypi
@@ -135,6 +135,7 @@
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or \
host_arch=="ppc" or host_arch=="ppc64" or \
+ host_arch=="s390" or host_arch=="s390x" or \
clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
@@ -145,8 +146,8 @@
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
- target_arch=="ppc" or target_arch=="ppc64" or \
- clang==1', {
+ target_arch=="ppc" or target_arch=="ppc64" or target_arch=="s390" or \
+ target_arch=="s390x" or clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
},
@@ -297,6 +298,23 @@
'V8_TARGET_ARCH_ARM64',
],
}],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_S390',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="s390x"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_S390X',
+ ],
+ }],
+ ['v8_host_byteorder=="little"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_S390_LE_SIM',
+ ],
+ }],
+ ],
+ }], # s390
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'defines': [
'V8_TARGET_ARCH_PPC',
@@ -357,6 +375,9 @@
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
+ [ 'clang==1', {
+ 'cflags': ['-integrated-as'],
+ }],
],
}],
],
@@ -406,7 +427,12 @@
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
- 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r6'],
+ }],
+ ],
+ 'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
@@ -432,8 +458,11 @@
],
'cflags': ['-mfp32'],
}],
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r2'],
+ }],
],
- 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ 'cflags': ['-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
@@ -441,7 +470,12 @@
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
- 'cflags': ['-mips32', '-Wa,-mips32'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
@@ -450,7 +484,12 @@
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
- 'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
],
@@ -589,7 +628,12 @@
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
- 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r6'],
+ }],
+ ],
+ 'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
@@ -615,13 +659,21 @@
],
'cflags': ['-mfp32'],
}],
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r2'],
+ }],
],
- 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ 'cflags': ['-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64', '-mfpxx'],
- 'cflags': ['-mips32', '-Wa,-mips32'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
@@ -630,7 +682,12 @@
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
- 'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="loongson"', {
@@ -638,8 +695,13 @@
'_MIPS_ARCH_LOONGSON',
'FPU_MODE_FP32',
],
- 'cflags!': ['-mfp64', '-mfp32', '-mfpxx'],
- 'cflags': ['-mips3', '-Wa,-mips3'],
+ 'cflags!': ['-mfp64', '-mfpxx'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips3'],
+ }],
+ ],
+ 'cflags': ['-mips3', '-mfp32'],
}],
],
}, {
@@ -800,12 +862,22 @@
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
- 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips64r6'],
+ }],
+ ],
+ 'cflags': ['-mips64r6', '-mabi=64'],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
- 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips64r2'],
+ }],
+ ],
+ 'cflags': ['-mips64r2', '-mabi=64'],
'ldflags': ['-mips64r2', '-mabi=64'],
}],
],
@@ -925,13 +997,21 @@
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
- v8_target_arch=="mipsel" or v8_target_arch=="ppc")', {
+ v8_target_arch=="mipsel" or v8_target_arch=="ppc" or \
+ v8_target_arch=="s390")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
['host_cxx_is_biarch==1', {
- 'cflags': [ '-m32' ],
- 'ldflags': [ '-m32' ]
+ 'conditions': [
+ ['host_arch=="s390" or host_arch=="s390x"', {
+ 'cflags': [ '-m31' ],
+ 'ldflags': [ '-m31' ]
+ },{
+ 'cflags': [ '-m32' ],
+ 'ldflags': [ '-m32' ]
+ }],
+ ],
}],
],
'xcode_settings': {
@@ -941,8 +1021,15 @@
['_toolset=="target"', {
'conditions': [
['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
- 'cflags': [ '-m32' ],
- 'ldflags': [ '-m32' ],
+ 'conditions': [
+ ['host_arch=="s390" or host_arch=="s390x"', {
+ 'cflags': [ '-m31' ],
+ 'ldflags': [ '-m31' ]
+ },{
+ 'cflags': [ '-m32' ],
+ 'ldflags': [ '-m32' ],
+ }],
+ ],
}],
],
'xcode_settings': {
@@ -953,7 +1040,7 @@
}],
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="arm64" or \
- v8_target_arch=="ppc64")', {
+ v8_target_arch=="ppc64" or v8_target_arch=="s390x")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
diff --git a/chromium/v8/build/vs_toolchain.py b/chromium/v8/build/vs_toolchain.py
new file mode 100644
index 00000000000..294ade38182
--- /dev/null
+++ b/chromium/v8/build/vs_toolchain.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import pipes
+import shutil
+import subprocess
+import sys
+
+
+script_dir = os.path.dirname(os.path.realpath(__file__))
+chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
+SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(1, os.path.join(chrome_src, 'tools'))
+sys.path.insert(0, os.path.join(chrome_src, 'build', 'gyp', 'pylib'))
+json_data_file = os.path.join(script_dir, 'win_toolchain.json')
+
+
+import gyp
+
+
+def SetEnvironmentAndGetRuntimeDllDirs():
+ """Sets up os.environ to use the depot_tools VS toolchain with gyp, and
+ returns the location of the VS runtime DLLs so they can be copied into
+ the output directory after gyp generation.
+ """
+ vs2013_runtime_dll_dirs = None
+ depot_tools_win_toolchain = \
+ bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
+ # When running on a non-Windows host, only do this if the SDK has explicitly
+ # been downloaded before (in which case json_data_file will exist).
+ if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
+ and depot_tools_win_toolchain):
+ if not os.path.exists(json_data_file):
+ Update()
+ with open(json_data_file, 'r') as tempf:
+ toolchain_data = json.load(tempf)
+
+ toolchain = toolchain_data['path']
+ version = toolchain_data['version']
+ win_sdk = toolchain_data.get('win_sdk')
+ if not win_sdk:
+ win_sdk = toolchain_data['win8sdk']
+ wdk = toolchain_data['wdk']
+ # TODO(scottmg): The order unfortunately matters in these. They should be
+ # split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
+ # below). http://crbug.com/345992
+ vs2013_runtime_dll_dirs = toolchain_data['runtime_dirs']
+
+ os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
+ os.environ['GYP_MSVS_VERSION'] = version
+ # We need to make sure windows_sdk_path is set to the automated
+ # toolchain values in GYP_DEFINES, but don't want to override any
+  # other values there.
+ gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
+ gyp_defines_dict['windows_sdk_path'] = win_sdk
+ os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
+ for k, v in gyp_defines_dict.iteritems())
+ os.environ['WINDOWSSDKDIR'] = win_sdk
+ os.environ['WDK_DIR'] = wdk
+ # Include the VS runtime in the PATH in case it's not machine-installed.
+ runtime_path = ';'.join(vs2013_runtime_dll_dirs)
+ os.environ['PATH'] = runtime_path + ';' + os.environ['PATH']
+ return vs2013_runtime_dll_dirs
+
+
+def _VersionNumber():
+ """Gets the standard version number ('120', '140', etc.) based on
+ GYP_MSVS_VERSION."""
+ if os.environ['GYP_MSVS_VERSION'] == '2013':
+ return '120'
+ elif os.environ['GYP_MSVS_VERSION'] == '2015':
+ return '140'
+ else:
+ raise ValueError('Unexpected GYP_MSVS_VERSION')
+
+
+def _CopyRuntimeImpl(target, source):
+ """Copy |source| to |target| if it doesn't already exist or if it
+ needs to be updated.
+ """
+ if (os.path.isdir(os.path.dirname(target)) and
+ (not os.path.isfile(target) or
+ os.stat(target).st_mtime != os.stat(source).st_mtime)):
+ print 'Copying %s to %s...' % (source, target)
+ if os.path.exists(target):
+ os.unlink(target)
+ shutil.copy2(source, target)
+
+
+def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
+ """Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
+ exist, but the target directory does exist."""
+ for file_part in ('p', 'r'):
+ dll = dll_pattern % file_part
+ target = os.path.join(target_dir, dll)
+ source = os.path.join(source_dir, dll)
+ _CopyRuntimeImpl(target, source)
+
+
+def _CopyRuntime2015(target_dir, source_dir, dll_pattern):
+ """Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
+ exist, but the target directory does exist."""
+ for file_part in ('msvcp', 'vccorlib'):
+ dll = dll_pattern % file_part
+ target = os.path.join(target_dir, dll)
+ source = os.path.join(source_dir, dll)
+ _CopyRuntimeImpl(target, source)
+
+
+def CopyVsRuntimeDlls(output_dir, runtime_dirs):
+ """Copies the VS runtime DLLs from the given |runtime_dirs| to the output
+ directory so that even if not system-installed, built binaries are likely to
+ be able to run.
+
+ This needs to be run after gyp has been run so that the expected target
+ output directories are already created.
+ """
+ x86, x64 = runtime_dirs
+ out_debug = os.path.join(output_dir, 'Debug')
+ out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
+ out_release = os.path.join(output_dir, 'Release')
+ out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
+ out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
+ out_release_x64 = os.path.join(output_dir, 'Release_x64')
+
+ if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
+ os.makedirs(out_debug_nacl64)
+ if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
+ os.makedirs(out_release_nacl64)
+ if os.environ.get('GYP_MSVS_VERSION') == '2015':
+ _CopyRuntime2015(out_debug, x86, '%s140d.dll')
+ _CopyRuntime2015(out_release, x86, '%s140.dll')
+ _CopyRuntime2015(out_debug_x64, x64, '%s140d.dll')
+ _CopyRuntime2015(out_release_x64, x64, '%s140.dll')
+ _CopyRuntime2015(out_debug_nacl64, x64, '%s140d.dll')
+ _CopyRuntime2015(out_release_nacl64, x64, '%s140.dll')
+ else:
+ # VS2013 is the default.
+ _CopyRuntime2013(out_debug, x86, 'msvc%s120d.dll')
+ _CopyRuntime2013(out_release, x86, 'msvc%s120.dll')
+ _CopyRuntime2013(out_debug_x64, x64, 'msvc%s120d.dll')
+ _CopyRuntime2013(out_release_x64, x64, 'msvc%s120.dll')
+ _CopyRuntime2013(out_debug_nacl64, x64, 'msvc%s120d.dll')
+ _CopyRuntime2013(out_release_nacl64, x64, 'msvc%s120.dll')
+
+ # Copy the PGO runtime library to the release directories.
+ if os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
+ pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
+ 'VC', 'bin')
+ pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
+ pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
+ source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
+ if os.path.exists(source_x86):
+ _CopyRuntimeImpl(os.path.join(out_release, pgo_runtime_dll), source_x86)
+ source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
+ if os.path.exists(source_x64):
+ _CopyRuntimeImpl(os.path.join(out_release_x64, pgo_runtime_dll),
+ source_x64)
+
+
+def CopyDlls(target_dir, configuration, target_cpu):
+ """Copy the VS runtime DLLs into the requested directory as needed.
+
+ configuration is one of 'Debug' or 'Release'.
+ target_cpu is one of 'x86' or 'x64'.
+
+ The debug configuration gets both the debug and release DLLs; the
+ release config only the latter.
+ """
+ vs2013_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
+ if not vs2013_runtime_dll_dirs:
+ return
+
+ x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
+ runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
+ _CopyRuntime2013(
+ target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + '.dll')
+ if configuration == 'Debug':
+ _CopyRuntime2013(
+ target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + 'd.dll')
+
+
+def _GetDesiredVsToolchainHashes():
+ """Load a list of SHA1s corresponding to the toolchains that we want installed
+ to build with."""
+ if os.environ.get('GYP_MSVS_VERSION') == '2015':
+ return ['49ae4b60d898182fc3f521c2fcda82c453915011']
+ else:
+ # Default to VS2013.
+ return ['ee7d718ec60c2dc5d255bbe325909c2021a7efef']
+
+
+def Update(force=False):
+ """Requests an update of the toolchain to the specific hashes we have at
+ this revision. The update outputs a .json of the various configuration
+ information required to pass to gyp which we use in |GetToolchainDir()|.
+ """
+ if force != False and force != '--force':
+ print >>sys.stderr, 'Unknown parameter "%s"' % force
+ return 1
+ if force == '--force' or os.path.exists(json_data_file):
+ force = True
+
+ depot_tools_win_toolchain = \
+ bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
+ if ((sys.platform in ('win32', 'cygwin') or force) and
+ depot_tools_win_toolchain):
+ import find_depot_tools
+ depot_tools_path = find_depot_tools.add_depot_tools_to_path()
+ get_toolchain_args = [
+ sys.executable,
+ os.path.join(depot_tools_path,
+ 'win_toolchain',
+ 'get_toolchain_if_necessary.py'),
+ '--output-json', json_data_file,
+ ] + _GetDesiredVsToolchainHashes()
+ if force:
+ get_toolchain_args.append('--force')
+ subprocess.check_call(get_toolchain_args)
+
+ return 0
+
+
+def GetToolchainDir():
+ """Gets location information about the current toolchain (must have been
+ previously updated by 'update'). This is used for the GN build."""
+ runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
+
+ # If WINDOWSSDKDIR is not set, search the default SDK path and set it.
+ if not 'WINDOWSSDKDIR' in os.environ:
+ default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\8.1'
+ if os.path.isdir(default_sdk_path):
+ os.environ['WINDOWSSDKDIR'] = default_sdk_path
+
+ print '''vs_path = "%s"
+sdk_path = "%s"
+vs_version = "%s"
+wdk_dir = "%s"
+runtime_dirs = "%s"
+''' % (
+ os.environ['GYP_MSVS_OVERRIDE_PATH'],
+ os.environ['WINDOWSSDKDIR'],
+ os.environ['GYP_MSVS_VERSION'],
+ os.environ.get('WDK_DIR', ''),
+ ';'.join(runtime_dll_dirs or ['None']))
+
+
+def main():
+ commands = {
+ 'update': Update,
+ 'get_toolchain_dir': GetToolchainDir,
+ 'copy_dlls': CopyDlls,
+ }
+ if len(sys.argv) < 2 or sys.argv[1] not in commands:
+ print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
+ return 1
+ return commands[sys.argv[1]](*sys.argv[2:])
+
+
+if __name__ == '__main__':
+ sys.exit(main())
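Taken together, the script is driven either by the DEPS hook above (`python v8/build/vs_toolchain.py update`) or programmatically, as build/gyp_v8 does. A hedged sketch of the programmatic flow; the output paths here are assumptions, not V8 defaults:

```python
# Sketch of the intended call sequence (paths are assumed for illustration).
import os
import vs_toolchain

# 1. Ensure the pinned toolchain and win_toolchain.json exist.
vs_toolchain.Update()

# 2. Point GYP_MSVS_* at the depot_tools toolchain; returns [x64_dir, x86_dir]
#    (see the ordering TODO in SetEnvironmentAndGetRuntimeDllDirs).
runtime_dirs = vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()

if runtime_dirs:
    x64_runtime, x86_runtime = runtime_dirs
    # 3a. After gyp generation: mirror the CRT DLLs into the output tree...
    vs_toolchain.CopyVsRuntimeDlls('out', (x86_runtime, x64_runtime))
    # 3b. ...or, for a single GN-style directory, copy one config's DLLs.
    vs_toolchain.CopyDlls(os.path.join('out', 'Debug'), 'Debug', 'x64')
```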
diff --git a/chromium/v8/docs/README.md b/chromium/v8/docs/README.md
new file mode 100644
index 00000000000..0eded673b84
--- /dev/null
+++ b/chromium/v8/docs/README.md
@@ -0,0 +1,2 @@
+The documentation for V8 can be found at the
+[V8 Wiki](https://github.com/v8/v8/wiki).
diff --git a/chromium/v8/docs/arm_debugging_with_the_simulator.md b/chromium/v8/docs/arm_debugging_with_the_simulator.md
deleted file mode 100644
index fbb41561423..00000000000
--- a/chromium/v8/docs/arm_debugging_with_the_simulator.md
+++ /dev/null
@@ -1,205 +0,0 @@
-# ARM debugging with the simulator
-
-The simulator and debugger can be very helpful when working with v8 code generation.
-
- * It is convenient as it allows you to test code generation without access to actual hardware.
- * No cross or native compilation is needed.
- * The simulator fully supports the debugging of generated code.
-
-Please note that this simulator is designed for v8 purposes. Only the features used by v8 are implemented, and you might encounter unimplemented features or instructions. In this case, feel free to implement them and submit the code!
-
-
-## Details on the ARM Debugger
-
-Compile the ARM simulator shell with:
-```
-make arm.debug
-```
-on an x86 host using your regular compiler.
-
-### Starting the Debugger
-There are different ways of starting the debugger:
-
-```
-$ out/arm.debug/d8 --stop_sim_at <n>
-```
-The simulator will start the debugger after executing n instructions.
-
-```
-$ out/arm.debug/d8 --stop_at <function name>
-```
-
-The simulator will stop at the given JavaScript function.
-
-You can also generate 'stop' instructions directly in the ARM code. Stops are generated with
-
-```
-Assembler::stop(const char* msg, Condition cond, int32_t code)
-```
-
-When the Simulator hits a stop, it will print msg and start the debugger.
-
-
-### Debugging Commands
-
-**Usual commands:**
-
-Enter `help` at the debugger prompt to get details on available commands. These include the usual gdb-like commands, such as stepi, cont, disasm, etc. If the Simulator is run under gdb, the “gdb” debugger command will give control to gdb. You can then use cont from gdb to return to the debugger.
-
-
-**Debugger specific commands:**
-
-Here's a list of the ARM debugger specific commands, along with examples.
-The JavaScript file “func.js” used below contains:
-
-```
-function test() {
- print("In function test.");
-}
-test();
-```
-
- * **printobject** `<`register`>` (alias po), will describe an object held in a register.
-
-```
-$ out/arm.debug/d8 func.js --stop_at test
-
-Simulator hit stop-at
- 0xb544d6a8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
-sim> print r0
-r0: 0xb547ec15 -1253577707
-sim> printobject r0
-r0:
-0xb547ec15: [Function]
- - map = 0x0xb540ff01
- - initial_map =
- - shared_info = 0xb547eb2d <SharedFunctionInfo>
- - name = #test
- - context = 0xb60083f1 <FixedArray[52]>
- - code = 0xb544d681 <Code>
- #arguments: 0xb545a15d <Proxy> (callback)
- #length: 0xb545a14d <Proxy> (callback)
- #name: 0xb545a155 <Proxy> (callback)
- #prototype: 0xb545a145 <Proxy> (callback)
- #caller: 0xb545a165 <Proxy> (callback)
-```
-
- * **break** `<address>` will insert a breakpoint at the specified address.
-
- * **del** will delete the current breakpoint.
-
-You can have only one such breakpoint. This is useful if you want to insert a breakpoint at runtime.
-```
-$ out/arm.debug/d8 func.js --stop_at test
-
-Simulator hit stop-at
- 0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
-sim> disasm 5
- 0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
- 0xb53a1eec e28db008 add fp, sp, #8
- 0xb53a1ef0 e59a200c ldr r2, [r10, #+12]
- 0xb53a1ef4 e28fe004 add lr, pc, #4
- 0xb53a1ef8 e15d0002 cmp sp, r2
-sim> break 0xb53a1ef8
-sim> cont
- 0xb53a1ef8 e15d0002 cmp sp, r2
-sim> disasm 5
- 0xb53a1ef8 e15d0002 cmp sp, r2
- 0xb53a1efc 359ff034 ldrcc pc, [pc, #+52]
- 0xb53a1f00 e5980017 ldr r0, [r8, #+23]
- 0xb53a1f04 e59f1030 ldr r1, [pc, #+48]
- 0xb53a1f08 e52d0004 str r0, [sp, #-4]!
-sim> break 0xb53a1f08
-setting breakpoint failed
-sim> del
-sim> break 0xb53a1f08
-sim> cont
- 0xb53a1f08 e52d0004 str r0, [sp, #-4]!
-sim> del
-sim> cont
-In function test.
-```
-
- * Generated `stop` instructions work as breakpoints with a few additional features.
-
-The first argument is a help message, the second is the condition, and the third is the stop code. If a code is specified, and is less than 256, the stop is said to be "watched", and can be disabled/enabled; a counter also keeps track of how many times the Simulator hits this code.
-
-Suppose we are working on this V8 C++ code, which is reached when running our JavaScript file:
-
-```
-__ stop("My stop.", al, 123);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ stop("My second stop.", al, 0x1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-```
-
-Here's a sample debugging session:
-
-We hit the first stop.
-
-```
-Simulator hit My stop.
- 0xb53559e8 e1a00000 mov r0, r0
-```
-
-We can see the following stop using disasm. The address of the message string is inlined in the code after the svc stop instruction.
-
-```
-sim> disasm
- 0xb53559e8 e1a00000 mov r0, r0
- 0xb53559ec e1a00000 mov r0, r0
- 0xb53559f0 e1a00000 mov r0, r0
- 0xb53559f4 e1a00000 mov r0, r0
- 0xb53559f8 e1a00000 mov r0, r0
- 0xb53559fc ef800001 stop 1 - 0x1
- 0xb5355a00 08338a97 stop message: My second stop
- 0xb5355a04 e1a00000 mov r1, r1
- 0xb5355a08 e1a00000 mov r1, r1
- 0xb5355a0c e1a00000 mov r1, r1
-```
-
-Information can be printed for all (watched) stops which were hit at least once.
-
-```
-sim> stop info all
-Stop information:
-stop 123 - 0x7b: Enabled, counter = 1, My stop.
-sim> cont
-Simulator hit My second stop
- 0xb5355a04 e1a00000 mov r1, r1
-sim> stop info all
-Stop information:
-stop 1 - 0x1: Enabled, counter = 1, My second stop
-stop 123 - 0x7b: Enabled, counter = 1, My stop.
-```
-
-Stops can be disabled or enabled. (Only available for watched stops.)
-
-```
-sim> stop disable 1
-sim> cont
-Simulator hit My stop.
- 0xb5356808 e1a00000 mov r0, r0
-sim> cont
-Simulator hit My stop.
- 0xb5356c28 e1a00000 mov r0, r0
-sim> stop info all
-Stop information:
-stop 1 - 0x1: Disabled, counter = 2, My second stop
-stop 123 - 0x7b: Enabled, counter = 3, My stop.
-sim> stop enable 1
-sim> cont
-Simulator hit My second stop
- 0xb5356c44 e1a00000 mov r1, r1
-sim> stop disable all
-sim> con
-In function test.
-``` \ No newline at end of file
diff --git a/chromium/v8/docs/becoming_v8_committer.md b/chromium/v8/docs/becoming_v8_committer.md
deleted file mode 100644
index 16e8491b06f..00000000000
--- a/chromium/v8/docs/becoming_v8_committer.md
+++ /dev/null
@@ -1,38 +0,0 @@
-## What is a committer?
-
-Technically, a committer is someone who has write access to the V8 SVN repository. A committer can submit his or her own patches or patches from others.
-
-This privilege is granted with some expectation of responsibility: committers are people who care about the V8 project and want to help meet its goals. A committer is not just someone who can make changes, but someone who has demonstrated his or her ability to collaborate with the team, get the most knowledgeable people to review code, contribute high-quality code, and follow through to fix issues (in code or tests).
-
-A committer is a contributor to the V8 project's success and a citizen helping the project succeed. See [V8CommittersResponsibility](v8_committers_responsibility.md).
-
-## How do I become a committer?
-
-In a nutshell, contribute 20 non-trivial patches and get at least three different people to review them (you'll need three people to support you). Then ask someone to nominate you. You're demonstrating your:
-
- * commitment to the project (20 good patches requires a lot of your valuable time),
- * ability to collaborate with the team,
- * understanding of how the team works (policies, processes for testing and code review, etc),
- * understanding of the projects' code base and coding style, and
- * ability to write good code (last but certainly not least)
-
-A current committer nominates you by sending email to v8-committers@googlegroups.com containing:
-
- * your first and last name
- * your Google Code email address
- * an explanation of why you should be a committer,
- * an embedded list of links to revisions (about the top 10) containing your patches
-
-Two other committers need to second your nomination. If no one objects in 5 working days (U.S.), you're a committer. If anyone objects or wants more information, the committers discuss and usually come to a consensus (within the 5 working days). If issues cannot be resolved, there's a vote among current committers.
-
-Once you get approval from the existing committers, we'll send you instructions for write access to SVN or Git. You'll also be added to v8-committers@googlegroups.com.
-
-In the worst case, this can drag out for two weeks. Keep writing patches! Even in the rare cases where a nomination fails, the objection is usually something easy to address like "more patches" or "not enough people are familiar with this person's work."
-
-## Maintaining committer status
-
-You don't really need to do much to maintain committer status: just keep being awesome and helping the V8 project!
-
-In the unhappy event that a committer continues to disregard good citizenship (or actively disrupts the project), we may need to revoke that person's status. The process is the same as for nominating a new committer: someone suggests the revocation with a good reason, two people second the motion, and a vote may be called if consensus cannot be reached. I hope that's simple enough, and that we never have to test it in practice.
-
-(Source: inspired by http://dev.chromium.org/getting-involved/become-a-committer ) \ No newline at end of file
diff --git a/chromium/v8/docs/building_with_gyp.md b/chromium/v8/docs/building_with_gyp.md
deleted file mode 100644
index 0183fd2de51..00000000000
--- a/chromium/v8/docs/building_with_gyp.md
+++ /dev/null
@@ -1,260 +0,0 @@
-**Build issues? File a bug at code.google.com/p/v8/issues or ask for help on v8-users@googlegroups.com.**
-
-# Building V8
-
-V8 is built with the help of [GYP](http://code.google.com/p/gyp/). GYP is a meta build system of sorts, as it generates build files for a number of other build systems. How you build therefore depends on what "back-end" build system and compiler you're using.
-The instructions below assume that you already have a [checkout of V8](using_git.md) but haven't yet installed the build dependencies.
-
-If you intend to develop on V8, i.e., send patches and work with changelists, you will need to install the dependencies as described [here](using_git.md).
-
-
-## Prerequisite: Installing GYP
-
-First, you need GYP itself. GYP is fetched together with the other dependencies by running:
-
-```
-gclient sync
-```
-
-## Building
-
-### GCC + make
-
-Requires GNU make 3.81 or later. Should work with any GCC >= 4.8 or any recent clang (3.5 highly recommended).
-
-#### Build instructions
-
-
-The top-level Makefile defines a number of targets for each target architecture (`ia32`, `x64`, `arm`, `arm64`) and mode (`debug`, `optdebug`, or `release`). So your basic command for building is:
-```
-make ia32.release
-```
-
-or analogously for the other architectures and modes. You can build both debug and release binaries with just one command:
-```
-make ia32
-```
-
-To automatically build in release mode for the host architecture:
-```
-make native
-```
-
-You can also can build all architectures in a given mode at once:
-```
-make release
-```
-
-Or everything:
-```
-make
-```
-
-#### Optional parameters
-
- * `-j` specifies the number of parallel build processes. Set it (roughly) to the number of CPU cores your machine has. The GYP/make based V8 build also supports distcc, so you can compile with `-j100` or so, provided you have enough machines around.
-
- * `OUTDIR=foo` specifies where the compiled binaries go. It defaults to `./out/`. In this directory, a subdirectory will be created for each architecture and mode. You will find the d8 shell's binary in `foo/ia32.release/d8`, for example.
-
- * `library=shared` or `component=shared_library` (the two are completely equivalent) builds V8 as a shared library (`libv8.so`).
-
- * `soname_version=1.2.3` is only relevant for shared library builds and configures the SONAME of the library. Both the SONAME and the filename of the library will be `libv8.so.1.2.3` if you specify this. Due to a peculiarity in GYP, if you specify a custom SONAME, the library's path will no longer be encoded in the binaries, so you'll have to run d8 as follows:
-```
-LD_LIBRARY_PATH=out/ia32.release/lib.target out/ia32.release/d8
-```
-
- * `console=readline` enables readline support for the d8 shell. You need readline development headers for this (`libreadline-dev` on Ubuntu).
-
- * `disassembler=on` enables the disassembler for release mode binaries (it's always enabled for debug binaries). This is useful if you want to inspect generated machine code.
-
- * `snapshot=off` disables building with a heap snapshot. Compiling will be a little faster, but V8’s start up will be slightly slower.
-
- * `gdbjit=on` enables GDB JIT support.
-
- * `liveobjectlist=on` enables the Live Object List feature.
-
- * `vfp3=off` is only relevant for ARM builds with snapshot and disables the use of VFP3 instructions in the snapshot.
-
- * `debuggersupport=off` disables the JavaScript debugger.
-
- * `werror=no` omits the -Werror flag. This is especially useful for C++ compilers that are not officially supported (e.g. newer versions of GCC) so that compile warnings are ignored.
-
- * `strictaliasing=off` passes the -fno-strict-aliasing flag to GCC. This may help to work around build failures on officially unsupported platforms and/or GCC versions.
-
- * `regexp=interpreted` chooses the interpreted mode of the irregexp regular expression engine instead of the native code mode.
-
- * `hardfp=on` creates "hardfp" binaries on ARM.
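-
-For example, a typical invocation combining several of these options might look like this (an illustrative command; pick only the options you actually need):
-
-```
-make ia32.release library=shared console=readline -j8
-```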
-
-### Ninja
-
-To build d8:
-```
-export GYP_GENERATORS=ninja
-build/gyp_v8
-ninja -C out/Debug d8
-```
-
-Specify `out/Release` for a release build. I recommend setting up an alias so that you don't need to type out that build directory path.
-
-If you want to build all targets, use `ninja -C out/Debug all`. It's faster to build only the target you're working on, like `d8` or `unittests`.
-
-Note: You need to set `v8_target_arch` if you want a non-native build, i.e. either
-```
-export GYP_DEFINES="v8_target_arch=arm"
-build/gyp_v8 ...
-```
-or
-```
-build/gyp_v8 -Dv8_target_arch=arm ...
-```
-
-
-#### Using goma (Googlers only)
-
-To use goma you need to set the `use_goma` gyp define, either by passing it to `gyp_v8`, i.e.
-```
-build/gyp_v8 -Duse_goma=1
-```
-or by setting the environment variable `$GYP_DEFINES` appropriately:
-```
-export GYP_DEFINES="use_goma=1"
-```
-Note: You may need to also set `gomadir` to point to the directory where you installed goma, if it's not in the default location.
-
-If you are using goma, you'll also want to bump the job limit, i.e.
-```
-ninja -j 100 -C out/Debug d8
-```
-
-
-### Cross-compiling
-
-Similar to building with Clang, you can also use a cross-compiler. Just export your toolchain (`CXX`/`LINK` environment variables should be enough) and compile. For example:
-```
-export CXX=/path/to/cross-compile-g++
-export LINK=/path/to/cross-compile-g++
-make arm.release
-```
-
-
-### Xcode
-
-From the root of your V8 checkout, run either of:
-```
-build/gyp_v8 -Dtarget_arch=ia32
-build/gyp_v8 -Dtarget_arch=x64
-```
-
-This will generate Xcode project files in `build/` that you can then either open with Xcode or compile directly from the command line:
-```
-xcodebuild -project build/all.xcodeproj -configuration Release
-xcodebuild -project build/all.xcodeproj
-```
-
-Note: If you have configured your `GYP_GENERATORS` environment variable, either unset it, or set it to `xcode` for this to work.
-
-
-#### Custom build settings
-
-You can export the `GYP_DEFINES` environment variable in your shell to configure custom build options. The syntax is `GYP_DEFINES="-Dvariable1=value1 -Dvariable2=value2"` and so on for as many variables as you wish. Possibly interesting options include:
- * `-Dcomponent=shared_library` (see `library=shared` in the [GCC + make](#Optional_parameters.md) section above)
- * `-Dconsole=readline` (see `console=readline`)
- * `-Dv8_enable_disassembler=1` (see `disassembler=on`)
- * `-Dv8_use_snapshot='false'` (see `snapshot=off`)
- * `-Dv8_enable_gdbjit=1` (see `gdbjit=on`)
- * `-Dv8_use_liveobjectlist=true` (see `liveobjectlist=on`)
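-
-For example, to generate projects for a shared-library build with the disassembler enabled (an illustrative combination of the options above):
-
-```
-export GYP_DEFINES="-Dcomponent=shared_library -Dv8_enable_disassembler=1"
-build/gyp_v8 -Dtarget_arch=ia32
-```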
-
-
-### Visual Studio
-
-You need Visual Studio 2013; older versions might still work at the moment, but this will probably change soon because we intend to use C++11 features.
-
-#### Prerequisites
-
-After you have created a [checkout of V8](using_git.md), all dependencies will already be installed.
-
-If you are getting errors during the build mentioning that 'python' could not be found, add 'python.exe' to your PATH.
-
-If you have Visual Studio 2013 and 2015 installed side-by-side, set the environment variable GYP\_MSVS\_VERSION to '2013' so that the right project files are created.
-
-#### Building
- * If you use the command prompt:
- 1. Generate project files:
-```
-python build\gyp_v8
-```
-> Specify the path to `python.exe` if you don't have it in your PATH.
-> Append `-Dtarget_arch=x64` if you want to build 64-bit binaries. If you switch between ia32 and x64 targets, you may have to manually delete the generated .vcproj/.sln files before regenerating them.
-> Example:
-```
-third_party/python_26/python.exe build\gyp_v8 -Dtarget_arch=x64
-```
- 1. Build:
-> Either open `build\All.sln` in Visual Studio, or compile on the command line as follows (adapt the path as necessary, or simply put `devenv.com` in your PATH):
-```
-"c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\IDE\devenv.com" /build Release build\All.sln
-```
-> Replace `Release` with `Debug` to build in Debug mode.
-> The built binaries will be in `build\Release\` or `build\Debug\`.
-
- * If you use cygwin, the workflow is the same, but the syntax is slightly different:
- 1. Generate project files:
-```
-build/gyp_v8
-```
-> This will spit out a bunch of warnings about missing input files, but it seems to be OK to ignore them. (If you have time to figure this out, we'd happily accept a patch that makes the warnings go away!)
- 1. Build:
-```
-/cygdrive/c/Program\ Files\ (x86)/Microsoft\ Visual\ Studio\ 9.0/Common7/IDE/devenv.com /build Release build/all.sln
-```
-
-
-#### Custom build settings
-
-See the "custom build settings" section for [Xcode](#Xcode) above.
-
-
-#### Running tests
-
-You can abuse the test driver's --buildbot flag to make it find the executables where MSVC puts them:
-```
-python tools/run-tests.py --buildbot --outdir build --arch ia32 --mode Release
-```
-
-
-### MinGW
-
-Building on MinGW is not officially supported, but it is possible. You even have two options:
-
-#### Option 1: With Cygwin Installed
-
-Requirements:
- * MinGW
- * Cygwin, including Python
- * Python from www.python.org _(yes, you need two Python installations!)_
-
-Building:
- 1. Open a MinGW shell
- 1. `export PATH=$PATH:/c/cygwin/bin` _(or wherever you installed Cygwin)_
- 1. `make ia32.release -j8`
-
-Running tests:
- 1. Open a MinGW shell
- 1. `export PATH=/c/Python27:$PATH` _(or wherever you installed Python)_
- 1. `make ia32.release.check -j8`
-
-#### Option 2: Without Cygwin, just MinGW
-
-Requirements:
- * MinGW
- * Python from www.python.org
-
-Building and testing:
- 1. Open a MinGW shell
- 1. `tools/mingw-generate-makefiles.sh` _(re-run this any time a `*.gyp*` file changes, such as after updating your checkout)_
- 1. `make ia32.release` _(unfortunately -jX doesn't seem to work here)_
- 1. `make ia32.release.check -j8`
-
-
-# Final Note
-<font color='darkred'><b>If you have problems or questions, please file bugs at code.google.com/p/v8/issues or send mail to v8-users@googlegroups.com. Comments on this page are likely to go unnoticed and unanswered.</b></font> \ No newline at end of file
diff --git a/chromium/v8/docs/contributing.md b/chromium/v8/docs/contributing.md
deleted file mode 100644
index aa8e6659762..00000000000
--- a/chromium/v8/docs/contributing.md
+++ /dev/null
@@ -1,32 +0,0 @@
-Here you will find information that you'll need to be able to contribute to V8. Be sure to read the whole thing before sending us a contribution, including the small print at the end.
-
-## Before you contribute
-
-Before you start working on a larger contribution to V8 you should get in touch with us first through the V8 [contributor mailing list](http://groups.google.com/group/v8-dev) so we can help out and possibly guide you; coordinating up front makes it much easier to avoid frustration later on.
-
-## Getting the code
-
-See [UsingGit](using_git.md).
-
-## Submitting code
-
-The source code of V8 follows the [Google C++ Style Guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml) so you should familiarize yourself with those guidelines. Before submitting code you must pass all our [tests](http://code.google.com/p/v8-wiki/wiki/Testing), and have to successfully run the presubmit checks:
-
-> `tools/presubmit.py`
-
-The presubmit script uses a linter from Google, `cpplint.py`. External contributors can get this from [here](http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py) and place it in their path.
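-
-For example, external contributors might set this up as follows (an illustrative sequence; the download URL is the one given above):
-
-```
-wget http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
-chmod a+x cpplint.py
-export PATH=$PWD:$PATH
-tools/presubmit.py
-```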
-
-All submissions, including submissions by project members, require review. We use the same code-review tools and process as the chromium project. In order to submit a patch, you need to get the [depot\_tools](http://dev.chromium.org/developers/how-tos/install-depot-tools) and follow these instructions on [requesting a review](http://dev.chromium.org/developers/contributing-code) (using your V8 workspace instead of a chromium workspace).
-
-### Look out for breakage or regressions
-
-Before checking in your changes, please look at the [buildbot console](http://build.chromium.org/p/client.v8/console) to see that the columns are mostly green; otherwise you will not know whether your change breaks the build. When your change is committed, watch the [buildbot console](http://build.chromium.org/p/client.v8/console) until the bots turn green after your change.
-
-
-## The small print
-
-Before we can use your code you have to sign the [Google Individual Contributor License Agreement](http://code.google.com/legal/individual-cla-v1.0.html), which you can do online. This is mainly because you own the copyright to your changes, even after your contribution becomes part of our codebase, so we need your permission to use and distribute your code. We also need to be sure of various other things, for instance that you'll tell us if you know that your code infringes on other people's patents. You don't have to do this until after you've submitted your code for review and a member has approved it, but you will have to do it before we can put your code into our codebase.
-
-Contributions made by corporations are covered by a different agreement than the one above, the [Software Grant and Corporate Contributor License Agreement](http://code.google.com/legal/corporate-cla-v1.0.html).
-
-Sign them online [here](https://cla.developers.google.com/) \ No newline at end of file
diff --git a/chromium/v8/docs/cross_compiling_for_arm.md b/chromium/v8/docs/cross_compiling_for_arm.md
deleted file mode 100644
index 30a7196b4a9..00000000000
--- a/chromium/v8/docs/cross_compiling_for_arm.md
+++ /dev/null
@@ -1,151 +0,0 @@
-<font color='darkred'><b><h2>Building V8 with SCons is no longer supported. See <a href='https://code.google.com/p/v8-wiki/wiki/BuildingWithGYP'>BuildingWithGYP</a>.</h2></b></font>
-
----
-
-
-# Using Sourcery G++ Lite
-
-The Sourcery G++ Lite cross compiler suite is a free version of Sourcery G++ from [CodeSourcery](http://www.codesourcery.com). There is a page for the [GNU Toolchain for ARM Processors](http://www.codesourcery.com/sgpp/lite/arm). Determine the version you need for your host/target combination.
-
-The following instructions use [2009q1-203 for ARM GNU/Linux](http://www.codesourcery.com/sgpp/lite/arm/portal/release858); if you use a different version, please change the URLs and `TOOL_PREFIX` below accordingly.
-
-## Installing on host and target
-
-The simplest way of setting this up is to install the full Sourcery G++ Lite package on both the host and target at the same location. This will ensure that all the libraries required are available on both sides. If you want to use the default libraries on the host there is no need to install anything on the target.
-
-The following script will install in `/opt/codesourcery`:
-
-```
-#!/bin/sh
-
-sudo mkdir /opt/codesourcery
-cd /opt/codesourcery
-sudo chown $USERNAME .
-chmod g+ws .
-umask 2
-wget http://www.codesourcery.com/sgpp/lite/arm/portal/package4571/public/arm-none-linux-gnueabi/arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
-tar -xvf arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
-```
-
-
-## Building using scons without snapshot
-
-The simplest way to build is without snapshot, as that does not involve using the simulator to generate the snapshot. The following script will build the sample shell without snapshot for ARM v7.
-
-```
-#!/bin/sh
-
-export TOOL_PREFIX=/opt/codesourcery/arm-2009q1/bin/arm-none-linux-gnueabi
-export CXX=$TOOL_PREFIX-g++
-export AR=$TOOL_PREFIX-ar
-export RANLIB=$TOOL_PREFIX-ranlib
-export CC=$TOOL_PREFIX-gcc
-export LD=$TOOL_PREFIX-ld
-
-export CCFLAGS="-march=armv7-a -mtune=cortex-a8 -mfpu=vfp"
-export ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-
-scons wordsize=32 snapshot=off arch=arm sample=shell
-```
-
-If the processor is not Cortex A8 or does not have VFP enabled the `-mtune=cortex-a8` and `-mfpu=vfp` part of `CCFLAGS` needs to be changed accordingly. By default the V8 SCons build adds `-mfloat-abi=softfp`.
-
-If using the default libraries on the target just leave out the setting of `ARM_TARGET_LIB`, and if the target libraries are in a different location `ARM_TARGET_LIB` needs to be adjusted accordingly.
-
-The default for Sourcery G++ Lite is ARM v5te with software floating point emulation, so if testing building for ARM v5te the setting of `CCFLAGS` and `ARM_TARGET_LIB` should be changed to:
-
-```
-CCFLAGS=""
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-
-scons armeabi=soft ...
-```
-
-Relying on defaults in the tool chain might lead to surprises, so for ARM v5te with software floating point emulation the following is more explicit:
-
-```
-CCFLAGS="-march=armv5te"
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-
-scons armeabi=soft ...
-```
-
-If the target has a VFP unit use the following:
-
-```
-CCFLAGS="-mfpu=vfpv3"
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-```
-
-To allow G++ to use Thumb2 instructions and the VFP unit when compiling the C/C++ code use:
-
-```
-CCFLAGS="-mthumb -mfpu=vfpv3"
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc/thumb2
-```
-
-_Note:_ V8 will not use Thumb2 instructions in its generated code; it always uses the full ARM instruction set.
-
-For other ARM versions please check the Sourcery G++ Lite documentation.
-
-As mentioned above the default for the Sourcery G++ Lite version used here is ARM v5te with software floating point emulation. However beware that this default might change between versions and that there are no unique defaults for ARM tool chains in general, so always passing `-march` and possibly `-mfpu` is recommended. Passing `-mfloat-abi` is not required as this is controlled by the SCons option `armeabi`.
-
-## Building using scons with snapshot
-
-When building with snapshot, the simulator is used to generate the snapshot on the host, and the build for the target then uses that snapshot. The following script will accomplish that (using both Thumb2 and VFP instructions):
-
-```
-#!/bin/sh
-
-V8DIR=..
-
-cd host
-
-scons -Y$V8DIR simulator=arm snapshot=on
-mv obj/release/snapshot.cc $V8DIR/src/snapshot.cc
-
-cd ..
-
-export TOOL_PREFIX=/opt/codesourcery/arm-2010.09-103/bin/arm-none-linux-gnueabi
-export CXX=$TOOL_PREFIX-g++
-export AR=$TOOL_PREFIX-ar
-export RANLIB=$TOOL_PREFIX-ranlib
-export CC=$TOOL_PREFIX-gcc
-export LD=$TOOL_PREFIX-ld
-
-export CCFLAGS="-mthumb -march=armv7-a -mfpu=vfpv3"
-export ARM_TARGET_LIB=/opt/codesourcery/arm-2010.09-103/arm-none-linux-gnueabi/libc/thumb2
-
-cd target
-
-scons -Y$V8DIR wordsize=32 snapshot=nobuild arch=arm sample=shell
-rm $V8DIR/src/snapshot.cc
-
-cd ..
-```
-
-This script requires the two subdirectories `host` and `target`. V8 is first built for the host with the ARM simulator, which supports running ARM code on the host. This build is used to generate the snapshot file which is then used for the actual cross compilation of V8.
-
-## Building for target which supports unaligned access
-
-The default when building V8 for an ARM target (either cross compiling or compiling on an ARM machine) is to disable unaligned memory access. However in some situations (most noticeably the handling of regular expressions) performance will be better if unaligned memory access is used on processors which support it. To enable unaligned memory access set `unalignedaccesses` to `on` when building:
-
-```
-scons unalignedaccesses=on ...
-```
-
-When running in the simulator the default is to enable unaligned memory access, so to test in the simulator with unaligned memory access disabled set `unalignedaccesses` to `off` when building:
-
-```
-scons unalignedaccesses=off simulator=arm ...
-```
-
-## Using V8 with hardfp calling convention
-
-By default V8 uses the softfp calling convention when calling C functions from generated code. However it is possible to use hardfp as well. To enable this set `armeabi` to `hardfp` when building:
-
-```
-scons armeabi=hardfp ...
-```
-
-Passing `armeabi=hardfp` to SCons will automatically set the compiler flag `-mfloat-abi=hardfp`. If using snapshots remember to pass `armeabi=hardfp` when building V8 on the host for generating the snapshot as well. \ No newline at end of file
diff --git a/chromium/v8/docs/d8_on_android.md b/chromium/v8/docs/d8_on_android.md
deleted file mode 100644
index eda64193459..00000000000
--- a/chromium/v8/docs/d8_on_android.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Prerequisites
- * a Linux/Mac workstation
- * v8 r12178 (on Google Code) or later
- * an Android emulator or device with matching USB cable
- * make sure [building with GYP](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP) works
-
-
-# Get the code
-
- * Use the instructions from https://code.google.com/p/v8-wiki/wiki/UsingGit to get the code
- * Then you need to add the Android dependencies:
-```
-v8$ echo "target_os = ['android']" >> ../.gclient && gclient sync --nohooks
-```
- * The sync will take a while the first time as it downloads the Android NDK to v8/third\_party
- * If you want to use a different NDK, you need to set the gyp variable android\_ndk\_root
-
-
-# Get the Android SDK
- * tested version: `r15`
- * download the SDK from http://developer.android.com/sdk/index.html
- * extract it
- * install the "Platform tools" using the SDK manager that you can start by running `tools/android`
- * now you have a `platform_tools/adb` binary which will be used later; put it in your `PATH` or remember where it is
-
-
-# Set up your device
- * Enable USB debugging (Gingerbread: Settings > Applications > Development > USB debugging; Ice Cream Sandwich: Settings > Developer Options > USB debugging)
- * connect your device to your workstation
- * make sure `adb devices` shows it; you may have to edit `udev` rules to give yourself proper permissions
- * run `adb shell` to get an ssh-like shell on the device. In that shell, do:
-```
-cd /data/local/tmp
-mkdir v8
-cd v8
-```
-
-
-# Push stuff onto the device
- * make sure your device is connected
- * from your workstation's shell:
-```
-adb push /file/you/want/to/push /data/local/tmp/v8/
-```
-
-
-# Compile V8 for Android
-Currently two architectures (`android_arm` and `android_ia32`) are supported, each in `debug` or `release` mode. The following steps work equally well for both ARM and ia32, on either the emulator or real devices.
- * compile:
-```
-make android_arm.release -j16
-```
- * push the resulting binary to the device:
-```
-adb push out/android_arm.release/d8 /data/local/tmp/v8/d8
-```
- * the most comfortable way to run it is from your workstation's shell as a one-off command (rather than starting an interactive shell session on the device); that way you can use pipes or whatever to process the output as necessary:
-```
-adb shell /data/local/tmp/v8/d8 <parameters>
-```
- * warning: when you cancel such an "adb shell whatever" command using Ctrl+C, the process on the phone will sometimes keep running.
- * Alternatively, use the `.check` suffix to automatically push test binaries and test cases onto the device and run them.
-```
-make android_arm.release.check
-```
-
-
-# Profile
- * compile a binary, push it to the device, keep a copy of it on the host
-```
-make android_arm.release -j16
-adb push out/android_arm.release/d8 /data/local/tmp/v8/d8-version.under.test
-cp out/android_arm.release/d8 ./d8-version.under.test
-```
- * get a profiling log and copy it to the host:
-```
-adb shell /data/local/tmp/v8/d8-version.under.test benchmark.js --prof
-adb pull /data/local/tmp/v8/v8.log ./
-```
- * open `v8.log` in your favorite editor and edit the first line to match the full path of the `d8-version.under.test` binary on your workstation (instead of the `/data/local/tmp/v8/` path it had on the device)
- * run the tick processor with the host's `d8` and an appropriate `nm` binary:
-```
-cp out/ia32.release/d8 ./d8 # only required once
-tools/linux-tick-processor --nm=$ANDROID_NDK_ROOT/toolchain/bin/arm-linux-androideabi-nm
-```
-
-# Compile SpiderMonkey for Lollipop
-```
-cd firefox/js/src
-autoconf2.13
-./configure \
- --target=arm-linux-androideabi \
- --with-android-ndk=$ANDROID_NDK_ROOT \
- --with-android-version=21 \
- --without-intl-api \
- --disable-tests \
- --enable-android-libstdcxx \
- --enable-pie
-make
-adb push -p js/src/shell/js /data/local/tmp/js
-``` \ No newline at end of file
diff --git a/chromium/v8/docs/debugger_protocol.md b/chromium/v8/docs/debugger_protocol.md
deleted file mode 100644
index 2cc618fbd4c..00000000000
--- a/chromium/v8/docs/debugger_protocol.md
+++ /dev/null
@@ -1,934 +0,0 @@
-# Introduction
-
-V8 has support for debugging the JavaScript code running in it. There are two APIs for this: a function based API using JavaScript objects and a message based API using a JSON based protocol. The function based API can be used by an in-process debugger agent, whereas the message based API can be used out of process as well.
-
-> **The message based API is no longer maintained. Please ask in v8-users@googlegroups.com if you want to attach a debugger to the run-time.**
-
-The debugger protocol is based on [JSON](http://www.json.org/). Each protocol packet is defined in terms of JSON and is transmitted as a string value. All packets have two basic elements `seq` and `type`.
-
-```
-{ "seq" : <number>,
- "type" : <type>,
- ...
-}
-```
-
-The element `seq` holds the sequence number of the packet, and the element `type` is the type of the packet. The type is a string with one of the following values: `"request"`, `"response"` or `"event"`.
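-
-For example, here is a complete minimal packet (a `version` request, described later in this document):
-
-```
-{"seq":1,"type":"request","command":"version"}
-```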
-
-A `"request"` packet has the following structure:
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : <command>
- "arguments" : ...
-}
-```
-
-A `"response"` packet has the following structure. If `success` is true `body` will contain the response data. If `success` is false `message` will contain an error message.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : <command>
- "body" : ...
- "running" : <is the VM running after sending this response>
- "success" : <boolean indicating success>
- "message" : <if command failed this property contains an error message>
-}
-```
-
-An `"event"` packet has the following structure:
-
-```
-{ "seq" : <number>,
- "type" : "event",
- "event" : <event name>
- body : ...
-}
-```
-
-# Request/response pairs
-
-## Request `continue`
-
-The request `continue` is a request from the debugger to start the VM running again. As part of the `continue` request the debugger can specify if it wants the VM to perform a single step action.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "continue",
- "arguments" : { "stepaction" : <"in", "next" or "out">,
- "stepcount" : <number of steps (default 1)>
- }
-}
-```
-
-In the response the property `running` will always be true as the VM will be running after executing the `continue` command. If a single step action is requested the VM will respond with a `break` event after running the step.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "continue",
- "running" : true
- "success" : true
-}
-```
-
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"continue"}
-{"seq":118,"type":"request","command":"continue","arguments":{"stepaction":"out"}}
-{"seq":119,"type":"request","command":"continue","arguments":{"stepaction":"next","stepcount":5}}
-```
-
-## Request `evaluate`
-
-The request `evaluate` is used to evaluate an expression. The body of the result is as described in response object serialization below.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "evaluate",
- "arguments" : { "expression" : <expression to evaluate>,
- "frame" : <number>,
- "global" : <boolean>,
- "disable_break" : <boolean>,
- "additional_context" : [
- { "name" : <name1>, "handle" : <handle1> },
- { "name" : <name2>, "handle" : <handle2> },
- ...
- ]
- }
-}
-```
-
-Optional argument `additional_context` specifies handles that will be visible from the expression under corresponding names (see example below).
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "evaluate",
- "body" : ...
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"evaluate","arguments":{"expression":"1+2"}}
-{"seq":118,"type":"request","command":"evaluate","arguments":{"expression":"a()","frame":3,"disable_break":false}}
-{"seq":119,"type":"request","command":"evaluate","arguments":{"expression":"[o.a,o.b,o.c]","global":true,"disable_break":true}}
-{"seq":120,"type":"request","command":"evaluate","arguments":{"expression":"obj.toString()", "additional_context": [{ "name":"obj","handle":25 }] }}
-```
-
-## Request `lookup`
-
-The request `lookup` is used to look up objects based on their handle. The individual array elements of the body of the result are as described in response object serialization below.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "lookup",
- "arguments" : { "handles" : <array of handles>,
- "includeSource" : <boolean indicating whether the source will be included when script objects are returned>,
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "lookup",
- "body" : <array of serialized objects indexed using their handle>
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"lookup","arguments":{"handles":"[1]"}}
-{"seq":118,"type":"request","command":"lookup","arguments":{"handles":"[7,12]"}}
-```
-
-## Request `backtrace`
-
-The request `backtrace` returns a backtrace (or stacktrace) from the current execution state. When issuing a request a range of frames can be supplied. The top frame is frame number 0. If no frame range is supplied data for 10 frames will be returned.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "backtrace",
- "arguments" : { "fromFrame" : <number>
- "toFrame" : <number>
- "bottom" : <boolean, set to true if the bottom of the stack is requested>
- }
-}
-```
-
-The response contains the frame data together with the actual frames returned and the total frame count.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "backtrace",
- "body" : { "fromFrame" : <number>
- "toFrame" : <number>
- "totalFrames" : <number>
- "frames" : <array of frames - see frame request for details>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-If there are no stack frames the result body only contains `totalFrames` with a value of `0`. When an exception event is generated due to compilation failures it is possible that there are no stack frames.
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"backtrace"}
-{"seq":118,"type":"request","command":"backtrace","arguments":{"toFrame":2}}
-{"seq":119,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":9}}
-```
-
-## Request `frame`
-
-The request `frame` selects a new current frame and returns information about it. If no frame number is specified the selected frame is returned.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "frame",
- "arguments" : { "number" : <frame number>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "frame",
- "body" : { "index" : <frame number>,
- "receiver" : <frame receiver>,
- "func" : <function invoked>,
- "script" : <script for the function>,
- "constructCall" : <boolean indicating whether the function was called as constructor>,
- "debuggerFrame" : <boolean indicating whether this is an internal debugger frame>,
- "arguments" : [ { name: <name of the argument - missing of anonymous argument>,
- value: <value of the argument>
- },
- ... <the array contains all the arguments>
- ],
- "locals" : [ { name: <name of the local variable>,
- value: <value of the local variable>
- },
- ... <the array contains all the locals>
- ],
- "position" : <source position>,
- "line" : <source line>,
- "column" : <source column within the line>,
- "sourceLineText" : <text for current source line>,
- "scopes" : [ <array of scopes, see scope request below for format> ],
-
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"frame"}
-{"seq":118,"type":"request","command":"frame","arguments":{"number":1}}
-```
-
-## Request `scope`
-
-The request `scope` returns information on a given scope for a given frame. If no frame number is specified the selected frame is used.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "scope",
- "arguments" : { "number" : <scope number>
- "frameNumber" : <frame number, optional uses selected frame if missing>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "scope",
- "body" : { "index" : <index of this scope in the scope chain. Index 0 is the top scope
- and the global scope will always have the highest index for a
- frame>,
- "frameIndex" : <index of the frame>,
- "type" : <type of the scope:
- 0: Global
- 1: Local
- 2: With
- 3: Closure
- 4: Catch >,
- "object" : <the scope object defining the content of the scope.
- For local and closure scopes this is transient objects,
- which has a negative handle value>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"scope"}
-{"seq":118,"type":"request","command":"scope","arguments":{"frameNumber":1,"number":1}}
-```
-
-## Request `scopes`
-
-The request `scopes` returns all the scopes for a given frame. If no frame number is specified the scopes for the selected frame are returned.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "scopes",
- "arguments" : { "frameNumber" : <frame number, optional uses selected frame if missing>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "scopes",
- "body" : { "fromScope" : <number of first scope in response>,
- "toScope" : <number of last scope in response>,
- "totalScopes" : <total number of scopes for this frame>,
- "scopes" : [ <array of scopes, see scope request above for format> ],
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"scopes"}
-{"seq":118,"type":"request","command":"scopes","arguments":{"frameNumber":1}}
-```
-
-## Request `scripts`
-
-The request `scripts` retrieves active scripts from the VM. An active script is source code from which there are still live objects in the VM. This request will always force a full garbage collection in the VM.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "scripts",
- "arguments" : { "types" : <types of scripts to retrieve
- set bit 0 for native scripts
- set bit 1 for extension scripts
- set bit 2 for normal scripts
- (default is 4 for normal scripts)>
- "ids" : <array of id's of scripts to return. If this is not specified all scripts are requrned>
- "includeSource" : <boolean indicating whether the source code should be included for the scripts returned>
- "filter" : <string or number: filter string or script id.
- If a number is specified, then only the script with the same number as its script id will be retrieved.
- If a string is specified, then only scripts whose names contain the filter string will be retrieved.>
- }
-}
-```
-
-The response contains an array of the scripts in the VM. This information includes the relative location of the script within the containing resource.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "scripts",
- "body" : [ { "name" : <name of the script>,
- "id" : <id of the script>
- "lineOffset" : <line offset within the containing resource>
- "columnOffset" : <column offset within the containing resource>
- "lineCount" : <number of lines in the script>
- "data" : <optional data object added through the API>
- "source" : <source of the script if includeSource was specified in the request>
- "sourceStart" : <first 80 characters of the script if includeSource was not specified in the request>
- "sourceLength" : <total length of the script in characters>
- "scriptType" : <script type (see request for values)>
- "compilationType" : < How was this script compiled:
- 0 if script was compiled through the API
- 1 if script was compiled through eval
- >
- "evalFromScript" : <if "compilationType" is 1 this is the script from where eval was called>
- "evalFromLocation" : { line : < if "compilationType" is 1 this is the line in the script from where eval was called>
- column : < if "compilationType" is 1 this is the column in the script from where eval was called>
- ]
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"scripts"}
-{"seq":118,"type":"request","command":"scripts","arguments":{"types":7}}
-```
-
-## Request `source`
-
-The request `source` retrieves source code for a frame. It returns a number of source lines running from the `fromLine` to but not including the `toLine`; that is, the interval is open at the "to" end. For example, requesting source from line 2 to 4 returns two lines (2 and 3). Also note that the line numbers are 0 based: the first line is line 0.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "source",
- "arguments" : { "frame" : <frame number (default selected frame)>
- "fromLine" : <from line within the source default is line 0>
- "toLine" : <to line within the source this line is not included in
- the result default is the number of lines in the script>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "source",
- "body" : { "source" : <the source code>
- "fromLine" : <actual from line within the script>
- "toLine" : <actual to line within the script this line is not included in the source>
- "fromPosition" : <actual start position within the script>
- "toPosition" : <actual end position within the script>
- "totalLines" : <total lines in the script>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"source","arguments":{"fromLine":10,"toLine":20}}
-{"seq":118,"type":"request","command":"source","arguments":{"frame":2,"fromLine":10,"toLine":20}}
-```
-
-## Request `setbreakpoint`
-
-The request `setbreakpoint` creates a new break point. This request can be used to set both function and script break points. A function break point sets a break point in an existing function whereas a script break point sets a break point in a named script. A script break point can be set even if the named script is not found.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "setbreakpoint",
- "arguments" : { "type" : <"function" or "script" or "scriptId" or "scriptRegExp">
- "target" : <function expression or script identification>
- "line" : <line in script or function>
- "column" : <character position within the line>
- "enabled" : <initial enabled state. True or false, default is true>
- "condition" : <string with break point condition>
- "ignoreCount" : <number specifying the number of break point hits to ignore, default value is 0>
- }
-}
-```
-
-The result of the `setbreakpoint` request is a response with the number of the newly created break point. This break point number is used in the `changebreakpoint` and `clearbreakpoint` requests.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "setbreakpoint",
- "body" : { "type" : <"function" or "script">
- "breakpoint" : <break point number of the new break point>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"setbreakpoint","arguments":{"type":"function,"target":"f"}}
-{"seq":118,"type":"request","command":"setbreakpoint","arguments":{type:"script","target":"test.js","line":100}}
-{"seq":119,"type":"request","command":"setbreakpoint","arguments":{"type":"function,"target":"f","condition":"i > 7"}}
-```
-
-
-## Request `changebreakpoint`
-
-The request `changebreakpoint` changes the status of a break point.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "changebreakpoint",
- "arguments" : { "breakpoint" : <number of the break point to clear>
- "enabled" : <initial enabled state. True or false, default is true>
- "condition" : <string with break point condition>
- "ignoreCount" : <number specifying the number of break point hits }
-}
-```
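-
-Here is an example (the breakpoint number and settings are illustrative):
-
-```
-{"seq":117,"type":"request","command":"changebreakpoint","arguments":{"breakpoint":1,"enabled":false}}
-```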
-
-## Request `clearbreakpoint`
-
-The request `clearbreakpoint` clears a break point.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "clearbreakpoint",
- "arguments" : { "breakpoint" : <number of the break point to clear>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "clearbreakpoint",
- "body" : { "type" : <"function" or "script">
- "breakpoint" : <number of the break point cleared>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"clearbreakpoint","arguments":{"type":"function,"breakpoint":1}}
-{"seq":118,"type":"request","command":"clearbreakpoint","arguments":{"type":"script","breakpoint":2}}
-```
-
-## Request `setexceptionbreak`
-
-The request `setexceptionbreak` is a request to enable/disable breaks on all / uncaught exceptions. If the "enabled" argument is not specified, the debuggee will toggle the state of the specified break type.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "setexceptionbreak",
- "arguments" : { "type" : <string: "all", or "uncaught">,
- "enabled" : <optional bool: enables the break type if true>
- }
-}
-```
-
-In response, the break on exception property of the debuggee will be set accordingly, and the following response message will be dispatched to the debugger.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "setexceptionbreak",
-  "body" : { "type" : <string: "all" or "uncaught" corresponding to the request.>,
- "enabled" : <bool: true if the break type is currently enabled as a result of the request>
- }
- "running" : true
- "success" : true
-}
-```
-
-Here are a few examples.
-
-```
-{"seq":117,"type":"request","command":"setexceptionbreak","arguments":{"type":"all"}}
-{"seq":118,"type":"request","command":" setexceptionbreak","arguments":{"type":"all",”enabled”:false}}
-{"seq":119,"type":"request","command":" setexceptionbreak","arguments":{"type":"uncaught","enabled":true}}
-```
-
-## Request `v8flags`
-The request `v8flags` is a request to apply the specified V8 flags (analogous to how they are specified on the command line).
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "v8flags",
- "arguments" : { "flags" : <string: a sequence of v8 flags just like those used on the command line>
- }
-}
-```
-
-In response, the specified flags will be applied in the debuggee if they are legal flags. Their effects vary depending on the implementation of the flag.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "v8flags",
- "running" : true
- "success" : true
-}
-```
-
-Here are a few examples.
-
-```
-{"seq":117,"type":"request","command":"v8flags","arguments":{"flags":"--trace_gc —always_compact"}}
-{"seq":118,"type":"request","command":" v8flags","arguments":{"flags":"--notrace_gc"}}
-```
-
-## Request `version`
-
-The request `version` reports the version of the running V8.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "version",
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "type" : "request",
- "body" : { "V8Version": <string, version of V8>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here is an example.
-
-```
-{"seq":1,"type":"request","command":"version"}
-{"seq":134,"request_seq":1,"type":"response","command":"version","success":true,"body":{"V8Version":"1.3.19 (candidate)"},"refs":[],"running":false}
-```
-
-## Request `disconnect`
-
-The request `disconnect` is used to detach the remote debugger from the debuggee. This will trigger the debuggee to disable all active breakpoints and resume execution if the debuggee was previously stopped at a break.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "disconnect",
-}
-```
-
-The only response for the `disconnect` request is the response to a connect request if the debugger is still able to get a response before the debuggee successfully disconnects.
-
-Here is an example:
-
-```
-{"seq":117,"type":"request","command":"disconnect"}
-```
-
-## Request `gc`
-The request `gc` is a request to run the garbage collector in the debuggee.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "gc",
- "arguments" : { "type" : <string: "all">,
- }
-}
-```
-
-In response, the debuggee will run the specified GC type and send the following response message:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "gc",
- “body” : { "before" : <int: total heap usage in bytes before the GC>,
- "after" : <int: total heap usage in bytes after the GC>
- }
- "running" : true
- "success" : true
-}
-```
-
-Here is an example.
-
-```
-{"seq":117,"type":"request","command":"gc","arguments":{"type":"all"}}
-```
-
-## Request `listbreakpoints`
-
-The request `listbreakpoints` is used to get information on breakpoints that may have been set by the debugger.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "listbreakpoints",
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "listbreakpoints",
- "body" : { "breakpoints": [ { "type" : <string: "scriptId" or "scriptName".>,
- "script_id" : <int: script id. Only defined if type is scriptId.>,
- "script_name" : <string: script name. Only defined if type is scriptName.>,
- "number" : <int: breakpoint number. Starts from 1.>,
- "line" : <int: line number of this breakpoint. Starts from 0.>,
- "column" : <int: column number of this breakpoint. Starts from 0.>,
- "groupId" : <int: group id of this breakpoint.>,
- "hit_count" : <int: number of times this breakpoint has been hit. Starts from 0.>,
- "active" : <bool: true if this breakpoint is enabled.>,
- "ignoreCount" : <int: remaining number of times to ignore breakpoint. Starts from 0.>,
- "actual_locations" : <actual locations of the breakpoint.>,
- }
- ],
- "breakOnExceptions" : <true if break on all exceptions is enabled>,
- "breakOnUncaughtExceptions" : <true if break on uncaught exceptions is enabled>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here is an example:
-
-```
-{"seq":117,"type":"request","command":"listbreakpoints"}
-```
-
-
-## Request `setvariablevalue`
-This request sets the value of a variable in the specified scope.
-
-Request:
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "setvariablevalue",
- "arguments : { "name" : <string: variable name>,
- "scope" : { "number" : <scope number>
- "frameNumber" : <frame number, optional uses selected frame if missing>
- }
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "type" : "request",
- "body" : { "newValue": <object: mirror object of the new value> }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
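-
-Here is a hypothetical example. The documented arguments are `name` and `scope`; this sketch assumes the new value is additionally passed as a `newValue` argument mirroring the `newValue` field of the response:
-
-```
-{"seq":118,"type":"request","command":"setvariablevalue","arguments":{"name":"x","newValue":{"value":42},"scope":{"number":0}}}
-```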
-
-# Events
-
-## Event `break`
-
-The event `break` indicates that execution in the VM has stopped due to a break condition. This can be caused by an unconditional break request, by a break point previously set, by a stepping action having completed, or by executing the `debugger` statement in JavaScript.
-
-```
-{ "seq" : <number>,
- "type" : "event",
-
- "event" : "break",
- "body" : { "invocationText" : <text representation of the stack frame>,
- "sourceLine" : <source line where execution is stopped>,
- "sourceColumn" : <column within the source line where execution is stopped>,
- "sourceLineText" : <text for the source line where execution is stopped>,
- "script" : { name : <resource name of the origin of the script>
- lineOffset : <line offset within the origin of the script>
- columnOffset : <column offset within the origin of the script>
- lineCount : <number of lines in the script>
- "breakpoints" : <array of break point numbers hit if any>
- }
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"event","event":"break","body":{"functionName":"f","sourceLine":1,"sourceColumn":14}}
-{"seq":117,"type":"event","event":"break","body":{"functionName":"g","scriptData":"test.js","sourceLine":12,"sourceColumn":22,"breakpoints":[1]}}
-{"seq":117,"type":"event","event":"break","body":{"functionName":"h","sourceLine":100,"sourceColumn":12,"breakpoints":[3,5,7]}}
-```
-
-## Event `exception`
-
-The event `exception` indicates that execution in the VM has stopped due to an exception.
-
-```
-{ "seq" : <number>,
- "type" : "event",
- "event" : "exception",
- "body" : { "uncaught" : <boolean>,
- "exception" : ...
- "sourceLine" : <source line where the exception was thrown>,
- "sourceColumn" : <column within the source line from where the exception was thrown>,
- "sourceLineText" : <text for the source line from where the exception was thrown>,
- "script" : { "name" : <name of script>
- "lineOffset" : <number>
- "columnOffset" : <number>
- "lineCount" : <number>
- }
-
- }
-}
-```
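-
-Here is an example (all values are illustrative; the serialized exception is abbreviated to a reference):
-
-```
-{"seq":118,"type":"event","event":"exception","body":{"uncaught":true,"exception":{"ref":55},"sourceLine":42,"sourceColumn":10,"sourceLineText":"  throw new Error('boom');","script":{"name":"test.js","lineOffset":0,"columnOffset":0,"lineCount":100}}}
-```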
-
-# Response object serialization
-
-Some responses contain objects as part of the body, e.g. the response to the evaluate request contains the result of the expression evaluated.
-
-All objects exposed through the debugger are assigned an ID called a handle. This handle is serialized and can be used to identify objects. A handle has a certain lifetime after which it will no longer refer to the same object. Currently the lifetime of handles matches the processing of a debug event: handles are recycled for each debug event.
-
-An object can be serialized either as a reference to a given handle or as a value representation containing the object content.
-
-An object serialized as a reference follows this format, where `<handle>` is an integer.
-
-```
-{"ref":<handle>}
-```
-
-Objects serialized as values all contain the handle and the type of the object.
-
-```
-{ "handle" : <handle>,
- "type" : <"undefined", "null", "boolean", "number", "string", "object", "function" or "frame">
-}
-```
-
-In some situations special transient objects are created by the debugger. These objects are not really visible from JavaScript, but are created to materialize something inside the VM as an object visible to the debugger. One example of this is the local scope object returned from the `scope` and `scopes` requests. Transient objects are identified by having a negative handle. A transient object can never be retrieved using the `lookup` request, so all transient objects referenced will be in the `refs` part of the response. The lifetime of transient objects is basically limited to the request they are involved in.
-
-For the primitive JavaScript types `undefined` and `null` the type describes the value fully.
-
-```
-{"handle":<handle>,"type":"undefined"}
-```
-
-```
-{"handle":<handle>,"type":"null"}
-```
-
-For the rest of the primitive types `boolean`, `number` and `string` the value is part of the result.
-
-```
-{ "handle":<handle>,
- "type" : <"boolean", "number" or "string">
- "value" : <JSON encoded value>
-}
-```
-
-Boolean value.
-
-```
-{"handle":7,"type":"boolean","value":true}
-```
-
-Number value.
-
-```
-{"handle":8,"type":"number","value":42}
-```
-
-String value.
-
-```
-{"handle":9,"type":"string","value":"a string"}
-```
-
-An object is encoded with additional information.
-
-```
-{ "handle" : <handle>,
- "type" : "object",
- "className" : <Class name, ECMA-262 property [[Class]]>,
- "constructorFunction" : {"ref":<handle>},
- "protoObject" : {"ref":<handle>},
- "prototypeObject" : {"ref":<handle>},
- "properties" : [ {"name" : <name>,
- "ref" : <handle>
- },
- ...
- ]
-}
-```
-
-The difference between the `protoObject` and the `prototypeObject` is that the `protoObject` contains a reference to the actual prototype object (for which accessibility is not defined in ECMA-262, but in V8 it is accessible using the `__proto__` property) whereas the `prototypeObject` is the value of the `prototype` property.
-
-Here is an example.
-
-```
-{"handle":3,"type":"object","className":"Object","constructorFunction":{"ref":4},"protoObject":{"ref":5},"prototypeObject":{"ref":6},"properties":[{"name":"a","ref:7},{"name":"b","ref":8}]}
-```
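-
-To illustrate the distinction in plain JavaScript (a minimal sketch, using the V8-specific `__proto__` accessor mentioned above):
-
-```
-function F() {}
-var f = new F();
-// For the object f as serialized by the debugger:
-//   protoObject     corresponds to f.__proto__  (the actual prototype, here F.prototype)
-//   prototypeObject corresponds to f.prototype  (the value of the "prototype" property,
-//                                                which plain objects don't have)
-```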
-
-A function is encoded as an object, but with additional information in the properties `name`, `inferredName`, `source` and `script`.
-
-```
-{ "handle" : <handle>,
- "type" : "function",
- "className" : "Function",
- "constructorFunction" : {"ref":<handle>},
- "protoObject" : {"ref":<handle>},
- "prototypeObject" : {"ref":<handle>},
- "name" : <function name>,
- "inferredName" : <inferred function name for anonymous functions>
- "source" : <function source>,
- "script" : <reference to function script>,
- "scriptId" : <id of function script>,
- "position" : <function begin position in script>,
- "line" : <function begin source line in script>,
- "column" : <function begin source column in script>,
- "properties" : [ {"name" : <name>,
- "ref" : <handle>
- },
- ...
- ]
-}
-``` \ No newline at end of file
diff --git a/chromium/v8/docs/gdb_jit_interface.md b/chromium/v8/docs/gdb_jit_interface.md
deleted file mode 100644
index 7fcea824eea..00000000000
--- a/chromium/v8/docs/gdb_jit_interface.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Prerequisites
-
- * V8 3.0.9 or newer
- * GDB 7.0 or newer
- * Linux OS
- * CPU with Intel-compatible architecture (ia32 or x64)
-
-# Introduction
-
-GDB JIT interface integration allows V8 to provide GDB with symbol and debugging information for native code emitted at runtime.
-
-When the GDB JIT interface is disabled, a typical backtrace in GDB will contain frames marked with ??. These frames correspond to dynamically generated code:
-
-```
-#8 0x08281674 in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
-#9 0xf5cae28e in ?? ()
-#10 0xf5cc3a0a in ?? ()
-#11 0xf5cc38f4 in ?? ()
-#12 0xf5cbef19 in ?? ()
-#13 0xf5cb09a2 in ?? ()
-#14 0x0809e0a5 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
- has_pending_exception=0xffffd46f) at src/execution.cc:97
-```
-
-Enabling GDB JIT integration, however, allows GDB to produce a more informative stack trace:
-
-```
-#6 0x082857fc in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
-#7 0xf5cae28e in ?? ()
-#8 0xf5cc3a0a in loop () at test.js:6
-#9 0xf5cc38f4 in test.js () at test.js:13
-#10 0xf5cbef19 in ?? ()
-#11 0xf5cb09a2 in ?? ()
-#12 0x0809e1f9 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
- has_pending_exception=0xffffd44f) at src/execution.cc:97
-```
-
-Frames still unknown to GDB correspond to native code without source information. See [GDBJITInterface#KnownLimitations](GDBJITInterface#KnownLimitations.md) for more details.
-
-GDB JIT interface is specified in the GDB documentation: http://sourceware.org/gdb/current/onlinedocs/gdb/JIT-Interface.html
-
-# Enabling GDB JIT integration
-
-GDB JIT integration is currently excluded from compilation by default and disabled at runtime. To enable it:
-
- 1. Build V8 library with `ENABLE_GDB_JIT_INTERFACE` defined. If you are using scons to build V8 run it with `gdbjit=on`.
- 1. Pass `--gdbjit` flag when starting V8.
-
-To check that you have enabled GDB JIT integration correctly, try setting a breakpoint on `__jit_debug_register_code`. This function will be invoked to notify GDB about new code objects.
-
-# Known Limitations
-
- * The GDB side of the JIT interface currently (as of GDB 7.2) does not handle registration of code objects very effectively. Each registration takes progressively more time: with 500 registered objects each registration takes more than 50 ms; with 1000 registered code objects, more than 300 ms. This problem was reported to the GDB developers (http://sourceware.org/ml/gdb/2011-01/msg00002.html) but currently there is no solution available. To reduce pressure on GDB, the current implementation of GDB JIT integration operates in two modes: _default_ and _full_ (enabled by the `--gdbjit-full` flag). In _default_ mode V8 notifies GDB only about code objects that have source information attached (this usually includes all user scripts). In _full_ mode it notifies GDB about all generated code objects (stubs, ICs, trampolines).
-
- * On x64 GDB is unable to properly unwind stack without `.eh_frame` section (Issue 1053 (on Google Code))
-
- * GDB is not notified about code deserialized from the snapshot (Issue 1054 (on Google Code))
-
- * Only Linux OS on Intel-compatible CPUs is supported. For different OSes either a different ELF-header should be generated or a completely different object format should be used.
-
- * Enabling GDB JIT interface disables compacting GC. This is done to reduce pressure on GDB as unregistering and registering each moved code object will incur considerable overhead.
-
- * GDB JIT integration provides only _approximate_ source information. It does not provide any information about local variables, function arguments, stack layout etc. It does not enable stepping through JavaScript code or setting a breakpoint on a given line. However, one can set a breakpoint on a function by its name. \ No newline at end of file
diff --git a/chromium/v8/docs/handling_of_ports.md b/chromium/v8/docs/handling_of_ports.md
deleted file mode 100644
index 9706546e23c..00000000000
--- a/chromium/v8/docs/handling_of_ports.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# General
-This article describes how ports should be handled.
-
-# MIPS
-## Straight-forward MIPS ports
- 1. Do them yourself.
-
-## More complicated MIPS ports
- 1. CC the MIPS team in the CL. Use the mailing list v8-mips-ports.at.googlegroups.com for that purpose.
- 1. The MIPS team will provide you with a patch which you need to merge into your CL.
- 1. Then land the CL.
-
-# PPC (not officially supported)
- 1. Contact/CC the PPC team in the CL if needed. Use the mailing list v8-ppc-ports.at.googlegroups.com for that purpose.
-
-# x87 (not officially supported)
- 1. Contact/CC the x87 team in the CL if needed. Use the mailing list v8-x87-ports.at.googlegroups.com for that purpose.
-
-# ARM
-## Straight-forward ARM ports
- 1. Do them yourself.
-
-## When you are lost
- 1. CC the ARM team in the CL. Use the mailing list v8-arm-ports.at.googlegroups.com for that purpose. \ No newline at end of file
diff --git a/chromium/v8/docs/i18n_support.md b/chromium/v8/docs/i18n_support.md
deleted file mode 100644
index a1eb1c8f0ad..00000000000
--- a/chromium/v8/docs/i18n_support.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# ECMAScript 402
-
-V8 optionally implements the [ECMAScript 402](http://www.ecma-international.org/ecma-402/1.0/) API. The API is enabled by default, but can be turned off at compile time.
-
-
-## Prerequisites
-
-The i18n implementation adds a dependency on ICU. If you run
-
-```
-make dependencies
-```
-
-a suitable version of ICU is checked out into `third_party/icu`.
-
-
-### Alternative ICU checkout
-
-You can check out the ICU sources at a different location and define the gyp variable `icu_gyp_path` to point at the `icu.gyp` file.
-
-
-### System ICU
-
-Last but not least, you can compile V8 against a version of ICU installed in your system. To do so, specify the gyp variable `use_system_icu=1`. If you also have `want_separate_host_toolset` enabled, the bundled ICU will still be compiled to generate the V8 snapshot. The system ICU will only be used for the target architecture.
-
-
-## Embedding V8
-
-If you embed V8 in your application, but your application itself doesn't use ICU, you will need to initialize ICU before calling into V8 by executing:
-
-```
-v8::V8::InitializeICU();
-```
-
-It is safe to invoke this method even if ICU support was not compiled in; in that case it does nothing.
-
-
-## Compiling without i18n support
-
-To build V8 without i18n support use
-
-```
-make i18nsupport=off native
-``` \ No newline at end of file
diff --git a/chromium/v8/docs/javascript.md b/chromium/v8/docs/javascript.md
deleted file mode 100644
index f3a501b9854..00000000000
--- a/chromium/v8/docs/javascript.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Introduction
-
-JavaScript is a dynamically typed scripting language universally used to
-script web content in browsers.
-
-Its specification by ECMA can be found [here](http://www.ecma-international.org/publications/standards/Ecma-262.htm). \ No newline at end of file
diff --git a/chromium/v8/docs/javascript_stack_trace_api.md b/chromium/v8/docs/javascript_stack_trace_api.md
deleted file mode 100644
index 4a0d104c05e..00000000000
--- a/chromium/v8/docs/javascript_stack_trace_api.md
+++ /dev/null
@@ -1,161 +0,0 @@
-All internal errors thrown in V8 capture a stack trace when they are created, which can be accessed from JavaScript through the `error.stack` property. V8 also has various hooks for controlling how stack traces are collected and formatted, and for allowing custom errors to also collect stack traces. This document outlines V8's JavaScript stack trace API.
-
-### Basic stack traces
-
-By default, almost all errors thrown by V8 have a `stack` property that holds the topmost 10 stack frames, formatted as a string. Here's an example of a fully formatted stack trace:
-
-```
-ReferenceError: FAIL is not defined
- at Constraint.execute (deltablue.js:525:2)
- at Constraint.recalculate (deltablue.js:424:21)
- at Planner.addPropagate (deltablue.js:701:6)
- at Constraint.satisfy (deltablue.js:184:15)
- at Planner.incrementalAdd (deltablue.js:591:21)
- at Constraint.addConstraint (deltablue.js:162:10)
- at Constraint.BinaryConstraint (deltablue.js:346:7)
- at Constraint.EqualityConstraint (deltablue.js:515:38)
- at chainTest (deltablue.js:807:6)
- at deltaBlue (deltablue.js:879:2)
-```
-
-The stack trace is collected when the error is created and is the same regardless of where or how many times the error is thrown. We collect 10 frames because it is usually enough to be useful but not so many that it has a noticeable performance impact. You can control how many stack frames are collected by setting the variable
-
-```
-Error.stackTraceLimit
-```
-
-Setting it to 0 will disable stack trace collection. Any finite integer value will be used as the maximum number of frames to collect. Setting it to `Infinity` means that all frames will be collected. This variable only affects the current context; it has to be set explicitly for each context that needs a different value. (Note that what is known as a "context" in V8 terminology corresponds to a page or iframe in Google Chrome.) To set a different default value that affects all contexts use the
-
-```
---stack-trace-limit <value>
-```
-
-command-line flag to V8. To pass this flag to V8 when running Google Chrome use
-
-```
---js-flags="--stack-trace-limit <value>"
-```
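-
-For example, to control collection from within a script:
-
-```
-Error.stackTraceLimit = 0;        // disable stack trace collection
-Error.stackTraceLimit = 20;       // collect at most 20 frames
-Error.stackTraceLimit = Infinity; // collect all frames
-```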
-
-### Stack trace collection for custom exceptions
-The stack trace mechanism used for built-in errors is implemented using a general stack trace collection API that is also available to user scripts. The function
-
-```
-Error.captureStackTrace(error, constructorOpt)
-```
-
-adds a stack property to the given `error` object that will yield the stack trace at the time captureStackTrace was called. The reason for not just returning the formatted stack trace directly is that this way we can postpone the formatting of the stack trace until the stack property is accessed and avoid formatting completely if it never is.
-
-The optional `constructorOpt` parameter allows you to pass in a function value. When collecting the stack trace all frames above the topmost call to this function, including that call, will be left out of the stack trace. This can be useful to hide implementation details that won't be useful to the user. The usual way of defining a custom error that captures a stack trace would be:
-
-```
-function MyError() {
- Error.captureStackTrace(this, MyError);
- // any other initialization
-}
-```
-
-Passing in MyError as a second argument means that the constructor call to MyError won't show up in the stack trace.
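-
-A minimal usage sketch:
-
-```
-function MyError() {
-  Error.captureStackTrace(this, MyError);
-}
-
-var e = new MyError();
-// e.stack is formatted lazily, on first access. Its topmost frame is the
-// caller of MyError, because the MyError frame itself was cut off.
-typeof e.stack;  // "string"
-```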
-
-### Customizing stack traces
-Unlike Java where the stack trace of an exception is a structured value that allows inspection of the stack state, the stack property in V8 just holds a flat string containing the formatted stack trace. This is for no other reason than compatibility with other browsers. However, this is not hardcoded but only the default behavior and can be overridden by user scripts.
-
-For efficiency stack traces are not formatted when they are captured but on demand, the first time the stack property is accessed. A stack trace is formatted by calling
-
-```
-Error.prepareStackTrace(error, structuredStackTrace)
-```
-
-and using whatever this call returns as the value of the `stack` property. If you assign a different function value to `Error.prepareStackTrace` that function will be used to format stack traces. It will be passed the error object that it is preparing a stack trace for and a structured representation of the stack. User stack trace formatters are free to format the stack trace however they want and even return non-string values. It is safe to retain references to the structured stack trace object after a call to prepareStackTrace completes so that it is also a valid return value. Note that the custom prepareStackTrace function is immediately called at the point when the error object is created (e.g. with `new Error()`).
-
-The structured stack trace is an Array of CallSite objects, each of which represents a stack frame. A CallSite object defines the following methods
-
- * **getThis**: returns the value of this
- * **getTypeName**: returns the type of this as a string. This is the name of the function stored in the constructor field of this, if available, otherwise the object's `[[Class]]` internal property.
- * **getFunction**: returns the current function
- * **getFunctionName**: returns the name of the current function, typically its name property. If a name property is not available an attempt will be made to try to infer a name from the function's context.
- * **getMethodName**: returns the name of the property of this or one of its prototypes that holds the current function
- * **getFileName**: if this function was defined in a script returns the name of the script
- * **getLineNumber**: if this function was defined in a script returns the current line number
- * **getColumnNumber**: if this function was defined in a script returns the current column number
- * **getEvalOrigin**: if this function was created using a call to eval returns a CallSite object representing the location where eval was called
- * **isToplevel**: is this a toplevel invocation, that is, is this the global object?
- * **isEval**: does this call take place in code defined by a call to eval?
- * **isNative**: is this call in native V8 code?
- * **isConstructor**: is this a constructor call?
-
-The default stack trace is created using the CallSite API so any information that is available there is also available through this API.
-
-To maintain restrictions imposed on strict mode functions, frames that have a strict mode function and all frames below (its caller etc.) are not allowed to access their receiver and function objects. For those frames, `getFunction()` and `getThis()` will return `undefined`.
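-
-A minimal sketch of a custom formatter built on the CallSite API described above:
-
-```
-Error.prepareStackTrace = function (error, structuredStackTrace) {
-  // Return an array of "file:line" strings instead of the default flat string.
-  return structuredStackTrace.map(function (callSite) {
-    return callSite.getFileName() + ':' + callSite.getLineNumber();
-  });
-};
-
-var stack = new Error().stack;  // now an array of "file:line" strings
-```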
-
-### Compatibility
-The API described here is specific to V8 and is not supported by any other JavaScript implementations. Most implementations do provide an `error.stack` property but the format of the stack trace is likely to be different from the format described here. The recommended use of this API is
-
- * Only rely on the layout of the formatted stack trace if you know your code is running in V8.
- * It is safe to set `Error.stackTraceLimit` and `Error.prepareStackTrace` regardless of which implementation is running your code but be aware that it will only have an effect if your code is running in V8.
-
-### Appendix: Stack trace format
-The default stack trace format used by V8 can for each stack frame give the following information:
-
- * Whether the call is a construct call.
- * The type of the this value (Type).
- * The name of the function called (functionName).
- * The name of the property of this or one of its prototypes that holds the function (methodName).
- * The current location within the source (location)
-
-Any of these may be unavailable and different formats for stack frames are used depending on how much of this information is available. If all the above information is available a formatted stack frame will look like this:
-
-```
-at Type.functionName [as methodName] (location)
-```
-
-or, in the case of a construct call
-
-```
-at new functionName (location)
-```
-
-If only one of functionName and methodName is available, or if they are both available but the same, the format will be:
-
-```
-at Type.name (location)
-```
-
-If neither is available `<anonymous>` will be used as the name.
-
-The Type value is the name of the function stored in the constructor field of this. In V8 all constructor calls set this property to the constructor function, so unless this field has been actively changed after the object was created, it will hold the name of the function it was created by. If it is unavailable the `[[Class]]` property of the object will be used.
-
-One special case is the global object where the Type is not shown. In that case the stack frame will be formatted as
-
-```
-at functionName [as methodName] (location)
-```
-
-The location itself has several possible formats. Most common is the file name, line and column number within the script that defined the current function
-
-```
-fileName:lineNumber:columnNumber
-```
-
-If the current function was created using eval the format will be
-
-```
-eval at position
-```
-
-where position is the full position where the call to eval occurred. Note that this means that positions can be nested if there are nested calls to eval, for instance:
-
-```
-eval at Foo.a (eval at Bar.z (myscript.js:10:3))
-```
-
-If a stack frame is within V8's libraries the location will be
-
-```
-native
-```
-
-and if the location is unavailable it will be
-
-```
-unknown location
-``` \ No newline at end of file
diff --git a/chromium/v8/docs/merging_and_patching.md b/chromium/v8/docs/merging_and_patching.md
deleted file mode 100644
index d141f329840..00000000000
--- a/chromium/v8/docs/merging_and_patching.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Introduction
-
-If you have a patch to the master branch (e.g. an important bug fix) that needs to be merged into one of the production V8 branches, read on.
-
-For the examples, a branched 2.4 version of V8 will be used. Substitute "2.4" with your version number.
-
-**An associated issue is mandatory if a patch is merged. This helps with keeping track of merges.**
-
-# Merge process outlined
-
-The merge process in the Chromium and V8 tracker is driven by labels in the form of
-```
-Merge-[Status]-[Branch]
-```
-The currently important labels for V8 are:
-
- 1. Merge-Request-## initiates the process => This fix should be merged into M-##
- 1. Merge-Review-## => The merge is not yet approved for M-##, e.g. because Canary coverage is missing
- 1. Merge-Approved-## => Simply means that the Chrome TPMs have signed off on the merge
- 1. Merge-Merged-$BRANCHNUMBER$ => When the merge is done the Merge-Approved label is swapped with this one. $BRANCHNUMBER$ is the name/number of the V8 branch e.g. 4.3 for M-43.
-
-# Instructions for git using the automated script
-
-## How to check if a commit was already merged/reverted
-
-Use mergeinfo.py to get all the commits which are connected to the HASH according to Git.
-
-```
-tools/release/mergeinfo.py HASH
-```
-
-## Step 1: Run the script
-
-Let's assume you're merging revision af3cf11 to branch 2.4 (please specify full git hashes - abbreviations are used here for simplicity).
-
-```
-tools/release/merge_to_branch.py --branch 2.4 af3cf11
-```
-
-Run the script with '-h' to display its help message, which includes more options (e.g. you can specify a file containing your patch, or you can reverse a patch, specify a custom commit message, or resume a merging process you've canceled before). Note that the script will use a temporary checkout of v8 - it won't touch your work space.
-You can also merge more than one revision at once, just list them all.
-
-```
-tools/release/merge_to_branch.py --branch 2.4 af3cf11 cf33f1b sf3cf09
-```
-
-## Step 2: Send a notification letter to hablich@chromium.org
-
-Saying something like this:
-```
-_Subject:_ Regression fix merged into V8 2.4 branch (Chrome 8)
-
-_Body:_ We have merged a fix to the V8 version 2.4 branch (the version used in Chrome 8)
-
-Version 2.4.9.10: Issue xxx: The parser doesn't parse.
-```
-
-# FAQ
-
-## I get an error during merge that is related to tagging. What should I do?
-When two people are merging at the same time, a race condition can happen in the merge scripts. If this is the case, contact machenbach@chromium.org and hablich@chromium.org.
-## Is there a TL;DR;?
- 1. Create issue
- 1. Add Merge-Request-{Branch} to the issue
- 1. Wait until somebody adds Merge-Approved-{Branch}
- 1. Merge \ No newline at end of file
diff --git a/chromium/v8/docs/profiling_chromium_with_v8.md b/chromium/v8/docs/profiling_chromium_with_v8.md
deleted file mode 100644
index 46cdac44ade..00000000000
--- a/chromium/v8/docs/profiling_chromium_with_v8.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Introduction
-
-V8's CPU & Heap profilers are trivial to use from V8's shells (see V8Profiler), but it may appear confusing how to use them with Chromium. This page should help you with it.
-
-# Instructions
-
-## Why is using V8's profilers with Chromium different from using them with V8 shells?
-
-Chromium is a complex application, unlike V8 shells. Below is the list of Chromium features that affect profiler usage:
-
- * each renderer is a separate process (OK, not actually each, but let's omit this detail), so they can't share the same log file;
- * the sandbox built around the renderer process prevents it from writing to disk;
- * Developer Tools configure profilers for their own purposes;
- * V8's logging code contains some optimizations to simplify logging state checks.
-
-## So, how to run Chromium to get a CPU profile?
-
-Here is how to run Chromium in order to get a CPU profile from the start of the process:
-```
-./Chromium --no-sandbox --js-flags="--logfile=%t.log --prof"
-```
-
-Please note that you won't see profiles in Developer Tools, because all the data is being logged to a file, not to Developer Tools.
-
-### Flags description
-
- * **--no-sandbox** - turns off the renderer sandbox; obviously a must-have;
- * **--js-flags** - this is the container for flags passed to V8:
- * **--logfile=%t.log** - specifies a name pattern for log files; **%t** gets expanded into current time in milliseconds, so each process gets its own log file; you can use prefixes and suffixes if you want, like this: **prefix-%t-suffix.log**;
- * **--prof** - tells V8 to write statistical profiling information into the log file.
-
-## Notes
-
-Under Windows, be sure to turn on .MAP file creation for **chrome.dll**, but not for **chrome.exe**. \ No newline at end of file
diff --git a/chromium/v8/docs/release_process.md b/chromium/v8/docs/release_process.md
deleted file mode 100644
index c6b36ad68ee..00000000000
--- a/chromium/v8/docs/release_process.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# Introduction
-
-The V8 release process is tightly connected to [Chrome's](https://www.chromium.org/getting-involved/dev-channel). The V8 team is using all four Chrome release channels to push new versions to the users.
-
-If you want to look up what V8 version is in a Chrome release you can check [OmahaProxy](https://omahaproxy.appspot.com/). For each Chrome release a separate branch is created in the V8 repository to make the trace-back easier e.g. for [Chrome 45.0.2413.0](https://chromium.googlesource.com/v8/v8.git/+/chromium/2413).
-
-# Canary releases
-Every day a new Canary build is pushed to the users via [Chrome's Canary channel](https://www.google.com/chrome/browser/canary.html?platform=win64). Normally the deliverable is the latest, stable enough version from [master](https://chromium.googlesource.com/v8/v8.git/+/roll).
-
-Branches for a Canary normally look like this
-
-```
-remotes/origin/4.5.35
-```
-
-# Dev releases
-Every week a new Dev build is pushed to the users via [Chrome's Dev channel](https://www.google.com/chrome/browser/desktop/index.html?extra=devchannel&platform=win64). Normally the deliverable includes the latest stable enough V8 version on the Canary channel.
-
-Branches for a Dev normally look like this
-
-```
-remotes/origin/4.5.35
-```
-
-# Beta releases
-Roughly every 6 weeks a new major branch is created e.g. [for Chrome 44](https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.4). This happens in sync with the creation of [Chrome's Beta channel](https://www.google.com/chrome/browser/beta.html?platform=win64). The Chrome Beta is pinned to the head of V8's branch. After approx. 6 weeks the branch is promoted to Stable.
-
-Changes are only cherry-picked onto the branch in order to stabilize the version.
-
-Branches for a Beta normally look like this
-
-```
-remotes/branch-heads/4.5
-```
-
-They are based on a Canary branch.
-
-# Stable releases
-Roughly every 6 weeks a new major Stable release is done. No special branch is created as the latest Beta branch is simply promoted to Stable. This version is pushed to the users via [Chrome's Stable channel](https://www.google.com/chrome/browser/desktop/index.html?platform=win64).
-
-Branches for a Stable normally look like this
-
-```
-remotes/branch-heads/4.5
-```
-
-They are promoted (reused) Beta branches.
-
-# Which version should I embed in my application?
-
-The tip of the same branch that Chrome's Stable channel uses.
-
-We often backmerge important bug fixes to a stable branch, so if you care about stability and security and correctness, you should include those updates too -- that's why we recommend "the tip of the branch", as opposed to an exact version.
-
-As soon as a new branch is promoted to Stable, we stop maintaining the previous stable branch. This happens every six weeks, so you should be prepared to update at least this often.
-
-Example: The current stable Chrome release is [44.0.2403.125](https://omahaproxy.appspot.com), with V8 4.4.63.25. So you should embed [branch-heads/4.4](https://chromium.googlesource.com/v8/v8.git/+/branch-heads/4.4). And you should update to branch-heads/4.5 when Chrome 45 is released on the Stable channel. \ No newline at end of file
diff --git a/chromium/v8/docs/runtime_functions.md b/chromium/v8/docs/runtime_functions.md
deleted file mode 100644
index 765ed201fa3..00000000000
--- a/chromium/v8/docs/runtime_functions.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Introduction
-
-Much of the JavaScript library is implemented in JavaScript code itself,
-using a minimal set of C++ runtime functions callable from JavaScript.
-Some of these can be called from JavaScript using names that start with %,
-when V8 is run with the flag "--allow-natives-syntax". Others are only
-called by code generated by the code generators, and are not visible in
-JS, even using the % syntax.
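-
-A minimal sketch (assuming d8 was started with --allow-natives-syntax;
-%OptimizeFunctionOnNextCall is one commonly available runtime function):
-
-```
-function add(a, b) { return a + b; }
-
-add(1, 2);                        // warm up the function
-%OptimizeFunctionOnNextCall(add); // request optimization via natives syntax
-add(1, 2);                        // this call runs the optimized code
-```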
-
diff --git a/chromium/v8/docs/source.md b/chromium/v8/docs/source.md
deleted file mode 100644
index fa869b4ec21..00000000000
--- a/chromium/v8/docs/source.md
+++ /dev/null
@@ -1,39 +0,0 @@
-**Quick links:** [browse](http://code.google.com/p/v8/source/browse) | [browse bleeding edge](http://code.google.com/p/v8/source/browse/branches/bleeding_edge) | [changes](https://chromium.googlesource.com/v8/v8.git).
-
-## Command-Line Access
-
-### Git
-See [UsingGit](using_git.md).
-
-### Subversion (deprecated)
-
-Use this command to anonymously check out the up-to-date stable version of the project source code:
-
-> `svn checkout http://v8.googlecode.com/svn/trunk/ v8`
-
-If you plan to contribute to V8 but are not a member, use this command to anonymously check out a read-only version of the development branch:
-
-> `svn checkout http://v8.googlecode.com/svn/branches/bleeding_edge/ v8`
-
-If you're a member of the project, use this command to check out a writable development branch as yourself using HTTPS:
-
-> `svn checkout https://v8.googlecode.com/svn/branches/bleeding_edge/ v8 --username <your username>`
-
-When prompted, enter your generated [googlecode.com](http://code.google.com/hosting/settings) password.
-
-## Source Code Branches
-
-There are several different branches of V8; if you're unsure of which version to get, you most likely want the up-to-date stable version in `trunk/`. Here's an overview of the different branches:
-
- * The bleeding edge, `branches/bleeding_edge/`, is where active development takes place. If you're considering contributing to V8 this is the branch to get.
- * Under `trunk/` is the "stable edge", which is updated a few times per week. It is a copy of the bleeding edge that has been successfully tested. Use this if you want to be almost up to date and don't want your code to break whenever we accidentally forget to add a file on the bleeding edge. Some of the trunk revisions are tagged with X.Y.Z.T version labels. When we decide which X.Y.*.* is the "most stable", it becomes the X.Y branch in subversion.
- * If you want a well-tested version that doesn't change except for bugfixes, use one of the versioned branches (e.g. `branches/3.16/` at the time of this writing). Note that usually only the last two branches are actively maintained; any older branches could have unfixed security holes. You may want to follow the V8 version that Chrome is shipping on its stable (or beta) channels, see http://omahaproxy.appspot.com.
-
-## V8 public API compatibility
-
-The V8 public API (basically the files under the include/ directory) may change over time. New types/methods may be added without breaking existing functionality. When we decide that we want to drop some existing class/methods, we first mark them with the [V8_DEPRECATED](https://code.google.com/p/chromium/codesearch#search/&q=V8_DEPRECATED&sq=package:chromium&type=cs) macro, which causes compile-time warnings when the deprecated methods are called by the embedder. We keep a deprecated method for one branch and then remove it. E.g. if `v8::CpuProfiler::FindCpuProfile` was not deprecated in the _3.17_ branch and marked as `V8_DEPRECATED` in _3.18_, it may well be removed in the _3.19_ branch.
-
-
-## GUI and IDE Access
-
-This project's Subversion repository may be accessed using many different client programs and plug-ins. See your client's documentation for more information. \ No newline at end of file
diff --git a/chromium/v8/docs/testing.md b/chromium/v8/docs/testing.md
deleted file mode 100644
index a777c0c5a06..00000000000
--- a/chromium/v8/docs/testing.md
+++ /dev/null
@@ -1,58 +0,0 @@
-V8 includes a test framework that allows you to test the engine. The framework lets you run both our own test suites that are included with the source code and others, such as the Mozilla and Test262 test suites.
-
-## Running the V8 tests
-
-Before you run the tests, you will have to build V8 with GYP using the instructions [here](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP).
-
-You can append `.check` to any build target to have tests run for it, e.g.
-```
-make ia32.release.check
-make ia32.check
-make release.check
-make check # builds and tests everything (no dot before "check"!)
-```
-
-Before submitting patches, you should always run the quickcheck target, which builds a fast debug build and runs only the most relevant tests:
-```
-make quickcheck
-```
-
-You can also run tests manually:
-```
-tools/run-tests.py --arch-and-mode=ia32.release [--outdir=foo]
-```
-
-Or you can run individual tests:
-```
-tools/run-tests.py --arch=ia32 cctest/test-heap/SymbolTable mjsunit/delete-in-eval
-```
-
-Run the script with `--help` to find out about its other options; `--outdir` defaults to `out`. Also note that using the `cctest` binary to run multiple tests in one process is not supported.
-
-## Running the Mozilla and Test262 tests
-
-The V8 test framework comes with support for running the Mozilla as well as the Test262 test suite. To download the test suites and then run them for the first time, do the following:
-
-```
-tools/run-tests.py --download-data mozilla
-tools/run-tests.py --download-data test262
-```
-
-To run the tests subsequently, you may omit the flag that downloads the test suite:
-
-```
-tools/run-tests.py mozilla
-tools/run-tests.py test262
-```
-
-Note that V8 fails a number of Mozilla tests because they require Firefox-specific extensions.
-
-## Running the WebKit tests
-
-Sometimes all of the above tests pass but WebKit build bots fail. To make sure WebKit tests pass run:
-
-```
-tools/run-tests.py --progress=verbose --outdir=out --arch=ia32 --mode=release webkit --timeout=200
-```
-
-Replace --arch and other parameters with values that match your build options. \ No newline at end of file
diff --git a/chromium/v8/docs/triaging_issues.md b/chromium/v8/docs/triaging_issues.md
deleted file mode 100644
index 981d053cd9d..00000000000
--- a/chromium/v8/docs/triaging_issues.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# How to get an issue triaged
-* *V8 tracker*: Set the state to `Untriaged`
-* *Chromium tracker*: Set the state to `Untriaged` and add the label `Cr-Blink-JavaScript`
-
-# How to assign V8 issues in the Chromium tracker
-Please assign issues to the V8 specialty sheriffs of one of the
-following categories:
-
- * Stability: jkummerow@c....org, adamk@c....org
- * Performance: bmeurer@c....org, mvstanton@c....org
- * Clusterfuzz: Set the bug to the following state:
- * `label:ClusterFuzz label:Cr-Blink-JavaScript status:Available -has:owner`
- * Will show up in [this](https://code.google.com/p/chromium/issues/list?can=2&q=label%3AClusterFuzz+label%3ACr-Blink-JavaScript+status%3AAvailable+-has%3Aowner&colspec=ID+Pri+M+Week+ReleaseBlock+Cr+Status+Owner+Summary+OS+Modified&x=m&y=releaseblock&cells=tiles) query.
- * CC mstarzinger@ and ishell@
-
-Please CC hablich@c....org on all issues.
-
-Assign remaining issues to hablich@c....org.
-
-Use the label Cr-Blink-JavaScript on all issues.
-
-**Please note that this only applies to issues tracked in the Chromium issue tracker.** \ No newline at end of file
diff --git a/chromium/v8/docs/using_git.md b/chromium/v8/docs/using_git.md
deleted file mode 100644
index b5e392aeddf..00000000000
--- a/chromium/v8/docs/using_git.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Git repository
-
-V8's git repository is located at https://chromium.googlesource.com/v8/v8.git
-
-V8's master branch also has an official git mirror on GitHub: http://github.com/v8/v8-git-mirror.
-
-**Don't just `git-clone` either of these URLs** if you want to build V8 from your checkout; instead, follow the instructions below to get everything set up correctly.
-
-## Prerequisites
-
- 1. **Git**. To install using `apt-get`:
-```
-apt-get install git
-```
- 1. **depot\_tools**. See [instructions](http://dev.chromium.org/developers/how-tos/install-depot-tools).
- 1. For **push access**, you need to setup a .netrc file with your git password:
- 1. Go to https://chromium.googlesource.com/new-password - login with your committer account (e.g. @chromium.org account, non-chromium.org ones work too). Note: creating a new password doesn't automatically revoke any previously created passwords.
- 1. Follow the instructions in the "Staying Authenticated" section. It will ask you to copy-paste two lines into your ~/.netrc file.
- 1. In the end, ~/.netrc should have two lines that look like:
-```
-machine chromium.googlesource.com login git-yourusername.chromium.org password <generated pwd>
-machine chromium-review.googlesource.com login git-yourusername.chromium.org password <generated pwd>
-```
- 1. Make sure that your ~/.netrc file's permissions are 0600, as many programs refuse to read .netrc files that are readable by anyone other than you.
-
-
-## How to start
-
-Make sure depot\_tools are up-to-date by typing once:
-
-```
-gclient
-```
-
-
-Then get V8, including all branches and dependencies:
-
-```
-fetch v8
-cd v8
-```
-
-After that you're intentionally in a detached head state.
-
-Optionally you can specify how new branches should be tracked:
-
-```
-git config branch.autosetupmerge always
-git config branch.autosetuprebase always
-```
-
-Alternatively, you can create new local branches like this (recommended):
-
-```
-git new-branch mywork
-```
-
-## Staying up-to-date
-
-Update your current branch with git pull. Note that if you're not on a branch, git pull won't work, and you'll need to use git fetch instead.
-
-```
-git pull
-```
-
-Sometimes dependencies of v8 are updated. You can synchronize those by running
-
-```
-gclient sync
-```
-
-## Sending code for reviewing
-
-```
-git cl upload
-```
-
-## Committing
-
-You can use the CQ checkbox on codereview for committing (preferred). See also the [chromium instructions](http://www.chromium.org/developers/testing/commit-queue) for CQ flags and troubleshooting.
-
-If you need more trybots than the default, add the following to your commit message on rietveld (e.g. for adding a nosnap bot):
-
-```
-CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_nosnap_rel
-```
-
-To land manually, update your branch:
-
-```
-git pull --rebase origin
-```
-
-Then commit using
-
-```
-git cl land
-```
-
-# For project members
-
-
-## Try jobs
-
-### Creating a try job from codereview
-
- 1. Upload a CL to rietveld.
-```
-git cl upload
-```
- 1. Try the CL by sending a try job to the try bots like this:
-```
-git cl try
-```
- 1. Wait for the try bots to build and you will get an e-mail with the result. You can also check the try state at your patch on codereview.
- 1. If applying the patch fails, you either need to rebase your patch or specify the v8 revision to sync to:
-```
-git cl try --revision=1234
-```
-
-### Creating a try job from a local branch
-
- 1. Commit some changes to a git branch in the local repo.
- 1. Try the change by sending a try job to the try bots like this:
-```
-git try
-```
- 1. Wait for the try bots to build and you will get an e-mail with the result. Note: There are issues with some of the slaves at the moment. Sending try jobs from codereview is recommended.
-
-### Useful arguments
-
-The revision argument tells the try bot which revision of the code base your local changes will be applied to. Without the revision, our LKGR revision is used as the base (http://v8-status.appspot.com/lkgr).
-```
-git try --revision=1234
-```
-To avoid running your try job on all bots, use the --bot flag with a comma-separated list of builder names. Example:
-```
-git try --bot=v8_mac_rel
-```
-
-### Viewing the try server
-
-http://build.chromium.org/p/tryserver.v8/waterfall
-
-### Access credentials
-
-If asked for access credentials, use your @chromium.org email address and your generated password from [googlecode.com](http://code.google.com/hosting/settings). \ No newline at end of file
diff --git a/chromium/v8/docs/v8_c_plus_plus_styleand_sops.md b/chromium/v8/docs/v8_c_plus_plus_styleand_sops.md
deleted file mode 100644
index 56865045439..00000000000
--- a/chromium/v8/docs/v8_c_plus_plus_styleand_sops.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Introduction
-
-In general, V8 should conform to Google's/Chrome's C++ Style Guide for new code that is written. Your V8 code should conform to it as much as possible. There will always be cases where Google/Chrome Style Guide conformity or Google/Chrome best practices are extremely cumbersome or underspecified for our use cases. We document these exceptions here.
-
-# Details
-
-Coming Soon \ No newline at end of file
diff --git a/chromium/v8/docs/v8_committers_responsibility.md b/chromium/v8/docs/v8_committers_responsibility.md
deleted file mode 100644
index ee8d2125636..00000000000
--- a/chromium/v8/docs/v8_committers_responsibility.md
+++ /dev/null
@@ -1,41 +0,0 @@
-## Basic commit guidelines
-
-When you're committing to the V8 repositories, ensure that you follow these guidelines:
-
- 1. Find the right reviewer for your changes and for patches you're asked to review.
- 1. Be available on IM and/or email before and after you land the change.
- 1. Watch the [waterfall](http://build.chromium.org/p/client.v8/console) until all bots turn green after your change.
- 1. When landing a TBR change (To Be Reviewed), make sure to notify the people whose code you're changing. Usually just send the review e-mail.
-
-In short, do the right thing for the project, not the easiest thing to get code committed, and above all: use your best judgement.
-
-**Don't be afraid to ask questions. There is always someone who will immediately read messages sent to the v8-committers mailing list who can help you.**
-
-## Changes with multiple reviewers
-
-There are occasionally changes with a lot of reviewers on them, since sometimes several people might need to be in the loop for a change because of multiple areas of responsibility and expertise.
-
-The problem is that without some guidelines, there's no clear responsibility given in these reviews.
-
-If you're the sole reviewer on a change, you know you have to do a good job. When there are three other people, you sometimes assume that somebody else must have looked carefully at some part of the review. Sometimes all the reviewers think this and the change isn't reviewed properly.
-
-In other cases, some reviewers say "LGTM" for a patch, while others are still expecting changes. The author can get confused as to the status of the review, and some patches have been checked in where at least one reviewer expected further changes before committing.
-
-At the same time, we want to encourage many people to participate in the review process and keep tabs on what's going on.
-
-So, here are some guidelines to help clarify the process:
- 1. When a patch author requests more than one reviewer, they should make clear in the review request email what they expect the responsibility of each reviewer to be. For example, you could write this in the email:
-```
-
- a. larry: bitmap changes
- b. sergey: process hacks
- c. everybody else: FYI
-
-```
- 1. In this case, you might be on the review list because you've asked to be in the loop for multiprocess changes, but you wouldn't be the primary reviewer and the author and other reviewers wouldn't be expecting you to review all the diffs in detail.
- 1. If you get a review that includes many other people, and the author didn't do (1), please ask them what part you're responsible for if you don't want to review the whole thing in detail.
- 1. The author should wait for approval from everybody on the reviewer list before checking in.
- 1. People who are on a review without clear review responsibility (i.e. drive-by reviews) should be super responsive and not hold up the review. The patch author should feel free to ping them mercilessly if they do.
- 1. If you're an "FYI" person on a review and you didn't actually review in detail (or at all), but don't have a problem with the patch, note this. You could say something like "rubber stamp" or "ACK" instead of "LGTM." This way the real reviewers know not to trust that you did their work for them, but the author of the patch knows they don't have to wait for further feedback from you. Hopefully we can still keep everybody in the loop but have clear ownership and detailed reviews. It might even speed up some changes since you can quickly "ACK" changes you don't care about, and the author knows they don't have to wait for feedback from you.
-
-(Adapted from: http://dev.chromium.org/developers/committers-responsibility ) \ No newline at end of file
diff --git a/chromium/v8/docs/v8_profiler.md b/chromium/v8/docs/v8_profiler.md
deleted file mode 100644
index 670fe11dd78..00000000000
--- a/chromium/v8/docs/v8_profiler.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# Introduction
-
-V8 has built-in sample-based profiling. Profiling is turned off by default, but can be enabled via the --prof command line option. The sampler records stacks of both JavaScript and C/C++ code.
-
-# Build
-Build the d8 shell following the instructions at [BuildingWithGYP](BuildingWithGYP.md).
-
-
-# Command Line
-To start profiling, use the `--prof` option. When profiling, V8 generates a `v8.log` file which contains profiling data.
-
-Windows:
-```
-build\Release\d8 --prof script.js
-```
-
-Other platforms (replace "ia32" with "x64" if you want to profile the x64 build):
-```
-out/ia32.release/d8 --prof script.js
-```
-
-# Process the Generated Output
-
-Log file processing is done using JS scripts run by the d8 shell. For this to work, a `d8` binary (or symlink, or `d8.exe` on Windows) must be in the root of your V8 checkout, or in the path specified by the environment variable `D8_PATH`. Note: this binary is only used to process the log, not for the actual profiling, so it doesn't matter which version it is.
-
-Windows:
-```
-tools\windows-tick-processor.bat v8.log
-```
-
-Linux:
-```
-tools/linux-tick-processor v8.log
-```
-
-Mac OS X:
-```
-tools/mac-tick-processor v8.log
-```
-
-## Snapshot-based VM build and builtins reporting
-
-When a snapshot-based VM build is being used, code objects from a snapshot that don't correspond to functions are reported with generic names like _"A builtin from the snapshot"_, because their real names are not stored in the snapshot. To see the names, the following steps must be taken:
-
- * `--log-snapshot-positions` flag must be passed to VM (along with `--prof`); this way, for deserialized objects the `(memory address, snapshot offset)` pairs are being emitted into profiler log;
-
- * `--snapshot-log=<log file from mksnapshot>` flag must be passed to the tick processor script; a log file from the `mksnapshot` program (a snapshot log) contains address-offset pairs for serialized objects, and their names; using the snapshot log, names can be mapped onto deserialized objects during profiler log processing; the snapshot log file is called `snapshot.log` and resides alongside V8's compiled files.
-
-An example of usage:
-```
-out/ia32.release/d8 --prof --log-snapshot-positions script.js
-tools/linux-tick-processor --snapshot-log=out/ia32.release/obj.target/v8_snapshot/geni/snapshot.log v8.log
-```
-
-# Programmatic Control of Profiling
-If you would like to control in your application when profile samples are collected, you can do so.
-
-First you'll probably want to use the `--noprof-auto` command line switch which prevents the profiler from automatically starting to record profile ticks.
-
-Profile ticks will not be recorded until your application specifically invokes these APIs:
- * `V8::ResumeProfiler()` - start/resume collection of data
- * `V8::PauseProfiler()` - pause collection of data
-
-# Example Output
-
-```
-Statistical profiling result from benchmarks\v8.log, (4192 ticks, 0 unaccounted, 0 excluded).
-
- [Shared libraries]:
- ticks total nonlib name
- 9 0.2% 0.0% C:\WINDOWS\system32\ntdll.dll
- 2 0.0% 0.0% C:\WINDOWS\system32\kernel32.dll
-
- [JavaScript]:
- ticks total nonlib name
- 741 17.7% 17.7% LazyCompile: am3 crypto.js:108
- 113 2.7% 2.7% LazyCompile: Scheduler.schedule richards.js:188
- 103 2.5% 2.5% LazyCompile: rewrite_nboyer earley-boyer.js:3604
- 103 2.5% 2.5% LazyCompile: TaskControlBlock.run richards.js:324
- 96 2.3% 2.3% Builtin: JSConstructCall
- ...
-
- [C++]:
- ticks total nonlib name
- 94 2.2% 2.2% v8::internal::ScavengeVisitor::VisitPointers
- 33 0.8% 0.8% v8::internal::SweepSpace
- 32 0.8% 0.8% v8::internal::Heap::MigrateObject
- 30 0.7% 0.7% v8::internal::Heap::AllocateArgumentsObject
- ...
-
-
- [GC]:
- ticks total nonlib name
- 458 10.9%
-
- [Bottom up (heavy) profile]:
- Note: percentage shows a share of a particular caller in the total
- amount of its parent calls.
- Callers occupying less than 2.0% are not shown.
-
- ticks parent name
- 741 17.7% LazyCompile: am3 crypto.js:108
- 449 60.6% LazyCompile: montReduce crypto.js:583
- 393 87.5% LazyCompile: montSqrTo crypto.js:603
- 212 53.9% LazyCompile: bnpExp crypto.js:621
- 212 100.0% LazyCompile: bnModPowInt crypto.js:634
- 212 100.0% LazyCompile: RSADoPublic crypto.js:1521
- 181 46.1% LazyCompile: bnModPow crypto.js:1098
- 181 100.0% LazyCompile: RSADoPrivate crypto.js:1628
- ...
-```
-
-# Timeline plot
-The timeline plot visualizes where V8 is spending time. This can be used to find bottlenecks and spot things that are unexpected (for example, too much time spent in the garbage collector). Data for the plot are gathered by both sampling and instrumentation. Linux with gnuplot 4.6 is required.
-
-To create a timeline plot, run V8 as described above, with the option `--log-timer-events` in addition to `--prof`:
-```
-out/ia32.release/d8 --prof --log-timer-events script.js
-```
-
-The output is then passed to a plot script, similar to the tick-processor:
-```
-tools/plot-timer-events v8.log
-```
-
-This creates `timer-events.png` in the working directory, which can be opened with most image viewers.
-
-# Options
-Since recording log output comes with a certain performance overhead, the script attempts to correct this using a distortion factor. If not specified, it tries to find out automatically. You can however also specify the distortion factor manually.
-```
-tools/plot-timer-events --distortion=4500 v8.log
-```
-
-You can also manually specify a certain range for which to create the plot or statistical profile, expressed in milliseconds:
-```
-tools/plot-timer-events --distortion=4500 --range=1000,2000 v8.log
-tools/linux-tick-processor --distortion=4500 --range=1000,2000 v8.log
-```
-
-# HTML 5 version
-Both statistical profile and timeline plot are available [in the browser](http://v8.googlecode.com/svn/branches/bleeding_edge/tools/profviz/profviz.html). However, the statistical profile lacks C++ symbol resolution and the JavaScript port of gnuplot performs an order of magnitude slower than the native one. \ No newline at end of file
diff --git a/chromium/v8/include/v8-debug.h b/chromium/v8/include/v8-debug.h
index 0b64fb38829..0d0ee739c0e 100644
--- a/chromium/v8/include/v8-debug.h
+++ b/chromium/v8/include/v8-debug.h
@@ -155,8 +155,11 @@ class V8_EXPORT Debug {
*/
typedef void (*DebugMessageDispatchHandler)();
- static bool SetDebugEventListener(EventCallback that,
+ static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>());
+ V8_DEPRECATED("Use version with an Isolate",
+ static bool SetDebugEventListener(
+ EventCallback that, Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
@@ -170,7 +173,9 @@ class V8_EXPORT Debug {
static bool CheckDebugBreak(Isolate* isolate);
// Message based interface. The message protocol is JSON.
- static void SetMessageHandler(MessageHandler handler);
+ static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
+ V8_DEPRECATED("Use version with an Isolate",
+ static void SetMessageHandler(MessageHandler handler));
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
@@ -194,10 +199,9 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Value> Call(v8::Local<v8::Function> fun,
- Local<Value> data = Local<Value>()));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Value> Call(v8::Local<v8::Function> fun,
+ Local<Value> data = Local<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
@@ -206,8 +210,8 @@ class V8_EXPORT Debug {
/**
* Returns a mirror object for the given object.
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> GetMirror(v8::Local<v8::Value> obj));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Value> GetMirror(v8::Local<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj);
@@ -242,7 +246,9 @@ class V8_EXPORT Debug {
* "Evaluate" debug command behavior currently is not specified in scope
* of this method.
*/
- static void ProcessDebugMessages();
+ static void ProcessDebugMessages(Isolate* isolate);
+ V8_DEPRECATED("Use version with an Isolate",
+ static void ProcessDebugMessages());
/**
* Debugger is running in its own context which is entered while debugger
@@ -251,7 +257,9 @@ class V8_EXPORT Debug {
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
- static Local<Context> GetDebugContext();
+ static Local<Context> GetDebugContext(Isolate* isolate);
+ V8_DEPRECATED("Use version with an Isolate",
+ static Local<Context> GetDebugContext());
/**
diff --git a/chromium/v8/include/v8-experimental.h b/chromium/v8/include/v8-experimental.h
new file mode 100644
index 00000000000..f988e14054c
--- /dev/null
+++ b/chromium/v8/include/v8-experimental.h
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * This header contains a set of experimental V8 APIs. We hope these will
+ * become a part of standard V8, but they may also be removed if we deem the
+ * experiment to not be successful.
+ */
+#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
+#define V8_INCLUDE_V8_EXPERIMENTAL_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace experimental {
+
+// Allow the embedder to construct accessors that V8 can compile and use
+// directly, without jumping into the runtime.
+class V8_EXPORT FastAccessorBuilder {
+ public:
+ struct ValueId {
+ size_t value_id;
+ };
+ struct LabelId {
+ size_t label_id;
+ };
+
+ static FastAccessorBuilder* New(Isolate* isolate);
+
+ ValueId IntegerConstant(int int_constant);
+ ValueId GetReceiver();
+ ValueId LoadInternalField(ValueId value_id, int field_no);
+ ValueId LoadValue(ValueId value_id, int offset);
+ ValueId LoadObject(ValueId value_id, int offset);
+ void ReturnValue(ValueId value_id);
+ void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
+ void CheckNotZeroOrReturnNull(ValueId value_id);
+ LabelId MakeLabel();
+ void SetLabel(LabelId label_id);
+ void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+
+ private:
+ FastAccessorBuilder() = delete;
+ FastAccessorBuilder(const FastAccessorBuilder&) = delete;
+ ~FastAccessorBuilder() = delete;
+ void operator=(const FastAccessorBuilder&) = delete;
+};
+
+} // namespace experimental
+} // namespace v8
+
+#endif // V8_INCLUDE_V8_EXPERIMENTAL_H_
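A sketch of how an embedder might drive this builder: construct the accessor body, then hand it to FunctionTemplate::NewWithFastHandler (declared later in this patch) alongside a regular callback as the slow path. The internal-field index and the slow_path callback are illustrative:

    v8::Local<v8::FunctionTemplate> MakeFastFieldGetter(
        v8::Isolate* isolate, v8::FunctionCallback slow_path) {
      namespace ex = v8::experimental;
      ex::FastAccessorBuilder* builder = ex::FastAccessorBuilder::New(isolate);
      // Return internal field 0 of the receiver without entering the runtime.
      ex::FastAccessorBuilder::ValueId receiver = builder->GetReceiver();
      ex::FastAccessorBuilder::ValueId field =
          builder->LoadInternalField(receiver, 0);
      builder->ReturnValue(field);
      // Per the header comment, the callback may not be null when a fast
      // handler is supplied.
      return v8::FunctionTemplate::NewWithFastHandler(isolate, slow_path,
                                                      builder);
    }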
diff --git a/chromium/v8/include/v8-platform.h b/chromium/v8/include/v8-platform.h
index c6cba0f9829..4fbef0f5d96 100644
--- a/chromium/v8/include/v8-platform.h
+++ b/chromium/v8/include/v8-platform.h
@@ -5,6 +5,8 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
+#include <stdint.h>
+
namespace v8 {
class Isolate;
@@ -107,6 +109,51 @@ class Platform {
* the epoch.
**/
virtual double MonotonicallyIncreasingTime() = 0;
+
+ /**
+ * Called by TRACE_EVENT* macros, don't call this directly.
+ * The name parameter is a category group for example:
+ * TRACE_EVENT0("v8,parse", "V8.Parse")
+ * The pointer returned points to a value with zero or more of the bits
+ * defined in CategoryGroupEnabledFlags.
+ **/
+ virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ /**
+ * Gets the category group name of the given category_enabled_flag pointer.
+ * Usually used while serializing TRACE_EVENTs.
+ **/
+ virtual const char* GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) {
+ static const char dummy[] = "dummy";
+ return dummy;
+ }
+
+ /**
+ * Adds a trace event to the platform tracing system. This function call is
+ * usually the result of a TRACE_* macro from trace_event_common.h when
+ * tracing and the category of the particular trace are enabled. It is not
+ * advisable to call this function on its own; it is really only meant to be
+ * used by the trace macros. The returned handle can be used by
+ * UpdateTraceEventDuration to update the duration of COMPLETE events.
+ */
+ virtual uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values,
+ unsigned int flags) {
+ return 0;
+ }
+
+ /**
+ * Sets the duration field of a COMPLETE trace event. It must be called with
+ * the handle returned from AddTraceEvent().
+ **/
+ virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) {}
};
} // namespace v8
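A sketch of an embedder Platform wiring the new hooks to its own tracer. Only the tracing overrides are shown; a real subclass must also implement the task-posting and time methods this header declares elsewhere, so treat this as a fragment rather than a complete Platform:

    class TracingPlatform : public v8::Platform {
     public:
      const uint8_t* GetCategoryGroupEnabled(const char* name) override {
        static uint8_t enabled = 1;  // report every category as enabled
        return &enabled;
      }
      const char* GetCategoryGroupName(const uint8_t*) override { return "v8"; }
      uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
                             const char* name, uint64_t id, uint64_t bind_id,
                             int32_t num_args, const char** arg_names,
                             const uint8_t* arg_types,
                             const uint64_t* arg_values,
                             unsigned int flags) override {
        // Forward to the embedder's tracing backend; return a handle that
        // UpdateTraceEventDuration can later resolve (0 here for brevity).
        return 0;
      }
      void UpdateTraceEventDuration(const uint8_t*, const char*,
                                    uint64_t) override {}
      // CallOnBackgroundThread() etc. omitted for brevity.
    };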
diff --git a/chromium/v8/include/v8-testing.h b/chromium/v8/include/v8-testing.h
index d18fc725836..f67bf2530d6 100644
--- a/chromium/v8/include/v8-testing.h
+++ b/chromium/v8/include/v8-testing.h
@@ -39,7 +39,7 @@ class V8_EXPORT Testing {
/**
* Force deoptimization of all functions.
*/
- static void DeoptimizeAll();
+ static void DeoptimizeAll(Isolate* isolate);
};
diff --git a/chromium/v8/include/v8-version.h b/chromium/v8/include/v8-version.h
index 3b59c2cf70b..e367eac3785 100644
--- a/chromium/v8/include/v8-version.h
+++ b/chromium/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 7
-#define V8_BUILD_NUMBER 80
-#define V8_PATCH_LEVEL 27
+#define V8_MINOR_VERSION 9
+#define V8_BUILD_NUMBER 385
+#define V8_PATCH_LEVEL 11
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/chromium/v8/include/v8.h b/chromium/v8/include/v8.h
index 39535434112..36df60a5f5d 100644
--- a/chromium/v8/include/v8.h
+++ b/chromium/v8/include/v8.h
@@ -12,8 +12,8 @@
* For other documentation see http://code.google.com/apis/v8/
*/
-#ifndef V8_H_
-#define V8_H_
+#ifndef INCLUDE_V8_H_
+#define INCLUDE_V8_H_
#include <stddef.h>
#include <stdint.h>
@@ -92,6 +92,7 @@ class ObjectTemplate;
class Platform;
class Primitive;
class Promise;
+class Proxy;
class RawOperationDescriptor;
class Script;
class SharedArrayBuffer;
@@ -103,6 +104,7 @@ class String;
class StringObject;
class Symbol;
class SymbolObject;
+class Private;
class Uint32;
class Utils;
class Value;
@@ -135,6 +137,10 @@ class CallHandlerHelper;
class EscapableHandleScope;
template<typename T> class ReturnValue;
+namespace experimental {
+class FastAccessorBuilder;
+} // namespace experimental
+
namespace internal {
class Arguments;
class Heap;
@@ -146,7 +152,7 @@ template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
-}
+} // namespace internal
/**
@@ -311,6 +317,7 @@ class Local {
friend class String;
friend class Object;
friend class Context;
+ friend class Private;
template<class F> friend class internal::CustomArguments;
friend Local<Primitive> Undefined(Isolate* isolate);
friend Local<Primitive> Null(Isolate* isolate);
@@ -418,12 +425,12 @@ class WeakCallbackInfo {
V8_INLINE T* GetParameter() const { return parameter_; }
V8_INLINE void* GetInternalField(int index) const;
- V8_INLINE V8_DEPRECATE_SOON("use indexed version",
- void* GetInternalField1() const) {
+ V8_INLINE V8_DEPRECATED("use indexed version",
+ void* GetInternalField1() const) {
return internal_fields_[0];
}
- V8_INLINE V8_DEPRECATE_SOON("use indexed version",
- void* GetInternalField2() const) {
+ V8_INLINE V8_DEPRECATED("use indexed version",
+ void* GetInternalField2() const) {
return internal_fields_[1];
}
@@ -549,13 +556,13 @@ template <class T> class PersistentBase {
* critical form of resource management!
*/
template <typename P>
- V8_INLINE V8_DEPRECATE_SOON(
+ V8_INLINE V8_DEPRECATED(
"use WeakCallbackInfo version",
void SetWeak(P* parameter,
typename WeakCallbackData<T, P>::Callback callback));
template <typename S, typename P>
- V8_INLINE V8_DEPRECATE_SOON(
+ V8_INLINE V8_DEPRECATED(
"use WeakCallbackInfo version",
void SetWeak(P* parameter,
typename WeakCallbackData<S, P>::Callback callback));
@@ -567,7 +574,7 @@ template <class T> class PersistentBase {
// specify a parameter for the callback or the location of two internal
// fields in the dying object.
template <typename P>
- V8_INLINE V8_DEPRECATE_SOON(
+ V8_INLINE V8_DEPRECATED(
"use SetWeak",
void SetPhantom(P* parameter,
typename WeakCallbackInfo<P>::Callback callback,
@@ -603,6 +610,13 @@ template <class T> class PersistentBase {
*/
V8_INLINE void MarkPartiallyDependent();
+ /**
+ * Marks the reference to this object as active. The scavenge garbage
+ * collection should not reclaim the objects marked as active.
+ * This bit is cleared after each garbage collection pass.
+ */
+ V8_INLINE void MarkActive();
+
V8_INLINE bool IsIndependent() const;
/** Checks if the handle holds the only reference to an object. */
@@ -966,8 +980,8 @@ class V8_EXPORT SealHandleScope {
void operator delete(void*, size_t);
internal::Isolate* isolate_;
- int prev_level_;
internal::Object** prev_limit_;
+ int prev_sealed_level_;
};
@@ -1304,10 +1318,10 @@ class V8_EXPORT ScriptCompiler {
* \return Compiled script object (context independent; for running it must be
* bound to a context).
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<UnboundScript> CompileUnbound(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions));
+ static V8_DEPRECATED("Use maybe version",
+ Local<UnboundScript> CompileUnbound(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions));
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
@@ -1323,7 +1337,7 @@ class V8_EXPORT ScriptCompiler {
* when this function was called. When run it will always use this
* context.
*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use maybe version",
Local<Script> Compile(Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
@@ -1353,11 +1367,11 @@ class V8_EXPORT ScriptCompiler {
* (ScriptStreamingTask has been run). V8 doesn't construct the source string
* during streaming, so the embedder needs to pass the full source here.
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Script> Compile(Isolate* isolate, StreamedSource* source,
- Local<String> full_source_string,
- const ScriptOrigin& origin));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Script> Compile(Isolate* isolate,
+ StreamedSource* source,
+ Local<String> full_source_string,
+ const ScriptOrigin& origin));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, StreamedSource* source,
Local<String> full_source_string, const ScriptOrigin& origin);
@@ -1483,7 +1497,7 @@ class V8_EXPORT Message {
* Returns the index within the line of the last character where
* the error occurred.
*/
- V8_DEPRECATE_SOON("Use maybe version", int GetEndColumn() const);
+ V8_DEPRECATED("Use maybe version", int GetEndColumn() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
/**
@@ -1595,8 +1609,7 @@ class V8_EXPORT StackFrame {
/**
* Returns the name of the resource that contains the script for the
* function for this StackFrame or sourceURL value if the script name
- * is undefined and its source ends with //# sourceURL=... string or
- * deprecated //@ sourceURL=... string.
+ * is undefined and its source ends with //# sourceURL=... string.
*/
Local<String> GetScriptNameOrSourceURL() const;
@@ -1652,8 +1665,8 @@ class V8_EXPORT JSON {
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> Parse(Local<String> json_string));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Value> Parse(Local<String> json_string));
static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
Isolate* isolate, Local<String> json_string);
};
@@ -1728,7 +1741,8 @@ class V8_EXPORT Value : public Data {
bool IsFunction() const;
/**
- * Returns true if this value is an array.
+ * Returns true if this value is an array. Note that it will return false for
+ * an Proxy for an array.
*/
bool IsArray() const;
@@ -1941,6 +1955,11 @@ class V8_EXPORT Value : public Data {
*/
bool IsSharedArrayBuffer() const;
+ /**
+ * Returns true if this value is a JavaScript Proxy.
+ */
+ bool IsProxy() const;
+
V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
Local<Context> context) const;
@@ -1964,34 +1983,34 @@ class V8_EXPORT Value : public Data {
Local<Number> ToNumber(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<String> ToString(Isolate* isolate) const);
- V8_DEPRECATE_SOON("Use maybe version",
- Local<String> ToDetailString(Isolate* isolate) const);
+ V8_DEPRECATED("Use maybe version",
+ Local<String> ToDetailString(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Object> ToObject(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger(Isolate* isolate) const);
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Uint32> ToUint32(Isolate* isolate) const);
+ V8_DEPRECATED("Use maybe version",
+ Local<Uint32> ToUint32(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Int32> ToInt32(Isolate* isolate) const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Boolean> ToBoolean() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Number> ToNumber() const);
+ inline V8_DEPRECATED("Use maybe version", Local<Number> ToNumber() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<String> ToString() const);
- inline V8_DEPRECATE_SOON("Use maybe version",
- Local<String> ToDetailString() const);
+ inline V8_DEPRECATED("Use maybe version",
+ Local<String> ToDetailString() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Object> ToObject() const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToUint32() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Int32> ToInt32() const);
+ inline V8_DEPRECATED("Use maybe version", Local<Uint32> ToUint32() const);
+ inline V8_DEPRECATED("Use maybe version", Local<Int32> ToInt32() const);
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToArrayIndex() const);
+ V8_DEPRECATED("Use maybe version", Local<Uint32> ToArrayIndex() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
Local<Context> context) const;
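All of these deprecations follow one migration pattern: the context-taking "maybe" overload makes the possible exception explicit instead of returning an empty handle. A sketch, assuming value and context are in scope:

    v8::Local<v8::String> str;
    if (value->ToString(context).ToLocal(&str)) {
      // Conversion succeeded; use |str|.
    } else {
      // Conversion threw (e.g. a throwing toString); an exception is pending.
    }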
@@ -2179,6 +2198,8 @@ class V8_EXPORT String : public Name {
public:
virtual ~ExternalStringResourceBase() {}
+ virtual bool IsCompressible() const { return false; }
+
protected:
ExternalStringResourceBase() {}
@@ -2295,7 +2316,7 @@ class V8_EXPORT String : public Name {
int length = -1);
/** Allocates a new string from Latin-1 data.*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use maybe version",
Local<String> NewFromOneByte(Isolate* isolate, const uint8_t* data,
NewStringType type = kNormalString,
@@ -2334,10 +2355,9 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<String> NewExternal(Isolate* isolate,
- ExternalStringResource* resource));
+ static V8_DEPRECATED("Use maybe version",
+ Local<String> NewExternal(
+ Isolate* isolate, ExternalStringResource* resource));
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
Isolate* isolate, ExternalStringResource* resource);
@@ -2447,8 +2467,8 @@ class V8_EXPORT Symbol : public Name {
Local<Value> Name() const;
// Create a symbol. If name is not empty, it will be used as the description.
- static Local<Symbol> New(
- Isolate *isolate, Local<String> name = Local<String>());
+ static Local<Symbol> New(Isolate* isolate,
+ Local<String> name = Local<String>());
// Access global symbol registry.
// Note that symbols created this way are never collected, so
@@ -2465,6 +2485,7 @@ class V8_EXPORT Symbol : public Name {
static Local<Symbol> GetIterator(Isolate* isolate);
static Local<Symbol> GetUnscopables(Isolate* isolate);
static Local<Symbol> GetToStringTag(Isolate* isolate);
+ static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
V8_INLINE static Symbol* Cast(v8::Value* obj);
@@ -2475,6 +2496,34 @@ class V8_EXPORT Symbol : public Name {
/**
+ * A private symbol
+ *
+ * This is an experimental feature. Use at your own risk.
+ */
+class V8_EXPORT Private : public Data {
+ public:
+ // Returns the print name string of the private symbol, or undefined if none.
+ Local<Value> Name() const;
+
+ // Create a private symbol. If name is not empty, it will be the description.
+ static Local<Private> New(Isolate* isolate,
+ Local<String> name = Local<String>());
+
+ // Retrieve a global private symbol. If a symbol with this name has not
+ // been retrieved in the same isolate before, it is created.
+ // Note that private symbols created this way are never collected, so
+ // they should only be used for statically fixed properties.
+ // Also, there is only one global name space for the names used as keys.
+ // To minimize the potential for clashes, use qualified names as keys,
+ // e.g., "Class#property".
+ static Local<Private> ForApi(Isolate* isolate, Local<String> name);
+
+ private:
+ Private();
+};
+
+
+/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
class V8_EXPORT Number : public Primitive {
@@ -2625,13 +2674,13 @@ class V8_EXPORT Object : public Value {
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
- V8_DEPRECATE_SOON("Use CreateDataProperty",
- bool ForceSet(Local<Value> key, Local<Value> value,
- PropertyAttribute attribs = None));
- V8_DEPRECATE_SOON("Use CreateDataProperty",
- Maybe<bool> ForceSet(Local<Context> context,
- Local<Value> key, Local<Value> value,
- PropertyAttribute attribs = None));
+ V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
+ bool ForceSet(Local<Value> key, Local<Value> value,
+ PropertyAttribute attribs = None));
+ V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
+ Maybe<bool> ForceSet(Local<Context> context, Local<Value> key,
+ Local<Value> value,
+ PropertyAttribute attribs = None));
V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
@@ -2646,16 +2695,16 @@ class V8_EXPORT Object : public Value {
* any combination of ReadOnly, DontEnum and DontDelete. Returns
* None when the property doesn't exist.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- PropertyAttribute GetPropertyAttributes(Local<Value> key));
+ V8_DEPRECATED("Use maybe version",
+ PropertyAttribute GetPropertyAttributes(Local<Value> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
Local<Context> context, Local<Value> key);
/**
* Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> GetOwnPropertyDescriptor(Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> GetOwnPropertyDescriptor(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
Local<Context> context, Local<String> key);
@@ -2667,27 +2716,27 @@ class V8_EXPORT Object : public Value {
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> Delete(Local<Context> context, Local<Value> key);
- V8_DEPRECATE_SOON("Use maybe version", bool Has(uint32_t index));
+ V8_DEPRECATED("Use maybe version", bool Has(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
- V8_DEPRECATE_SOON("Use maybe version", bool Delete(uint32_t index));
+ V8_DEPRECATED("Use maybe version", bool Delete(uint32_t index));
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> Delete(Local<Context> context, uint32_t index);
- V8_DEPRECATE_SOON("Use maybe version",
- bool SetAccessor(Local<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter = 0,
- Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
- V8_DEPRECATE_SOON("Use maybe version",
- bool SetAccessor(Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = 0,
- Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
+ V8_DEPRECATED("Use maybe version",
+ bool SetAccessor(Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter = 0,
+ Local<Value> data = Local<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None));
+ V8_DEPRECATED("Use maybe version",
+ bool SetAccessor(Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ Local<Value> data = Local<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None));
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> SetAccessor(Local<Context> context, Local<Name> name,
AccessorNameGetterCallback getter,
@@ -2702,6 +2751,18 @@ class V8_EXPORT Object : public Value {
AccessControl settings = DEFAULT);
/**
+ * Functionality for private properties.
+ * This is an experimental feature, use at your own risk.
+ * Note: Private properties are not inherited. Do not rely on this, since it
+ * may change.
+ */
+ Maybe<bool> HasPrivate(Local<Context> context, Local<Private> key);
+ Maybe<bool> SetPrivate(Local<Context> context, Local<Private> key,
+ Local<Value> value);
+ Maybe<bool> DeletePrivate(Local<Context> context, Local<Private> key);
+ MaybeLocal<Value> GetPrivate(Local<Context> context, Local<Private> key);
+
+ /**
* Returns an array containing the names of the enumerable properties
* of this object, including properties from prototype objects. The
* array returned by this method contains the same values as would
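A sketch combining v8::Private with these methods to stash embedder-only state on a script-visible object. The key name and the stored value are illustrative, and the maybe overload of NewFromUtf8 is assumed from elsewhere in this header:

    v8::Local<v8::Private> key = v8::Private::ForApi(
        isolate,
        v8::String::NewFromUtf8(isolate, "MyEmbedder#state",
                                v8::NewStringType::kNormal).ToLocalChecked());
    // Invisible to script: no getter, trap, or Object.keys() can observe it.
    obj->SetPrivate(context, key, v8::Integer::New(isolate, 42)).FromJust();
    v8::Local<v8::Value> state;
    if (obj->GetPrivate(context, key).ToLocal(&state)) {
      // |state| is the hidden value; DeletePrivate(context, key) removes it.
    }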
@@ -2732,8 +2793,7 @@ class V8_EXPORT Object : public Value {
* be skipped by __proto__ and it does not consult the security
* handler.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- bool SetPrototype(Local<Value> prototype));
+ V8_DEPRECATED("Use maybe version", bool SetPrototype(Local<Value> prototype));
V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
Local<Value> prototype);
@@ -2748,7 +2808,7 @@ class V8_EXPORT Object : public Value {
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<String> ObjectProtoToString());
+ V8_DEPRECATED("Use maybe version", Local<String> ObjectProtoToString());
V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
Local<Context> context);
@@ -2793,8 +2853,7 @@ class V8_EXPORT Object : public Value {
void SetAlignedPointerInInternalField(int index, void* value);
// Testers for local properties.
- V8_DEPRECATE_SOON("Use maybe version",
- bool HasOwnProperty(Local<String> key));
+ V8_DEPRECATED("Use maybe version", bool HasOwnProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
Local<Name> key);
V8_DEPRECATE_SOON("Use maybe version",
@@ -2814,7 +2873,7 @@ class V8_EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use maybe version",
Local<Value> GetRealNamedPropertyInPrototypeChain(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
@@ -2825,7 +2884,7 @@ class V8_EXPORT Object : public Value {
* which can be None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use maybe version",
Maybe<PropertyAttribute> GetRealNamedPropertyAttributesInPrototypeChain(
Local<String> key));
@@ -2838,8 +2897,8 @@ class V8_EXPORT Object : public Value {
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> GetRealNamedProperty(Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> GetRealNamedProperty(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
Local<Context> context, Local<Name> key);
@@ -2848,9 +2907,9 @@ class V8_EXPORT Object : public Value {
* None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
- Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
+ Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key);
@@ -2869,16 +2928,12 @@ class V8_EXPORT Object : public Value {
*/
int GetIdentityHash();
- /**
- * Access hidden properties on JavaScript objects. These properties are
- * hidden from the executing JavaScript and only accessible through the V8
- * C++ API. Hidden properties introduced by V8 internally (for example the
- * identity hash) are prefixed with "v8::".
- */
- // TODO(dcarney): convert these to take a isolate and optionally bailout?
- bool SetHiddenValue(Local<String> key, Local<Value> value);
- Local<Value> GetHiddenValue(Local<String> key);
- bool DeleteHiddenValue(Local<String> key);
+ V8_DEPRECATED("Use v8::Object::SetPrivate instead.",
+ bool SetHiddenValue(Local<String> key, Local<Value> value));
+ V8_DEPRECATED("Use v8::Object::GetPrivate instead.",
+ Local<Value> GetHiddenValue(Local<String> key));
+ V8_DEPRECATED("Use v8::Object::DeletePrivate instead.",
+ bool DeleteHiddenValue(Local<String> key));
/**
* Clone this object with a fast but shallow copy. Values will point
@@ -2903,9 +2958,9 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> CallAsFunction(Local<Value> recv, int argc,
- Local<Value> argv[]));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> CallAsFunction(Local<Value> recv, int argc,
+ Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
Local<Value> recv,
int argc,
@@ -2916,9 +2971,8 @@ class V8_EXPORT Object : public Value {
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> CallAsConstructor(int argc,
- Local<Value> argv[]));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> CallAsConstructor(int argc, Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
Local<Context> context, int argc, Local<Value> argv[]);
@@ -2950,10 +3004,11 @@ class V8_EXPORT Array : public Object {
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Object> CloneElementAt(uint32_t index));
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> CloneElementAt(
- Local<Context> context, uint32_t index);
+ V8_DEPRECATED("Cloning is not supported.",
+ Local<Object> CloneElementAt(uint32_t index));
+ V8_DEPRECATED("Cloning is not supported.",
+ MaybeLocal<Object> CloneElementAt(Local<Context> context,
+ uint32_t index));
/**
* Creates a JavaScript array with the given length. If the length
@@ -2996,15 +3051,6 @@ class V8_EXPORT Map : public Object {
*/
static Local<Map> New(Isolate* isolate);
- /**
- * Creates a new Map containing the elements of array, which must be formatted
- * in the same manner as the array returned from AsArray().
- * Guaranteed to be side-effect free if the array contains no holes.
- */
- static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
- "Use mutation methods instead",
- MaybeLocal<Map> FromArray(Local<Context> context, Local<Array> array));
-
V8_INLINE static Map* Cast(Value* obj);
private:
@@ -3037,14 +3083,6 @@ class V8_EXPORT Set : public Object {
*/
static Local<Set> New(Isolate* isolate);
- /**
- * Creates a new Set containing the items in array.
- * Guaranteed to be side-effect free if the array contains no holes.
- */
- static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
- "Use mutation methods instead",
- MaybeLocal<Set> FromArray(Local<Context> context, Local<Array> array));
-
V8_INLINE static Set* Cast(Value* obj);
private:
@@ -3192,13 +3230,12 @@ class V8_EXPORT Function : public Object {
Local<Function> New(Isolate* isolate, FunctionCallback callback,
Local<Value> data = Local<Value>(), int length = 0));
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Object> NewInstance(int argc, Local<Value> argv[])
- const);
+ V8_DEPRECATED("Use maybe version",
+ Local<Object> NewInstance(int argc, Local<Value> argv[]) const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context, int argc, Local<Value> argv[]) const;
- V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance() const);
+ V8_DEPRECATED("Use maybe version", Local<Object> NewInstance() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context) const {
return NewInstance(context, 0, nullptr);
@@ -3223,6 +3260,12 @@ class V8_EXPORT Function : public Object {
Local<Value> GetInferredName() const;
/**
+ * displayName if it is set, otherwise name if it is configured, otherwise
+ * function name, otherwise inferred name.
+ */
+ Local<Value> GetDebugName() const;
+
+ /**
* User-defined name assigned to the "displayName" property of this function.
* Used to facilitate debugging and profiling of JavaScript code.
*/
@@ -3311,18 +3354,19 @@ class V8_EXPORT Promise : public Object {
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of turn.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Promise> Chain(Local<Function> handler));
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(Local<Context> context,
- Local<Function> handler);
+ V8_DEPRECATED("Use maybe version of Then",
+ Local<Promise> Chain(Local<Function> handler));
+ V8_DEPRECATED("Use Then",
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(
+ Local<Context> context, Local<Function> handler));
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Promise> Catch(Local<Function> handler));
+ V8_DEPRECATED("Use maybe version",
+ Local<Promise> Catch(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
Local<Function> handler);
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Promise> Then(Local<Function> handler));
+ V8_DEPRECATED("Use maybe version",
+ Local<Promise> Then(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
Local<Function> handler);
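Note that Chain is now deprecated in both shapes and Then is the replacement. A sketch using the maybe version (promise, context, and handler assumed in scope):

    v8::Local<v8::Promise> chained;
    if (promise->Then(context, handler).ToLocal(&chained)) {
      // |chained| settles with the result of |handler|.
    }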
@@ -3340,6 +3384,32 @@ class V8_EXPORT Promise : public Object {
};
+/**
+ * An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
+ * 26.2.1).
+ */
+class V8_EXPORT Proxy : public Object {
+ public:
+ Local<Object> GetTarget();
+ Local<Value> GetHandler();
+ bool IsRevoked();
+ void Revoke();
+
+ /**
+ * Creates a new Proxy for the given target and handler.
+ */
+ static MaybeLocal<Proxy> New(Local<Context> context,
+ Local<Object> local_target,
+ Local<Object> local_handler);
+
+ V8_INLINE static Proxy* Cast(Value* obj);
+
+ private:
+ Proxy();
+ static void CheckCast(Value* obj);
+};
+
+
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
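A sketch exercising the new Proxy surface; the empty target and handler objects are placeholders:

    v8::Local<v8::Object> target = v8::Object::New(isolate);
    v8::Local<v8::Object> handler = v8::Object::New(isolate);
    v8::Local<v8::Proxy> proxy;
    if (v8::Proxy::New(context, target, handler).ToLocal(&proxy)) {
      bool is_proxy = proxy->IsProxy();  // true; IsArray() stays false even
                                         // when the target is an array
      proxy->Revoke();
      bool revoked = proxy->IsRevoked();  // true from here on
      (void)is_proxy;
      (void)revoked;
    }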
@@ -3897,7 +3967,8 @@ class V8_EXPORT NumberObject : public Object {
*/
class V8_EXPORT BooleanObject : public Object {
public:
- static Local<Value> New(bool value);
+ static Local<Value> New(Isolate* isolate, bool value);
+ V8_DEPRECATED("Pass an isolate", static Local<Value> New(bool value));
bool ValueOf() const;
@@ -3955,7 +4026,9 @@ class V8_EXPORT RegExp : public Object {
kNone = 0,
kGlobal = 1,
kIgnoreCase = 2,
- kMultiline = 4
+ kMultiline = 4,
+ kSticky = 8,
+ kUnicode = 16
};
/**
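The two new bits line up with ES6's y and u flags and combine as a mask like the existing ones. A sketch (the context-taking New overload is assumed from elsewhere in this header; pattern is a Local<String> in scope):

    v8::RegExp::Flags flags = static_cast<v8::RegExp::Flags>(
        v8::RegExp::kSticky | v8::RegExp::kUnicode);  // /pattern/yu
    v8::Local<v8::RegExp> re;
    if (v8::RegExp::New(context, pattern, flags).ToLocal(&re)) {
      // use |re|
    }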
@@ -4007,6 +4080,15 @@ class V8_EXPORT External : public Value {
};
+#define V8_INTRINSICS_LIST(F) F(ArrayProto_values, array_values_iterator)
+
+enum Intrinsic {
+#define V8_DECL_INTRINSIC(name, iname) k##name,
+ V8_INTRINSICS_LIST(V8_DECL_INTRINSIC)
+#undef V8_DECL_INTRINSIC
+};
+
+
// --- Templates ---
@@ -4027,13 +4109,6 @@ class V8_EXPORT Template : public Data {
PropertyAttribute attribute = None,
AccessControl settings = DEFAULT);
-#ifdef V8_JS_ACCESSORS
- void SetAccessorProperty(Local<Name> name,
- Local<Function> getter = Local<Function>(),
- Local<Function> setter = Local<Function>(),
- PropertyAttribute attribute = None);
-#endif // V8_JS_ACCESSORS
-
/**
* Whenever the property with the given name is accessed on objects
* created from this Template the getter and setter callbacks
@@ -4076,6 +4151,13 @@ class V8_EXPORT Template : public Data {
Local<AccessorSignature> signature = Local<AccessorSignature>(),
AccessControl settings = DEFAULT);
+ /**
+ * During template instantiation, sets the value with the intrinsic property
+ * from the correct context.
+ */
+ void SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
+ PropertyAttribute attribute = None);
+
private:
Template();
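A sketch of the one intrinsic currently in the list: at instantiation time the property is filled in with the context-correct %ArrayProto_values% function rather than a value baked in from another context. The property name here is illustrative:

    v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
    templ->SetIntrinsicDataProperty(
        v8::String::NewFromUtf8(isolate, "values",
                                v8::NewStringType::kNormal).ToLocalChecked(),
        v8::kArrayProto_values);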
@@ -4235,6 +4317,14 @@ enum AccessType {
/**
+ * Returns true if the given context should be allowed to access the given
+ * object.
+ */
+typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
+ Local<Object> accessed_object);
+
+
+/**
* Returns true if cross-context access should be allowed to the named
* property with the given key on the host object.
*/
@@ -4357,6 +4447,16 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0);
+ /**
+ * Creates a function template with a fast handler. If a fast handler is set,
+ * the callback cannot be null.
+ */
+ static Local<FunctionTemplate> NewWithFastHandler(
+ Isolate* isolate, FunctionCallback callback,
+ experimental::FastAccessorBuilder* fast_handler = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0);
+
/** Returns the unique function instance in the current execution context.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
@@ -4367,8 +4467,9 @@ class V8_EXPORT FunctionTemplate : public Template {
* callback is called whenever the function created from this
* FunctionTemplate is called.
*/
- void SetCallHandler(FunctionCallback callback,
- Local<Value> data = Local<Value>());
+ void SetCallHandler(
+ FunctionCallback callback, Local<Value> data = Local<Value>(),
+ experimental::FastAccessorBuilder* fast_handler = nullptr);
/** Set the predefined length property for the FunctionTemplate. */
void SetLength(int length);
@@ -4519,7 +4620,7 @@ class V8_EXPORT ObjectTemplate : public Template {
static Local<ObjectTemplate> New(
Isolate* isolate,
Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
- static V8_DEPRECATE_SOON("Use isolate version", Local<ObjectTemplate> New());
+ static V8_DEPRECATED("Use isolate version", Local<ObjectTemplate> New());
/** Creates a new instance of this template.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance());
@@ -4642,16 +4743,21 @@ class V8_EXPORT ObjectTemplate : public Template {
void MarkAsUndetectable();
/**
- * Sets access check callbacks on the object template and enables
- * access checks.
+ * Sets access check callback on the object template and enables access
+ * checks.
*
* When accessing properties on instances of this object template,
* the access check callback will be called to determine whether or
* not to allow cross-context access to the properties.
*/
- void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
- IndexedSecurityCallback indexed_handler,
- Local<Value> data = Local<Value>());
+ void SetAccessCheckCallback(AccessCheckCallback callback,
+ Local<Value> data = Local<Value>());
+
+ V8_DEPRECATED(
+ "Use SetAccessCheckCallback instead",
+ void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
+ IndexedSecurityCallback indexed_handler,
+ Local<Value> data = Local<Value>()));
/**
* Gets the number of internal fields for objects generated from
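A sketch of one possible policy behind the new single callback: compare the security tokens of the accessing context and the accessed object's creation context. The policy itself is illustrative, not part of this patch:

    bool SameTokenCheck(v8::Local<v8::Context> accessing_context,
                        v8::Local<v8::Object> accessed_object) {
      v8::Local<v8::Value> a = accessing_context->GetSecurityToken();
      v8::Local<v8::Value> b =
          accessed_object->CreationContext()->GetSecurityToken();
      return a->StrictEquals(b);
    }
    // ...
    object_template->SetAccessCheckCallback(SameTokenCheck);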
@@ -4702,21 +4808,6 @@ class V8_EXPORT AccessorSignature : public Data {
};
-/**
- * A utility for determining the type of objects based on the template
- * they were constructed from.
- */
-class V8_EXPORT TypeSwitch : public Data {
- public:
- static Local<TypeSwitch> New(Local<FunctionTemplate> type);
- static Local<TypeSwitch> New(int argc, Local<FunctionTemplate> types[]);
- int match(Local<Value> value);
-
- private:
- TypeSwitch();
-};
-
-
// --- Extensions ---
class V8_EXPORT ExternalOneByteStringResourceImpl
@@ -4862,7 +4953,9 @@ class V8_EXPORT Exception {
* Will try to reconstruct the original stack trace from the exception value,
* or capture the current stack trace if not available.
*/
- static Local<Message> CreateMessage(Local<Value> exception);
+ static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
+ V8_DEPRECATED("Use version with an Isolate*",
+ static Local<Message> CreateMessage(Local<Value> exception));
/**
* Returns the original stack trace that was captured at the creation time
@@ -4927,8 +5020,10 @@ class PromiseRejectMessage {
V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
V8_INLINE Local<Value> GetValue() const { return value_; }
- // DEPRECATED. Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()
- V8_INLINE Local<StackTrace> GetStackTrace() const { return stack_trace_; }
+ V8_DEPRECATED("Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()",
+ V8_INLINE Local<StackTrace> GetStackTrace() const) {
+ return stack_trace_;
+ }
private:
Local<Promise> promise_;
@@ -4980,12 +5075,6 @@ enum GCCallbackFlags {
kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3
};
-V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCPrologueCallback)(GCType type,
- GCCallbackFlags flags));
-V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCEpilogueCallback)(GCType type,
- GCCallbackFlags flags));
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
typedef void (*InterruptCallback)(Isolate* isolate, void* data);
@@ -5006,6 +5095,7 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size() { return total_available_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
+ size_t does_zap_garbage() { return does_zap_garbage_; }
private:
size_t total_heap_size_;
@@ -5014,6 +5104,7 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size_;
size_t used_heap_size_;
size_t heap_size_limit_;
+ bool does_zap_garbage_;
friend class V8;
friend class Isolate;
@@ -5351,6 +5442,18 @@ class V8_EXPORT Isolate {
kSlotsBufferOverflow = 5,
kObjectObserve = 6,
kForcedGC = 7,
+ kSloppyMode = 8,
+ kStrictMode = 9,
+ kStrongMode = 10,
+ kRegExpPrototypeStickyGetter = 11,
+ kRegExpPrototypeToString = 12,
+ kRegExpPrototypeUnicodeGetter = 13,
+ kIntlV8Parse = 14,
+ kIntlPattern = 15,
+ kIntlResolved = 16,
+ kPromiseChain = 17,
+ kPromiseAccept = 18,
+ kPromiseDefer = 19,
kUseCounterFeatureCount // This enum value must be last.
};
@@ -5378,6 +5481,19 @@ class V8_EXPORT Isolate {
static Isolate* GetCurrent();
/**
+ * Custom callback used by embedders to help V8 determine if it should abort
+ * when it throws and no internal handler is predicted to catch the
+ * exception. If --abort-on-uncaught-exception is used on the command line,
+ * then V8 will abort if either:
+ * - no custom callback is set.
+ * - the custom callback set returns true.
+ * Otherwise, the custom callback will not be called and V8 will not abort.
+ */
+ typedef bool (*AbortOnUncaughtExceptionCallback)(Isolate*);
+ void SetAbortOnUncaughtExceptionCallback(
+ AbortOnUncaughtExceptionCallback callback);
+
+ /**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
*/
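A sketch of gating --abort-on-uncaught-exception per isolate; using embedder-data slot 0 as the flag is an assumption of this example:

    bool AbortOnlyIfFlagged(v8::Isolate* isolate) {
      // Slot 0 is assumed to hold a non-null marker meaning "abort allowed".
      return isolate->GetData(0) != nullptr;
    }
    // ...
    isolate->SetAbortOnUncaughtExceptionCallback(AbortOnlyIfFlagged);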
@@ -5405,6 +5521,15 @@ class V8_EXPORT Isolate {
void Dispose();
/**
+ * Discards all V8 thread-specific data for the Isolate. Should be used
+ * if a thread is terminating and it has used an Isolate that will outlive
+ * the thread -- all thread-specific data for an Isolate is discarded when
+ * an Isolate is disposed so this call is pointless if an Isolate is about
+ * to be Disposed.
+ */
+ void DiscardThreadSpecificMetadata();
+
+ /**
* Associate embedder-specific data with the isolate. |slot| has to be
* between 0 and GetNumberOfDataSlots() - 1.
*/
@@ -5508,7 +5633,10 @@ class V8_EXPORT Isolate {
/** Returns true if this isolate has a current context. */
bool InContext();
- /** Returns the context that is on the top of the stack. */
+ /**
+ * Returns the context of the currently running JavaScript, or the context
+ * on the top of the stack if no JavaScript is running.
+ */
Local<Context> GetCurrentContext();
/**
@@ -5516,9 +5644,12 @@ class V8_EXPORT Isolate {
* context of the top-most JavaScript frame. If there are no
* JavaScript frames an empty handle is returned.
*/
- Local<Context> GetCallingContext();
+ V8_DEPRECATE_SOON(
+ "Calling context concept is not compatible with tail calls, and will be "
+ "removed.",
+ Local<Context> GetCallingContext());
- /** Returns the last entered context. */
+ /** Returns the last context entered through V8's C++ API. */
Local<Context> GetEnteredContext();
/**
@@ -5562,14 +5693,6 @@ class V8_EXPORT Isolate {
template<typename T, typename S>
void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
- V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCPrologueCallback)(Isolate* isolate,
- GCType type,
- GCCallbackFlags flags));
- V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCEpilogueCallback)(Isolate* isolate,
- GCType type,
- GCCallbackFlags flags));
typedef void (*GCCallback)(Isolate* isolate, GCType type,
GCCallbackFlags flags);
@@ -5756,8 +5879,8 @@ class V8_EXPORT Isolate {
*/
bool IdleNotificationDeadline(double deadline_in_seconds);
- V8_DEPRECATE_SOON("use IdleNotificationDeadline()",
- bool IdleNotification(int idle_time_in_ms));
+ V8_DEPRECATED("use IdleNotificationDeadline()",
+ bool IdleNotification(int idle_time_in_ms));
/**
* Optional notification that the system is running low on memory.
@@ -5777,6 +5900,18 @@ class V8_EXPORT Isolate {
int ContextDisposedNotification(bool dependant_context = true);
/**
+ * Optional notification that the isolate switched to the foreground.
+ * V8 uses these notifications to guide heuristics.
+ */
+ void IsolateInForegroundNotification();
+
+ /**
+ * Optional notification that the isolate switched to the background.
+ * V8 uses these notifications to guide heuristics.
+ */
+ void IsolateInBackgroundNotification();
+
+ /**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*
@@ -5905,6 +6040,13 @@ class V8_EXPORT Isolate {
*/
void VisitHandlesForPartialDependence(PersistentHandleVisitor* visitor);
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids and are weak to be marked as inactive if there is no
+ * pending activity for the handle.
+ */
+ void VisitWeakHandles(PersistentHandleVisitor* visitor);
+
private:
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
@@ -5959,7 +6101,7 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetFatalErrorHandler(FatalErrorCallback that));
@@ -5967,7 +6109,7 @@ class V8_EXPORT V8 {
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version", void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that));
@@ -5975,7 +6117,7 @@ class V8_EXPORT V8 {
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
- V8_INLINE static V8_DEPRECATE_SOON("no alternative", bool IsDead());
+ V8_INLINE static V8_DEPRECATED("Use isolate version", bool IsDead());
/**
* Hand startup data to V8, in case the embedder has chosen to build
@@ -6011,7 +6153,7 @@ class V8_EXPORT V8 {
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
bool AddMessageListener(MessageCallback that,
Local<Value> data = Local<Value>()));
@@ -6019,14 +6161,14 @@ class V8_EXPORT V8 {
/**
* Remove all message listeners from the specified callback function.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version", void RemoveMessageListeners(MessageCallback that));
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit = 10,
@@ -6048,7 +6190,7 @@ class V8_EXPORT V8 {
static const char* GetVersion();
/** Callback function for reporting failed access checks.*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
@@ -6062,7 +6204,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use isolate version",
void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
@@ -6071,7 +6213,7 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveGCPrologueCallback(GCCallback callback));
@@ -6085,7 +6227,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use isolate version",
void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
@@ -6094,7 +6236,7 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveGCEpilogueCallback(GCCallback callback));
@@ -6102,7 +6244,7 @@ class V8_EXPORT V8 {
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
@@ -6111,7 +6253,7 @@ class V8_EXPORT V8 {
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback));
@@ -6143,8 +6285,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to terminate the current JS execution.
*/
- V8_INLINE static V8_DEPRECATE_SOON("Use isolate version",
- void TerminateExecution(Isolate* isolate));
+ V8_INLINE static V8_DEPRECATED("Use isolate version",
+ void TerminateExecution(Isolate* isolate));
/**
* Is V8 terminating JavaScript execution.
@@ -6156,7 +6298,7 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to check.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
bool IsExecutionTerminating(Isolate* isolate = NULL));
@@ -6176,7 +6318,7 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to resume execution capability.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version", void CancelTerminateExecution(Isolate* isolate));
/**
@@ -6195,15 +6337,15 @@ class V8_EXPORT V8 {
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
- "Use isoalte version",
+ V8_INLINE static V8_DEPRECATED(
+ "Use isolate version",
void VisitExternalResources(ExternalResourceVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor));
@@ -6211,7 +6353,7 @@ class V8_EXPORT V8 {
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(Isolate* isolate,
PersistentHandleVisitor* visitor));
@@ -6223,7 +6365,7 @@ class V8_EXPORT V8 {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesForPartialDependence(Isolate* isolate,
PersistentHandleVisitor* visitor));
@@ -6381,7 +6523,7 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
- V8_DEPRECATE_SOON("Use isolate version", TryCatch());
+ V8_DEPRECATED("Use isolate version", TryCatch());
/**
* Creates a new try/catch block and registers it with v8. Note that
@@ -6987,6 +7129,7 @@ class Internals {
static const int kNodeStateIsNearDeathValue = 4;
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsPartiallyDependentShift = 4;
+ static const int kNodeIsActiveShift = 4;
static const int kJSObjectType = 0xb7;
static const int kFirstNonstringType = 0x80;
@@ -7066,7 +7209,7 @@ class Internals {
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate,
uint32_t slot,
void* data) {
- uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
@@ -7314,6 +7457,15 @@ void PersistentBase<T>::MarkPartiallyDependent() {
template <class T>
+void PersistentBase<T>::MarkActive() {
+ typedef internal::Internals I;
+ if (this->IsEmpty()) return;
+ I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_), true,
+ I::kNodeIsActiveShift);
+}
+
+
+template <class T>
void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
@@ -7936,6 +8088,14 @@ Promise* Promise::Cast(v8::Value* value) {
}
+Proxy* Proxy::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Proxy*>(value);
+}
+
+
Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -8360,4 +8520,4 @@ void V8::VisitHandlesForPartialDependence(Isolate* isolate,
#undef TYPE_CHECK
-#endif // V8_H_
+#endif // INCLUDE_V8_H_
diff --git a/chromium/v8/include/v8config.h b/chromium/v8/include/v8config.h
index 4f4b2cd55ea..d2be68561c0 100644
--- a/chromium/v8/include/v8config.h
+++ b/chromium/v8/include/v8config.h
@@ -163,7 +163,6 @@
//
// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
-// V8_HAS_CXX11_STATIC_ASSERT - static_assert() supported
//
// Compiler-specific feature detection
//
@@ -174,6 +173,7 @@
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
+// V8_HAS_ATTRIBUTE_NORETURN - __attribute__((noreturn)) supported
// V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
@@ -190,6 +190,7 @@
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
// V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported
+// V8_HAS_DECLSPEC_NORETURN - __declspec(noreturn) supported
// V8_HAS___FORCEINLINE - __forceinline supported
//
// Note that testing for compilers and/or features must be done using #if
@@ -212,6 +213,7 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
+# define V8_HAS_ATTRIBUTE_NORETURN (__has_attribute(noreturn))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
@@ -227,7 +229,6 @@
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
-# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
#elif defined(__GNUC__)
@@ -253,6 +254,7 @@
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_ATTRIBUTE_NORETURN (V8_GNUC_PREREQ(2, 5, 0))
# define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
@@ -273,7 +275,6 @@
# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
-# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# endif
#endif
@@ -285,6 +286,7 @@
# define V8_HAS_DECLSPEC_DEPRECATED 1
# define V8_HAS_DECLSPEC_NOINLINE 1
# define V8_HAS_DECLSPEC_SELECTANY 1
+# define V8_HAS_DECLSPEC_NORETURN 1
# define V8_HAS___FORCEINLINE 1
@@ -319,6 +321,18 @@
#endif
+// A macro used to tell the compiler that a particular function never returns.
+// Use like:
+// V8_NORETURN void MyAbort() { abort(); }
+#if V8_HAS_ATTRIBUTE_NORETURN
+# define V8_NORETURN __attribute__((noreturn))
+#elif V8_HAS_DECLSPEC_NORETURN
+# define V8_NORETURN __declspec(noreturn)
+#else
+# define V8_NORETURN /* NOT SUPPORTED */
+#endif
+
+
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
#define V8_DEPRECATED(message, declarator) \
diff --git a/chromium/v8/infra/config/cq.cfg b/chromium/v8/infra/config/cq.cfg
index c5603743531..6d3624992c0 100644
--- a/chromium/v8/infra/config/cq.cfg
+++ b/chromium/v8/infra/config/cq.cfg
@@ -28,22 +28,49 @@ verifiers {
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
- builders { name: "v8_linux64_rel" }
+ builders { name: "v8_linux64_rel_ng" }
+ builders {
+ name: "v8_linux64_rel_ng_triggered"
+ triggered_by: "v8_linux64_rel_ng"
+ }
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_chromium_gn_rel" }
- builders { name: "v8_linux_dbg" }
+ builders { name: "v8_linux_dbg_ng" }
+ builders {
+ name: "v8_linux_dbg_ng_triggered"
+ triggered_by: "v8_linux_dbg_ng"
+ }
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
- builders { name: "v8_linux_rel" }
+ builders { name: "v8_linux_rel_ng" }
+ builders {
+ name: "v8_linux_rel_ng_triggered"
+ triggered_by: "v8_linux_rel_ng"
+ }
builders { name: "v8_mac_rel" }
builders { name: "v8_presubmit" }
- builders { name: "v8_win64_rel" }
+ builders { name: "v8_win64_rel_ng" }
+ builders {
+ name: "v8_win64_rel_ng_triggered"
+ triggered_by: "v8_win64_rel_ng"
+ }
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
- builders { name: "v8_win_rel" }
+ builders { name: "v8_win_rel_ng" }
+ builders {
+ name: "v8_win_rel_ng_triggered"
+ triggered_by: "v8_win_rel_ng"
+ }
+ }
+ buckets {
+ name: "tryserver.blink"
+ builders {
+ name: "linux_blink_rel"
+ experiment_percentage: 20
+ }
}
}
diff --git a/chromium/v8/samples/samples.gyp b/chromium/v8/samples/samples.gyp
index 0c8e5cc7640..7e0608b2130 100644
--- a/chromium/v8/samples/samples.gyp
+++ b/chromium/v8/samples/samples.gyp
@@ -40,10 +40,6 @@
'include_dirs': [
'..',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
diff --git a/chromium/v8/samples/shell.cc b/chromium/v8/samples/shell.cc
index ad222850843..b89ffdd180e 100644
--- a/chromium/v8/samples/shell.cc
+++ b/chromium/v8/samples/shell.cc
@@ -412,9 +412,11 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
fprintf(stderr, "^");
}
fprintf(stderr, "\n");
- v8::String::Utf8Value stack_trace(
- try_catch->StackTrace(context).ToLocalChecked());
- if (stack_trace.length() > 0) {
+ v8::Local<v8::Value> stack_trace_string;
+ if (try_catch->StackTrace(context).ToLocal(&stack_trace_string) &&
+ stack_trace_string->IsString() &&
+ v8::Local<v8::String>::Cast(stack_trace_string)->Length() > 0) {
+ v8::String::Utf8Value stack_trace(stack_trace_string);
const char* stack_trace_string = ToCString(stack_trace);
fprintf(stderr, "%s\n", stack_trace_string);
}
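
Note: the shell.cc hunk swaps ToLocalChecked(), which terminates the process on an empty MaybeLocal (for example when the thrown value carries no stack trace), for ToLocal() plus explicit type checks. A minimal sketch of that pattern, assuming an embedder with an entered isolate and context; the Utf8Value constructor matches this version's Local<Value> signature.

    #include <cstdio>
    #include "include/v8.h"

    void PrintStackTrace(v8::Local<v8::Context> context,
                         v8::TryCatch* try_catch) {
      v8::Local<v8::Value> trace;
      // ToLocal() reports an empty MaybeLocal via its return value instead
      // of aborting the process the way ToLocalChecked() does.
      if (try_catch->StackTrace(context).ToLocal(&trace) &&
          trace->IsString()) {
        v8::String::Utf8Value utf8(trace);
        if (*utf8 != nullptr) std::fprintf(stderr, "%s\n", *utf8);
      }
    }
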
diff --git a/chromium/v8/snapshot_toolchain.gni b/chromium/v8/snapshot_toolchain.gni
index c419c3ff06c..4932110489d 100644
--- a/chromium/v8/snapshot_toolchain.gni
+++ b/chromium/v8/snapshot_toolchain.gni
@@ -34,10 +34,10 @@
if (host_cpu == "x64" && host_os == "linux") {
if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
- } else if (target_cpu == "x64") {
+ } else if (target_cpu == "x64" || target_cpu == "arm64" || target_cpu == "mips64el") {
snapshot_toolchain = "//build/toolchain/linux:clang_x64"
} else {
- assert(false, "Need environment for this arch")
+ assert(false, "Need environment for this arch: $target_cpu")
}
} else {
snapshot_toolchain = default_toolchain
diff --git a/chromium/v8/src/DEPS b/chromium/v8/src/DEPS
index 26b08087401..b54cd045634 100644
--- a/chromium/v8/src/DEPS
+++ b/chromium/v8/src/DEPS
@@ -1,7 +1,10 @@
include_rules = [
+ "+base/trace_event/common/trace_event_common.h",
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
+ "+src/compiler/code-stub-assembler.h",
+ "+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
@@ -24,4 +27,7 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
+ "api-experimental\.cc": [
+ "+src/compiler/fast-accessor-assembler.h",
+ ],
}
diff --git a/chromium/v8/src/OWNERS b/chromium/v8/src/OWNERS
index f38fecad4ea..94b7fbb32e7 100644
--- a/chromium/v8/src/OWNERS
+++ b/chromium/v8/src/OWNERS
@@ -1,2 +1,4 @@
per-file i18n.*=cira@chromium.org
per-file i18n.*=mnita@google.com
+per-file typing-asm.*=aseemgarg@chromium.org
+per-file typing-asm.*=bradnelson@chromium.org
diff --git a/chromium/v8/src/accessors.cc b/chromium/v8/src/accessors.cc
index b89917f73ec..2094cdb20db 100644
--- a/chromium/v8/src/accessors.cc
+++ b/chromium/v8/src/accessors.cc
@@ -161,7 +161,8 @@ void Accessors::ArgumentsIteratorSetter(
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> object_handle = Utils::OpenHandle(*info.This());
+ Handle<JSObject> object_handle =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<Object> value_handle = Utils::OpenHandle(*val);
Handle<Name> name_handle = Utils::OpenHandle(*name);
@@ -198,18 +199,6 @@ void Accessors::ArrayLengthGetter(
}
-// Tries to non-observably convert |value| to a valid array length.
-// Returns false if it fails.
-static bool FastAsArrayLength(Isolate* isolate, Handle<Object> value,
- uint32_t* length) {
- if (value->ToArrayLength(length)) return true;
- // We don't support AsArrayLength, so use AsArrayIndex for now. This just
- // misses out on kMaxUInt32.
- if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
- return false;
-}
-
-
void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
@@ -217,31 +206,14 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> object = Utils::OpenHandle(*info.This());
+ Handle<JSReceiver> object = Utils::OpenHandle(*info.This());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
uint32_t length = 0;
- if (!FastAsArrayLength(isolate, length_obj, &length)) {
- Handle<Object> uint32_v;
- if (!Object::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
- Handle<Object> number_v;
- if (!Object::ToNumber(length_obj).ToHandle(&number_v)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
- if (uint32_v->Number() != number_v->Number()) {
- Handle<Object> exception = isolate->factory()->NewRangeError(
- MessageTemplate::kInvalidArrayLength);
- return isolate->ScheduleThrow(*exception);
- }
-
- CHECK(uint32_v->ToArrayLength(&length));
+ if (!JSArray::AnythingToArrayLength(isolate, length_obj, &length)) {
+ isolate->OptionalRescheduleException(false);
+ return;
}
if (JSArray::ObservableSetLength(array, length).is_null()) {
@@ -260,7 +232,6 @@ Handle<AccessorInfo> Accessors::ArrayLengthInfo(
}
-
//
// Accessors::StringLength
//
@@ -1074,7 +1045,12 @@ void Accessors::FunctionNameGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Object> result(function->shared()->name(), isolate);
+ Handle<Object> result;
+ if (function->shared()->name_should_print_as_anonymous()) {
+ result = isolate->factory()->anonymous_string();
+ } else {
+ result = handle(function->shared()->name(), isolate);
+ }
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -1200,20 +1176,7 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
return ArgumentsForInlinedFunction(frame, function, function_index);
}
- if (!frame->is_optimized()) {
- // If there is an arguments variable in the stack, we return that.
- Handle<ScopeInfo> scope_info(function->shared()->scope_info());
- int index = scope_info->StackSlotIndex(
- isolate->heap()->arguments_string());
- if (index >= 0) {
- Handle<Object> arguments(frame->GetExpression(index), isolate);
- if (!arguments->IsArgumentsMarker()) return arguments;
- }
- }
-
- // If there is no arguments variable in the stack or we have an
- // optimized frame, we find the frame that holds the actual arguments
- // passed to the function.
+ // Find the frame that holds the actual arguments passed to the function.
it.AdvanceToArgumentsFrame();
frame = it.frame();
@@ -1359,19 +1322,13 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
// If caller is a built-in function and caller's caller is also built-in,
// use that instead.
JSFunction* potential_caller = caller;
- while (potential_caller != NULL && potential_caller->IsBuiltin()) {
+ while (potential_caller != NULL && potential_caller->shared()->IsBuiltin()) {
caller = potential_caller;
potential_caller = it.next();
}
if (!caller->shared()->native() && potential_caller != NULL) {
caller = potential_caller;
}
- // If caller is bound, return null. This is compatible with JSC, and
- // allows us to make bound functions use the strict function map
- // and its associated throwing caller and arguments.
- if (caller->shared()->bound()) {
- return MaybeHandle<JSFunction>();
- }
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
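
Note: the ArrayLengthSetter hunk replaces the inline ToUint32/ToNumber comparison with JSArray::AnythingToArrayLength. A standalone sketch (not V8 internals) of the underlying ES5 rule: a candidate length is valid only if converting it to uint32 loses nothing, otherwise the setter throws RangeError.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    bool AnythingToArrayLength(double candidate, uint32_t* out) {
      // A valid length must survive the uint32 round trip unchanged, so
      // -1.0, 4294967296.0 and non-integers are all rejected.
      if (std::isnan(candidate) || candidate < 0.0 ||
          candidate > 4294967295.0 || candidate != std::floor(candidate)) {
        return false;  // caller raises RangeError: invalid array length
      }
      *out = static_cast<uint32_t>(candidate);
      return true;
    }

    int main() {
      uint32_t len;
      std::printf("%d\n", AnythingToArrayLength(42.0, &len));          // 1
      std::printf("%d\n", AnythingToArrayLength(-1.0, &len));          // 0
      std::printf("%d\n", AnythingToArrayLength(4294967296.0, &len));  // 0
    }
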
diff --git a/chromium/v8/src/accessors.h b/chromium/v8/src/accessors.h
index 3c0079de89d..6c1765c404d 100644
--- a/chromium/v8/src/accessors.h
+++ b/chromium/v8/src/accessors.h
@@ -106,6 +106,7 @@ class Accessors : public AllStatic {
Handle<ExecutableAccessorInfo> accessor);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ACCESSORS_H_
diff --git a/chromium/v8/src/address-map.cc b/chromium/v8/src/address-map.cc
new file mode 100644
index 00000000000..681661af297
--- /dev/null
+++ b/chromium/v8/src/address-map.cc
@@ -0,0 +1,38 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/address-map.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+RootIndexMap::RootIndexMap(Isolate* isolate) {
+ map_ = isolate->root_index_map();
+ if (map_ != NULL) return;
+ map_ = new HashMap(HashMap::PointersMatch);
+ for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ Object* root = isolate->heap()->root(root_index);
+ // Omit root entries that can be written after initialization. They must
+ // not be referenced through the root list in the snapshot.
+ if (root->IsHeapObject() &&
+ isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
+ HeapObject* heap_object = HeapObject::cast(root);
+ HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
+ if (entry != NULL) {
+ // Some are initialized to a previous value in the root list.
+ DCHECK_LT(GetValue(entry), i);
+ } else {
+ SetValue(LookupEntry(map_, heap_object, true), i);
+ }
+ }
+ }
+ isolate->set_root_index_map(map_);
+}
+
+} // namespace internal
+} // namespace v8
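
Note: RootIndexMap maps each constant root object to the first root-list index that references it, tolerating roots that alias earlier entries. A hedged sketch of the same dedup pattern, using std:: containers in place of V8's HashMap.

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    using ObjectPtr = const void*;

    std::unordered_map<ObjectPtr, uint32_t> BuildRootIndexMap(
        const std::vector<ObjectPtr>& strong_roots) {
      std::unordered_map<ObjectPtr, uint32_t> map;
      for (uint32_t i = 0; i < strong_roots.size(); i++) {
        auto it = map.find(strong_roots[i]);
        if (it != map.end()) {
          assert(it->second < i);  // duplicate: keep the earlier index
        } else {
          map.emplace(strong_roots[i], i);
        }
      }
      return map;
    }

    int main() {
      int a, b;
      auto m = BuildRootIndexMap({&a, &b, &a});  // &a appears twice
      return m.at(&a) == 0 && m.at(&b) == 1 ? 0 : 1;
    }
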
diff --git a/chromium/v8/src/address-map.h b/chromium/v8/src/address-map.h
new file mode 100644
index 00000000000..df32f89c1ea
--- /dev/null
+++ b/chromium/v8/src/address-map.h
@@ -0,0 +1,184 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ADDRESS_MAP_H_
+#define V8_ADDRESS_MAP_H_
+
+#include "src/assert-scope.h"
+#include "src/hashmap.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class AddressMapBase {
+ protected:
+ static void SetValue(HashMap::Entry* entry, uint32_t v) {
+ entry->value = reinterpret_cast<void*>(v);
+ }
+
+ static uint32_t GetValue(HashMap::Entry* entry) {
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+ }
+
+ inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
+ bool insert) {
+ if (insert) {
+ map->LookupOrInsert(Key(obj), Hash(obj));
+ }
+ return map->Lookup(Key(obj), Hash(obj));
+ }
+
+ private:
+ static uint32_t Hash(HeapObject* obj) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ }
+
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
+};
+
+
+class RootIndexMap : public AddressMapBase {
+ public:
+ explicit RootIndexMap(Isolate* isolate);
+
+ static const int kInvalidRootIndex = -1;
+
+ int Lookup(HeapObject* obj) {
+ HashMap::Entry* entry = LookupEntry(map_, obj, false);
+ if (entry) return GetValue(entry);
+ return kInvalidRootIndex;
+ }
+
+ private:
+ HashMap* map_;
+
+ DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
+};
+
+
+class BackReference {
+ public:
+ explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
+
+ BackReference() : bitfield_(kInvalidValue) {}
+
+ static BackReference SourceReference() { return BackReference(kSourceValue); }
+
+ static BackReference GlobalProxyReference() {
+ return BackReference(kGlobalProxyValue);
+ }
+
+ static BackReference LargeObjectReference(uint32_t index) {
+ return BackReference(SpaceBits::encode(LO_SPACE) |
+ ChunkOffsetBits::encode(index));
+ }
+
+ static BackReference DummyReference() { return BackReference(kDummyValue); }
+
+ static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK(IsAligned(chunk_offset, kObjectAlignment));
+ DCHECK_NE(LO_SPACE, space);
+ return BackReference(
+ SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
+ ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
+ }
+
+ bool is_valid() const { return bitfield_ != kInvalidValue; }
+ bool is_source() const { return bitfield_ == kSourceValue; }
+ bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
+
+ AllocationSpace space() const {
+ DCHECK(is_valid());
+ return SpaceBits::decode(bitfield_);
+ }
+
+ uint32_t chunk_offset() const {
+ DCHECK(is_valid());
+ return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
+ }
+
+ uint32_t large_object_index() const {
+ DCHECK(is_valid());
+ DCHECK(chunk_index() == 0);
+ return ChunkOffsetBits::decode(bitfield_);
+ }
+
+ uint32_t chunk_index() const {
+ DCHECK(is_valid());
+ return ChunkIndexBits::decode(bitfield_);
+ }
+
+ uint32_t reference() const {
+ DCHECK(is_valid());
+ return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
+ }
+
+ uint32_t bitfield() const { return bitfield_; }
+
+ private:
+ static const uint32_t kInvalidValue = 0xFFFFFFFF;
+ static const uint32_t kSourceValue = 0xFFFFFFFE;
+ static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
+ static const uint32_t kDummyValue = 0xFFFFFFFC;
+ static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
+ static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
+
+ public:
+ static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
+
+ private:
+ class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
+ class ChunkIndexBits
+ : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
+ class SpaceBits
+ : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
+ };
+
+ uint32_t bitfield_;
+};
+
+
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class BackReferenceMap : public AddressMapBase {
+ public:
+ BackReferenceMap()
+ : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
+
+ ~BackReferenceMap() { delete map_; }
+
+ BackReference Lookup(HeapObject* obj) {
+ HashMap::Entry* entry = LookupEntry(map_, obj, false);
+ return entry ? BackReference(GetValue(entry)) : BackReference();
+ }
+
+ void Add(HeapObject* obj, BackReference b) {
+ DCHECK(b.is_valid());
+ DCHECK_NULL(LookupEntry(map_, obj, false));
+ HashMap::Entry* entry = LookupEntry(map_, obj, true);
+ SetValue(entry, b.bitfield());
+ }
+
+ void AddSourceString(String* string) {
+ Add(string, BackReference::SourceReference());
+ }
+
+ void AddGlobalProxy(HeapObject* global_proxy) {
+ Add(global_proxy, BackReference::GlobalProxyReference());
+ }
+
+ private:
+ DisallowHeapAllocation no_allocation_;
+ HashMap* map_;
+ DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ADDRESS_MAP_H_
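
Note: BackReference packs an allocation space tag, a chunk index, and an alignment-scaled chunk offset into one 32-bit word via BitField. A standalone sketch with explicit shifts; the field widths here are assumptions chosen for illustration (the real ones derive from kPageSizeBits, kObjectAlignmentBits, and kSpaceTagSize).

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr int kOffsetBits = 17;  // assumed width for the sketch
    constexpr int kIndexBits = 11;
    constexpr int kSpaceBits = 4;    // 17 + 11 + 4 = 32
    constexpr int kAlignBits = 3;    // 8-byte object alignment

    uint32_t Encode(uint32_t space, uint32_t chunk_index,
                    uint32_t byte_offset) {
      assert(byte_offset % (1u << kAlignBits) == 0);
      uint32_t off = byte_offset >> kAlignBits;  // store in alignment units
      assert(off < (1u << kOffsetBits) && chunk_index < (1u << kIndexBits) &&
             space < (1u << kSpaceBits));
      return off | (chunk_index << kOffsetBits) |
             (space << (kOffsetBits + kIndexBits));
    }

    int main() {
      uint32_t ref = Encode(/*space=*/2, /*chunk_index=*/5, /*offset=*/64);
      uint32_t off = (ref & ((1u << kOffsetBits) - 1)) << kAlignBits;
      uint32_t idx = (ref >> kOffsetBits) & ((1u << kIndexBits) - 1);
      std::printf("offset=%u index=%u\n", off, idx);  // offset=64 index=5
    }
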
diff --git a/chromium/v8/src/allocation-site-scopes.h b/chromium/v8/src/allocation-site-scopes.h
index 9e287c13107..70dd63e1dde 100644
--- a/chromium/v8/src/allocation-site-scopes.h
+++ b/chromium/v8/src/allocation-site-scopes.h
@@ -5,7 +5,7 @@
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/zone.h"
@@ -36,7 +36,7 @@ class AllocationSiteContext {
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
- current_ = Handle<AllocationSite>(*top_, isolate());
+ current_ = Handle<AllocationSite>::New(*top_, isolate());
}
private:
@@ -95,6 +95,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ALLOCATION_SITE_SCOPES_H_
diff --git a/chromium/v8/src/allocation.h b/chromium/v8/src/allocation.h
index 292e1fe23b5..7c1e023b864 100644
--- a/chromium/v8/src/allocation.h
+++ b/chromium/v8/src/allocation.h
@@ -88,6 +88,7 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ALLOCATION_H_
diff --git a/chromium/v8/src/api-experimental.cc b/chromium/v8/src/api-experimental.cc
new file mode 100644
index 00000000000..2b49e9723a5
--- /dev/null
+++ b/chromium/v8/src/api-experimental.cc
@@ -0,0 +1,126 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * Implementation for v8-experimental.h.
+ */
+
+#include "src/api-experimental.h"
+
+#include "include/v8.h"
+#include "include/v8-experimental.h"
+#include "src/api.h"
+#include "src/compiler/fast-accessor-assembler.h"
+
+namespace {
+
+
+v8::internal::compiler::FastAccessorAssembler* FromApi(
+ v8::experimental::FastAccessorBuilder* builder) {
+ return reinterpret_cast<v8::internal::compiler::FastAccessorAssembler*>(
+ builder);
+}
+
+
+v8::experimental::FastAccessorBuilder* FromInternal(
+ v8::internal::compiler::FastAccessorAssembler* fast_accessor_assembler) {
+ return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
+ fast_accessor_assembler);
+}
+
+} // namespace
+
+namespace v8 {
+namespace internal {
+namespace experimental {
+
+
+MaybeHandle<Code> BuildCodeFromFastAccessorBuilder(
+ v8::experimental::FastAccessorBuilder* fast_handler) {
+ i::MaybeHandle<i::Code> code;
+ if (fast_handler != nullptr) {
+ auto faa = FromApi(fast_handler);
+ code = faa->Build();
+ CHECK(!code.is_null());
+ delete faa;
+ }
+ return code;
+}
+
+} // namespace experimental
+} // namespace internal
+
+
+namespace experimental {
+
+
+FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal::compiler::FastAccessorAssembler* faa =
+ new internal::compiler::FastAccessorAssembler(i_isolate);
+ return FromInternal(faa);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::IntegerConstant(
+ int const_value) {
+ return FromApi(this)->IntegerConstant(const_value);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::GetReceiver() {
+ return FromApi(this)->GetReceiver();
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
+ ValueId value, int field_no) {
+ return FromApi(this)->LoadInternalField(value, field_no);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
+ int offset) {
+ return FromApi(this)->LoadValue(value_id, offset);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
+ int offset) {
+ return FromApi(this)->LoadObject(value_id, offset);
+}
+
+
+void FastAccessorBuilder::ReturnValue(ValueId value) {
+ FromApi(this)->ReturnValue(value);
+}
+
+
+void FastAccessorBuilder::CheckFlagSetOrReturnNull(ValueId value_id, int mask) {
+ FromApi(this)->CheckFlagSetOrReturnNull(value_id, mask);
+}
+
+
+void FastAccessorBuilder::CheckNotZeroOrReturnNull(ValueId value_id) {
+ FromApi(this)->CheckNotZeroOrReturnNull(value_id);
+}
+
+
+FastAccessorBuilder::LabelId FastAccessorBuilder::MakeLabel() {
+ return FromApi(this)->MakeLabel();
+}
+
+
+void FastAccessorBuilder::SetLabel(LabelId label_id) {
+ FromApi(this)->SetLabel(label_id);
+}
+
+
+void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
+ LabelId label_id) {
+ FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
+}
+
+} // namespace experimental
+} // namespace v8
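
Note: api-experimental.cc treats v8::experimental::FastAccessorBuilder as an opaque handle. FromApi/FromInternal reinterpret_cast between the public type and the internal assembler, and every public method forwards through FromApi(this). A standalone sketch of that pattern, type-punned the same way the file is; all names here are illustrative.

    #include <cstdio>

    namespace internal {
    class Impl {
     public:
      int NextId() { return counter_++; }
     private:
      int counter_ = 0;
    };
    }  // namespace internal

    // Opaque public type: carries no state and is never constructed directly.
    class PublicBuilder {
     public:
      static PublicBuilder* New() {
        return reinterpret_cast<PublicBuilder*>(new internal::Impl());
      }
      int NextId() { return reinterpret_cast<internal::Impl*>(this)->NextId(); }
      void Dispose() { delete reinterpret_cast<internal::Impl*>(this); }
    };

    int main() {
      PublicBuilder* b = PublicBuilder::New();
      int first = b->NextId();   // 0
      int second = b->NextId();  // 1
      b->Dispose();
      return first == 0 && second == 1 ? 0 : 1;
    }
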
diff --git a/chromium/v8/src/api-experimental.h b/chromium/v8/src/api-experimental.h
new file mode 100644
index 00000000000..bc0bc55739b
--- /dev/null
+++ b/chromium/v8/src/api-experimental.h
@@ -0,0 +1,28 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_API_EXPERIMENTAL_H_
+#define V8_API_EXPERIMENTAL_H_
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+class Code;
+} // namespace internal
+namespace experimental {
+class FastAccessorBuilder;
+} // namespace experimental
+
+namespace internal {
+namespace experimental {
+
+v8::internal::MaybeHandle<v8::internal::Code> BuildCodeFromFastAccessorBuilder(
+ v8::experimental::FastAccessorBuilder* fast_handler);
+
+} // namespace experimental
+} // namespace internal
+} // namespace v8
+
+#endif // V8_API_EXPERIMENTAL_H_
diff --git a/chromium/v8/src/api-natives.cc b/chromium/v8/src/api-natives.cc
index 051ea4a17b1..bc71e3ef90d 100644
--- a/chromium/v8/src/api-natives.cc
+++ b/chromium/v8/src/api-natives.cc
@@ -37,25 +37,6 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
}
-MaybeHandle<JSFunction> InstantiateFunctionOrMaybeDont(Isolate* isolate,
- Handle<Object> data) {
- DCHECK(data->IsFunctionTemplateInfo() || data->IsJSFunction());
- if (data->IsFunctionTemplateInfo()) {
- // A function template needs to be instantiated.
- return InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(data));
-#ifdef V8_JS_ACCESSORS
- } else if (data->IsJSFunction()) {
- // If we already have a proper function, we do not need additional work.
- // (This should only happen for JavaScript API accessors.)
- return Handle<JSFunction>::cast(data);
-#endif // V8_JS_ACCESSORS
- } else {
- UNREACHABLE();
- return MaybeHandle<JSFunction>();
- }
-}
-
MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> name,
@@ -63,14 +44,18 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<Object> setter,
PropertyAttributes attributes) {
if (!getter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, getter,
- InstantiateFunctionOrMaybeDont(isolate, getter),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, getter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(getter)),
+ Object);
}
if (!setter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, setter,
- InstantiateFunctionOrMaybeDont(isolate, setter),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, setter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(setter)),
+ Object);
}
RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
setter, attributes),
@@ -102,8 +87,10 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
}
#endif
- return Object::AddDataProperty(&it, value, attributes, STRICT,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ MAYBE_RETURN_NULL(
+ Object::AddDataProperty(&it, value, attributes, Object::THROW_ON_ERROR,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED));
+ return value;
}
@@ -148,6 +135,20 @@ class AccessCheckDisableScope {
};
+Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
+ Handle<Context> native_context = isolate->native_context();
+ DCHECK(!native_context.is_null());
+ switch (intrinsic) {
+#define GET_INTRINSIC_VALUE(name, iname) \
+ case v8::k##name: \
+ return native_context->iname();
+ V8_INTRINSICS_LIST(GET_INTRINSIC_VALUE)
+#undef GET_INTRINSIC_VALUE
+ }
+ return nullptr;
+}
+
+
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<TemplateInfo> data) {
auto property_list = handle(data->property_list(), isolate);
@@ -162,23 +163,40 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
auto name = handle(Name::cast(properties.get(i++)), isolate);
- PropertyDetails details(Smi::cast(properties.get(i++)));
- PropertyAttributes attributes = details.attributes();
- PropertyKind kind = details.kind();
+ auto bit = handle(properties.get(i++), isolate);
+ if (bit->IsSmi()) {
+ PropertyDetails details(Smi::cast(*bit));
+ PropertyAttributes attributes = details.attributes();
+ PropertyKind kind = details.kind();
+
+ if (kind == kData) {
+ auto prop_data = handle(properties.get(i++), isolate);
+
+ RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
+ prop_data, attributes),
+ JSObject);
+ } else {
+ auto getter = handle(properties.get(i++), isolate);
+ auto setter = handle(properties.get(i++), isolate);
+ RETURN_ON_EXCEPTION(isolate,
+ DefineAccessorProperty(isolate, obj, name, getter,
+ setter, attributes),
+ JSObject);
+ }
+ } else {
+ // Intrinsic data property --- Get appropriate value from the current
+ // context.
+ PropertyDetails details(Smi::cast(properties.get(i++)));
+ PropertyAttributes attributes = details.attributes();
+ DCHECK_EQ(kData, details.kind());
- if (kind == kData) {
- auto prop_data = handle(properties.get(i++), isolate);
+ v8::Intrinsic intrinsic =
+ static_cast<v8::Intrinsic>(Smi::cast(properties.get(i++))->value());
+ auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
- } else {
- auto getter = handle(properties.get(i++), isolate);
- auto setter = handle(properties.get(i++), isolate);
- RETURN_ON_EXCEPTION(isolate,
- DefineAccessorProperty(isolate, obj, name, getter,
- setter, attributes),
- JSObject);
}
}
return obj;
@@ -268,9 +286,9 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
JSObject::GetProperty(parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
- RETURN_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(prototype, parent_prototype, false),
- JSFunction);
+ MAYBE_RETURN(JSObject::SetPrototype(prototype, parent_prototype, false,
+ Object::THROW_ON_ERROR),
+ MaybeHandle<JSFunction>());
}
}
auto function = ApiNatives::CreateApiFunction(
@@ -377,21 +395,25 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
}
+void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name, v8::Intrinsic intrinsic,
+ PropertyAttributes attributes) {
+ const int kSize = 4;
+ auto value = handle(Smi::FromInt(intrinsic), isolate);
+ auto intrinsic_marker = isolate->factory()->true_value();
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ auto details_handle = handle(details.AsSmi(), isolate);
+ Handle<Object> data[kSize] = {name, intrinsic_marker, details_handle, value};
+ AddPropertyToPropertyList(isolate, info, kSize, data);
+}
+
+
void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<TemplateInfo> info,
- Handle<Name> name, Handle<Object> getter,
- Handle<Object> setter,
+ Handle<Name> name,
+ Handle<FunctionTemplateInfo> getter,
+ Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
-#ifdef V8_JS_ACCESSORS
- DCHECK(getter.is_null() || getter->IsFunctionTemplateInfo() ||
- getter->IsJSFunction());
- DCHECK(setter.is_null() || setter->IsFunctionTemplateInfo() ||
- setter->IsJSFunction());
-#else
- DCHECK(getter.is_null() || getter->IsFunctionTemplateInfo());
- DCHECK(setter.is_null() || setter->IsFunctionTemplateInfo());
-#endif // V8_JS_ACCESSORS
-
const int kSize = 4;
PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
@@ -416,8 +438,16 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type) {
- Handle<Code> code = isolate->builtins()->HandleApiCall();
- Handle<Code> construct_stub = isolate->builtins()->JSConstructStubApi();
+ Handle<Code> code;
+ if (obj->call_code()->IsCallHandlerInfo() &&
+ CallHandlerInfo::cast(obj->call_code())->fast_handler()->IsCode()) {
+ code = isolate->builtins()->HandleFastApiCall();
+ } else {
+ code = isolate->builtins()->HandleApiCall();
+ }
+ Handle<Code> construct_stub =
+ prototype.is_null() ? isolate->builtins()->ConstructedNonConstructable()
+ : isolate->builtins()->JSConstructStubApi();
obj->set_instantiated(true);
Handle<JSFunction> result;
@@ -518,7 +548,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined()) {
map->set_is_callable();
- map->set_is_constructor(true);
+ map->set_is_constructor();
}
// Recursively copy parent instance templates' accessors,
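
Note: ConfigureInstance above dispatches on the slot after the property name: a Smi means ordinary PropertyDetails (a data or accessor entry), anything else is the intrinsic marker written by the new AddDataProperty overload, followed by details and the intrinsic id. A hedged sketch of that flat-list encoding, with std::variant standing in for tagged heap slots; accessor entries, which carry a getter/setter pair, are omitted.

    #include <cstdio>
    #include <string>
    #include <variant>
    #include <vector>

    using Slot = std::variant<std::string, int, bool>;  // name | smi | marker

    void ReadProperties(const std::vector<Slot>& list) {
      size_t i = 0;
      while (i < list.size()) {
        auto name = std::get<std::string>(list[i++]);
        if (std::holds_alternative<int>(list[i])) {
          // {name, details, value}: a plain data property.
          int details = std::get<int>(list[i++]);
          int value = std::get<int>(list[i++]);
          std::printf("data %s details=%d value=%d\n", name.c_str(),
                      details, value);
        } else {
          // {name, marker, details, intrinsic_id}: resolved per-context.
          i++;  // skip the intrinsic marker (the `true` slot)
          int details = std::get<int>(list[i++]);
          int intrinsic = std::get<int>(list[i++]);
          std::printf("intrinsic %s details=%d id=%d\n", name.c_str(),
                      details, intrinsic);
        }
      }
    }

    int main() {
      ReadProperties({std::string("x"), 0, 42,              // data
                      std::string("values"), true, 0, 7});  // intrinsic
    }
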
diff --git a/chromium/v8/src/api-natives.h b/chromium/v8/src/api-natives.h
index 0639677b155..fcca4a5a17e 100644
--- a/chromium/v8/src/api-natives.h
+++ b/chromium/v8/src/api-natives.h
@@ -44,9 +44,14 @@ class ApiNatives {
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes);
+ static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
+ Handle<Name> name, v8::Intrinsic intrinsic,
+ PropertyAttributes attributes);
+
static void AddAccessorProperty(Isolate* isolate, Handle<TemplateInfo> info,
- Handle<Name> name, Handle<Object> getter,
- Handle<Object> setter,
+ Handle<Name> name,
+ Handle<FunctionTemplateInfo> getter,
+ Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes);
static void AddNativeDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
diff --git a/chromium/v8/src/api.cc b/chromium/v8/src/api.cc
index cfc170ee8e1..8274a735689 100644
--- a/chromium/v8/src/api.cc
+++ b/chromium/v8/src/api.cc
@@ -9,9 +9,13 @@
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
#include <cmath> // For isnan.
+#include <limits>
+#include <vector>
#include "include/v8-debug.h"
+#include "include/v8-experimental.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
+#include "src/api-experimental.h"
#include "src/api-natives.h"
#include "src/assert-scope.h"
#include "src/background-parsing-task.h"
@@ -33,9 +37,10 @@
#include "src/global-handles.h"
#include "src/icu_util.h"
#include "src/isolate-inl.h"
-#include "src/json-parser.h"
#include "src/messages.h"
-#include "src/parser.h"
+#include "src/parsing/json-parser.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/heap-profiler.h"
@@ -43,11 +48,11 @@
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/sampler.h"
#include "src/property.h"
+#include "src/property-descriptor.h"
#include "src/property-details.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
-#include "src/scanner-character-streams.h"
#include "src/simulator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -158,6 +163,7 @@ class CallDepthScope {
do_callback_(do_callback) {
// TODO(dcarney): remove this when blink stops crashing.
DCHECK(!isolate_->external_caught_exception());
+ isolate_->IncrementJsCallsFromApiCounter();
isolate_->handle_scope_implementer()->IncrementCallDepth();
if (!context_.IsEmpty()) context_->Enter();
}
@@ -734,17 +740,17 @@ SealHandleScope::SealHandleScope(Isolate* isolate) {
i::HandleScopeData* current = internal_isolate->handle_scope_data();
prev_limit_ = current->limit;
current->limit = current->next;
- prev_level_ = current->level;
- current->level = 0;
+ prev_sealed_level_ = current->sealed_level;
+ current->sealed_level = current->level;
}
SealHandleScope::~SealHandleScope() {
i::HandleScopeData* current = isolate_->handle_scope_data();
- DCHECK_EQ(0, current->level);
- current->level = prev_level_;
DCHECK_EQ(current->next, current->limit);
current->limit = prev_limit_;
+ DCHECK_EQ(current->level, current->sealed_level);
+ current->sealed_level = prev_sealed_level_;
}
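
Note: the SealHandleScope hunk stops zeroing the scope level and instead records a sealed_level, so handle creation is forbidden only at the sealed depth and a nested HandleScope lifts the restriction. A hedged usage sketch, assuming an initialized isolate and this version's NewFromUtf8 signature.

    #include "include/v8.h"

    void Example(v8::Isolate* isolate) {
      v8::HandleScope outer(isolate);
      v8::Local<v8::String> before =
          v8::String::NewFromUtf8(isolate, "created before sealing");
      {
        v8::SealHandleScope seal(isolate);
        // Allocating a Local here would trip the sealed_level DCHECK:
        // v8::Local<v8::String> bad = v8::String::NewFromUtf8(isolate, "no");
        v8::HandleScope inner(isolate);  // level rises above sealed_level
        v8::Local<v8::String> fine =
            v8::String::NewFromUtf8(isolate, "fine inside a nested scope");
        (void)fine;
      }
      (void)before;
    }
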
@@ -880,8 +886,8 @@ int NeanderArray::length() {
i::Object* NeanderArray::get(int offset) {
- DCHECK(0 <= offset);
- DCHECK(offset < length());
+ DCHECK_LE(0, offset);
+ DCHECK_LT(offset, length());
return obj_.get(offset + 1);
}
@@ -930,7 +936,7 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
// TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
Utils::OpenHandle(*value),
- static_cast<PropertyAttributes>(attribute));
+ static_cast<i::PropertyAttributes>(attribute));
}
@@ -951,29 +957,10 @@ void Template::SetAccessorProperty(
i::ApiNatives::AddAccessorProperty(
isolate, templ, Utils::OpenHandle(*name),
Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true),
- static_cast<PropertyAttributes>(attribute));
+ static_cast<i::PropertyAttributes>(attribute));
}
-#ifdef V8_JS_ACCESSORS
-void Template::SetAccessorProperty(v8::Local<v8::Name> name,
- v8::Local<Function> getter,
- v8::Local<Function> setter,
- v8::PropertyAttribute attribute) {
- auto templ = Utils::OpenHandle(this);
- auto isolate = templ->GetIsolate();
- ENTER_V8(isolate);
- DCHECK(!name.IsEmpty());
- DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
- i::HandleScope scope(isolate);
- i::ApiNatives::AddAccessorProperty(
- isolate, templ, Utils::OpenHandle(*name),
- Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true),
- static_cast<PropertyAttributes>(attribute));
-}
-#endif // V8_JS_ACCESSORS
-
-
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
@@ -1013,7 +1000,8 @@ void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
static Local<FunctionTemplate> FunctionTemplateNew(
- i::Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
+ i::Isolate* isolate, FunctionCallback callback,
+ experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
v8::Local<Signature> signature, int length, bool do_not_cache) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
@@ -1031,7 +1019,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
- Utils::ToLocal(obj)->SetCallHandler(callback, data);
+ Utils::ToLocal(obj)->SetCallHandler(callback, data, fast_handler);
}
obj->set_length(length);
obj->set_undetectable(false);
@@ -1042,6 +1030,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
return Utils::ToLocal(obj);
}
+
Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
FunctionCallback callback,
v8::Local<Value> data,
@@ -1053,8 +1042,21 @@ Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
DCHECK(!i_isolate->serializer_enabled());
LOG_API(i_isolate, "FunctionTemplate::New");
ENTER_V8(i_isolate);
- return FunctionTemplateNew(
- i_isolate, callback, data, signature, length, false);
+ return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
+ length, false);
+}
+
+
+Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
+ Isolate* isolate, FunctionCallback callback,
+ experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
+ v8::Local<Signature> signature, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ DCHECK(!i_isolate->serializer_enabled());
+ LOG_API(i_isolate, "FunctionTemplate::NewWithFastHandler");
+ ENTER_V8(i_isolate);
+ return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
+ length, false);
}
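
Note: NewWithFastHandler pairs a regular FunctionCallback with a FastAccessorBuilder program that the compiler turns into a fallback-free fast path. A hedged sketch using only the calls visible in this patch, and assuming the v8.h declaration defaults the data/signature/length parameters.

    #include "include/v8.h"
    #include "include/v8-experimental.h"

    void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      // Generic C++ path used when the fast handler does not apply.
    }

    v8::Local<v8::FunctionTemplate> MakeFastGetter(v8::Isolate* isolate) {
      namespace ex = v8::experimental;
      ex::FastAccessorBuilder* fab = ex::FastAccessorBuilder::New(isolate);
      ex::FastAccessorBuilder::ValueId receiver = fab->GetReceiver();
      ex::FastAccessorBuilder::ValueId field =
          fab->LoadInternalField(receiver, 0);
      fab->ReturnValue(field);
      // The builder is consumed here: api-experimental.cc calls Build() and
      // then deletes the underlying assembler.
      return v8::FunctionTemplate::NewWithFastHandler(isolate, SlowCallback,
                                                      fab);
    }
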
@@ -1070,49 +1072,15 @@ Local<AccessorSignature> AccessorSignature::New(
}
-Local<TypeSwitch> TypeSwitch::New(Local<FunctionTemplate> type) {
- Local<FunctionTemplate> types[1] = {type};
- return TypeSwitch::New(1, types);
-}
-
-
-Local<TypeSwitch> TypeSwitch::New(int argc, Local<FunctionTemplate> types[]) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeSwitch::New");
- ENTER_V8(isolate);
- i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++)
- vector->set(i, *Utils::OpenHandle(*types[i]));
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TYPE_SWITCH_INFO_TYPE);
- i::Handle<i::TypeSwitchInfo> obj =
- i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
- obj->set_types(*vector);
- return Utils::ToLocal(obj);
-}
-
-
-int TypeSwitch::match(v8::Local<Value> value) {
- i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
- LOG_API(info->GetIsolate(), "TypeSwitch::match");
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- i::FixedArray* types = i::FixedArray::cast(info->types());
- for (int i = 0; i < types->length(); i++) {
- if (i::FunctionTemplateInfo::cast(types->get(i))->IsTemplateFor(*obj))
- return i + 1;
- }
- return 0;
-}
-
-
#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
(obj)->setter(*foreign); \
} while (false)
-void FunctionTemplate::SetCallHandler(FunctionCallback callback,
- v8::Local<Value> data) {
+void FunctionTemplate::SetCallHandler(
+ FunctionCallback callback, v8::Local<Value> data,
+ experimental::FastAccessorBuilder* fast_handler) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
@@ -1123,6 +1091,11 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
+ i::MaybeHandle<i::Code> code =
+ i::experimental::BuildCodeFromFastAccessorBuilder(fast_handler);
+ if (!code.is_null()) {
+ obj->set_fast_handler(*code.ToHandleChecked());
+ }
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -1138,7 +1111,7 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
obj->set_name(*Utils::OpenHandle(*name));
if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+ obj->set_property_attributes(static_cast<i::PropertyAttributes>(attributes));
if (!signature.IsEmpty()) {
obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
}
@@ -1352,6 +1325,18 @@ void Template::SetNativeDataProperty(v8::Local<Name> name,
}
+void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
+ PropertyAttribute attribute) {
+ auto templ = Utils::OpenHandle(this);
+ i::Isolate* isolate = templ->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
+ intrinsic,
+ static_cast<i::PropertyAttributes>(attribute));
+}
+
+
void ObjectTemplate::SetAccessor(v8::Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
@@ -1440,6 +1425,33 @@ void ObjectTemplate::MarkAsUndetectable() {
}
+void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
+ Local<Value> data) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ auto cons = EnsureConstructor(isolate, this);
+ EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
+
+ i::Handle<i::Struct> struct_info =
+ isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
+ i::Handle<i::AccessCheckInfo> info =
+ i::Handle<i::AccessCheckInfo>::cast(struct_info);
+
+ SET_FIELD_WRAPPED(info, set_callback, callback);
+ SET_FIELD_WRAPPED(info, set_named_callback, nullptr);
+ SET_FIELD_WRAPPED(info, set_indexed_callback, nullptr);
+
+ if (data.IsEmpty()) {
+ data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ info->set_data(*Utils::OpenHandle(*data));
+
+ cons->set_access_check_info(*info);
+ cons->set_needs_access_check(true);
+}
+
+
void ObjectTemplate::SetAccessCheckCallbacks(
NamedSecurityCallback named_callback,
IndexedSecurityCallback indexed_callback, Local<Value> data) {
@@ -1454,6 +1466,7 @@ void ObjectTemplate::SetAccessCheckCallbacks(
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
+ SET_FIELD_WRAPPED(info, set_callback, nullptr);
SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
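
Note: SetAccessCheckCallback replaces the named/indexed callback pair with a single predicate, and the older SetAccessCheckCallbacks now nulls out set_callback so the two paths stay distinct. A hedged usage sketch; the exact AccessCheckCallback signature is an assumption modeled on this version's v8.h (accessing context, accessed object, callback data).

    #include "include/v8.h"

    // Assumed callback shape: return true to allow cross-context access.
    bool AccessAllowed(v8::Local<v8::Context> accessing_context,
                       v8::Local<v8::Object> accessed_object,
                       v8::Local<v8::Value> data) {
      return false;  // deny all cross-context access in this sketch
    }

    v8::Local<v8::ObjectTemplate> MakeGuardedTemplate(v8::Isolate* isolate) {
      v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
      templ->SetAccessCheckCallback(AccessAllowed);  // data defaults to empty
      return templ;
    }
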
@@ -1933,8 +1946,9 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
isolate);
for (size_t i = 0; i < context_extension_count; ++i) {
- i::Handle<i::JSObject> extension =
+ i::Handle<i::JSReceiver> extension =
Utils::OpenHandle(*context_extensions[i]);
+ if (!extension->IsJSObject()) return Local<Function>();
i::Handle<i::JSFunction> closure(context->closure(), isolate);
context = factory->NewWithContext(closure, context, extension);
}
@@ -1968,7 +1982,8 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Utils::OpenHandle(*v8_context->Global()), 0,
nullptr).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Function);
- RETURN_ESCAPED(Utils::ToLocal(i::Handle<i::JSFunction>::cast(result)));
+ RETURN_ESCAPED(
+ Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(result)));
}
@@ -2105,7 +2120,7 @@ v8::TryCatch::TryCatch()
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- v8::internal::GetCurrentStackPosition()));
+ isolate_, v8::internal::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2122,7 +2137,7 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- v8::internal::GetCurrentStackPosition()));
+ isolate_, v8::internal::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2141,7 +2156,7 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::internal::SimulatorStack::UnregisterCTryCatch();
+ v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
} else {
@@ -2152,7 +2167,7 @@ v8::TryCatch::~TryCatch() {
isolate_->CancelScheduledExceptionFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::internal::SimulatorStack::UnregisterCTryCatch();
+ v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
}
}
@@ -2686,9 +2701,7 @@ bool Value::IsFalse() const {
}
-bool Value::IsFunction() const {
- return Utils::OpenHandle(this)->IsJSFunction();
-}
+bool Value::IsFunction() const { return Utils::OpenHandle(this)->IsCallable(); }
bool Value::IsName() const {
@@ -2753,9 +2766,7 @@ bool Value::IsSharedArrayBuffer() const {
}
-bool Value::IsObject() const {
- return Utils::OpenHandle(this)->IsJSObject();
-}
+bool Value::IsObject() const { return Utils::OpenHandle(this)->IsJSReceiver(); }
bool Value::IsNumber() const {
@@ -2763,6 +2774,9 @@ bool Value::IsNumber() const {
}
+bool Value::IsProxy() const { return Utils::OpenHandle(this)->IsJSProxy(); }
+
+
#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
bool Value::Is##Type() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
@@ -2868,6 +2882,12 @@ bool Value::IsSetIterator() const {
}
+bool Value::IsPromise() const {
+ auto self = Utils::OpenHandle(this);
+ return i::Object::IsPromise(self);
+}
+
+
MaybeLocal<String> Value::ToString(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
@@ -2886,12 +2906,16 @@ Local<String> Value::ToString(Isolate* isolate) const {
MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
- auto obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
PREPARE_FOR_EXECUTION(context, "ToDetailString", String);
Local<String> result;
- has_pending_exception =
- !ToLocal<String>(i::Execution::ToDetailString(isolate, obj), &result);
+ i::Handle<i::Object> args[] = {obj};
+ has_pending_exception = !ToLocal<String>(
+ i::Execution::TryCall(isolate, isolate->no_side_effects_to_string_fun(),
+ isolate->factory()->undefined_value(),
+ arraysize(args), args),
+ &result);
RETURN_ON_FAILED_EXECUTION(String);
RETURN_ESCAPED(result);
}
@@ -3019,16 +3043,14 @@ void External::CheckCast(v8::Value* that) {
void v8::Object::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSObject(),
- "v8::Object::Cast()",
+ Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast()",
"Could not convert to object");
}
void v8::Function::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSFunction(),
- "v8::Function::Cast()",
+ Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast()",
"Could not convert to function");
}
@@ -3129,6 +3151,12 @@ void v8::Promise::Resolver::CheckCast(Value* that) {
}
+void v8::Proxy::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast()",
+ "Could not convert to proxy");
+}
+
+
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(
@@ -3459,13 +3487,14 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
bool);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
isolate, self, key_obj, i::LookupIterator::OWN);
- Maybe<bool> result = i::JSObject::CreateDataProperty(&it, value_obj);
+ Maybe<bool> result =
+ i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -3477,11 +3506,12 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
bool);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it(isolate, self, index, i::LookupIterator::OWN);
- Maybe<bool> result = i::JSObject::CreateDataProperty(&it, value_obj);
+ Maybe<bool> result =
+ i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -3494,65 +3524,55 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::PropertyAttribute attributes) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DefineOwnProperty()",
bool);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
- auto value_obj = Utils::OpenHandle(*value);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- if (self->IsAccessCheckNeeded() && !isolate->MayAccess(self)) {
- isolate->ReportFailedAccessCheck(self);
+ if (self->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()),
+ i::Handle<i::JSObject>::cast(self))) {
+ isolate->ReportFailedAccessCheck(i::Handle<i::JSObject>::cast(self));
return Nothing<bool>();
}
- i::Handle<i::FixedArray> desc = isolate->factory()->NewFixedArray(3);
- desc->set(0, isolate->heap()->ToBoolean(!(attributes & v8::ReadOnly)));
- desc->set(1, isolate->heap()->ToBoolean(!(attributes & v8::DontEnum)));
- desc->set(2, isolate->heap()->ToBoolean(!(attributes & v8::DontDelete)));
- i::Handle<i::JSArray> desc_array =
- isolate->factory()->NewJSArrayWithElements(desc, i::FAST_ELEMENTS, 3);
- i::Handle<i::Object> args[] = {self, key_obj, value_obj, desc_array};
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::JSFunction> fun = isolate->object_define_own_property();
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
+ i::PropertyDescriptor desc;
+ desc.set_writable(!(attributes & v8::ReadOnly));
+ desc.set_enumerable(!(attributes & v8::DontEnum));
+ desc.set_configurable(!(attributes & v8::DontDelete));
+ desc.set_value(value_obj);
+ Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+ isolate, self, key_obj, &desc, i::Object::DONT_THROW);
+ // Even though we said DONT_THROW, there might be accessors that do throw.
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->BooleanValue());
+ return success;
}
MUST_USE_RESULT
static i::MaybeHandle<i::Object> DefineObjectProperty(
i::Handle<i::JSObject> js_object, i::Handle<i::Object> key,
- i::Handle<i::Object> value, PropertyAttributes attrs) {
+ i::Handle<i::Object> value, i::PropertyAttributes attrs) {
i::Isolate* isolate = js_object->GetIsolate();
- // Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return i::JSObject::SetOwnElementIgnoreAttributes(js_object, index, value,
- attrs);
- }
-
- i::Handle<i::Name> name;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
- i::Object::ToName(isolate, key),
- i::MaybeHandle<i::Object>());
+ bool success = false;
+ i::LookupIterator it = i::LookupIterator::PropertyOrElement(
+ isolate, js_object, key, &success, i::LookupIterator::OWN);
+ if (!success) return i::MaybeHandle<i::Object>();
- return i::JSObject::DefinePropertyOrElementIgnoreAttributes(js_object, name,
- value, attrs);
+ return i::JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs);
}
Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value,
v8::PropertyAttribute attribs) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
- auto self = Utils::OpenHandle(this);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::ForceSet()", bool);
+ auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto key_obj = Utils::OpenHandle(*key);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
DefineObjectProperty(self, key_obj, value_obj,
- static_cast<PropertyAttributes>(attribs)).is_null();
+ static_cast<i::PropertyAttributes>(attribs))
+ .is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
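
Note: the DefineOwnProperty hunk above drops the round trip through a JS helper and fills an i::PropertyDescriptor directly, inverting the v8::PropertyAttribute bits into writable/enumerable/configurable. A hedged embedder-side sketch of the public API this implements, assuming an entered context.

    #include "include/v8.h"

    bool DefineFrozenField(v8::Local<v8::Context> context,
                           v8::Local<v8::Object> obj) {
      v8::Isolate* isolate = context->GetIsolate();
      v8::Local<v8::Name> key = v8::String::NewFromUtf8(isolate, "id");
      v8::Local<v8::Value> value = v8::Integer::New(isolate, 7);
      // writable=false, configurable=false, but still enumerable:
      v8::PropertyAttribute attrs =
          static_cast<v8::PropertyAttribute>(v8::ReadOnly | v8::DontDelete);
      return obj->DefineOwnProperty(context, key, value, attrs)
          .FromMaybe(false);
    }
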
@@ -3564,17 +3584,26 @@ bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(),
"v8::Object::ForceSet", false, i::HandleScope,
false);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSObject> self =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
has_pending_exception =
DefineObjectProperty(self, key_obj, value_obj,
- static_cast<PropertyAttributes>(attribs)).is_null();
+ static_cast<i::PropertyAttributes>(attribs))
+ .is_null();
EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false);
return true;
}
+Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
+ Local<Value> value) {
+ return DefineOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)),
+ value, DontEnum);
+}
+
+
MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
Local<Value> key) {
PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
@@ -3611,6 +3640,12 @@ Local<Value> v8::Object::Get(uint32_t index) {
}
+MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
+ Local<Private> key) {
+ return Get(context, Local<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(
@@ -3626,8 +3661,8 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
auto result = i::JSReceiver::GetPropertyAttributes(self, key_name);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
- if (result.FromJust() == ABSENT) {
- return Just(static_cast<PropertyAttribute>(NONE));
+ if (result.FromJust() == i::ABSENT) {
+ return Just(static_cast<PropertyAttribute>(i::NONE));
}
return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
@@ -3636,7 +3671,7 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return GetPropertyAttributes(context, key)
- .FromMaybe(static_cast<PropertyAttribute>(NONE));
+ .FromMaybe(static_cast<PropertyAttribute>(i::NONE));
}
@@ -3644,17 +3679,18 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
Local<String> key) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyDescriptor()",
Value);
- auto obj = Utils::OpenHandle(this);
- auto key_name = Utils::OpenHandle(*key);
- i::Handle<i::Object> args[] = { obj, key_name };
- i::Handle<i::JSFunction> fun = isolate->object_get_own_property_descriptor();
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_name = Utils::OpenHandle(*key);
+
+ i::PropertyDescriptor desc;
+ Maybe<bool> found =
+ i::JSReceiver::GetOwnPropertyDescriptor(isolate, obj, key_name, &desc);
+ has_pending_exception = found.IsNothing();
RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(Utils::ToLocal(result));
+ if (!found.FromJust()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate)));
}
@@ -3680,8 +3716,9 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
// We do not allow exceptions thrown while setting the prototype
// to propagate outside.
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- auto result = i::JSObject::SetPrototype(self, value_obj, false);
- has_pending_exception = result.is_null();
+ auto result = i::JSReceiver::SetPrototype(self, value_obj, false,
+ i::Object::THROW_ON_ERROR);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -3705,6 +3742,7 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
return Local<Object>();
}
}
+ // IsTemplateFor() ensures that iter.GetCurrent() can't be a Proxy here.
return Utils::ToLocal(i::handle(iter.GetCurrent<i::JSObject>(), isolate));
}
@@ -3713,8 +3751,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetPropertyNames()", Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
- has_pending_exception = !i::JSReceiver::GetKeys(
- self, i::JSReceiver::INCLUDE_PROTOS).ToHandle(&value);
+ has_pending_exception =
+ !i::JSReceiver::GetKeys(self, i::JSReceiver::INCLUDE_PROTOS,
+ i::ENUMERABLE_STRINGS)
+ .ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
@@ -3735,8 +3775,9 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
- has_pending_exception = !i::JSReceiver::GetKeys(
- self, i::JSReceiver::OWN_ONLY).ToHandle(&value);
+ has_pending_exception = !i::JSReceiver::GetKeys(self, i::JSReceiver::OWN_ONLY,
+ i::ENUMERABLE_STRINGS)
+ .ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
@@ -3754,63 +3795,13 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
- auto self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
- auto v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- i::Handle<i::Object> name(self->class_name(), isolate);
- i::Handle<i::Object> tag;
-
- // Native implementation of Object.prototype.toString (v8natives.js):
- // var c = %_ClassOf(this);
- // if (c === 'Arguments') c = 'Object';
- // return "[object " + c + "]";
-
- if (!name->IsString()) {
- return v8::String::NewFromUtf8(v8_isolate, "[object ]",
- NewStringType::kNormal);
- }
- auto class_name = i::Handle<i::String>::cast(name);
- if (i::String::Equals(class_name, isolate->factory()->Arguments_string())) {
- return v8::String::NewFromUtf8(v8_isolate, "[object Object]",
- NewStringType::kNormal);
- }
- if (internal::FLAG_harmony_tostring) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString()", String);
- auto toStringTag = isolate->factory()->to_string_tag_symbol();
- has_pending_exception = !i::Runtime::GetObjectProperty(
- isolate, self, toStringTag).ToHandle(&tag);
- RETURN_ON_FAILED_EXECUTION(String);
- if (tag->IsString()) {
- class_name = Utils::OpenHandle(*handle_scope.Escape(
- Utils::ToLocal(i::Handle<i::String>::cast(tag))));
- }
- }
- const char* prefix = "[object ";
- Local<String> str = Utils::ToLocal(class_name);
- const char* postfix = "]";
-
- int prefix_len = i::StrLength(prefix);
- int str_len = str->Utf8Length();
- int postfix_len = i::StrLength(postfix);
-
- int buf_len = prefix_len + str_len + postfix_len;
- i::ScopedVector<char> buf(buf_len);
-
- // Write prefix.
- char* ptr = buf.start();
- i::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
- ptr += prefix_len;
-
- // Write real content.
- str->WriteUtf8(ptr, str_len);
- ptr += str_len;
-
- // Write postfix.
- i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
-
- // Copy the buffer into a heap-allocated string and return it.
- return v8::String::NewFromUtf8(v8_isolate, buf.start(),
- NewStringType::kNormal, buf_len);
+ PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString", String);
+ auto obj = Utils::OpenHandle(this);
+ Local<String> result;
+ has_pending_exception =
+ !ToLocal<String>(i::JSObject::ObjectProtoToString(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(result);
}
@@ -3822,7 +3813,7 @@ Local<String> v8::Object::ObjectProtoToString() {
Local<String> v8::Object::GetConstructorName() {
auto self = Utils::OpenHandle(this);
- i::Handle<i::String> name(self->constructor_name());
+ i::Handle<i::String> name = i::JSReceiver::GetConstructorName(self);
return Utils::ToLocal(name);
}
@@ -3831,12 +3822,11 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> obj;
- has_pending_exception =
- !i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY)
- .ToHandle(&obj);
+ Maybe<bool> result =
+ i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(obj->IsTrue());
+ return result;
}
@@ -3846,6 +3836,12 @@ bool v8::Object::Delete(v8::Local<Value> key) {
}
+Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
+ Local<Private> key) {
+ return Delete(context, Local<Value>(reinterpret_cast<Value*>(*key)));
+}
+
+
Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
auto self = Utils::OpenHandle(this);
@@ -3874,15 +3870,19 @@ bool v8::Object::Has(v8::Local<Value> key) {
}
+Maybe<bool> v8::Object::HasPrivate(Local<Context> context, Local<Private> key) {
+ return HasOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)));
+}
+
+
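The two Private accessors above just reinterpret the key as an ordinary Name/Value and reuse the own-property paths. A minimal embedder sketch, assuming an entered isolate, a current context, and a Local<Object> obj:

    v8::Local<v8::String> name =
        v8::String::NewFromUtf8(isolate, "embedder-slot",
                                v8::NewStringType::kInternalized)
            .ToLocalChecked();
    v8::Local<v8::Private> key = v8::Private::ForApi(isolate, name);
    v8::Maybe<bool> has = obj->HasPrivate(context, key);
    if (has.IsJust() && has.FromJust()) {
      obj->DeletePrivate(context, key).FromJust();  // drop the private slot
    }
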
Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
bool);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> obj;
- has_pending_exception =
- !i::JSReceiver::DeleteElement(self, index).ToHandle(&obj);
+ Maybe<bool> result = i::JSReceiver::DeleteElement(self, index);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(obj->IsTrue());
+ return result;
}
@@ -3909,24 +3909,27 @@ bool v8::Object::Has(uint32_t index) {
template <typename Getter, typename Setter, typename Data>
-static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* obj,
+static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetAccessor()", bool);
+ if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false);
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
v8::Local<AccessorSignature> signature;
auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
signature);
if (info.is_null()) return Nothing<bool>();
- bool fast = Utils::OpenHandle(obj)->HasFastProperties();
+ bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
has_pending_exception =
- !i::JSObject::SetAccessor(Utils::OpenHandle(obj), info).ToHandle(&result);
+ !i::JSObject::SetAccessor(obj, info).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
if (result->IsUndefined()) return Nothing<bool>();
if (fast) {
- i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
+ i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
}
return Just(true);
}
@@ -3970,14 +3973,14 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return;
i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
- i::JSObject::DefineAccessor(v8::Utils::OpenHandle(this),
- v8::Utils::OpenHandle(*name),
- getter_i,
- setter_i,
- static_cast<PropertyAttributes>(attribute));
+ i::JSObject::DefineAccessor(i::Handle<i::JSObject>::cast(self),
+ v8::Utils::OpenHandle(*name), getter_i, setter_i,
+ static_cast<i::PropertyAttributes>(attribute));
}
@@ -4005,8 +4008,10 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasRealNamedProperty()",
bool);
auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
- auto result = i::JSObject::HasRealNamedProperty(self, key_val);
+ auto result = i::JSObject::HasRealNamedProperty(
+ i::Handle<i::JSObject>::cast(self), key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4024,7 +4029,9 @@ Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(context,
"v8::Object::HasRealIndexedProperty()", bool);
auto self = Utils::OpenHandle(this);
- auto result = i::JSObject::HasRealElementProperty(self, index);
+ if (!self->IsJSObject()) return Just(false);
+ auto result = i::JSObject::HasRealElementProperty(
+ i::Handle<i::JSObject>::cast(self), index);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4042,8 +4049,10 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(
context, "v8::Object::HasRealNamedCallbackProperty()", bool);
auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
- auto result = i::JSObject::HasRealNamedCallbackProperty(self, key_val);
+ auto result = i::JSObject::HasRealNamedCallbackProperty(
+ i::Handle<i::JSObject>::cast(self), key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4058,13 +4067,15 @@ bool v8::Object::HasRealNamedCallbackProperty(Local<String> key) {
bool v8::Object::HasNamedLookupInterceptor() {
auto self = Utils::OpenHandle(this);
- return self->HasNamedInterceptor();
+ return self->IsJSObject() &&
+ i::Handle<i::JSObject>::cast(self)->HasNamedInterceptor();
}
bool v8::Object::HasIndexedLookupInterceptor() {
auto self = Utils::OpenHandle(this);
- return self->HasIndexedInterceptor();
+ return self->IsJSObject() &&
+ i::Handle<i::JSObject>::cast(self)->HasIndexedInterceptor();
}
@@ -4072,13 +4083,15 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Local<Context> context, Local<Name> key) {
PREPARE_FOR_EXECUTION(
context, "v8::Object::GetRealNamedPropertyInPrototypeChain()", Value);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return MaybeLocal<Value>();
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return MaybeLocal<Value>();
- auto proto = i::PrototypeIterator::GetCurrent(iter);
+ i::Handle<i::JSReceiver> proto =
+ i::PrototypeIterator::GetCurrent<i::JSReceiver>(iter);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ isolate, self, key_obj, proto,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
@@ -4102,22 +4115,22 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
PREPARE_FOR_EXECUTION_PRIMITIVE(
context, "v8::Object::GetRealNamedPropertyAttributesInPrototypeChain()",
PropertyAttribute);
- auto self = Utils::OpenHandle(this);
- auto key_obj = Utils::OpenHandle(*key);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return Nothing<PropertyAttribute>();
+ i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return Nothing<PropertyAttribute>();
- auto proto = i::PrototypeIterator::GetCurrent(iter);
+ i::Handle<i::JSReceiver> proto =
+ i::PrototypeIterator::GetCurrent<i::JSReceiver>(iter);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ isolate, self, key_obj, proto,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- auto result = i::JSReceiver::GetPropertyAttributes(&it);
+ Maybe<i::PropertyAttributes> result =
+ i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
- if (result.FromJust() == ABSENT) {
- return Just(static_cast<PropertyAttribute>(NONE));
- }
- return Just<PropertyAttribute>(
- static_cast<PropertyAttribute>(result.FromJust()));
+ if (result.FromJust() == i::ABSENT) return Just(None);
+ return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
@@ -4163,8 +4176,8 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
auto result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
- if (result.FromJust() == ABSENT) {
- return Just(static_cast<PropertyAttribute>(NONE));
+ if (result.FromJust() == i::ABSENT) {
+ return Just(static_cast<PropertyAttribute>(i::NONE));
}
return Just<PropertyAttribute>(
static_cast<PropertyAttribute>(result.FromJust()));
@@ -4179,7 +4192,7 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<v8::Object> v8::Object::Clone() {
- auto self = Utils::OpenHandle(this);
+ auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
ENTER_V8(isolate);
auto result = isolate->factory()->CopyJSObject(self);
@@ -4206,16 +4219,21 @@ int v8::Object::GetIdentityHash() {
bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
v8::Local<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return false;
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
+ if (value.IsEmpty()) {
+ i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
+ key_string);
+ return true;
+ }
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_string, value_obj);
+ i::Handle<i::Object> result = i::JSObject::SetHiddenProperty(
+ i::Handle<i::JSObject>::cast(self), key_string, value_obj);
return *result == *self;
}
@@ -4223,11 +4241,14 @@ bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Local<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return v8::Local<v8::Value>();
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(key_string), isolate);
+ i::Handle<i::Object> result(
+ i::Handle<i::JSObject>::cast(self)->GetHiddenProperty(key_string),
+ isolate);
if (result->IsTheHole()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -4237,11 +4258,13 @@ bool v8::Object::DeleteHiddenValue(v8::Local<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return false;
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- i::JSObject::DeleteHiddenProperty(self, key_string);
+ i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
+ key_string);
return true;
}
@@ -4309,8 +4332,9 @@ MaybeLocal<Function> Function::New(Local<Context> context,
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
LOG_API(isolate, "Function::New");
ENTER_V8(isolate);
- return FunctionTemplateNew(isolate, callback, data, Local<Signature>(),
- length, true)->GetFunction(context);
+ return FunctionTemplateNew(isolate, callback, nullptr, data,
+ Local<Signature>(), length, true)
+ ->GetFunction(context);
}
@@ -4336,8 +4360,8 @@ MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Object> result;
- has_pending_exception =
- !ToLocal<Object>(i::Execution::New(self, argc, args), &result);
+ has_pending_exception = !ToLocal<Object>(
+ i::Execution::New(isolate, self, self, argc, args), &result);
RETURN_ON_FAILED_EXECUTION(Object);
RETURN_ESCAPED(result);
}
@@ -4375,29 +4399,60 @@ Local<v8::Value> Function::Call(v8::Local<v8::Value> recv, int argc,
void Function::SetName(v8::Local<v8::String> name) {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) return;
+ auto func = i::Handle<i::JSFunction>::cast(self);
func->shared()->set_name(*Utils::OpenHandle(*name));
}
Local<Value> Function::GetName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
- func->GetIsolate()));
+ auto self = Utils::OpenHandle(this);
+ if (self->IsJSBoundFunction()) {
+ auto func = i::Handle<i::JSBoundFunction>::cast(self);
+ return Utils::ToLocal(handle(func->name(), func->GetIsolate()));
+ }
+ if (self->IsJSFunction()) {
+ auto func = i::Handle<i::JSFunction>::cast(self);
+ return Utils::ToLocal(handle(func->shared()->name(), func->GetIsolate()));
+ }
+ return ToApiHandle<Primitive>(
+ self->GetIsolate()->factory()->undefined_value());
}
Local<Value> Function::GetInferredName() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return ToApiHandle<Primitive>(
+ self->GetIsolate()->factory()->undefined_value());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
func->GetIsolate()));
}
+Local<Value> Function::GetDebugName() const {
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return ToApiHandle<Primitive>(
+ self->GetIsolate()->factory()->undefined_value());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
+ i::Handle<i::String> name = i::JSFunction::GetDebugName(func);
+ return Utils::ToLocal(i::Handle<i::Object>(*name, name->GetIsolate()));
+}
+
+
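GetDebugName is new surface: internally it falls back from the function's name to its inferred name, and returns undefined for non-JSFunction callables. Sketch, assuming a Local<Function> fn:

    // One call instead of checking GetName() and GetInferredName() in turn.
    v8::Local<v8::Value> debug_name = fn->GetDebugName();
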
Local<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
i::Handle<i::String> property_name =
isolate->factory()->NewStringFromStaticChars("displayName");
i::Handle<i::Object> value =
@@ -4411,7 +4466,11 @@ Local<Value> Function::GetDisplayName() const {
ScriptOrigin Function::GetScriptOrigin() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return v8::ScriptOrigin(Local<Value>());
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return GetScriptOriginForScript(func->GetIsolate(), script);
@@ -4424,7 +4483,11 @@ const int Function::kLineOffsetNotFound = -1;
int Function::GetScriptLineNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return kLineOffsetNotFound;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return i::Script::GetLineNumber(script, func->shared()->start_position());
@@ -4434,7 +4497,11 @@ int Function::GetScriptLineNumber() const {
int Function::GetScriptColumnNumber() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return kLineOffsetNotFound;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
return i::Script::GetColumnNumber(script, func->shared()->start_position());
@@ -4444,13 +4511,21 @@ int Function::GetScriptColumnNumber() const {
bool Function::IsBuiltin() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return func->IsBuiltin();
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return false;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
+ return func->shared()->IsBuiltin();
}
int Function::ScriptId() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSFunction()) {
+ return v8::UnboundScript::kNoScriptId;
+ }
+ auto func = i::Handle<i::JSFunction>::cast(self);
if (!func->shared()->script()->IsScript()) {
return v8::UnboundScript::kNoScriptId;
}
@@ -4460,16 +4535,14 @@ int Function::ScriptId() const {
Local<v8::Value> Function::GetBoundFunction() const {
- i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- if (!func->shared()->bound()) {
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(func->GetIsolate()));
+ auto self = Utils::OpenHandle(this);
+ if (self->IsJSBoundFunction()) {
+ auto bound_function = i::Handle<i::JSBoundFunction>::cast(self);
+ auto bound_target_function = i::handle(
+ bound_function->bound_target_function(), bound_function->GetIsolate());
+ return Utils::CallableToLocal(bound_target_function);
}
- i::Handle<i::FixedArray> bound_args = i::Handle<i::FixedArray>(
- i::FixedArray::cast(func->function_bindings()));
- i::Handle<i::Object> original(
- bound_args->get(i::JSFunction::kBoundFunctionIndex),
- func->GetIsolate());
- return Utils::ToLocal(i::Handle<i::JSFunction>::cast(original));
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
}
@@ -4729,7 +4802,7 @@ class Utf8LengthHelper : public i::AllStatic {
}
static int Calculate(i::ConsString* current, uint8_t* state_out) {
- using namespace internal;
+ using internal::ConsString;
int total_length = 0;
uint8_t state = kInitialState;
while (true) {
@@ -4829,26 +4902,22 @@ class Utf8WriterVisitor {
int remaining,
char* const buffer,
bool replace_invalid_utf8) {
- using namespace unibrow;
- DCHECK(remaining > 0);
+ DCHECK_GT(remaining, 0);
// We can't use a local buffer here because Encode needs to modify
// previous characters in the stream. We know, however, that
// exactly one character will be advanced.
- if (Utf16::IsSurrogatePair(last_character, character)) {
- int written = Utf8::Encode(buffer,
- character,
- last_character,
- replace_invalid_utf8);
- DCHECK(written == 1);
+ if (unibrow::Utf16::IsSurrogatePair(last_character, character)) {
+ int written = unibrow::Utf8::Encode(buffer, character, last_character,
+ replace_invalid_utf8);
+ DCHECK_EQ(written, 1);
return written;
}
// Use a scratch buffer to check the required characters.
- char temp_buffer[Utf8::kMaxEncodedSize];
+ char temp_buffer[unibrow::Utf8::kMaxEncodedSize];
// Can't encode using last_character as gcc has array bounds issues.
- int written = Utf8::Encode(temp_buffer,
- character,
- Utf16::kNoPreviousCharacter,
- replace_invalid_utf8);
+ int written = unibrow::Utf8::Encode(temp_buffer, character,
+ unibrow::Utf16::kNoPreviousCharacter,
+ replace_invalid_utf8);
// Won't fit.
if (written > remaining) return 0;
// Copy over the character from temp_buffer.
@@ -4870,13 +4939,13 @@ class Utf8WriterVisitor {
// unit, or all units have been written out.
template<typename Char>
void Visit(const Char* chars, const int length) {
- using namespace unibrow;
DCHECK(!early_termination_);
if (length == 0) return;
// Copy state to stack.
char* buffer = buffer_;
- int last_character =
- sizeof(Char) == 1 ? Utf16::kNoPreviousCharacter : last_character_;
+ int last_character = sizeof(Char) == 1
+ ? unibrow::Utf16::kNoPreviousCharacter
+ : last_character_;
int i = 0;
// Do a fast loop where there is no exit capacity check.
while (true) {
@@ -4886,7 +4955,8 @@ class Utf8WriterVisitor {
} else {
int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
// Need enough space to write everything but one character.
- STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
+ STATIC_ASSERT(unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ==
+ 3);
int max_size_per_char = sizeof(Char) == 1 ? 2 : 3;
int writable_length =
(remaining_capacity - max_size_per_char)/max_size_per_char;
@@ -4898,17 +4968,15 @@ class Utf8WriterVisitor {
// Write the characters to the stream.
if (sizeof(Char) == 1) {
for (; i < fast_length; i++) {
- buffer +=
- Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++));
+ buffer += unibrow::Utf8::EncodeOneByte(
+ buffer, static_cast<uint8_t>(*chars++));
DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
}
} else {
for (; i < fast_length; i++) {
uint16_t character = *chars++;
- buffer += Utf8::Encode(buffer,
- character,
- last_character,
- replace_invalid_utf8_);
+ buffer += unibrow::Utf8::Encode(buffer, character, last_character,
+ replace_invalid_utf8_);
last_character = character;
DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
}
@@ -4925,12 +4993,12 @@ class Utf8WriterVisitor {
DCHECK(!skip_capacity_check_);
// Slow loop. Must check capacity on each iteration.
int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
- DCHECK(remaining_capacity >= 0);
+ DCHECK_GE(remaining_capacity, 0);
for (; i < length && remaining_capacity > 0; i++) {
uint16_t character = *chars++;
// remaining_capacity is <= 3 bytes at this point, so we do not write out
    // an unmatched lead surrogate.
- if (replace_invalid_utf8_ && Utf16::IsLeadSurrogate(character)) {
+ if (replace_invalid_utf8_ && unibrow::Utf16::IsLeadSurrogate(character)) {
early_termination_ = true;
break;
}
@@ -5178,6 +5246,11 @@ Local<Value> Symbol::Name() const {
}
+Local<Value> Private::Name() const {
+ return reinterpret_cast<const Symbol*>(this)->Name();
+}
+
+
double Number::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
@@ -5221,51 +5294,56 @@ uint32_t Uint32::Value() const {
int v8::Object::InternalFieldCount() {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- return obj->GetInternalFieldCount();
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return 0;
+ return i::Handle<i::JSObject>::cast(self)->GetInternalFieldCount();
}
-static bool InternalFieldOK(i::Handle<i::JSObject> obj,
- int index,
+static bool InternalFieldOK(i::Handle<i::JSReceiver> obj, int index,
const char* location) {
- return Utils::ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
+ return Utils::ApiCheck(
+ obj->IsJSObject() &&
+ (index < i::Handle<i::JSObject>::cast(obj)->GetInternalFieldCount()),
+ location, "Internal field out of bounds");
}
Local<Value> v8::Object::SlowGetInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetInternalField()";
if (!InternalFieldOK(obj, index, location)) return Local<Value>();
- i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
+ i::Handle<i::Object> value(
+ i::Handle<i::JSObject>::cast(obj)->GetInternalField(index),
+ obj->GetIsolate());
return Utils::ToLocal(value);
}
void v8::Object::SetInternalField(int index, v8::Local<Value> value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
- obj->SetInternalField(index, *val);
+ i::Handle<i::JSObject>::cast(obj)->SetInternalField(index, *val);
}
void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
if (!InternalFieldOK(obj, index, location)) return NULL;
- return DecodeSmiToAligned(obj->GetInternalField(index), location);
+ return DecodeSmiToAligned(
+ i::Handle<i::JSObject>::cast(obj)->GetInternalField(index), location);
}
void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
- obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
+ i::Handle<i::JSObject>::cast(obj)
+ ->SetInternalField(index, EncodeAlignedAsSmi(value, location));
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
@@ -5886,20 +5964,24 @@ double v8::NumberObject::ValueOf() const {
}
-Local<v8::Value> v8::BooleanObject::New(bool value) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "BooleanObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> boolean(value
- ? isolate->heap()->true_value()
- : isolate->heap()->false_value(),
- isolate);
+Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "BooleanObject::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
+ : i_isolate->heap()->false_value(),
+ i_isolate);
i::Handle<i::Object> obj =
- i::Object::ToObject(isolate, boolean).ToHandleChecked();
+ i::Object::ToObject(i_isolate, boolean).ToHandleChecked();
return Utils::ToLocal(obj);
}
+Local<v8::Value> v8::BooleanObject::New(bool value) {
+ return New(Isolate::GetCurrent(), value);
+}
+
+
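The parameterless overload now merely forwards through Isolate::GetCurrent(); new embedder code should pass the isolate explicitly. Sketch:

    v8::Local<v8::Value> boxed = v8::BooleanObject::New(isolate, true);
    bool value = boxed.As<v8::BooleanObject>()->ValueOf();  // true
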
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -5957,8 +6039,9 @@ MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
}
PREPARE_FOR_EXECUTION(context, "Date::New", Value);
Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(i::Execution::NewDate(isolate, time), &result);
+ has_pending_exception = !ToLocal<Value>(
+ i::JSDate::New(isolate->date_function(), isolate->date_function(), time),
+ &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -5999,26 +6082,13 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
}
-static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
- i::Isolate* isolate = i::Isolate::Current();
- uint8_t flags_buf[3];
- int num_flags = 0;
- if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
- if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
- if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
- DCHECK(num_flags <= static_cast<int>(arraysize(flags_buf)));
- return isolate->factory()->InternalizeOneByteString(
- i::Vector<const uint8_t>(flags_buf, num_flags));
-}
-
-
MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
Local<String> pattern, Flags flags) {
PREPARE_FOR_EXECUTION(context, "RegExp::New", RegExp);
Local<v8::RegExp> result;
has_pending_exception =
- !ToLocal<RegExp>(i::Execution::NewJSRegExp(Utils::OpenHandle(*pattern),
- RegExpFlagsToString(flags)),
+ !ToLocal<RegExp>(i::JSRegExp::New(Utils::OpenHandle(*pattern),
+ static_cast<i::JSRegExp::Flags>(flags)),
&result);
RETURN_ON_FAILED_EXECUTION(RegExp);
RETURN_ESCAPED(result);
@@ -6040,18 +6110,20 @@ Local<v8::String> v8::RegExp::GetSource() const {
// Assert that the static flags cast in GetFlags is valid.
-#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
- STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
- static_cast<int>(i::JSRegExp::internal_flag))
-REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
-REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
-REGEXP_FLAG_ASSERT_EQ(kIgnoreCase, IGNORE_CASE);
-REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
+#define REGEXP_FLAG_ASSERT_EQ(flag) \
+ STATIC_ASSERT(static_cast<int>(v8::RegExp::flag) == \
+ static_cast<int>(i::JSRegExp::flag))
+REGEXP_FLAG_ASSERT_EQ(kNone);
+REGEXP_FLAG_ASSERT_EQ(kGlobal);
+REGEXP_FLAG_ASSERT_EQ(kIgnoreCase);
+REGEXP_FLAG_ASSERT_EQ(kMultiline);
+REGEXP_FLAG_ASSERT_EQ(kSticky);
+REGEXP_FLAG_ASSERT_EQ(kUnicode);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return static_cast<RegExp::Flags>(obj->GetFlags().value());
+ return RegExp::Flags(static_cast<int>(obj->GetFlags()));
}
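With the flag enums now asserted to line up one-to-one (including the new kSticky and kUnicode), API flags combine with a plain bitwise OR and a static_cast. Sketch, assuming a context and a Local<String> pattern:

    v8::Local<v8::RegExp> re;
    if (v8::RegExp::New(context, pattern,
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kGlobal |
                                                       v8::RegExp::kUnicode))
            .ToLocal(&re)) {
      v8::RegExp::Flags flags = re->GetFlags();  // kGlobal | kUnicode
    }
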
@@ -6097,10 +6169,7 @@ MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
}
-Local<Object> Array::CloneElementAt(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(CloneElementAt(context, index), Object);
-}
+Local<Object> Array::CloneElementAt(uint32_t index) { return Local<Object>(); }
Local<v8::Map> v8::Map::New(Isolate* isolate) {
@@ -6203,23 +6272,6 @@ Local<Array> Map::AsArray() const {
}
-MaybeLocal<Map> Map::FromArray(Local<Context> context, Local<Array> array) {
- PREPARE_FOR_EXECUTION(context, "Map::FromArray", Map);
- if (array->Length() % 2 != 0) {
- return MaybeLocal<Map>();
- }
- i::Handle<i::Object> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*array)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->map_from_array(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(Map);
- RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
-}
-
-
Local<v8::Set> v8::Set::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "Set::New");
@@ -6304,26 +6356,6 @@ Local<Array> Set::AsArray() const {
}
-MaybeLocal<Set> Set::FromArray(Local<Context> context, Local<Array> array) {
- PREPARE_FOR_EXECUTION(context, "Set::FromArray", Set);
- i::Handle<i::Object> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*array)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->set_from_array(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(Set);
- RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
-}
-
-
-bool Value::IsPromise() const {
- auto self = Utils::OpenHandle(this);
- return i::Object::IsPromise(self);
-}
-
-
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "Promise::Resolver::New", Resolver);
i::Handle<i::Object> result;
@@ -6343,7 +6375,7 @@ Local<Promise::Resolver> Promise::Resolver::New(Isolate* isolate) {
Local<Promise> Promise::Resolver::GetPromise() {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
return Local<Promise>::Cast(Utils::ToLocal(promise));
}
@@ -6390,10 +6422,12 @@ void Promise::Resolver::Reject(Local<Value> value) {
}
-MaybeLocal<Promise> Promise::Chain(Local<Context> context,
- Local<Function> handler) {
+namespace {
+
+MaybeLocal<Promise> DoChain(Value* value, Local<Context> context,
+ Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, "Promise::Chain", Promise);
- auto self = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(value);
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
i::Handle<i::Object> result;
has_pending_exception = !i::Execution::Call(isolate, isolate->promise_chain(),
@@ -6403,10 +6437,18 @@ MaybeLocal<Promise> Promise::Chain(Local<Context> context,
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
+} // namespace
+
+
+MaybeLocal<Promise> Promise::Chain(Local<Context> context,
+ Local<Function> handler) {
+ return DoChain(this, context, handler);
+}
+
Local<Promise> Promise::Chain(Local<Function> handler) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Chain(context, handler), Promise);
+ RETURN_TO_LOCAL_UNCHECKED(DoChain(this, context, handler), Promise);
}
@@ -6451,7 +6493,7 @@ Local<Promise> Promise::Then(Local<Function> handler) {
bool Promise::HasHandler() {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
LOG_API(isolate, "Promise::HasRejectHandler");
ENTER_V8(isolate);
@@ -6460,6 +6502,44 @@ bool Promise::HasHandler() {
}
+Local<Object> Proxy::GetTarget() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> target(self->target());
+ return Utils::ToLocal(target);
+}
+
+
+Local<Value> Proxy::GetHandler() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> handler(self->handler(), self->GetIsolate());
+ return Utils::ToLocal(handler);
+}
+
+
+bool Proxy::IsRevoked() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ return self->IsRevoked();
+}
+
+
+void Proxy::Revoke() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ i::JSProxy::Revoke(self);
+}
+
+
+MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
+ Local<Object> local_handler) {
+ PREPARE_FOR_EXECUTION(context, "Proxy::New", Proxy);
+ i::Handle<i::JSReceiver> target = Utils::OpenHandle(*local_target);
+ i::Handle<i::JSReceiver> handler = Utils::OpenHandle(*local_handler);
+ Local<Proxy> result;
+ has_pending_exception =
+ !ToLocal<Proxy>(i::JSProxy::New(isolate, target, handler), &result);
+ RETURN_ON_FAILED_EXECUTION(Proxy);
+ RETURN_ESCAPED(result);
+}
+
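The new Proxy surface maps straight onto the JSProxy internals: target, handler, revocation state, and construction. A sketch of creating and revoking one, assuming target and handler are Local<Object> values in the current context:

    v8::Local<v8::Proxy> proxy;
    if (v8::Proxy::New(context, target, handler).ToLocal(&proxy)) {
      v8::Local<v8::Object> t = proxy->GetTarget();  // the wrapped object
      v8::Local<v8::Value> h = proxy->GetHandler();  // the trap table
      proxy->Revoke();                               // detach target and handler
      bool revoked = proxy->IsRevoked();             // now true
    }
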
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
@@ -6750,7 +6830,8 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
i::Handle<i::String> name,
- i::Handle<i::String> part) {
+ i::Handle<i::String> part,
+ bool private_symbol) {
i::Handle<i::JSObject> registry = isolate->GetSymbolRegistry();
i::Handle<i::JSObject> symbols =
i::Handle<i::JSObject>::cast(
@@ -6759,7 +6840,10 @@ static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
if (!symbol->IsSymbol()) {
DCHECK(symbol->IsUndefined());
- symbol = isolate->factory()->NewSymbol();
+ if (private_symbol)
+ symbol = isolate->factory()->NewPrivateSymbol();
+ else
+ symbol = isolate->factory()->NewSymbol();
i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
i::JSObject::SetProperty(symbols, name, symbol, i::STRICT).Assert();
}
@@ -6771,7 +6855,7 @@ Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
i::Handle<i::String> part = i_isolate->factory()->for_string();
- return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
}
@@ -6779,7 +6863,7 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
i::Handle<i::String> part = i_isolate->factory()->for_api_string();
- return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+ return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
}
@@ -6801,6 +6885,33 @@ Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
}
+Local<Symbol> v8::Symbol::GetIsConcatSpreadable(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ return Utils::ToLocal(i_isolate->factory()->is_concat_spreadable_symbol());
+}
+
+
+Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "Private::New()");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
+ if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
+ Local<Symbol> result = Utils::ToLocal(symbol);
+ return v8::Local<Private>(reinterpret_cast<Private*>(*result));
+}
+
+
+Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+ i::Handle<i::String> part = i_isolate->factory()->private_api_string();
+ Local<Symbol> result =
+ Utils::ToLocal(SymbolFor(i_isolate, i_name, part, true));
+ return v8::Local<Private>(reinterpret_cast<Private*>(*result));
+}
+
+
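Private keys are private symbols under the hood; ForApi goes through the same SymbolFor registry as Symbol::ForApi (now with private_symbol set), so repeated lookups under one name return the same key, while New mints a fresh one each call. Sketch:

    v8::Local<v8::String> n =
        v8::String::NewFromUtf8(isolate, "cache-key",
                                v8::NewStringType::kNormal).ToLocalChecked();
    v8::Local<v8::Private> a = v8::Private::ForApi(isolate, n);
    v8::Local<v8::Private> b = v8::Private::ForApi(isolate, n);  // same key as a
    v8::Local<v8::Private> c = v8::Private::New(isolate, n);     // distinct key
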
Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (std::isnan(value)) {
@@ -7093,6 +7204,12 @@ void Isolate::Dispose() {
}
+void Isolate::DiscardThreadSpecificMetadata() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->DiscardPerThreadDataForThisThread();
+}
+
+
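DiscardThreadSpecificMetadata releases only the calling thread's per-isolate bookkeeping, not the isolate itself; it is meant for worker threads that are permanently done with an isolate. Sketch:

    // On a worker thread that will never enter this isolate again:
    isolate->Exit();
    isolate->DiscardThreadSpecificMetadata();
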
void Isolate::Enter() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->Enter();
@@ -7105,6 +7222,13 @@ void Isolate::Exit() {
}
+void Isolate::SetAbortOnUncaughtExceptionCallback(
+ AbortOnUncaughtExceptionCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetAbortOnUncaughtExceptionCallback(callback);
+}
+
+
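The callback gives the embedder a per-isolate veto before V8 aborts on an uncaught exception. Sketch; the shutdown predicate is an assumed embedder-side helper:

    bool ShouldAbortOnUncaughtException(v8::Isolate* isolate) {
      return !EmbedderIsShuttingDown();  // assumed embedder helper
    }
    // ...
    isolate->SetAbortOnUncaughtExceptionCallback(ShouldAbortOnUncaughtException);
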
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@@ -7168,6 +7292,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
+ heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
@@ -7362,6 +7487,18 @@ int Isolate::ContextDisposedNotification(bool dependant_context) {
}
+void Isolate::IsolateInForegroundNotification() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->SetOptimizeForLatency();
+}
+
+
+void Isolate::IsolateInBackgroundNotification() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->SetOptimizeForMemoryUsage();
+}
+
+
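Both notifications just flip the heap between latency- and memory-optimized modes; an embedder would typically drive them from its visibility signal. Sketch with a hypothetical hook:

    void OnVisibilityChanged(v8::Isolate* isolate, bool visible) {
      if (visible) {
        isolate->IsolateInForegroundNotification();  // optimize for latency
      } else {
        isolate->IsolateInBackgroundNotification();  // optimize for memory usage
      }
    }
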
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7465,10 +7602,10 @@ class VisitorAdapter : public i::ObjectVisitor {
public:
explicit VisitorAdapter(PersistentHandleVisitor* visitor)
: visitor_(visitor) {}
- virtual void VisitPointers(i::Object** start, i::Object** end) {
+ void VisitPointers(i::Object** start, i::Object** end) override {
UNREACHABLE();
}
- virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ void VisitEmbedderReference(i::Object** p, uint16_t class_id) override {
Value* value = ToApi<Value>(i::Handle<i::Object>(p));
visitor_->VisitPersistentHandle(
reinterpret_cast<Persistent<Value>*>(&value), class_id);
@@ -7497,6 +7634,15 @@ void Isolate::VisitHandlesForPartialDependence(
}
+void Isolate::VisitWeakHandles(PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::DisallowHeapAllocation no_allocation;
+ VisitorAdapter visitor_adapter(visitor);
+ isolate->global_handles()->IterateWeakRootsInNewSpaceWithClassIds(
+ &visitor_adapter);
+}
+
+
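VisitWeakHandles reuses the VisitorAdapter above but walks only weak new-space roots that carry class ids. A counting visitor, as a sketch (the class id is illustrative):

    class WeakHandleCounter : public v8::PersistentHandleVisitor {
     public:
      void VisitPersistentHandle(v8::Persistent<v8::Value>* value,
                                 uint16_t class_id) override {
        if (class_id == kWrapperClassId) count_++;
      }
      static const uint16_t kWrapperClassId = 42;  // assumed embedder class id
      int count_ = 0;
    };

    WeakHandleCounter counter;
    isolate->VisitWeakHandles(&counter);
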
String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: str_(NULL), length_(0) {
if (obj.IsEmpty()) return;
@@ -7566,14 +7712,22 @@ DEFINE_ERROR(Error, error)
#undef DEFINE_ERROR
+Local<Message> Exception::CreateMessage(Isolate* isolate,
+ Local<Value> exception) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::HandleScope scope(i_isolate);
+ return Utils::MessageToLocal(
+ scope.CloseAndEscape(i_isolate->CreateMessage(obj, NULL)));
+}
+
+
Local<Message> Exception::CreateMessage(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsHeapObject()) return Local<Message>();
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- return Utils::MessageToLocal(
- scope.CloseAndEscape(isolate->CreateMessage(obj, NULL)));
+ return CreateMessage(reinterpret_cast<Isolate*>(isolate), exception);
}
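Since the isolate no longer has to be recovered from the exception object, the new overload also works when the exception is a primitive; the heap-object check survives only in the forwarding overload. Typical use inside a TryCatch, as a sketch:

    v8::TryCatch try_catch(isolate);
    v8::MaybeLocal<v8::Value> result = script->Run(context);
    if (try_catch.HasCaught()) {
      v8::Local<v8::Message> message =
          v8::Exception::CreateMessage(isolate, try_catch.Exception());
      // message->Get(), message->GetLineNumber(context), ... for diagnostics.
    }
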
@@ -7589,20 +7743,26 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
// --- D e b u g S u p p o r t ---
-bool Debug::SetDebugEventListener(EventCallback that, Local<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
+bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
+ Local<Value> data) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::HandleScope scope(i_isolate);
+ i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
if (that != NULL) {
- foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+ foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
- isolate->debug()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
+ i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
return true;
}
+bool Debug::SetDebugEventListener(EventCallback that, Local<Value> data) {
+ return SetDebugEventListener(
+ reinterpret_cast<Isolate*>(i::Isolate::Current()), that, data);
+}
+
+
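Each Debug entry point in this hunk gains an isolate-taking overload, with the old form forwarding through i::Isolate::Current(). Registration via the new form, as a sketch:

    void OnDebugEvent(const v8::Debug::EventDetails& details) {
      // Inspect details.GetEvent(), details.GetExecutionState(), ...
    }
    // ...
    v8::Debug::SetDebugEventListener(isolate, OnDebugEvent);
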
void Debug::DebugBreak(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
}
@@ -7620,10 +7780,16 @@ bool Debug::CheckDebugBreak(Isolate* isolate) {
}
+void Debug::SetMessageHandler(Isolate* isolate,
+ v8::Debug::MessageHandler handler) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i_isolate->debug()->SetMessageHandler(handler);
+}
+
+
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- isolate->debug()->SetMessageHandler(handler);
+ SetMessageHandler(reinterpret_cast<Isolate*>(i::Isolate::Current()), handler);
}
@@ -7672,12 +7838,13 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
auto fun_obj = i::Object::GetProperty(debug, name).ToHandleChecked();
- auto v8_fun = Utils::ToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
+ auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
const int kArgc = 1;
v8::Local<v8::Value> argv[kArgc] = {obj};
Local<Value> result;
- has_pending_exception = !v8_fun->Call(context, Utils::ToLocal(debug), kArgc,
- argv).ToLocal(&result);
+ has_pending_exception =
+ !v8_fun->Call(context, Utils::ToLocal(debug), kArgc, argv)
+ .ToLocal(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -7688,15 +7855,25 @@ Local<Value> Debug::GetMirror(v8::Local<v8::Value> obj) {
}
+void Debug::ProcessDebugMessages(Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
+}
+
+
void Debug::ProcessDebugMessages() {
- i::Isolate::Current()->debug()->ProcessDebugMessages(true);
+ ProcessDebugMessages(reinterpret_cast<Isolate*>(i::Isolate::Current()));
+}
+
+
+Local<Context> Debug::GetDebugContext(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
}
Local<Context> Debug::GetDebugContext() {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- return Utils::ToLocal(isolate->debug()->GetDebugContext());
+ return GetDebugContext(reinterpret_cast<Isolate*>(i::Isolate::Current()));
}
@@ -7719,8 +7896,8 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
Local<String> CpuProfileNode::GetFunctionName() const {
- i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ i::Isolate* isolate = node->isolate();
const i::CodeEntry* entry = node->entry();
i::Handle<i::String> name =
isolate->factory()->InternalizeUtf8String(entry->name());
@@ -7744,8 +7921,8 @@ int CpuProfileNode::GetScriptId() const {
Local<String> CpuProfileNode::GetScriptResourceName() const {
- i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ i::Isolate* isolate = node->isolate();
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
node->entry()->resource_name()));
}
@@ -7815,16 +7992,17 @@ const std::vector<CpuProfileDeoptInfo>& CpuProfileNode::GetDeoptInfos() const {
void CpuProfile::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::CpuProfile* profile = reinterpret_cast<i::CpuProfile*>(this);
+ i::Isolate* isolate = profile->top_down()->isolate();
i::CpuProfiler* profiler = isolate->cpu_profiler();
DCHECK(profiler != NULL);
- profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
+ profiler->DeleteProfile(profile);
}
Local<String> CpuProfile::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ i::Isolate* isolate = profile->top_down()->isolate();
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
profile->title()));
}
@@ -7867,7 +8045,7 @@ int CpuProfile::GetSamplesCount() const {
void CpuProfiler::SetSamplingInterval(int us) {
- DCHECK(us >= 0);
+ DCHECK_GE(us, 0);
return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
base::TimeDelta::FromMicroseconds(us));
}
@@ -7911,8 +8089,8 @@ HeapGraphEdge::Type HeapGraphEdge::GetType() const {
Local<Value> HeapGraphEdge::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
i::HeapGraphEdge* edge = ToInternal(this);
+ i::Isolate* isolate = edge->isolate();
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
case i::HeapGraphEdge::kInternal:
@@ -7955,7 +8133,7 @@ HeapGraphNode::Type HeapGraphNode::GetType() const {
Local<String> HeapGraphNode::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = ToInternal(this)->isolate();
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
@@ -7989,7 +8167,7 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
void HeapSnapshot::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = ToInternal(this)->profiler()->isolate();
if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
ToInternal(this)->Delete();
} else {
@@ -8185,11 +8363,10 @@ void Testing::PrepareStressRun(int run) {
}
-// TODO(svenpanne) Deprecate this.
-void Testing::DeoptimizeAll() {
- i::Isolate* isolate = i::Isolate::Current();
- i::HandleScope scope(isolate);
- internal::Deoptimizer::DeoptimizeAll(isolate);
+void Testing::DeoptimizeAll(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::HandleScope scope(i_isolate);
+ internal::Deoptimizer::DeoptimizeAll(i_isolate);
}
diff --git a/chromium/v8/src/api.h b/chromium/v8/src/api.h
index 12292795980..556765264ae 100644
--- a/chromium/v8/src/api.h
+++ b/chromium/v8/src/api.h
@@ -142,10 +142,9 @@ class RegisteredExtension {
V(ObjectTemplate, ObjectTemplateInfo) \
V(Signature, FunctionTemplateInfo) \
V(AccessorSignature, FunctionTemplateInfo) \
- V(TypeSwitch, TypeSwitchInfo) \
V(Data, Object) \
V(RegExp, JSRegExp) \
- V(Object, JSObject) \
+ V(Object, JSReceiver) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
@@ -168,12 +167,13 @@ class RegisteredExtension {
V(Symbol, Symbol) \
V(Script, JSFunction) \
V(UnboundScript, SharedFunctionInfo) \
- V(Function, JSFunction) \
+ V(Function, JSReceiver) \
V(Message, JSMessageObject) \
V(Context, Context) \
V(External, Object) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject) \
+ V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap)
class Utils {
@@ -192,8 +192,6 @@ class Utils {
v8::internal::Handle<v8::internal::Context> obj);
static inline Local<Value> ToLocal(
v8::internal::Handle<v8::internal::Object> obj);
- static inline Local<Function> ToLocal(
- v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<Name> ToLocal(
v8::internal::Handle<v8::internal::Name> obj);
static inline Local<String> ToLocal(
@@ -203,6 +201,8 @@ class Utils {
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
+ v8::internal::Handle<v8::internal::JSReceiver> obj);
+ static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
@@ -210,13 +210,14 @@ class Utils {
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
+ static inline Local<Proxy> ToLocal(
+ v8::internal::Handle<v8::internal::JSProxy> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
static inline Local<DataView> ToLocal(
v8::internal::Handle<v8::internal::JSDataView> obj);
-
static inline Local<TypedArray> ToLocal(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8Array> ToLocalUint8Array(
@@ -263,12 +264,12 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<AccessorSignature> AccessorSignatureToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<TypeSwitch> ToLocal(
- v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<NativeWeakMap> NativeWeakMapToLocal(
v8::internal::Handle<v8::internal::JSWeakMap> obj);
+ static inline Local<Function> CallableToLocal(
+ v8::internal::Handle<v8::internal::JSReceiver> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@@ -349,15 +350,16 @@ inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
-MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
+MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
+MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
@@ -370,7 +372,6 @@ MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
-MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
@@ -380,6 +381,7 @@ MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(NativeWeakMapToLocal, JSWeakMap, NativeWeakMap)
+MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
@@ -642,6 +644,7 @@ class Testing {
static v8::Testing::StressType stress_type_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_API_H_
diff --git a/chromium/v8/src/arguments.h b/chromium/v8/src/arguments.h
index ed995e7f582..d11a8cd61ee 100644
--- a/chromium/v8/src/arguments.h
+++ b/chromium/v8/src/arguments.h
@@ -29,10 +29,13 @@ namespace internal {
class Arguments BASE_EMBEDDED {
public:
Arguments(int length, Object** arguments)
- : length_(length), arguments_(arguments) { }
+ : length_(length), arguments_(arguments) {
+ DCHECK_GE(length_, 0);
+ }
Object*& operator[] (int index) {
- DCHECK(0 <= index && index < length_);
+ DCHECK_GE(index, 0);
+ DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
index * kPointerSize));
}
@@ -283,6 +286,7 @@ static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARGUMENTS_H_
diff --git a/chromium/v8/src/arm/assembler-arm-inl.h b/chromium/v8/src/arm/assembler-arm-inl.h
index 8f8956c9e1c..c9602ea028f 100644
--- a/chromium/v8/src/arm/assembler-arm-inl.h
+++ b/chromium/v8/src/arm/assembler-arm-inl.h
@@ -50,53 +50,11 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
-int Register::NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
-}
-
-
-int DwVfpRegister::NumRegisters() {
+int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
-int DwVfpRegister::NumReservedRegisters() {
- return kNumReservedRegisters;
-}
-
-
-int DwVfpRegister::NumAllocatableRegisters() {
- return NumRegisters() - kNumReservedRegisters;
-}
-
-
-// static
-int DwVfpRegister::NumAllocatableAliasedRegisters() {
- return LowDwVfpRegister::kMaxNumLowRegisters - kNumReservedRegisters;
-}
-
-
-int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
- DCHECK(!reg.is(kDoubleRegZero));
- DCHECK(!reg.is(kScratchDoubleReg));
- if (reg.code() > kDoubleRegZero.code()) {
- return reg.code() - kNumReservedRegisters;
- }
- return reg.code();
-}
-
-
-DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < NumAllocatableRegisters());
- DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code()) {
- return from_code(index + kNumReservedRegisters);
- }
- return from_code(index);
-}
-
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@@ -146,7 +104,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -173,7 +132,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -299,7 +258,7 @@ void RelocInfo::WipeOut() {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -514,9 +473,9 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target) {
+ Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_embedded_constant_pool) {
- set_target_address_at(constant_pool_entry, code, target);
+ set_target_address_at(isolate, constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
}
@@ -524,7 +483,7 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -614,15 +573,15 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
+ // Assembler::FlushICache(isolate, pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@@ -640,7 +599,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc, 2 * kInstrSize);
+ Assembler::FlushICache(isolate, pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@@ -660,12 +619,13 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc, 4 * kInstrSize);
+ Assembler::FlushICache(isolate, pc, 4 * kInstrSize);
}
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
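
The assembler-arm-inl.h hunks repeat one change: an explicit Isolate* is threaded from callers through set_target_address_at down to the icache flush, replacing the FlushICacheWithoutIsolate variant. A sketch of the shape of that refactor, with hypothetical stand-ins for the V8 types:

#include <cstddef>
#include <cstdio>

struct Isolate { const char* name; };  // hypothetical stand-in

// Every patch site now states which isolate it operates on, instead of the
// flush having to work without one.
static void FlushICache(Isolate* isolate, void* start, size_t size) {
  // A real implementation would invalidate [start, start + size); this
  // stand-in only logs the call.
  std::printf("flush %zu bytes for isolate %s\n", size, isolate->name);
}

static void set_target_address_at(Isolate* isolate, unsigned char* pc) {
  // ... patch two 4-byte instructions at pc (elided) ...
  FlushICache(isolate, pc, 2 * 4);  // flush exactly what was touched
}

int main() {
  Isolate isolate{"main"};
  unsigned char code[8] = {};
  set_target_address_at(&isolate, code);
}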
diff --git a/chromium/v8/src/arm/assembler-arm.cc b/chromium/v8/src/arm/assembler-arm.cc
index 50c707d2a07..d2e3231bb8a 100644
--- a/chromium/v8/src/arm/assembler-arm.cc
+++ b/chromium/v8/src/arm/assembler-arm.cc
@@ -52,6 +52,14 @@ namespace internal {
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
+#ifdef CAN_USE_ARMV8_INSTRUCTIONS
+ if (FLAG_enable_armv8) {
+ answer |= 1u << ARMv8;
+ // ARMv8 always features VFP and NEON.
+ answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
+ answer |= 1u << SUDIV | 1u << MLS;
+ }
+#endif // CAN_USE_ARMV8_INSTRUCTIONS
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif // CAN_USE_ARMV7_INSTRUCTIONS
@@ -81,6 +89,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __arm__
// For the simulator build, use whatever the flags specify.
+ if (FLAG_enable_armv8) {
+ supported_ |= 1u << ARMv8;
+ // ARMv8 always features VFP and NEON.
+ supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
+ supported_ |= 1u << SUDIV | 1u << MLS;
+ if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
@@ -154,7 +169,9 @@ void CpuFeatures::PrintTarget() {
arm_no_probe = " noprobe";
#endif
-#if defined CAN_USE_ARMV7_INSTRUCTIONS
+#if defined CAN_USE_ARMV8_INSTRUCTIONS
+ arm_arch = "arm v8";
+#elif defined CAN_USE_ARMV7_INSTRUCTIONS
arm_arch = "arm v7";
#else
arm_arch = "arm v6";
@@ -192,13 +209,15 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
- "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
- "MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
+      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d "
+ "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
+ CpuFeatures::IsSupported(ARMv8),
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON),
CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(MLS),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
CpuFeatures::IsSupported(COHERENT_CACHE));
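
The new ARMv8 paths follow the file's existing convention for CPU features: each feature is a bit position, support is recorded with answer |= 1u << Feature, and a query is a mask test. The scheme in isolation:

#include <cassert>

enum CpuFeature { ARMv7, ARMv8, VFP3, NEON, VFP32DREGS, SUDIV, MLS };

static unsigned supported_ = 0;

static bool IsSupported(CpuFeature f) { return (supported_ & (1u << f)) != 0; }

int main() {
  // ARMv8 implies the older features, as in the hunks above.
  supported_ |= 1u << ARMv8;
  supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
  supported_ |= 1u << SUDIV | 1u << MLS;
  assert(IsSupported(ARMv8));
  assert(IsSupported(SUDIV));
}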
@@ -214,18 +233,6 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
-// Implementation of DwVfpRegister
-
-const char* DwVfpRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < NumAllocatableRegisters());
- DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
- return VFPRegisters::Name(index, true);
-}
-
-
-// -----------------------------------------------------------------------------
// Implementation of RelocInfo
// static
@@ -398,26 +405,26 @@ NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
+ al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
+ al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
// ldr rd, [pp, #offset]
const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+const Instr kLdrPpImmedPattern = 5 * B24 | L | Register::kCode_r8 * B16;
// ldr rd, [pp, rn]
const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | Register::kCode_r8 * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | Register::kCode_r8 * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -444,13 +451,13 @@ const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | kRegister_fp_Code * B16;
+ al | B26 | L | Offset | Register::kCode_fp * B16;
const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | kRegister_fp_Code * B16;
+ al | B26 | Offset | Register::kCode_fp * B16;
const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | kRegister_fp_Code * B16;
+ al | B26 | L | NegOffset | Register::kCode_fp * B16;
const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | kRegister_fp_Code * B16;
+ al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
@@ -626,21 +633,21 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
Register Assembler::GetRd(Instr instr) {
Register reg;
- reg.code_ = Instruction::RdValue(instr);
+ reg.reg_code = Instruction::RdValue(instr);
return reg;
}
Register Assembler::GetRn(Instr instr) {
Register reg;
- reg.code_ = Instruction::RnValue(instr);
+ reg.reg_code = Instruction::RnValue(instr);
return reg;
}
Register Assembler::GetRm(Instr instr) {
Register reg;
- reg.code_ = Instruction::RmValue(instr);
+ reg.reg_code = Instruction::RmValue(instr);
return reg;
}
@@ -836,8 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 1,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target24));
} else {
@@ -846,14 +852,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 1,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 1, CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
} else {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 2,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 2, CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
patcher.masm()->movt(dst, target16_1);
}
@@ -863,15 +867,13 @@ void Assembler::target_at_put(int pos, int target_pos) {
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 2,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 2, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
} else {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 3,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 3, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
@@ -3355,6 +3357,20 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
+void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
+ 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3369,6 +3385,20 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
+ vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3383,6 +3413,20 @@ void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
+ vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3397,6 +3441,20 @@ void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
+ vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3411,6 +3469,20 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
+ const Condition cond) {
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
+ // Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
+ 0x5 * B9 | B7 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
@@ -3587,6 +3659,7 @@ void Assembler::GrowBuffer() {
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc.origin = this;
// Copy the data.
int pc_delta = desc.buffer - buffer_;
@@ -3662,7 +3735,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
data = RecordedAstId().ToInt();
ClearRecordedAstId();
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
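
Each new single-precision vrint* overload builds its 32-bit instruction word directly from the fields listed in its comment: Bn is the constant 1 << n, multiplying a field by Bn shifts it into position, and the pieces are OR-ed together. A self-contained sketch that reproduces the vrinta.f32 field layout from the comment (constant values are assumed from the bit descriptions; the composition technique is the point):

#include <cstdint>
#include <cstdio>

constexpr uint32_t B(int n) { return 1u << n; }
constexpr uint32_t kSpecialCondition = 15u * B(28);  // cond = 1111

// Split an S-register code into (Vd, D) halves, as SwVfpRegister::split_code
// does above.
static void split_code(int code, int* v, int* top) {
  *top = code & 0x1;
  *v = code >> 1;
}

// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 10(19-18) | RM=00(17-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) | M(5) | 0(4) | Vm(3-0)
static uint32_t encode_vrinta_f32(int dst_code, int src_code) {
  int vd, d, vm, m;
  split_code(dst_code, &vd, &d);
  split_code(src_code, &vm, &m);
  return kSpecialCondition | 0x1D * B(23) | d * B(22) | 0x3 * B(20) | B(19) |
         vd * B(12) | 0x5 * B(9) | B(6) | m * B(5) | vm;
}

int main() {
  std::printf("vrinta.f32 s0, s1 -> 0x%08x\n",
              static_cast<unsigned>(encode_vrinta_f32(0, 1)));
}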
diff --git a/chromium/v8/src/arm/assembler-arm.h b/chromium/v8/src/arm/assembler-arm.h
index 1d1cc485d50..1abf1ab6a69 100644
--- a/chromium/v8/src/arm/assembler-arm.h
+++ b/chromium/v8/src/arm/assembler-arm.h
@@ -45,11 +45,35 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
-#include "src/compiler.h"
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
+
+#define DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
+// clang-format on
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -71,190 +95,123 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
-// Core register
struct Register {
- static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters =
- FLAG_enable_embedded_constant_pool ? 8 : 9;
- static const int kSizeInBytes = 4;
-
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- DCHECK(reg.code() < kMaxNumAllocatableRegisters);
- return reg.code();
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index);
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- "r8",
- };
- if (FLAG_enable_embedded_constant_pool && (index >= 7)) {
- return names[index + 1];
- }
- return names[index];
- }
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
- Register r = { code };
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
return r;
}
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
-
void set_code(int code) {
- code_ = code;
+ reg_code = code;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-const Register no_reg = { kRegister_no_reg_Code };
-
-const Register r0 = { kRegister_r0_Code };
-const Register r1 = { kRegister_r1_Code };
-const Register r2 = { kRegister_r2_Code };
-const Register r3 = { kRegister_r3_Code };
-const Register r4 = { kRegister_r4_Code };
-const Register r5 = { kRegister_r5_Code };
-const Register r6 = { kRegister_r6_Code };
-// Used as context register.
-const Register r7 = {kRegister_r7_Code};
-// Used as constant pool pointer register if FLAG_enable_embedded_constant_pool.
-const Register r8 = { kRegister_r8_Code };
-// Used as lithium codegen scratch register.
-const Register r9 = { kRegister_r9_Code };
-// Used as roots register.
-const Register r10 = { kRegister_r10_Code };
-const Register fp = { kRegister_fp_Code };
-const Register ip = { kRegister_ip_Code };
-const Register sp = { kRegister_sp_Code };
-const Register lr = { kRegister_lr_Code };
-const Register pc = { kRegister_pc_Code };
+// r7: context register
+// r8: constant pool pointer register if FLAG_enable_embedded_constant_pool.
+// r9: lithium scratch
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
// Single word VFP register.
struct SwVfpRegister {
static const int kSizeInBytes = 4;
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
+ bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
- *m = code_ & 0x1;
- *vm = code_ >> 1;
+ *m = reg_code & 0x1;
+ *vm = reg_code >> 1;
}
- int code_;
+ int reg_code;
};
// Double word VFP register.
-struct DwVfpRegister {
- static const int kMaxNumRegisters = 32;
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kMaxNumRegisters = Code::kAfterLast;
+
+ inline static int NumRegisters();
+
// A few double registers are reserved: one as a scratch register and one to
  // hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
- kNumReservedRegisters;
static const int kSizeInBytes = 8;
- // Note: the number of registers can be different at snapshot and run-time.
- // Any code included in the snapshot must be able to run both with 16 or 32
- // registers.
- inline static int NumRegisters();
- inline static int NumReservedRegisters();
- inline static int NumAllocatableRegisters();
-
- // TODO(turbofan): This is a temporary work-around required because our
- // register allocator does not yet support the aliasing of single/double
- // registers on ARM.
- inline static int NumAllocatableAliasedRegisters();
-
- inline static int ToAllocationIndex(DwVfpRegister reg);
- static const char* AllocationIndexToString(int index);
- inline static DwVfpRegister FromAllocationIndex(int index);
-
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
- return r;
- }
-
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+ *m = (reg_code & 0x10) >> 4;
+ *vm = reg_code & 0x0F;
}
- int code_;
+ int reg_code;
};
-typedef DwVfpRegister DoubleRegister;
+typedef DoubleRegister DwVfpRegister;
// Double word VFP register d0-15.
@@ -262,7 +219,7 @@ struct LowDwVfpRegister {
public:
static const int kMaxNumLowRegisters = 16;
operator DwVfpRegister() const {
- DwVfpRegister r = { code_ };
+ DwVfpRegister r = { reg_code };
return r;
}
static LowDwVfpRegister from_code(int code) {
@@ -271,30 +228,30 @@ struct LowDwVfpRegister {
}
bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumLowRegisters;
+ return 0 <= reg_code && reg_code < kMaxNumLowRegisters;
}
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(LowDwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
SwVfpRegister low() const {
SwVfpRegister reg;
- reg.code_ = code_ * 2;
+ reg.reg_code = reg_code * 2;
DCHECK(reg.is_valid());
return reg;
}
SwVfpRegister high() const {
SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
+ reg.reg_code = (reg_code * 2) + 1;
DCHECK(reg.is_valid());
return reg;
}
- int code_;
+ int reg_code;
};
@@ -308,21 +265,21 @@ struct QwNeonRegister {
}
bool is_valid() const {
- return (0 <= code_) && (code_ < kMaxNumRegisters);
+ return (0 <= reg_code) && (reg_code < kMaxNumRegisters);
}
- bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
+ bool is(QwNeonRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
- int encoded_code = code_ << 1;
+ int encoded_code = reg_code << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
- int code_;
+ int reg_code;
};
@@ -427,19 +384,19 @@ const QwNeonRegister q15 = { 15 };
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+ bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
@@ -714,19 +671,18 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- INLINE(static void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -740,11 +696,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target);
+ Isolate* isolate, Address constant_pool_entry, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
@@ -1254,10 +1211,16 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
// ARMv8 rounding instructions.
+ void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
+ const Condition cond = al);
void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
@@ -1351,7 +1314,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1667,6 +1630,7 @@ class EnsureSpace BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_ARM_H_
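
The rewritten header derives everything per-register from one X-macro list: GENERAL_REGISTERS(V) invokes a caller-supplied V once per register, so the enum of codes, the defined register constants, and any name table all expand from the same source and cannot drift apart. The technique in miniature:

#include <cstdio>

#define GENERAL_REGISTERS(V) V(r0) V(r1) V(fp) V(sp)

// Consumer 1: an enum of register codes.
enum Code {
#define REGISTER_CODE(R) kCode_##R,
  GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
  kAfterLast
};

// Consumer 2: a parallel name table, generated from the same list.
static const char* const kNames[] = {
#define REGISTER_NAME(R) #R,
    GENERAL_REGISTERS(REGISTER_NAME)
#undef REGISTER_NAME
};

int main() {
  for (int i = 0; i < kAfterLast; ++i)
    std::printf("code %d -> %s\n", i, kNames[i]);
}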
diff --git a/chromium/v8/src/arm/builtins-arm.cc b/chromium/v8/src/arm/builtins-arm.cc
index ea2c92e6407..0c83f918caa 100644
--- a/chromium/v8/src/arm/builtins-arm.cc
+++ b/chromium/v8/src/arm/builtins-arm.cc
@@ -22,11 +22,11 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r0 : number of arguments excluding receiver
- // -- r1 : called function (only guaranteed when
- // extra_args requires it)
+ // -- r1 : target
+ // -- r3 : new.target
// -- sp[0] : last argument
// -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(r1);
@@ -35,21 +35,31 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(r1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(r3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(r1, r3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects r0 to contain the number of arguments
// including the receiver and the extra arguments.
__ add(r0, r0, Operand(num_extra_args + 1));
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
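
Generate_Adaptor now chooses the implicit arguments via an enum-class switch rather than a flag, so a builtin can receive the target, the new.target, both, or neither. The count logic, reduced to plain C++ (enumerator names are taken from the hunk; the enum's declaration order is an assumption):

#include <cassert>

enum class BuiltinExtraArguments {
  kNone,
  kTarget,
  kNewTarget,
  kTargetAndNewTarget
};

static int NumExtraArgs(BuiltinExtraArguments extra_args) {
  int num_extra_args = 0;
  switch (extra_args) {
    case BuiltinExtraArguments::kTarget:  // push r1
      ++num_extra_args;
      break;
    case BuiltinExtraArguments::kNewTarget:  // push r3
      ++num_extra_args;
      break;
    case BuiltinExtraArguments::kTargetAndNewTarget:  // push r1, r3
      num_extra_args += 2;
      break;
    case BuiltinExtraArguments::kNone:
      break;
  }
  return num_extra_args;
}

int main() {
  assert(NumExtraArgs(BuiltinExtraArguments::kTargetAndNewTarget) == 2);
  assert(NumExtraArgs(BuiltinExtraArguments::kNone) == 0);
}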
@@ -57,32 +67,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ // Load the InternalArray function from the current native context.
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ // Load the Array function from the current native context.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -144,6 +137,106 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into r0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lo, &no_arguments);
+ __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ Drop(2);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ Move(r0, Smi::FromInt(0));
+ __ Ret(1);
+}
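
The state comment above pins down the argument addressing this builtin relies on: arg[n] lives at sp[(argc - n - 1) * 4] and the receiver at sp[argc * 4], so after decrementing r0 the PreIndex load picks up arg[0]. The same arithmetic as a word-indexed C++ model:

#include <cassert>

// arg[n] at sp[argc - n - 1], receiver at sp[argc] (word-indexed here
// instead of byte offsets).
static int first_argument(const int* sp, int argc, int if_no_args) {
  if (argc == 0) return if_no_args;  // the "no arguments, return +0" path
  return sp[argc - 1];               // the slot the PreIndex ldr computes
}

int main() {
  int stack[] = {20 /* arg[1] */, 10 /* arg[0] */, 99 /* receiver */};
  assert(first_argument(stack, 2, 0) == 10);
  assert(first_argument(nullptr, 0, 0) == 0);
}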
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- r3 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r2 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lo, &no_arguments);
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ Move(r2, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r2 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(r2, &done_convert);
+ __ CompareObjectType(r2, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &done_convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3);
+ __ Move(r0, r2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(r2, r0);
+ __ Pop(r1, r3);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r1, r3);
+ __ b(ne, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r2, r1, r3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r2);
+ }
+ __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -193,7 +286,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(r0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
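
TailCallRuntime and CallRuntime drop their explicit argument-count operands throughout this patch; presumably the count is now taken from a central per-function declaration instead of being repeated at every call site. A hypothetical sketch of such a table (names and shape are illustrative, not V8's actual definitions):

#include <cassert>

enum FunctionId { kSymbolDescriptiveString, kNewObject, kNumFunctions };

struct RuntimeFunction {
  FunctionId id;
  int nargs;  // declared once, next to the function itself
};

static const RuntimeFunction kRuntimeTable[kNumFunctions] = {
    {kSymbolDescriptiveString, 1},
    {kNewObject, 2},
};

static int ArgCountFor(FunctionId id) { return kRuntimeTable[id].nargs; }

int main() {
  // A call site like TailCallRuntime(kSymbolDescriptiveString) can recover
  // the count that used to be written inline.
  assert(ArgCountFor(kSymbolDescriptiveString) == 1);
  assert(ArgCountFor(kNewObject) == 2);
}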
@@ -203,90 +296,88 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r3 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r0 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r2 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
__ sub(r0, r0, Operand(1), SetCC);
__ b(lo, &no_arguments);
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
__ Drop(2);
__ b(&done);
__ bind(&no_arguments);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
+ __ LoadRoot(r2, Heap::kempty_stringRootIndex);
__ Drop(1);
__ bind(&done);
}
- // 2. Make sure r0 is a string.
+ // 3. Make sure r2 is a string.
{
Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ JumpIfSmi(r2, &convert);
+ __ CompareObjectType(r2, r4, r4, FIRST_NONSTRING_TYPE);
__ b(lo, &done_convert);
__ bind(&convert);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(r1);
+ __ Push(r1, r3);
+ __ Move(r0, r2);
__ CallStub(&stub);
- __ Pop(r1);
+ __ Move(r2, r0);
+ __ Pop(r1, r3);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- r0 : the first argument
- // -- r1 : constructor function
- // -- lr : return address
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r1, r3);
+ __ b(ne, &new_object);
- Label allocate, done_allocate;
- __ Move(r2, r0);
- __ Allocate(JSValue::kSize, r0, r3, r4, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in r0.
- __ LoadGlobalFunctionInitialMap(r1, r3, r4);
- __ str(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Move(r3, Smi::FromInt(JSValue::kSize));
- __ Push(r1, r2, r3);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(r1, r2);
- }
- __ b(&done_allocate);
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r2, r1, r3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r2);
}
+ __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ Ret();
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r1 : target function (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
__ push(r1);
+ __ push(r3);
// Push function as parameter to the runtime call.
__ Push(r1);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ pop(r3);
__ pop(r1);
}
@@ -325,12 +416,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r2 : allocation site or undefined
- // -- r3 : original constructor
+ // -- r3 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -346,172 +438,168 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r2);
__ SmiTag(r0);
__ push(r0);
- __ push(r1);
- __ push(r3);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r1, r3);
- __ b(ne, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r5, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ ldr(r4, bit_field3);
- __ DecodeField<Map::Counter>(r3, r4);
- __ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
- __ b(lt, &allocate);
- // Decrease generous allocation count.
- __ sub(r4, r4, Operand(1 << Map::Counter::kShift));
- __ str(r4, bit_field3);
- __ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
- __ b(ne, &allocate);
-
- __ push(r1);
-
- __ Push(r2, r1); // r1 = constructor
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- Label rt_call_reload_new_target;
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-
- __ Allocate(r3, r4, r5, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
- __ DecodeField<Map::Counter>(ip);
- __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
- __ b(lt, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kInObjectPropertiesOrConstructorFunctionIndexByte *
- kBitsPerByte,
- kBitsPerByte);
- __ ldr(r2, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
- __ Ubfx(r2, r2, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r0, r0, Operand(r2));
- __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
- // r0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ cmp(r0, ip);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CompareObjectType(r3, r5, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // r3: new target
+ __ ldr(r2,
+ FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &rt_call);
+ __ CompareObjectType(r2, r5, r4, MAP_TYPE);
+ __ b(ne, &rt_call);
+
+      // Fall back to the runtime if the expected base constructor and the
+      // actual base constructor differ.
+ __ ldr(r5, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r1, r5);
+ __ b(ne, &rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r1: constructor function
+ // r2: initial map
+ // r3: new target
+ __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
+ __ b(eq, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // r1: constructor function
+ // r2: initial map
+ // r3: new target
+ __ ldrb(r9, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+
+ __ Allocate(r9, r4, r9, r6, &rt_call, SIZE_IN_WORDS);
+
+      // The JSObject is allocated; now initialize the fields. The map is set
+      // to the initial map, and properties and elements are set to the empty
+      // fixed array.
+ // r1: constructor function
+ // r2: initial map
+ // r3: new target
+ // r4: JSObject (not HeapObject tagged - the actual address).
+ // r9: start of next object
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(r5, r4);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ add(r4, r4, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r4: JSObject (tagged)
+ // r5: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
+ __ ldr(r0, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(ip, r0);
+ // ip: slack tracking counter
+ __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
+ __ b(lt, &no_inobject_slack_tracking);
+ __ push(ip); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ sub(r0, r0, Operand(1 << Map::ConstructionCounter::kShift));
+ __ str(r0, bit_field3);
+
+ // Allocate object with a slack.
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
+ __ Ubfx(r0, r0, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sub(r0, r9, Operand(r0, LSL, kPointerSizeLog2));
+ // r0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r5, r0);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+          // To allow truncation, fill the remaining fields with the
+          // one-pointer filler map.
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r5, r9, r6);
+
+ __ pop(r0); // Restore allocation count value before decreasing.
+ __ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
+ __ b(ne, &allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(r1, r3, r4, r2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r1, r3, r4);
+
+ // Continue with JSObject being successfully allocated
+ // r1: constructor function
+ // r3: new target
+ // r4: JSObject
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(r5, r0, r6);
- // To allow for truncation.
- __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
-
- __ bind(&no_inobject_slack_tracking);
- }
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
+ __ InitializeFieldsWithFiller(r5, r9, r6);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ add(r4, r4, Operand(kHeapObjectTag));
+ // Continue with JSObject being successfully allocated
+ // r1: constructor function
+ // r3: new target
+ // r4: JSObject
+ __ jmp(&allocated);
+ }
- // Continue with JSObject being successfully allocated
+ // Allocate the new receiver object using the runtime call.
+ // r1: constructor function
+ // r3: new target
+ __ bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(r1, r3);
+ __ Push(r1, r3); // constructor function, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(r4, r0);
+ __ Pop(r1, r3);
+
+ // Receiver for constructor call allocated.
+ // r1: constructor function
+ // r3: new target
// r4: JSObject
- __ jmp(&allocated);
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ // Retrieve smi-tagged arguments count from the stack.
+ __ ldr(r0, MemOperand(sp));
}
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- // r3: original constructor
- __ bind(&rt_call);
-
- __ push(r1); // argument 2/1: constructor function
- __ push(r3); // argument 3/2: original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ pop(r3);
- __ pop(r1);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ ldr(r0, MemOperand(sp));
__ SmiUntag(r0);
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ push(r3);
- __ push(r4);
- __ push(r4);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(r4);
+ __ push(r4);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -520,24 +608,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r0: number of arguments
// r1: constructor function
// r2: address of last argument (caller sp)
- // r3: number of arguments (smi-tagged)
+ // r3: new target
+ // r4: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(r3, r0);
+ __ SmiTag(r4, r0);
__ b(&entry);
__ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
__ push(ip);
__ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
+ __ sub(r4, r4, Operand(2), SetCC);
__ b(ge, &loop);
// Call the function.
// r0: number of arguments
// r1: constructor function
+ // r3: new target
if (is_api_function) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code =
@@ -545,156 +634,85 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r1, r3, FIRST_JS_RECEIVER_TYPE);
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ ldr(r1, MemOperand(sp));
+ }
// Leave construct frame.
}
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
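// r1 holds the smi-tagged argument count here, so the shift by
// (kPointerSizeLog2 - 1) converts it directly into the byte size of the
// arguments; the extra kPointerSize drops the receiver slot.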
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
+ }
__ Jump(lr);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r2 : allocation site or undefined
- // -- r3 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(r2, r4);
- __ push(r2);
-
- __ mov(r4, r0);
- __ SmiTag(r4);
- __ push(r4); // Smi-tagged arguments count.
-
- // Push new.target.
- __ push(r3);
-
- // receiver is the hole.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ push(ip);
-
- // Set up pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r1: constructor function
- // r2: address of last argument (caller sp)
- // r4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r4, r4, Operand(2), SetCC);
- __ b(ge, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(eq, &skip_step_in);
-
- __ Push(r0);
- __ Push(r1);
- __ Push(r1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(r1);
- __ Pop(r0);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Get arguments count, skipping over new.target.
- __ ldr(r1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
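+
+// The helper is thus instantiated as (is_api_function,
+// create_implicit_receiver): Generic is (false, true), Api is (true, true),
+// and Builtins is (false, false); builtin constructors run without an
+// implicit receiver being allocated.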
- // Leave construct frame.
- }
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -722,7 +740,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -829,6 +847,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o r1: the JS function object being called.
+// o r3: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
@@ -846,6 +865,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ push(r3);
+
+ // Push zero for bytecode array offset.
+ __ mov(r0, Operand(0));
+ __ push(r0);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeArrayRegister.
@@ -874,7 +898,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -896,21 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -918,15 +928,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ sub(kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -966,37 +978,164 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+ Register limit, Register scratch) {
+ Label loop_header, loop_check;
+ __ b(al, &loop_check);
+ __ bind(&loop_header);
+ __ ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
+ __ push(scratch);
+ __ bind(&loop_check);
+ __ cmp(index, limit);
+ __ b(gt, &loop_header);
}
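// In C terms the loop above is (illustrative sketch only):
//   while (index > limit) { push(*index); index -= kPointerSize; }
// The PostIndex load reads the slot and then steps index down one pointer.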
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push function as parameter to the runtime call.
- __ Push(r1);
- // Whether to compile in a background thread.
- __ LoadRoot(
- ip, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(r3, r0, Operand(1)); // Add one for receiver.
+ __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
+ __ sub(r3, r2, r3);
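+ // That is, r3 = r2 - (r0 + 1) * kPointerSize; the push loop below walks
+ // down from r2 to just above r3, pushing the r0 + 1 values (receiver
+ // included) onto the stack.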
+
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r2, r3, r4);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (not including receiver)
+ // -- r3 : new target
+ // -- r1 : constructor to call
+ // -- r2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ mov(r4, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r4, r2, r4);
+
+ // Push a slot for the receiver to be constructed.
+ __ mov(ip, Operand::Zero());
__ push(ip);
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(r1);
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r2, r4, r5);
+
+ // Call the constructor with r0, r1, and r3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ ldr(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ ldr(r1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ ldr(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ kPointerSizeLog2));
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(pc, ip);
+}
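+
+// The dispatch tail above computes, in effect (illustrative sketch):
+//   bytecode = bytecode_array[offset];
+//   pc = dispatch_table[bytecode] + Code::kHeaderSize - kHeapObjectTag;
+// resuming at the first real instruction of the handler, past the Code
+// object header.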
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1012,13 +1151,14 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
+ // r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
@@ -1045,13 +1185,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
+ // r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
__ PushFixedFrame(r1);
@@ -1083,7 +1224,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -1109,7 +1250,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> r6.
@@ -1149,6 +1290,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Register scratch2,
+ Label* receiver_check_failed) {
+ Register signature = scratch0;
+ Register map = scratch1;
+ Register constructor = scratch2;
+
+ // If there is no signature, return the holder.
+ __ ldr(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ b(eq, &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, ip, ip);
+ __ cmp(ip, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ b(ne, &next_prototype);
+ Register type = constructor;
+ __ ldr(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(signature, type);
+ __ b(eq, &receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, ip, ip, FUNCTION_TEMPLATE_INFO_TYPE);
+
+ // Otherwise load the parent function template and iterate.
+ __ ldr(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset),
+ eq);
+ __ b(&function_template_loop, eq);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ b(eq, receiver_check_failed);
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(ip, FieldMemOperand(map, Map::kBitField3Offset));
+ __ tst(ip, Operand(Map::IsHiddenPrototype::kMask));
+ __ b(eq, receiver_check_failed);
+ // Iterate.
+ __ b(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
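+
+// Roughly, as pseudo-code (illustrative only; smi and type checks elided):
+//   for (;;) {
+//     if (constructor-of(map) is a JSFunction)
+//       for (type = constructor->shared->function_data;
+//            type is a FunctionTemplateInfo; type = type->parent_template)
+//         if (type == signature) return;              // check passed
+//     receiver = map->prototype;
+//     if (receiver == null) goto receiver_check_failed;
+//     map = receiver->map;
+//     if (!map->is_hidden_prototype()) goto receiver_check_failed;
+//   }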
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments excluding receiver
+ // -- r1 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ CompatibleReceiverCheck(masm, r2, r3, r4, r5, r6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ ldr(r4, FieldMemOperand(r3, FunctionTemplateInfo::kCallCodeOffset));
+ __ ldr(r4, FieldMemOperand(r4, CallHandlerInfo::kFastHandlerOffset));
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r4);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver)
+ __ add(r0, r0, Operand(1));
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1156,7 +1400,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1200,7 +1444,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ b(hs, &ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1211,7 +1455,120 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into r0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(r0);
+ __ JumpIfSmi(r0, &receiver_not_date);
+ __ CompareObjectType(r0, r1, r2, JS_DATE_TYPE);
+ __ b(ne, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ ldr(r0, FieldMemOperand(r0, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(r1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ ldr(r1, MemOperand(r1));
+ __ ldr(ip, FieldMemOperand(r0, JSDate::kCacheStampOffset));
+ __ cmp(r1, ip);
+ __ b(ne, &stamp_mismatch);
+ __ ldr(r0, FieldMemOperand(
+ r0, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, r1);
+ __ mov(r1, Operand(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
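+
+// Sketch of the fast path above: cached date fields stay valid only while
+// the JSDate's cache stamp equals the isolate-wide date_cache_stamp, in
+// which case the field is read straight out of the object at
+// kValueOffset + field_index * kPointerSize; on a stamp mismatch, or for
+// uncached fields, the C function get_date_field_function recomputes it.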
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into r1, argArray into r0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ mov(r3, r2);
+ __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
+ __ sub(r4, r0, Operand(1), SetCC);
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r2, MemOperand(sp, 0));
+ __ mov(r0, r3);
+ }
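+
+ // The SetCC/ge sequence above relies on ARM conditional execution: each
+ // sub updates the flags, and the following ldr only executes while the
+ // remaining index is non-negative, so absent arguments keep the undefined
+ // preloaded into r2 and r3. In effect (sketch):
+ //   thisArg  = (argc >= 1) ? sp[(argc - 1) * kPointerSize] : undefined;
+ //   argArray = (argc >= 2) ? sp[(argc - 2) * kPointerSize] : undefined;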
+
+ // ----------- S t a t e -------------
+ // -- r0 : argArray
+ // -- r1 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(r1, &receiver_not_callable);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ b(eq, &receiver_not_callable);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(r0, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(r0, Operand(0));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ str(r1, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{
@@ -1254,185 +1611,128 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ ldr(key, MemOperand(fp, indexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array.
- __ bind(&loop);
- __ ldr(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
- __ ldr(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ push(r0);
-
- __ ldr(key, MemOperand(fp, indexOffset));
- __ add(key, key, Operand(1 << kSmiTagSize));
- __ str(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, limitOffset));
- __ cmp(key, r1);
- __ b(ne, &loop);
-
- // On exit, the pushed arguments count is in r0, untagged
- __ mov(r0, key);
- __ SmiUntag(r0);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(r1);
-
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ ldr(r1, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ Push(r0, r1);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- Generate_CheckStackOverflow(masm, r0, kArgcIsSmiTagged);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, r1);
+ __ mov(r3, r1);
+ __ sub(r4, r0, Operand(1), SetCC);
+ __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r2, MemOperand(sp, 0));
+ __ mov(r0, r3);
+ }
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ mov(r1, Operand::Zero());
- __ ldr(r2, MemOperand(fp, kReceiverOffset));
- __ Push(r0, r1, r2); // limit, initial index and receiver.
+ // ----------- S t a t e -------------
+ // -- r0 : argumentsList
+ // -- r1 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(r1, &target_not_callable);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ b(eq, &target_not_callable);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ str(r1, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ add(sp, sp, Operand(kStackSize * kPointerSize));
- __ Jump(lr);
}
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // new.target into r3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(r1);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ ldr(r0, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &validate_arguments);
- __ ldr(r0, MemOperand(fp, kFunctionOffset));
- __ str(r0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(r0);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, r0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(r0); // limit
- __ mov(r1, Operand::Zero()); // initial index
- __ push(r1);
- // Push the constructor function as callee.
- __ ldr(r0, MemOperand(fp, kFunctionOffset));
- __ push(r0);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ ldr(r4, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, r1);
+ __ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
+ __ sub(r4, r0, Operand(1), SetCC);
+ __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
+ __ mov(r3, r1); // new.target defaults to target
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // new.target
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ mov(r0, r2);
}
- __ add(sp, sp, Operand(kStackSize * kPointerSize));
- __ Jump(lr);
-}
+ // ----------- S t a t e -------------
+ // -- r0 : argumentsList
+ // -- r3 : new.target
+ // -- r1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(r1, &target_not_constructor);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &target_not_constructor);
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(r3, &new_target_not_constructor);
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &new_target_not_constructor);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ str(r1, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ str(r3, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1442,6 +1742,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
+ // -- r3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1483,70 +1784,206 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argumentsList
+ // -- r1 : target
+ // -- r3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(r0, &create_runtime);
+
+ // Load the map of argumentsList into r2.
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+ // Load native context into r4.
+ __ ldr(r4, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ ldr(ip, ContextMemOperand(r4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r2);
+ __ b(eq, &create_arguments);
+ __ ldr(ip, ContextMemOperand(r4, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r2);
+ __ b(eq, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(r2, ip, JS_ARRAY_TYPE);
+ __ b(eq, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3, r0);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(r1, r3);
+ __ ldr(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ SmiUntag(r2);
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ ldr(r2,
+ FieldMemOperand(r0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ cmp(r2, ip);
+ __ b(ne, &create_runtime);
+ __ SmiUntag(r2);
+ __ mov(r0, r4);
+ __ b(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ ldr(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r2);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmp(r2, Operand(FAST_ELEMENTS));
+ __ b(hi, &create_runtime);
+ __ cmp(r2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ b(eq, &create_runtime);
+ __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ SmiUntag(r2);
+
+ __ bind(&done_create);
+ }
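+
+ // Three ways reach done_create (sketch): unmodified sloppy or strict
+ // arguments objects reuse their elements FixedArray, JSArrays with packed
+ // SMI or packed object elements reuse theirs, and everything else is
+ // flattened by Runtime::kCreateListFromArrayLike.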
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ sub(ip, sp, ip);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ip, Operand(r2, LSL, kPointerSizeLog2));
+ __ b(gt, &done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r1 : target
+ // -- r0 : args (a FixedArray built from argumentsList)
+ // -- r2 : len (number of elements to push from args)
+ // -- r3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ mov(r4, Operand(0));
+ Label done, loop;
+ __ bind(&loop);
+ __ cmp(r4, r2);
+ __ b(eq, &done);
+ __ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
+ __ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
+ __ Push(ip);
+ __ add(r4, r4, Operand(1));
+ __ b(&loop);
+ __ bind(&done);
+ __ Move(r0, r4);
+ }
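+
+ // In C terms (sketch): for (i = 0; i < len; i++) push(args[i]); with the
+ // pushed count left in r0 afterwards.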
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(r1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(r3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ b(ne, &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kNativeByteOffset));
__ tst(r3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ b(ne, &done_convert);
{
- __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
- // -- r3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(r3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
- __ b(hs, &done_convert);
- __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(r3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ JumpIfSmi(r3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ b(hs, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r3);
+ }
+ __ b(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r0);
+ __ Push(r0, r1);
+ __ mov(r0, r3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(r3, r0);
+ __ Pop(r0, r1);
+ __ SmiUntag(r0);
+ }
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ b(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r0);
- __ Push(r0, r1);
- __ mov(r0, r3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(r3, r0);
- __ Pop(r0, r1);
- __ SmiUntag(r0);
- }
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
}
__ bind(&done_convert);
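// Summing up the modes (sketch): kNullOrUndefined patches in the global
// proxy without loading the receiver, kNotNullOrUndefined skips the
// null/undefined check, and kAny takes the full path, turning null and
// undefined into the global proxy and other primitives into wrapper
// objects via ToObject.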
@@ -1561,15 +1998,126 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(r2);
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount actual(r0);
ParameterCount expected(r2);
- __ InvokeCode(r3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ push(r1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : target (checked to be a JSBoundFunction)
+ // -- r3 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into r2 and length of that into r4.
+ Label no_bound_arguments;
+ __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
+ __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ SmiUntag(r4);
+ __ cmp(r4, Operand(0));
+ __ b(eq, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : target (checked to be a JSBoundFunction)
+ // -- r2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- r3 : new.target (only in case of [[Construct]])
+ // -- r4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ sub(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ b(gt, &done); // Signed comparison.
+ // Restore the stack pointer.
+ __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(r5, Operand(0));
+ __ bind(&loop);
+ __ cmp(r5, r0);
+ __ b(gt, &done_loop);
+ __ ldr(ip, MemOperand(sp, r4, LSL, kPointerSizeLog2));
+ __ str(ip, MemOperand(sp, r5, LSL, kPointerSizeLog2));
+ __ add(r4, r4, Operand(1));
+ __ add(r5, r5, Operand(1));
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ SmiUntag(r4);
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2));
+ __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ add(r0, r0, Operand(1));
+ __ b(gt, &loop);
+ }
+ }
+ __ bind(&no_bound_arguments);
+}
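+
+// In effect (illustrative sketch), with n bound arguments and argc call
+// arguments already on the stack:
+//   sp -= n * kPointerSize;                         // reserve n slots
+//   for (i = 0; i <= argc; i++) sp[i] = sp[i + n];  // slide args + receiver
+//   for (j = n - 1; j >= 0; j--) sp[argc++] = bound[j];
+// leaving the receiver deepest, the bound arguments next, the call
+// arguments on top, and argc + n in r0.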
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(r1);
+
+ // Patch the receiver to [[BoundThis]].
+ __ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
+ __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ ldr(ip, MemOperand(ip));
+ __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the target to call (can be any Object).
@@ -1579,16 +2127,22 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq);
- __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmp(r5, Operand(JS_PROXY_TYPE));
__ b(ne, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ ldr(r1, FieldMemOperand(r1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(r1);
- __ b(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(r1);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ add(r0, r0, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1600,15 +2154,17 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1618,10 +2174,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (checked to be a JSFunction)
- // -- r3 : the original constructor (checked to be a JSFunction)
+ // -- r3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(r1);
- __ AssertFunction(r3);
// Calling convention for function specific ConstructStubs require
// r2 to contain either an AllocationSite or undefined.
@@ -1636,17 +2191,47 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSBoundFunction)
+ // -- r3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(r1);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ __ cmp(r1, r3);
+ __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
+ eq);
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ ldr(ip, MemOperand(ip));
+ __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
- // -- r1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- r3 : the original constructor (either the same as the constructor or
+ // -- r1 : the constructor to call (checked to be a JSProxy)
+ // -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ ldr(r1, FieldMemOperand(r1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(r1);
+ __ Push(r3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ add(r0, r0, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1655,23 +2240,32 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (can be any Object)
- // -- r3 : the original constructor (either the same as the constructor or
+ // -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(r1, &non_constructor);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Construct]] internal method.
__ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(r2, Operand(1 << Map::kIsConstructor));
__ b(eq, &non_constructor);
- // Dispatch based on instance type.
- __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET, eq);
- __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ cmp(r5, Operand(JS_PROXY_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1680,7 +2274,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1688,40 +2282,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
-}
-
-
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
- // -- r2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- r1 : the target to call (can be any Object).
-
- // Find the address of the last argument.
- __ add(r3, r0, Operand(1)); // Add one for receiver.
- __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
- __ sub(r3, r2, r3);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ b(al, &loop_check);
- __ bind(&loop_header);
- __ ldr(r4, MemOperand(r2, -kPointerSize, PostIndex));
- __ push(r4);
- __ bind(&loop_check);
- __ cmp(r2, r3);
- __ b(gt, &loop_header);
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
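
The rewritten dispatch checks constructor-hood before routing to the bound-function and proxy builtins, which the old code could not do because it compared instance types first. A standalone sketch of the resulting order, with assumed enum values and an assumed kIsConstructor bit position (not V8's actual layout):

    #include <cstdint>

    enum InstanceType { JS_FUNCTION, JS_BOUND_FUNCTION, JS_PROXY, OTHER };
    struct Map { InstanceType instance_type; std::uint8_t bit_field; };
    constexpr std::uint8_t kIsConstructorBit = 1 << 0;  // assumed bit position

    enum class Target { Function, BoundFunction, Proxy, Delegate, NonConstructor };

    Target DispatchConstruct(bool is_smi, const Map& map) {
      if (is_smi) return Target::NonConstructor;  // Smis can't construct.
      if (map.instance_type == JS_FUNCTION) return Target::Function;
      if ((map.bit_field & kIsConstructorBit) == 0) return Target::NonConstructor;
      if (map.instance_type == JS_BOUND_FUNCTION) return Target::BoundFunction;
      if (map.instance_type == JS_PROXY) return Target::Proxy;
      return Target::Delegate;  // call_as_constructor_delegate fallback
    }
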
@@ -1730,14 +2292,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
+ // -- r3 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1746,12 +2306,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
__ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kPointerSize));
@@ -1761,7 +2322,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r0: copy start address
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
// r4: copy end address
Label copy;
@@ -1794,24 +2355,25 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
__ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -1824,7 +2386,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
@@ -1843,7 +2405,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(r0, r2);
// r0 : expected number of arguments
// r1 : function (passed through to callee)
- __ Call(r3);
+ // r3 : new target (passed through to callee)
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1857,13 +2421,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(r3);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Jump(r4);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0);
}
}
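
Passing the new target through r3 means the code entry can no longer live there, so both call sites reload it into r4 from the function. The adaptation itself is unchanged; a minimal model of what the frame copying achieves, under the simplifying assumption that extra actual arguments are simply dropped:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<double> AdaptArguments(const std::vector<double>& actual,
                                       std::size_t expected, double undefined) {
      std::size_t copied = std::min(actual.size(), expected);
      std::vector<double> adapted(actual.begin(), actual.begin() + copied);
      adapted.resize(expected, undefined);  // too-few case: pad with undefined
      return adapted;
    }
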
diff --git a/chromium/v8/src/arm/code-stubs-arm.cc b/chromium/v8/src/arm/code-stubs-arm.cc
index a8a4b5f5ac5..21413335ea0 100644
--- a/chromium/v8/src/arm/code-stubs-arm.cc
+++ b/chromium/v8/src/arm/code-stubs-arm.cc
@@ -250,7 +250,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
@@ -271,7 +271,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
@@ -436,11 +436,11 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
+ // FIRST_JS_RECEIVER_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
@@ -453,7 +453,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -520,9 +520,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
- __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmp(r2, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_JS_RECEIVER_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -682,8 +682,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if (cc == lt || cc == le) {
@@ -697,9 +696,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -901,7 +899,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -974,14 +972,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
-
+ //
+ // If argv_in_register():
+ // r2: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ mov(r5, Operand(r1));
- // Compute the argv pointer in a callee-saved register.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r1, r1, Operand(kPointerSize));
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mov(r1, Operand(r2));
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ sub(r1, r1, Operand(kPointerSize));
+ }
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1057,8 +1062,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- // Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles(), r4, true);
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // Callee-saved register r4 still holds argc.
+ argc = r4;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true);
__ mov(pc, lr);
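
A sketch of the argv computation the non-register path performs (ARM32 word size assumed); with argv_in_register() the caller supplies this pointer in r2 and the stub pops nothing on exit:

    #include <cstdint>

    constexpr std::uintptr_t kPointerSize = 4;  // ARM32 word size (assumed)

    std::uintptr_t ArgvFromStackPointer(std::uintptr_t sp, std::uintptr_t argc) {
      // sp + argc * kPointerSize points one past the first argument's slot;
      // back up one word, exactly as the add/sub pair above does.
      return sp + argc * kPointerSize - kPointerSize;
    }
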
// Handling of exception.
@@ -1332,16 +1344,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ b(ne, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ ldr(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- __ b(ne, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ ldr(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1366,27 +1368,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ Register const result = r0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &fast_runtime_fallback);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ b(eq, &fast_runtime_fallback);
+
+ __ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ b(eq, &done);
- __ cmp(object_prototype, null);
- __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ cmp(object, null);
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ b(ne, &loop);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+  // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ Move(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
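
A compact model of the rewritten fast path: the loop now walks the object itself rather than a separate prototype register, and bails out to Runtime::kHasInPrototypeChain for access-checked maps and proxies. Hypothetical types, not V8's object model:

    struct Obj {
      bool access_check_needed;
      bool is_proxy;
      const Obj* prototype;  // nullptr models the null sentinel
    };

    enum class Result { True, False, RuntimeFallback };

    Result HasInPrototypeChain(const Obj* object, const Obj* function_prototype) {
      for (const Obj* o = object; ; ) {
        if (o->access_check_needed || o->is_proxy) return Result::RuntimeFallback;
        o = o->prototype;                       // Map::kPrototypeOffset load
        if (o == function_prototype) return Result::True;
        if (o == nullptr) return Result::False;
      }
    }
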
@@ -1488,7 +1510,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1515,7 +1537,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(r1, r3, r2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1576,7 +1598,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r4, r9, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
@@ -1586,8 +1608,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ ldr(r4, NativeContextMemOperand());
__ cmp(r6, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -1716,7 +1737,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 = argument count (tagged)
__ bind(&runtime);
__ Push(r1, r3, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1735,7 +1756,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1779,10 +1800,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ ldr(r4, MemOperand(
- r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
__ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
@@ -1831,7 +1849,29 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(r1, r3, r2);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // r2 : number of parameters (tagged)
+ // r3 : parameters pointer
+ // r4 : rest parameter index (tagged)
+
+ Label runtime;
+ __ ldr(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ldr(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r3, r5, Operand::PointerOffsetFromSmiKey(r2));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ Push(r2, r3, r4);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
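
A sketch of the frame inspection this stub performs, with hypothetical plain types; the tagged-Smi length and PointerOffsetFromSmiKey scaling are simplified to an untagged count:

    #include <cstdint>

    struct RestParams { std::intptr_t count; std::uintptr_t pointer; };

    RestParams ResolveRestParams(bool caller_is_adaptor,
                                 std::intptr_t adaptor_length,
                                 std::uintptr_t adaptor_fp,
                                 std::uintptr_t caller_sp_offset,
                                 RestParams incoming) {
      if (!caller_is_adaptor) return incoming;  // no adaptor frame: keep r2/r3
      std::uintptr_t pointer =
          adaptor_fp + static_cast<std::uintptr_t>(adaptor_length) * 4 +
          caller_sp_offset;
      return {adaptor_length, pointer};
    }
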
@@ -1840,7 +1880,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2111,7 +2151,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ b(eq, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2206,7 +2246,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2249,33 +2289,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
- // r4 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
- if (is_super) {
- __ Push(r4);
- }
__ CallStub(stub);
- if (is_super) {
- __ Pop(r4);
- }
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2283,7 +2315,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
- // r4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2324,7 +2355,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ b(ne, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
@@ -2347,7 +2378,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &not_array_function);
@@ -2355,115 +2386,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
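
The feedback slot behaves as a small state machine; dropping is_super removes the extra save/restore of r4 around the stub calls but leaves the transitions untouched. A sketch of those transitions (simplified: the miss/megamorphic write-back is folded into one function):

    enum class FeedbackState {
      kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic
    };

    FeedbackState RecordCallTarget(FeedbackState state, bool same_target,
                                   bool target_is_array_function) {
      switch (state) {
        case FeedbackState::kUninitialized:
          return target_is_array_function ? FeedbackState::kAllocationSite
                                          : FeedbackState::kMonomorphic;
        case FeedbackState::kMonomorphic:
        case FeedbackState::kAllocationSite:
          return same_target ? state : FeedbackState::kMegamorphic;
        case FeedbackState::kMegamorphic:
          return state;
      }
      return state;  // unreachable
    }
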
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, cont);
-
- // Do not transform the receiver for native (Compilerhints already in r3).
- __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ mov(r0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(r1);
- __ mov(r0, r3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(r1);
- }
- __ str(r0, MemOperand(sp, argc * kPointerSize));
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // r1 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
- }
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(r3, &wrap);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
- // r4 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2472,28 +2409,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into r2, or undefined.
- __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
- __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(eq, &feedback_register_initialized);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(r2, r5);
- }
+ __ AssertUndefinedOrAllocationSite(r2, r5);
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mov(r3, r4);
- } else {
- __ mov(r3, r1);
- }
+ // Pass function as new target.
+ __ mov(r3, r1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2512,7 +2443,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r3 - slot id
// r2 - vector
// r4 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, miss);
@@ -2536,13 +2467,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
// r2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2579,34 +2504,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- __ JumpIfSmi(r3, &wrap);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call_function);
+ __ mov(r0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
- __ b(eq, &slow_start);
+ __ b(eq, &call);
// Verify that r4 contains an AllocationSite
__ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
@@ -2635,14 +2542,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ ldr(r4, FieldMemOperand(r2, with_types_offset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, with_types_offset));
- __ ldr(r4, FieldMemOperand(r2, generic_offset));
- __ add(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, generic_offset));
- __ jmp(&slow_start);
+
+ __ bind(&call);
+ __ mov(r0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2655,14 +2559,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ b(eq, &miss);
- // Update stats.
- __ ldr(r4, FieldMemOperand(r2, with_types_offset));
- __ add(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
+ __ ldr(ip, NativeContextMemOperand());
+ __ cmp(r4, ip);
+ __ b(ne, &miss);
// Initialize the call counter.
__ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
@@ -2681,23 +2587,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r1);
}
- __ jmp(&have_js_function);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
}
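
One behavioral change worth noting: the uninitialized path now refuses to go monomorphic for functions from a different native context. A tiny model of that guard, with hypothetical types:

    struct NativeContext { int id; };
    struct Function { const NativeContext* native_context; bool is_array_function; };

    bool CanGoMonomorphic(const Function& target, const NativeContext& current) {
      if (target.is_array_function) return false;      // handled via MISS
      return target.native_context->id == current.id;  // same-context only
    }
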
@@ -2708,7 +2605,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
@@ -2773,11 +2670,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2804,7 +2701,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -2843,7 +2740,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3099,7 +2996,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// r0: original string
@@ -3139,7 +3036,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&slow_string);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3150,7 +3047,22 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in r0.
+ Label not_smi;
+ __ JumpIfNotSmi(r0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, r0);
+ __ mov(r0, Operand(0), LeaveCC, lt);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ push(r0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength);
}
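
The fast path clamps a negative Smi to zero and returns any non-negative Smi unchanged; everything else defers to Runtime::kToLength. As a one-liner, under the assumption that the Smi fits in an int:

    #include <algorithm>

    int ToLengthFastPath(int smi_value) {
      return std::max(smi_value, 0);  // negative Smis clamp to 0
    }
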
@@ -3180,7 +3092,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3310,7 +3222,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// tagged as a small integer.
__ bind(&runtime);
__ Push(r1, r0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3352,7 +3264,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
@@ -3613,9 +3525,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3623,16 +3535,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
- __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, &miss);
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, &miss);
DCHECK(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
@@ -3643,7 +3556,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
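
Because all JS receiver types sit at the top of the instance-type enum (the STATIC_ASSERT above), the old exact JS_OBJECT_TYPE checks become single lower-bound comparisons. In plain C++:

    bool IsJSReceiver(int instance_type, int first_js_receiver_type) {
      // Valid because LAST_TYPE == LAST_JS_RECEIVER_TYPE: no upper bound needed.
      return instance_type >= first_js_receiver_type;
    }
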
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r2, r1, Operand(r0));
@@ -3660,7 +3573,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(r0, r0, Operand(r1));
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ mov(r2, Operand(Smi::FromInt(GREATER)));
@@ -3668,7 +3581,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ mov(r2, Operand(Smi::FromInt(LESS)));
}
__ Push(r1, r0, r2);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3684,7 +3597,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -4129,11 +4042,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4153,68 +4066,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : element value to store
- // -- r3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers r1, r2, r4
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
-
- __ CheckFastElements(r2, r5, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiElements(r2, r5, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r1, r3, r0);
- __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
- __ Push(r5, r4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r0, MemOperand(r6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
- __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4929,7 +4780,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r0 : argc (only if argument_count() == ANY)
// -- r1 : constructor
// -- r2 : AllocationSite or undefined
- // -- r3 : original constructor
+ // -- r3 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -4950,6 +4801,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r2, r4);
}
+ // Enter the context of the Array function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
Label subclassing;
__ cmp(r3, r1);
__ b(ne, &subclassing);
@@ -4969,25 +4823,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- __ push(r1);
- __ push(r3);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(r0, r0, Operand(2));
+ __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ add(r0, r0, Operand(3));
break;
case NONE:
- __ mov(r0, Operand(2));
+ __ str(r1, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r0, Operand(3));
break;
case ONE:
- __ mov(r0, Operand(3));
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ mov(r0, Operand(4));
break;
}
-
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(r3, r2);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
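
The subclassing path now overwrites the receiver slot with the constructor instead of pushing it, then pushes the new target and allocation site, so argc grows by three in every arity case. A vector-as-stack sketch (hypothetical; top of stack at back()):

    #include <vector>

    int PrepareNewArrayCall(std::vector<void*>& stack, int argc,
                            void* constructor, void* new_target, void* site) {
      // The receiver sits argc slots below the top of the stack.
      stack[stack.size() - 1 - argc] = constructor;  // __ str(r1, MemOperand(sp, ...))
      stack.push_back(new_target);                   // __ Push(r3, r2)
      stack.push_back(site);
      return argc + 3;  // receiver slot + the two pushed values
    }
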
@@ -5073,13 +4925,13 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
- __ ldr(result, ContextOperand(result));
+ __ ldr(result, ContextMemOperand(result));
__ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
@@ -5089,7 +4941,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Fallback to runtime.
__ SmiTag(slot);
__ push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5115,13 +4967,13 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
- __ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
- __ ldr(cell, ContextOperand(cell));
+ __ ldr(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
@@ -5213,8 +5065,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5341,7 +5192,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/chromium/v8/src/arm/code-stubs-arm.h b/chromium/v8/src/arm/code-stubs-arm.h
index e572fd9a1b0..30ae358eb05 100644
--- a/chromium/v8/src/arm/code-stubs-arm.h
+++ b/chromium/v8/src/arm/code-stubs-arm.h
@@ -109,9 +109,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
@@ -311,6 +310,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/chromium/v8/src/arm/codegen-arm.cc b/chromium/v8/src/arm/codegen-arm.cc
index 97f10340611..c34acd6a5b8 100644
--- a/chromium/v8/src/arm/codegen-arm.cc
+++ b/chromium/v8/src/arm/codegen-arm.cc
@@ -18,23 +18,23 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
- fast_exp_arm_machine_code, x, 0);
+byte* fast_exp_arm_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)
+ ->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DwVfpRegister input = d0;
@@ -67,11 +67,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm_machine_code = buffer;
return &fast_exp_simulator;
@@ -79,7 +79,8 @@ UnaryMathFunction CreateExpFunction() {
}
#if defined(V8_HOST_ARCH_ARM)
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@@ -87,9 +88,10 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
Register dest = r0;
Register src = r1;
@@ -227,7 +229,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -236,7 +238,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- MemCopyUint16Uint8Function stub) {
+ Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@@ -244,9 +246,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
Register dest = r0;
Register src = r1;
@@ -314,7 +317,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(&desc);
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
@@ -322,16 +325,17 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
#endif
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
__ vsqrt(d0, d0);
@@ -342,9 +346,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
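
With the simulator build returning nullptr instead of &std::sqrt, callers now own the fallback. A sketch of the expected caller-side contract (the alias mirrors UnaryMathFunctionWithIsolate but takes a void* isolate, purely for illustration):

    #include <cmath>

    using UnaryMathFunctionWithIsolate = double (*)(double x, void* isolate);

    double Sqrt(UnaryMathFunctionWithIsolate fast, double x, void* isolate) {
      // nullptr means "no generated stub" (simulator build or Allocate failure).
      return fast != nullptr ? fast(x, isolate) : std::sqrt(x);
    }
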
@@ -882,15 +886,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());
@@ -937,7 +943,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->emit_code_stub_address(stub);
diff --git a/chromium/v8/src/arm/codegen-arm.h b/chromium/v8/src/arm/codegen-arm.h
index d36ce59d669..880825a1be0 100644
--- a/chromium/v8/src/arm/codegen-arm.h
+++ b/chromium/v8/src/arm/codegen-arm.h
@@ -5,7 +5,7 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -44,6 +44,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/chromium/v8/src/arm/constants-arm.cc b/chromium/v8/src/arm/constants-arm.cc
index 9fefc3140a9..915d9030e89 100644
--- a/chromium/v8/src/arm/constants-arm.cc
+++ b/chromium/v8/src/arm/constants-arm.cc
@@ -51,17 +51,6 @@ const Registers::RegisterAlias Registers::aliases_[] = {
};
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
diff --git a/chromium/v8/src/arm/constants-arm.h b/chromium/v8/src/arm/constants-arm.h
index 6d544f3f368..efc060a82dd 100644
--- a/chromium/v8/src/arm/constants-arm.h
+++ b/chromium/v8/src/arm/constants-arm.h
@@ -702,6 +702,7 @@ class VFPRegisters {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/chromium/v8/src/arm/deoptimizer-arm.cc b/chromium/v8/src/arm/deoptimizer-arm.cc
index 312bb00df32..38635ea3cf9 100644
--- a/chromium/v8/src/arm/deoptimizer-arm.cc
+++ b/chromium/v8/src/arm/deoptimizer-arm.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -39,14 +40,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->bkpt(0);
}
}
@@ -71,7 +73,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
@@ -93,7 +95,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -142,8 +144,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Everything but pc, lr and ip which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
- const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK(kDoubleRegZero.code() == 14);
@@ -152,11 +153,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
- // Push registers d0-d13, and possibly d16-d31, on the stack.
+ // Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d13);
+ __ vstm(db_w, sp, d0, d15);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -211,9 +212,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
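
The copy loop now iterates over the allocatable register codes reported by RegisterConfiguration rather than assuming codes 0..N-1, so the src/dst offsets are keyed by register code. A sketch of the offset computation:

    #include <vector>

    std::vector<int> DoubleRegSaveOffsets(const std::vector<int>& allocatable_codes,
                                          int double_size, int base_offset) {
      std::vector<int> offsets;
      for (int code : allocatable_codes) {
        offsets.push_back(base_offset + code * double_size);  // keyed by code
      }
      return offsets;
    }
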
diff --git a/chromium/v8/src/arm/disasm-arm.cc b/chromium/v8/src/arm/disasm-arm.cc
index 0cc24e00af0..66b7f458494 100644
--- a/chromium/v8/src/arm/disasm-arm.cc
+++ b/chromium/v8/src/arm/disasm-arm.cc
@@ -1781,28 +1781,28 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if (dp_operation) {
Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
}
break;
case 0x1:
if (dp_operation) {
Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
}
break;
case 0x2:
if (dp_operation) {
Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
}
break;
case 0x3:
if (dp_operation) {
Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
}
break;
default:
@@ -1923,7 +1923,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
+ return v8::internal::Register::from_code(reg).ToString();
}
diff --git a/chromium/v8/src/arm/frames-arm.h b/chromium/v8/src/arm/frames-arm.h
index dcba34f0175..1ea7b1af567 100644
--- a/chromium/v8/src/arm/frames-arm.h
+++ b/chromium/v8/src/arm/frames-arm.h
@@ -128,6 +128,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/chromium/v8/src/arm/interface-descriptors-arm.cc b/chromium/v8/src/arm/interface-descriptors-arm.cc
index aa49843bd05..b7fad7bee61 100644
--- a/chromium/v8/src/arm/interface-descriptors-arm.cc
+++ b/chromium/v8/src/arm/interface-descriptors-arm.cc
@@ -65,6 +65,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return r2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r3; }
+const Register RestParamAccessDescriptor::parameter_count() { return r2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return r3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return r4; }
+
+
const Register ApiGetterDescriptor::function_address() { return r2; }
@@ -80,14 +85,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
@@ -110,6 +107,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return r0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return r0; }
@@ -131,6 +132,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r2, r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1};
@@ -193,7 +201,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
- // r4 : original constructor (for IsSuperConstructorCall)
+ // r4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r4, r2};
@@ -210,6 +218,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : allocation site or undefined
+ Register registers[] = {r1, r3, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ Register registers[] = {r1, r3, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r1, r0};
@@ -230,6 +259,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -355,6 +391,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r1, // JSFunction
+ r3, // the new target
r0, // actual number of arguments
r2, // expected number of arguments
};
@@ -396,33 +433,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- r1, // math rounding function
- r3, // vector slot id
+ r0, // argument count (not including receiver)
+ r2, // address of first argument
+ r1 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- r1, // math rounding function
- r3, // vector slot id
- r4, // type vector
+ r0, // argument count (not including receiver)
+ r3, // new target
+ r1, // constructor to call
+ r2 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r0, // argument count (including receiver)
- r2, // address of first argument
- r1 // the target callable to be call
+ r0, // argument count (argc)
+ r2, // address of first argument (argv)
+ r1 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
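
All of the descriptors above follow the same shape: list the parameter registers in calling-convention order, then hand them to the platform-independent descriptor data. A hedged sketch with a hypothetical FooStubDescriptor (name and register choice are illustrative only):

    void FooStubDescriptor::InitializePlatformSpecific(  // hypothetical
        CallInterfaceDescriptorData* data) {
      Register registers[] = {r1,   // target
                              r0};  // argument count
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }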
diff --git a/chromium/v8/src/arm/interface-descriptors-arm.h b/chromium/v8/src/arm/interface-descriptors-arm.h
index 6201adc6851..a64927924e8 100644
--- a/chromium/v8/src/arm/interface-descriptors-arm.h
+++ b/chromium/v8/src/arm/interface-descriptors-arm.h
@@ -20,7 +20,7 @@ class PlatformInterfaceDescriptor {
private:
TargetAddressStorageMode storage_mode_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
diff --git a/chromium/v8/src/arm/macro-assembler-arm.cc b/chromium/v8/src/arm/macro-assembler-arm.cc
index 49802ba734d..57fa3f58049 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/arm/macro-assembler-arm.cc
@@ -11,6 +11,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/arm/macro-assembler-arm.h"
@@ -18,13 +19,14 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
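
A minimal sketch of the two construction modes introduced here (buffer and size names assumed): ordinary code generation wants a code object, while raw instruction patching does not.

    // Normal codegen: code_object_ is initialized from the heap.
    MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
    // Patching an existing instruction stream: skip heap access entirely.
    MacroAssembler patcher(isolate, address, patch_size, CodeObjectRequired::kNo);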
@@ -759,7 +761,9 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -1233,8 +1237,6 @@ void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1254,7 +1256,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(r0));
DCHECK(expected.is_immediate() || expected.reg().is(r2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1286,11 +1287,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r3, Operand(code_constant));
- add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -1308,20 +1304,79 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ mov(r4, Operand(step_in_enabled));
+ ldrb(r4, MemOperand(r4));
+ cmp(r4, Operand(0));
+ b(eq, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(r1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = r4;
+ ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -1339,6 +1394,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register fun,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -1349,19 +1405,17 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(r1));
Register expected_reg = r2;
- Register code_reg = r3;
+ Register temp_reg = r4;
- ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ldr(expected_reg,
- FieldMemOperand(code_reg,
+ FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(expected_reg);
- ldr(code_reg,
- FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}
@@ -1379,11 +1433,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
}
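
A sketch of a call site after this refactoring, following the DCHECKs above (callee in r1, new.target in r3 or no_reg; the call wrapper name is assumed):

    ParameterCount actual(r0);  // actual argument count in r0
    // Passing no_reg makes InvokeFunctionCode load undefined into r3 itself.
    __ InvokeFunction(r1, no_reg, actual, CALL_FUNCTION, NullCallWrapper());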
@@ -1471,10 +1521,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1658,11 +1705,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1679,48 +1722,46 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
// Set up allocation top address register.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ ldr(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ ldr(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
- cmp(result, Operand(ip));
+ cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
@@ -1740,15 +1781,15 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(scratch2, source, bits_operand, SetCC, cond);
- source = scratch2;
+ add(result_end, source, bits_operand, SetCC, cond);
+ source = result_end;
cond = cc;
}
}
b(cs, gc_required);
- cmp(scratch2, Operand(ip));
+ cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
- str(scratch2, MemOperand(topaddr));
+ str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1757,32 +1798,25 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
+ mov(scratch, Operand(0x7191));
+ mov(result_end, Operand(0x7291));
}
jmp(gc_required);
return;
}
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(ip));
- DCHECK(!result.is(ip));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
@@ -1792,48 +1826,45 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ ldr(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ ldr(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
- cmp(result, Operand(ip));
+ cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
@@ -1841,20 +1872,20 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
+ add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
- add(scratch2, result, Operand(object_size), SetCC);
+ add(result_end, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
- cmp(scratch2, Operand(ip));
+ cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- tst(scratch2, Operand(kObjectAlignmentMask));
+ tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
- str(scratch2, MemOperand(topaddr));
+ str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -2060,6 +2091,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
@@ -2449,24 +2481,17 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
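
A sketch of the simplified caller side (runtime id chosen for illustration): the argument count now comes from the Runtime::Function table, so call sites pass only the id.

    // Old: __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
    // New: nargs and result size are looked up from the function table.
    __ TailCallRuntime(Runtime::kStackGuard);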
@@ -2486,35 +2511,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(r2, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(r2));
- Call(r2);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(r2);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- ldr(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- ldr(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(r1));
- GetBuiltinFunction(r1, native_context_index);
- // Load the code entry point from the builtins object.
- ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, r1);
+ InvokeFunctionCode(r1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -2648,49 +2648,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- ldr(dst, GlobalObjectOperand());
- ldr(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ldr(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- ldr(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(ip, FieldMemOperand(scratch, offset));
+ ldr(scratch, NativeContextMemOperand());
+ ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(map_in_out, FieldMemOperand(scratch, offset));
+ ldr(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ldr(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ ldr(dst, NativeContextMemOperand());
+ ldr(dst, ContextMemOperand(dst, index));
}
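
A sketch of the replacement pattern for old LoadGlobalFunction call sites (slot index chosen for illustration):

    // Loads the native context, then the requested slot out of it.
    __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r1);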
@@ -2828,6 +2809,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotABoundFunction);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2942,27 +2936,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- LowDwVfpRegister double_scratch,
- int field_count) {
- int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
- for (int i = 0; i < double_count; i++) {
- vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
- vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
- }
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
- STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
- if (remain != 0) {
- vldr(double_scratch.low(),
- FieldMemOperand(src, (field_count - 1) * kPointerSize));
- vstr(double_scratch.low(),
- FieldMemOperand(dst, (field_count - 1) * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ str(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
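
A hedged sketch of a caller (register assignment assumed): the helper allocates in new space and falls back to the |gc_required| label when inline allocation fails.

    Label gc_required;
    // result, constructor, value, scratch1, scratch2 must all be distinct.
    __ AllocateJSValue(r0, r1, r2, r3, r4, &gc_required);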
@@ -3019,15 +3011,15 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
b(&entry);
bind(&loop);
- str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+ str(filler, MemOperand(current_address, kPointerSize, PostIndex));
bind(&entry);
- cmp(start_offset, end_offset);
+ cmp(current_address, end_address);
b(lo, &loop);
}
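
A sketch of the renamed helper in use (filler root index assumed): fills [current_address, end_address) one pointer at a time and exits with current_address == end_address.

    __ LoadRoot(r5, Heap::kOnePointerFillerMapRootIndex);  // filler value
    __ InitializeFieldsWithFiller(r3 /* current_address */,
                                  r4 /* end_address */, r5 /* filler */);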
@@ -3278,8 +3270,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -3312,27 +3304,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- b(eq, &is_data_object);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, not_data_object);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -3347,96 +3318,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
- b(ne, &done);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- tst(load_scratch, Operand(mask_scratch, LSL, 1));
- b(eq, &ok);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
- b(eq, &is_data_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- tst(instance_type, Operand(kExternalStringTag));
- mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
- b(ne, &is_data_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- ldr(ip, FieldMemOperand(value, String::kLengthOffset));
- tst(instance_type, Operand(kStringEncodingMask));
- mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
- add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orr(ip, ip, Operand(mask_scratch));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, Operand(length));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ b(eq, value_is_white);
}
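
With the new encoding (white 00, grey 10, black 11), grey and black both set the first bit of the pair, so whiteness collapses to a single-bit test; a minimal standalone sketch:

    // |cell| holds packed mark bits; |mask| selects the first bit of the
    // object's two-bit pair. White (00) is the only live pattern with it
    // clear, so one AND suffices.
    static inline bool IsWhite(uint32_t cell, uint32_t mask) {
      return (cell & mask) == 0;
    }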
@@ -3578,8 +3476,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
@@ -3655,12 +3556,11 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
@@ -3672,7 +3572,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
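
A sketch of the updated usage (patched instruction chosen for illustration): the isolate is now threaded through so the destructor can flush the ICache on behalf of the right isolate.

    CodePatcher patcher(isolate, address, 1);  // patch one instruction
    patcher.masm()->nop();                     // emit the replacement
    // ~CodePatcher flushes [address, address + size) via the isolate.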
diff --git a/chromium/v8/src/arm/macro-assembler-arm.h b/chromium/v8/src/arm/macro-assembler-arm.h
index d78bf8f49a5..26811b988cf 100644
--- a/chromium/v8/src/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/arm/macro-assembler-arm.h
@@ -14,17 +14,19 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_r0_Code};
-const Register kReturnRegister1 = {kRegister_r1_Code};
-const Register kJSFunctionRegister = {kRegister_r1_Code};
-const Register kContextRegister = {kRegister_r7_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
+const Register kReturnRegister0 = {Register::kCode_r0};
+const Register kReturnRegister1 = {Register::kCode_r1};
+const Register kJSFunctionRegister = {Register::kCode_r1};
+const Register kContextRegister = {Register::kCode_r7};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};
// ----------------------------------------------------------------------------
// Static helper functions
@@ -36,9 +38,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
-const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
-const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
-const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
+const Register cp = {Register::kCode_r7}; // JavaScript context pointer.
+const Register pp = {Register::kCode_r8}; // Constant pool pointer.
+const Register kRootRegister = {Register::kCode_r10}; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -86,11 +88,8 @@ enum TargetAddressStorageMode {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Returns the size of a call in instructions. Note, the value returned is
@@ -243,22 +242,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -609,8 +596,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -623,7 +617,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -641,15 +635,19 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -762,12 +760,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -812,11 +806,11 @@ class MacroAssembler: public Assembler {
Register heap_number_map,
Label* gc_required);
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst,
- Register src,
- LowDwVfpRegister double_scratch,
- int field_count);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -826,12 +820,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -1078,33 +1071,30 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1157,13 +1147,6 @@ class MacroAssembler: public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1311,6 +1294,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1461,8 +1448,6 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1515,8 +1500,7 @@ class CodePatcher {
DONT_FLUSH
};
- CodePatcher(byte* address,
- int instructions,
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
@@ -1544,13 +1528,13 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index = 0) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -1564,6 +1548,7 @@ inline MemOperand GlobalObjectOperand() {
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
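
A minimal sketch of the renamed helpers used together, mirroring LoadNativeContextSlot above:

    __ ldr(dst, NativeContextMemOperand());      // cp -> native context
    __ ldr(dst, ContextMemOperand(dst, index));  // slot out of native context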
diff --git a/chromium/v8/src/arm/simulator-arm.cc b/chromium/v8/src/arm/simulator-arm.cc
index 5da6204050b..6e193885b04 100644
--- a/chromium/v8/src/arm/simulator-arm.cc
+++ b/chromium/v8/src/arm/simulator-arm.cc
@@ -298,7 +298,8 @@ void ArmDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
+ PrintF("%3s: 0x%08x %10d", Register::from_code(i).ToString(),
+ value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
(i % 2) == 0) {
@@ -389,7 +390,7 @@ void ArmDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -784,12 +785,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
- swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
+ swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -805,9 +806,8 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -815,7 +815,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -860,9 +860,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -3156,14 +3157,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
+ lazily_initialize_fast_sqrt(isolate_);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
- double dd_value = fast_sqrt(dm_value);
+ double dd_value = fast_sqrt(dm_value, isolate_);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
- float sd_value = fast_sqrt(sm_value);
+ float sd_value = fast_sqrt(sm_value, isolate_);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
@@ -3176,10 +3178,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
- double dm_value = get_double_from_d_register(vm);
- double dd_value = trunc(dm_value);
- dd_value = canonicalizeNaN(dd_value);
- set_d_register_from_double(vd, dd_value);
+ if (instr->SzValue() == 0x1) {
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = trunc(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ float sm_value = get_float_from_s_register(m);
+ float sd_value = truncf(sm_value);
+ sd_value = canonicalizeNaN(sd_value);
+ set_s_register_from_float(d, sd_value);
+ }
} else {
UNREACHABLE(); // Not used by V8.
}
@@ -3868,44 +3877,60 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
- instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2 &&
- instr->Bit(8) == 0x1) {
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = 0.0;
- int rounding_mode = instr->Bits(17, 16);
- switch (rounding_mode) {
- case 0x0: // vrinta - round with ties to away from zero
- dd_value = round(dm_value);
- break;
- case 0x1: { // vrintn - round with ties to even
- dd_value = std::floor(dm_value);
- double error = dm_value - dd_value;
- // Take care of correctly handling the range [-0.5, -0.0], which
- // must yield -0.0.
- if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
- dd_value = -0.0;
- // If the error is greater than 0.5, or is equal to 0.5 and the
- // integer result is odd, round up.
- } else if ((error > 0.5) ||
- ((error == 0.5) && (fmod(dd_value, 2) != 0))) {
- dd_value++;
+ instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2) {
+ if (instr->SzValue() == 0x1) {
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = 0.0;
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0: // vrinta - round with ties to away from zero
+ dd_value = round(dm_value);
+ break;
+ case 0x1: { // vrintn - round with ties to even
+ dd_value = nearbyint(dm_value);
+ break;
}
- break;
+ case 0x2: // vrintp - ceil
+ dd_value = ceil(dm_value);
+ break;
+ case 0x3: // vrintm - floor
+ dd_value = floor(dm_value);
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
}
- case 0x2: // vrintp - ceil
- dd_value = std::ceil(dm_value);
- break;
- case 0x3: // vrintm - floor
- dd_value = std::floor(dm_value);
- break;
- default:
- UNREACHABLE(); // Case analysis is exhaustive.
- break;
+ dd_value = canonicalizeNaN(dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float sm_value = get_float_from_s_register(m);
+ float sd_value = 0.0;
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0: // vrinta - round with ties to away from zero
+ sd_value = roundf(sm_value);
+ break;
+ case 0x1: { // vrintn - round with ties to even
+ sd_value = nearbyintf(sm_value);
+ break;
+ }
+ case 0x2: // vrintp - ceil
+ sd_value = ceilf(sm_value);
+ break;
+ case 0x3: // vrintm - floor
+ sd_value = floorf(sm_value);
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ sd_value = canonicalizeNaN(sd_value);
+ set_s_register_from_float(d, sd_value);
}
- dd_value = canonicalizeNaN(dd_value);
- set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED();
}
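
The rewrite above also drops the hand-rolled ties-to-even arithmetic in favor of nearbyint()/nearbyintf(), which round halfway cases to the even neighbor under the default FE_TONEAREST environment, while round()/roundf() keep the vrinta ties-away-from-zero behavior. A small self-contained comparison, assuming the default rounding mode:

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      std::fesetround(FE_TONEAREST);  // the default; set here for clarity
      for (double value : {0.5, 1.5, 2.5, -0.5}) {
        std::printf("%5.1f  round=%5.1f  nearbyint=%5.1f\n", value,
                    std::round(value), std::nearbyint(value));
      }
      // round:     1.0  2.0  3.0  -1.0   (ties away from zero, vrinta)
      // nearbyint: 0.0  2.0  2.0  -0.0   (ties to even, vrintn)
      return 0;
    }
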
diff --git a/chromium/v8/src/arm/simulator-arm.h b/chromium/v8/src/arm/simulator-arm.h
index a972a77d411..6567607bb8a 100644
--- a/chromium/v8/src/arm/simulator-arm.h
+++ b/chromium/v8/src/arm/simulator-arm.h
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
@@ -33,9 +33,10 @@ typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+ p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
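
The signature changes above belong to a repo-wide refactoring that threads the Isolate explicitly through call sites instead of letting callees resolve it from thread-local storage via Isolate::Current(). A minimal sketch of the pattern, with illustrative names rather than V8's:

    #include <cstdint>

    struct Isolate;  // opaque placeholder for v8::internal::Isolate

    // Before: the callee performed a hidden TLS lookup on every call.
    // After: the dependency is visible in the signature and the lookup
    // cost disappears.
    uintptr_t RegisterCTryCatch(Isolate* isolate,
                                uintptr_t try_catch_address) {
      (void)isolate;  // the native path ignores it, like V8's USE()
      return try_catch_address;
    }

    int main() {
      Isolate* isolate = nullptr;  // a real caller passes its Isolate*
      return static_cast<int>(RegisterCTryCatch(isolate, 0));
    }
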
@@ -48,14 +49,19 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
@@ -343,7 +349,7 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(
- void* external_function,
+ Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -425,17 +431,17 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_FP_INT(entry, p0, p1) \
- Simulator::current(Isolate::Current())->CallFPReturnsInt( \
- FUNCTION_ADDR(entry), p0, p1)
+#define CALL_GENERATED_FP_INT(isolate, entry, p0, p1) \
+ Simulator::current(isolate)->CallFPReturnsInt(FUNCTION_ADDR(entry), p0, p1)
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate) \
+ ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -449,17 +455,19 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_ARM_SIMULATOR_ARM_H_
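
Note how simulator-arm.h defines each CALL_GENERATED_* macro twice, once per side of USE_SIMULATOR, so call sites are oblivious to whether generated code runs natively or under the simulator. A toy rendering of the same trick, with simplified names and signatures that are not V8's:

    #include <cstdio>

    #if defined(USE_SIMULATOR)
    // Simulator build: route the call through a simulator object.
    #define CALL_GENERATED_CODE(isolate, entry, p0) \
      Simulator::current(isolate)->Call(entry, p0)
    #else
    // Native build: just call the entry point directly.
    #define CALL_GENERATED_CODE(isolate, entry, p0) (entry(p0))
    #endif

    static int Twice(int x) { return 2 * x; }

    int main() {
      void* isolate = nullptr;  // a real caller passes an Isolate*
      (void)isolate;            // unused after the native macro expansion
      std::printf("%d\n", CALL_GENERATED_CODE(isolate, Twice, 21));  // 42
      return 0;
    }
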
diff --git a/chromium/v8/src/arm64/assembler-arm64-inl.h b/chromium/v8/src/arm64/assembler-arm64-inl.h
index f02207f5497..d7769791ef9 100644
--- a/chromium/v8/src/arm64/assembler-arm64-inl.h
+++ b/chromium/v8/src/arm64/assembler-arm64-inl.h
@@ -31,7 +31,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -41,7 +42,7 @@ void RelocInfo::set_target_address(Address target,
}
-inline unsigned CPURegister::code() const {
+inline int CPURegister::code() const {
DCHECK(IsValid());
return reg_code;
}
@@ -54,12 +55,12 @@ inline CPURegister::RegisterType CPURegister::type() const {
inline RegList CPURegister::Bit() const {
- DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte));
+ DCHECK(static_cast<size_t>(reg_code) < (sizeof(RegList) * kBitsPerByte));
return IsValid() ? 1UL << reg_code : 0;
}
-inline unsigned CPURegister::SizeInBits() const {
+inline int CPURegister::SizeInBits() const {
DCHECK(IsValid());
return reg_size;
}
@@ -648,24 +649,24 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target) {
+ Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
+ // Assembler::FlushICache(isolate, pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -674,12 +675,11 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
-void Assembler::set_target_address_at(Address pc,
- Code* code,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -725,7 +725,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -832,7 +832,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -848,7 +848,7 @@ void RelocInfo::WipeOut() {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -1259,6 +1259,7 @@ void Assembler::ClearRecordedAstId() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
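
The comment preserved in set_target_address_at() above is the key subtlety: on this port the call target lives in a constant-pool data slot reached via "ldr ip, [pc, #...]", so rewriting the slot is an ordinary data store, and only genuine instruction rewrites require an i-cache flush. A hedged sketch of that rule, assuming a GCC/Clang toolchain for the cache-flush builtin:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Cache flush via a compiler builtin (GCC/Clang assumption).
    static void FlushInstructionCache(void* start, size_t size) {
      char* begin = static_cast<char*>(start);
      __builtin___clear_cache(begin, begin + size);
    }

    // Constant-pool slot: data only, the load instruction is unchanged,
    // so no flush is needed.
    void PatchConstantPoolEntry(uintptr_t* slot, uintptr_t new_target) {
      *slot = new_target;
    }

    // Real instruction rewrite: the i-cache must be invalidated.
    void PatchInstruction(uint32_t* pc, uint32_t new_instruction) {
      std::memcpy(pc, &new_instruction, sizeof(new_instruction));
      FlushInstructionCache(pc, sizeof(new_instruction));
    }

    int main() {
      uintptr_t slot = 0;
      PatchConstantPoolEntry(&slot, 0x1234);  // plain store, no flush
      return slot == 0x1234 ? 0 : 1;
    }
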
diff --git a/chromium/v8/src/arm64/assembler-arm64.cc b/chromium/v8/src/arm64/assembler-arm64.cc
index 37a2f5a29d0..ea7a732f8a9 100644
--- a/chromium/v8/src/arm64/assembler-arm64.cc
+++ b/chromium/v8/src/arm64/assembler-arm64.cc
@@ -35,6 +35,7 @@
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
@@ -109,17 +110,17 @@ void CPURegList::RemoveCalleeSaved() {
}
-CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+CPURegList CPURegList::GetCalleeSaved(int size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
}
-CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+CPURegList CPURegList::GetCalleeSavedFP(int size) {
return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}
-CPURegList CPURegList::GetCallerSaved(unsigned size) {
+CPURegList CPURegList::GetCallerSaved(int size) {
// Registers x0-x18 and lr (x30) are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
list.Combine(lr);
@@ -127,7 +128,7 @@ CPURegList CPURegList::GetCallerSaved(unsigned size) {
}
-CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+CPURegList CPURegList::GetCallerSavedFP(int size) {
// Registers d0-d7 and d16-d31 are caller-saved.
CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
@@ -192,8 +193,11 @@ bool RelocInfo::IsInConstantPool() {
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ Register candidate = Register::from_code(code);
if (regs.IncludesAliasOf(candidate)) continue;
return candidate;
}
@@ -507,7 +511,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
}
assm_->dc64(data);
}
@@ -523,7 +527,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
assm_->dc64(unique_it->first);
}
unique_entries_.clear();
@@ -585,6 +589,7 @@ void Assembler::GetCode(CodeDesc* desc) {
static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
}
@@ -653,22 +658,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
- prev_link->SetImmPCOffsetTarget(next_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), next_link);
} else if (label_veneer != NULL) {
// Use the veneer for all previous links in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
- link->SetImmPCOffsetTarget(label_veneer);
+ link->SetImmPCOffsetTarget(isolate(), label_veneer);
link = next_link;
}
} else {
@@ -739,10 +744,11 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
- PatchingAssembler patcher(link, 2);
+ PatchingAssembler patcher(isolate(), link, 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
- link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ link->SetImmPCOffsetTarget(isolate(),
+ reinterpret_cast<Instruction*>(pc_));
}
// Link the label to the previous link in the chain.
@@ -1275,10 +1281,8 @@ void Assembler::rorv(const Register& rd,
// Bitfield operations.
-void Assembler::bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::bfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | BFM | N |
@@ -1288,10 +1292,8 @@ void Assembler::bfm(const Register& rd,
}
-void Assembler::sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.Is64Bits() || rn.Is32Bits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | SBFM | N |
@@ -1301,10 +1303,8 @@ void Assembler::sbfm(const Register& rd,
}
-void Assembler::ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | UBFM | N |
@@ -1314,10 +1314,8 @@ void Assembler::ubfm(const Register& rd,
}
-void Assembler::extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb) {
+void Assembler::extr(const Register& rd, const Register& rn, const Register& rm,
+ int lsb) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
@@ -2833,6 +2831,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -2870,9 +2869,9 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL)) ||
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
@@ -2899,8 +2898,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(
- reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
+ rmode, RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2989,9 +2988,8 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
- RelocInfo rinfo(buffer_ + location_offset,
- RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
- NULL);
+ RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
+ static_cast<intptr_t>(size), NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -3033,7 +3031,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(veneer);
+ branch->SetImmPCOffsetTarget(isolate(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
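
GetAllocatableRegisterThatIsNotOneOf() above now consults a central RegisterConfiguration table instead of the per-class allocation-index ranges that this patch deletes from the register structs below. A simplified, self-contained model of the table-driven lookup; the types are stand-ins, not V8's:

    #include <cstdio>

    struct RegisterConfigSketch {
      const int* allocatable_codes;
      int num_allocatable;
      int GetAllocatableCode(int i) const { return allocatable_codes[i]; }
    };

    // Return the first allocatable register code not set in used_mask.
    int FirstAllocatableNotIn(const RegisterConfigSketch& config,
                              unsigned long long used_mask) {
      for (int i = 0; i < config.num_allocatable; ++i) {
        int code = config.GetAllocatableCode(i);
        if (used_mask & (1ULL << code)) continue;  // already taken
        return code;
      }
      return -1;  // exhausted
    }

    int main() {
      // An allocatable set with a gap, like the x16/x17 hole on arm64.
      static const int codes[] = {0, 1, 2, 3, 18, 19};
      RegisterConfigSketch config{codes, 6};
      std::printf("%d\n", FirstAllocatableNotIn(config, 0x3));  // 2
      return 0;
    }
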
diff --git a/chromium/v8/src/arm64/assembler-arm64.h b/chromium/v8/src/arm64/assembler-arm64.h
index f20be8315e2..5854704b682 100644
--- a/chromium/v8/src/arm64/assembler-arm64.h
+++ b/chromium/v8/src/arm64/assembler-arm64.h
@@ -12,7 +12,6 @@
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/globals.h"
#include "src/utils.h"
@@ -23,12 +22,36 @@ namespace internal {
// -----------------------------------------------------------------------------
// Registers.
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
+// clang-format off
+#define GENERAL_REGISTER_CODE_LIST(R) \
+ R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+ R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+ R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
+ R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(R) \
+ R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
+ R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
+ R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
+
+#define DOUBLE_REGISTERS(R) \
+ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
+ R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
+ R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
+ R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
+ R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
+ R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
+ R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) R(d24) \
+ R(d25) R(d26) R(d27) R(d28)
+// clang-format on
static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
@@ -40,6 +63,14 @@ struct FPRegister;
struct CPURegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
@@ -49,15 +80,15 @@ struct CPURegister {
kNoRegister
};
- static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
+ static CPURegister Create(int code, int size, RegisterType type) {
CPURegister r = {code, size, type};
return r;
}
- unsigned code() const;
+ int code() const;
RegisterType type() const;
RegList Bit() const;
- unsigned SizeInBits() const;
+ int SizeInBits() const;
int SizeInBytes() const;
bool Is32Bits() const;
bool Is64Bits() const;
@@ -86,14 +117,14 @@ struct CPURegister {
bool is(const CPURegister& other) const { return Is(other); }
bool is_valid() const { return IsValid(); }
- unsigned reg_code;
- unsigned reg_size;
+ int reg_code;
+ int reg_size;
RegisterType reg_type;
};
struct Register : public CPURegister {
- static Register Create(unsigned code, unsigned size) {
+ static Register Create(int code, int size) {
return Register(CPURegister::Create(code, size, CPURegister::kRegister));
}
@@ -117,6 +148,8 @@ struct Register : public CPURegister {
DCHECK(IsValidOrNone());
}
+ const char* ToString();
+ bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
return IsValidRegister();
@@ -130,6 +163,7 @@ struct Register : public CPURegister {
// A few of them may be unused for now.
static const int kNumRegisters = kNumberOfRegisters;
+ STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
static int NumRegisters() { return kNumRegisters; }
// We allow crankshaft to use the following registers:
@@ -146,70 +180,6 @@ struct Register : public CPURegister {
// - "low range"
// - "high range"
// - "context"
- static const unsigned kAllocatableLowRangeBegin = 0;
- static const unsigned kAllocatableLowRangeEnd = 15;
- static const unsigned kAllocatableHighRangeBegin = 18;
- static const unsigned kAllocatableHighRangeEnd = 24;
- static const unsigned kAllocatableContext = 27;
-
- // Gap between low and high ranges.
- static const int kAllocatableRangeGapSize =
- (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
-
- static const int kMaxNumAllocatableRegisters =
- (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
- (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // Return true if the register is one that crankshaft can allocate.
- bool IsAllocatable() const {
- return ((reg_code == kAllocatableContext) ||
- (reg_code <= kAllocatableLowRangeEnd) ||
- ((reg_code >= kAllocatableHighRangeBegin) &&
- (reg_code <= kAllocatableHighRangeEnd)));
- }
-
- static Register FromAllocationIndex(unsigned index) {
- DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
- // cp is the last allocatable register.
- if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
- return from_code(kAllocatableContext);
- }
-
- // Handle low and high ranges.
- return (index <= kAllocatableLowRangeEnd)
- ? from_code(index)
- : from_code(index + kAllocatableRangeGapSize);
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
- DCHECK((kAllocatableLowRangeBegin == 0) &&
- (kAllocatableLowRangeEnd == 15) &&
- (kAllocatableHighRangeBegin == 18) &&
- (kAllocatableHighRangeEnd == 24) &&
- (kAllocatableContext == 27));
- const char* const names[] = {
- "x0", "x1", "x2", "x3", "x4",
- "x5", "x6", "x7", "x8", "x9",
- "x10", "x11", "x12", "x13", "x14",
- "x15", "x18", "x19", "x20", "x21",
- "x22", "x23", "x24", "x27",
- };
- return names[index];
- }
-
- static int ToAllocationIndex(Register reg) {
- DCHECK(reg.IsAllocatable());
- unsigned code = reg.code();
- if (code == kAllocatableContext) {
- return NumAllocatableRegisters() - 1;
- }
-
- return (code <= kAllocatableLowRangeEnd)
- ? code
- : code - kAllocatableRangeGapSize;
- }
static Register from_code(int code) {
// Always return an X register.
@@ -221,7 +191,15 @@ struct Register : public CPURegister {
struct FPRegister : public CPURegister {
- static FPRegister Create(unsigned code, unsigned size) {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static FPRegister Create(int code, int size) {
return FPRegister(
CPURegister::Create(code, size, CPURegister::kFPRegister));
}
@@ -246,6 +224,8 @@ struct FPRegister : public CPURegister {
DCHECK(IsValidOrNone());
}
+ const char* ToString();
+ bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
@@ -256,69 +236,12 @@ struct FPRegister : public CPURegister {
// Start of V8 compatibility section ---------------------
static const int kMaxNumRegisters = kNumberOfFPRegisters;
+ STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
// Crankshaft can use all the FP registers except:
// - d15 which is used to keep the 0 double value
// - d30 which is used in crankshaft as a double scratch register
// - d31 which is used in the MacroAssembler as a double scratch register
- static const unsigned kAllocatableLowRangeBegin = 0;
- static const unsigned kAllocatableLowRangeEnd = 14;
- static const unsigned kAllocatableHighRangeBegin = 16;
- static const unsigned kAllocatableHighRangeEnd = 28;
-
- static const RegList kAllocatableFPRegisters = 0x1fff7fff;
-
- // Gap between low and high ranges.
- static const int kAllocatableRangeGapSize =
- (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
-
- static const int kMaxNumAllocatableRegisters =
- (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
- (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // TODO(turbofan): Proper float32 support.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
- // Return true if the register is one that crankshaft can allocate.
- bool IsAllocatable() const {
- return (Bit() & kAllocatableFPRegisters) != 0;
- }
-
- static FPRegister FromAllocationIndex(unsigned int index) {
- DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
-
- return (index <= kAllocatableLowRangeEnd)
- ? from_code(index)
- : from_code(index + kAllocatableRangeGapSize);
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
- DCHECK((kAllocatableLowRangeBegin == 0) &&
- (kAllocatableLowRangeEnd == 14) &&
- (kAllocatableHighRangeBegin == 16) &&
- (kAllocatableHighRangeEnd == 28));
- const char* const names[] = {
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28"
- };
- return names[index];
- }
-
- static int ToAllocationIndex(FPRegister reg) {
- DCHECK(reg.IsAllocatable());
- unsigned code = reg.code();
-
- return (code <= kAllocatableLowRangeEnd)
- ? code
- : code - kAllocatableRangeGapSize;
- }
-
static FPRegister from_code(int code) {
// Always return a D register.
return FPRegister::Create(code, kDRegSizeInBits);
@@ -361,7 +284,7 @@ INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
kWRegSizeInBits, CPURegister::kRegister); \
INITIALIZE_REGISTER(Register, x##N, N, \
kXRegSizeInBits, CPURegister::kRegister);
-REGISTER_CODE_LIST(DEFINE_REGISTERS)
+GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
@@ -374,7 +297,7 @@ INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
kSRegSizeInBits, CPURegister::kFPRegister); \
INITIALIZE_REGISTER(FPRegister, d##N, N, \
kDRegSizeInBits, CPURegister::kFPRegister);
-REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
+GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS
#undef INITIALIZE_REGISTER
@@ -461,13 +384,13 @@ class CPURegList {
DCHECK(IsValid());
}
- CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ CPURegList(CPURegister::RegisterType type, int size, RegList list)
: list_(list), size_(size), type_(type) {
DCHECK(IsValid());
}
- CPURegList(CPURegister::RegisterType type, unsigned size,
- unsigned first_reg, unsigned last_reg)
+ CPURegList(CPURegister::RegisterType type, int size, int first_reg,
+ int last_reg)
: size_(size), type_(type) {
DCHECK(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
@@ -524,12 +447,12 @@ class CPURegList {
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
- static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
- static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
+ static CPURegList GetCalleeSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCalleeSavedFP(int size = kDRegSizeInBits);
// AAPCS64 caller-saved registers. Note that this includes lr.
- static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
- static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
+ static CPURegList GetCallerSaved(int size = kXRegSizeInBits);
+ static CPURegList GetCallerSavedFP(int size = kDRegSizeInBits);
// Registers saved as safepoints.
static CPURegList GetSafepointSavedRegisters();
@@ -557,25 +480,25 @@ class CPURegList {
return CountSetBits(list_, kRegListSizeInBits);
}
- unsigned RegisterSizeInBits() const {
+ int RegisterSizeInBits() const {
DCHECK(IsValid());
return size_;
}
- unsigned RegisterSizeInBytes() const {
+ int RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
DCHECK((size_in_bits % kBitsPerByte) == 0);
return size_in_bits / kBitsPerByte;
}
- unsigned TotalSizeInBytes() const {
+ int TotalSizeInBytes() const {
DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
private:
RegList list_;
- unsigned size_;
+ int size_;
CPURegister::RegisterType type_;
bool IsValid() const {
@@ -876,14 +799,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
@@ -896,11 +817,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target);
+ Isolate* isolate, Address constant_pool_entry, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@@ -1011,7 +933,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the emission of a constant pool.
//
@@ -1197,39 +1119,24 @@ class Assembler : public AssemblerBase {
// Bitfield instructions.
// Bitfield move.
- void bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
+ void bfm(const Register& rd, const Register& rn, int immr, int imms);
// Signed bitfield move.
- void sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
+ void sbfm(const Register& rd, const Register& rn, int immr, int imms);
// Unsigned bitfield move.
- void ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms);
+ void ubfm(const Register& rd, const Register& rn, int immr, int imms);
// Bfm aliases.
// Bitfield insert.
- void bfi(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void bfi(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Bitfield extract and insert low.
- void bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void bfxil(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
@@ -1237,26 +1144,20 @@ class Assembler : public AssemblerBase {
// Sbfm aliases.
// Arithmetic shift right.
- void asr(const Register& rd, const Register& rn, unsigned shift) {
+ void asr(const Register& rd, const Register& rn, int shift) {
DCHECK(shift < rd.SizeInBits());
sbfm(rd, rn, shift, rd.SizeInBits() - 1);
}
// Signed bitfield insert in zero.
- void sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void sbfiz(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Signed bitfield extract.
- void sbfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void sbfx(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
@@ -1279,33 +1180,27 @@ class Assembler : public AssemblerBase {
// Ubfm aliases.
// Logical shift left.
- void lsl(const Register& rd, const Register& rn, unsigned shift) {
- unsigned reg_size = rd.SizeInBits();
+ void lsl(const Register& rd, const Register& rn, int shift) {
+ int reg_size = rd.SizeInBits();
DCHECK(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
- void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ void lsr(const Register& rd, const Register& rn, int shift) {
DCHECK(shift < rd.SizeInBits());
ubfm(rd, rn, shift, rd.SizeInBits() - 1);
}
// Unsigned bitfield insert in zero.
- void ubfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void ubfiz(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Unsigned bitfield extract.
- void ubfx(const Register& rd,
- const Register& rn,
- unsigned lsb,
- unsigned width) {
+ void ubfx(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
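
All of the shift and bitfield aliases above bottom out in the three *bfm instructions. As a worked check of the least obvious mapping, here is a small self-verifying model of why lsl(rd, rn, shift) can be encoded as ubfm(rd, rn, (size - shift) % size, size - shift - 1); it assumes 32-bit registers and models only the "insert in zero" form of UBFM:

    #include <cassert>
    #include <cstdint>

    // UBFM with imms < immr (or immr == 0) inserts a field of width
    // imms + 1 at bit position (32 - immr) % 32, zeroing everything else.
    uint32_t UbfmInsertInZero32(uint32_t rn, int immr, int imms) {
      assert(imms < immr || immr == 0);
      int lsb = (32 - immr) % 32;    // destination bit position
      int width = imms + 1;          // field width
      uint32_t field = (width == 32) ? rn : (rn & ((1u << width) - 1u));
      return field << lsb;
    }

    uint32_t LslViaUbfm(uint32_t rn, int shift) {
      return UbfmInsertInZero32(rn, (32 - shift) % 32, 31 - shift);
    }

    int main() {
      for (int s = 0; s < 32; ++s)
        assert(LslViaUbfm(0xDEADBEEFu, s) == (0xDEADBEEFu << s));
      return 0;
    }
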
@@ -1327,10 +1222,8 @@ class Assembler : public AssemblerBase {
}
// Extract.
- void extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb);
+ void extr(const Register& rd, const Register& rn, const Register& rm,
+ int lsb);
// Conditional select: rd = cond ? rn : rm.
void csel(const Register& rd,
@@ -2256,15 +2149,14 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
- PatchingAssembler(Instruction* start, unsigned count)
- : Assembler(NULL,
- reinterpret_cast<byte*>(start),
- count * kInstructionSize + kGap) {
+ PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
+ : Assembler(isolate, reinterpret_cast<byte*>(start),
+ count * kInstructionSize + kGap) {
StartBlockPools();
}
- PatchingAssembler(byte* start, unsigned count)
- : Assembler(NULL, start, count * kInstructionSize + kGap) {
+ PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
+ : Assembler(isolate, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
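
The PatchingAssembler comment above describes a crash-early contract: the buffer is sized for exactly `count` instructions, so emitting more (or, at destruction, fewer) fails loudly instead of silently growing and relocating the buffer. An illustrative reduction of that contract; the class is hypothetical, not V8's:

    #include <cassert>
    #include <cstdint>

    class FixedPatchBuffer {
     public:
      FixedPatchBuffer(uint32_t* start, unsigned count)
          : pc_(start), end_(start + count) {}
      void Emit(uint32_t instruction) {
        assert(pc_ < end_ && "more instructions than reserved");
        *pc_++ = instruction;
      }
      ~FixedPatchBuffer() {
        assert(pc_ == end_ && "fewer instructions than reserved");
      }

     private:
      uint32_t* pc_;
      uint32_t* const end_;
    };

    int main() {
      uint32_t buffer[2] = {0, 0};
      FixedPatchBuffer patcher(buffer, 2);
      patcher.Emit(0xD503201F);  // AArch64 NOP
      patcher.Emit(0xD503201F);  // exactly `count` emissions, as required
      return 0;
    }
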
@@ -2279,7 +2171,7 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- Assembler::FlushICacheWithoutIsolate(buffer_, length);
+ Assembler::FlushICache(isolate(), buffer_, length);
}
// See definition of PatchAdrFar() for details.
@@ -2296,6 +2188,7 @@ class EnsureSpace BASE_EMBEDDED {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_ASSEMBLER_ARM64_H_
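
The register tables introduced above are the classic X-macro idiom: one list macro expanded several times, so that enums, name tables, and initializers can never drift out of sync. A toy demonstration with made-up register names:

    #include <cstdio>

    #define TOY_REGISTERS(R) R(r0) R(r1) R(r2)

    enum class ToyCode {
    #define REGISTER_CODE(name) kCode_##name,
      TOY_REGISTERS(REGISTER_CODE)
    #undef REGISTER_CODE
      kAfterLast  // doubles as the register count, as in the patch
    };

    static const char* const kToyNames[] = {
    #define REGISTER_NAME(name) #name,
        TOY_REGISTERS(REGISTER_NAME)
    #undef REGISTER_NAME
    };

    int main() {
      for (int i = 0; i < static_cast<int>(ToyCode::kAfterLast); ++i)
        std::printf("%d -> %s\n", i, kToyNames[i]);
      return 0;
    }
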
diff --git a/chromium/v8/src/arm64/builtins-arm64.cc b/chromium/v8/src/arm64/builtins-arm64.cc
index 43311980176..b6bae4ad0ed 100644
--- a/chromium/v8/src/arm64/builtins-arm64.cc
+++ b/chromium/v8/src/arm64/builtins-arm64.cc
@@ -20,27 +20,16 @@ namespace internal {
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
- __ Ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ Ldr(result, ContextMemOperand(result,
- Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
@@ -49,11 +38,11 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- x0 : number of arguments excluding receiver
- // -- x1 : called function (only guaranteed when
- // extra_args requires it)
+ // -- x1 : target
+ // -- x3 : new target
// -- sp[0] : last argument
// -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == x0)
+ // -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(x1);
@@ -62,21 +51,31 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ Push(x1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(x1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(x3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(x1, x3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects x0 to contain the number of arguments
// including the receiver and the extra arguments.
__ Add(x0, x0, num_extra_args + 1);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
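
The rewritten adaptor above replaces the old two-way if with an exhaustive switch over BuiltinExtraArguments. The same dispatch rendered as ordinary C++, with the stack and the x1/x3 registers reduced to ints (purely illustrative):

    #include <vector>

    enum class BuiltinExtraArguments {
      kNone, kTarget, kNewTarget, kTargetAndNewTarget
    };

    // Push the requested extras and report how many were pushed, mirroring
    // the num_extra_args bookkeeping in the generated code.
    int PushExtraArguments(BuiltinExtraArguments extra, int target,
                           int new_target, std::vector<int>* stack) {
      int num_extra_args = 0;
      switch (extra) {
        case BuiltinExtraArguments::kTarget:
          stack->push_back(target);
          ++num_extra_args;
          break;
        case BuiltinExtraArguments::kNewTarget:
          stack->push_back(new_target);
          ++num_extra_args;
          break;
        case BuiltinExtraArguments::kTargetAndNewTarget:
          stack->push_back(target);
          stack->push_back(new_target);
          num_extra_args += 2;
          break;
        case BuiltinExtraArguments::kNone:
          break;
      }
      return num_extra_args;
    }

    int main() {
      std::vector<int> stack;
      return PushExtraArguments(BuiltinExtraArguments::kTargetAndNewTarget,
                                /*target=*/1, /*new_target=*/3, &stack);  // 2
    }
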
@@ -139,6 +138,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_NumberConstructor");
+
+ // 1. Load the first argument into x0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Cbz(x0, &no_arguments);
+ __ Sub(x0, x0, 1);
+ __ Drop(x0);
+ __ Ldr(x0, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ }
+
+ // 2a. Convert first argument to number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in x0).
+ __ Bind(&no_arguments);
+ __ Drop(1);
+ __ Ret();
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x3 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_NumberConstructor_ConstructStub");
+
+ // 1. Make sure we operate in the context of the called function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into x2 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Cbz(x0, &no_arguments);
+ __ Sub(x0, x0, 1);
+ __ Drop(x0);
+ __ Ldr(x2, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ B(&done);
+ __ Bind(&no_arguments);
+ __ Drop(1);
+ __ Mov(x2, Smi::FromInt(0));
+ __ Bind(&done);
+ }
+
+ // 3. Make sure x2 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(x2, &done_convert);
+ __ JumpIfObjectType(x2, x4, x4, HEAP_NUMBER_TYPE, &done_convert, eq);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ Move(x0, x2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(x2, x0);
+ __ Pop(x3, x1);
+ }
+ __ Bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Cmp(x1, x3);
+ __ B(ne, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x2, x1, x3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(x2);
+ }
+ __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -190,7 +290,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ Bind(&symbol_descriptive_string);
{
__ Push(x0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -200,13 +300,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
+ // -- x3 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_StringConstructor_ConstructStub");
- // 1. Load the first argument into x2 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into x2 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -221,71 +325,62 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Bind(&done);
}
- // 2. Make sure x2 is a string.
+ // 3. Make sure x2 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(x2, &convert);
- __ JumpIfObjectType(x2, x3, x3, FIRST_NONSTRING_TYPE, &done_convert, lo);
+ __ JumpIfObjectType(x2, x4, x4, FIRST_NONSTRING_TYPE, &done_convert, lo);
__ Bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(x1);
+ __ Push(x1, x3);
__ Move(x0, x2);
__ CallStub(&stub);
__ Move(x2, x0);
- __ Pop(x1);
+ __ Pop(x3, x1);
}
__ Bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- x1 : constructor function
- // -- x2 : the first argument
- // -- lr : return address
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Cmp(x1, x3);
+ __ B(ne, &new_object);
- Label allocate, done_allocate;
- __ Allocate(JSValue::kSize, x0, x3, x4, &allocate, TAG_OBJECT);
- __ Bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(x1, x3, x4);
- __ Str(x3, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
- __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ Bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x2);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(x2, x1);
- }
- __ B(&done_allocate);
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x2, x1, x3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(x2);
}
+ __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+ __ Ret();
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- x1 : target function (preserved for callee)
+ // -- x3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // - Push a copy of the function onto the stack.
- // - Push another copy as a parameter to the runtime call.
- __ Push(x1, x1);
+ // Push a copy of the target function and the new target.
+ // Push another copy as a parameter to the runtime call.
+ __ Push(x1, x3, x1);
__ CallRuntime(function_id, 1);
- // - Restore receiver.
- __ Pop(x1);
+ // Restore target function and new target.
+ __ Pop(x3, x1);
}
@@ -322,12 +417,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x2 : allocation site or undefined
- // -- x3 : original constructor
+ // -- x3 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -344,187 +440,173 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0;
Register constructor = x1;
Register allocation_site = x2;
- Register original_constructor = x3;
+ Register new_target = x3;
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ SmiTag(argc);
- __ Push(allocation_site, argc, constructor, original_constructor);
- // sp[0]: new.target
- // sp[1]: Constructor function.
- // sp[2]: number of arguments (smi-tagged)
- // sp[3]: allocation site
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ Mov(x2, Operand(debug_step_in_fp));
- __ Ldr(x2, MemOperand(x2));
- __ Cbnz(x2, &rt_call);
-
- // Fall back to runtime if the original constructor and function differ.
- __ Cmp(constructor, original_constructor);
- __ B(ne, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- Register init_map = x2;
- __ Ldr(init_map,
- FieldMemOperand(constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(init_map, &rt_call);
- __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the initial
- // map's instance type would be JS_FUNCTION_TYPE.
- __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
- __ B(eq, &rt_call);
-
- Register constructon_count = x14;
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 =
- FieldMemOperand(init_map, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ Ldr(x4, bit_field3);
- __ DecodeField<Map::Counter>(constructon_count, x4);
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(lt, &allocate);
- // Decrease generous allocation count.
- __ Subs(x4, x4, Operand(1 << Map::Counter::kShift));
- __ Str(x4, bit_field3);
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(ne, &allocate);
-
- // Push the constructor and map to the stack, and the constructor again
- // as argument to the runtime call.
- __ Push(constructor, init_map, constructor);
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ Pop(init_map, constructor);
- __ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
- __ Bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- Label rt_call_reload_new_target;
- Register obj_size = x3;
- Register new_obj = x4;
- __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
- __ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
- SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // NB. the object pointer is not tagged, so MemOperand is used.
- Register empty = x5;
- __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
- __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
- STATIC_ASSERT(JSObject::kElementsOffset ==
- (JSObject::kPropertiesOffset + kPointerSize));
- __ Stp(empty, empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
-
- Register first_prop = x5;
- __ Add(first_prop, new_obj, JSObject::kHeaderSize);
-
- // Fill all of the in-object properties with the appropriate filler.
- Register filler = x7;
- __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
-
- // Obtain number of pre-allocated property fields and in-object
- // properties.
- Register unused_props = x10;
- Register inobject_props = x11;
- Register inst_sizes_or_attrs = x11;
- Register prealloc_fields = x10;
- __ Ldr(inst_sizes_or_attrs,
- FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
- __ Ubfx(unused_props, inst_sizes_or_attrs,
- Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
- __ Ldr(inst_sizes_or_attrs,
- FieldMemOperand(init_map, Map::kInstanceSizesOffset));
- __ Ubfx(
- inobject_props, inst_sizes_or_attrs,
- Map::kInObjectPropertiesOrConstructorFunctionIndexByte * kBitsPerByte,
- kBitsPerByte);
- __ Sub(prealloc_fields, inobject_props, unused_props);
-
- // Calculate number of property fields in the object.
- Register prop_fields = x6;
- __ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(lt, &no_inobject_slack_tracking);
- constructon_count = NoReg;
-
- // Fill the pre-allocated fields with undef.
- __ FillFields(first_prop, prealloc_fields, filler);
-
- // Update first_prop register to be the offset of the first field after
- // pre-allocated fields.
- __ Add(first_prop, first_prop,
- Operand(prealloc_fields, LSL, kPointerSizeLog2));
-
- if (FLAG_debug_code) {
- Register obj_end = x14;
- __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
- __ Cmp(first_prop, obj_end);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ __ Push(allocation_site, argc);
+
+ if (create_implicit_receiver) {
+ // sp[0]: new.target
+ // sp[1]: Constructor function.
+ // sp[2]: number of arguments (smi-tagged)
+ // sp[3]: allocation site
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ JumpIfNotObjectType(new_target, x10, x11, JS_FUNCTION_TYPE,
+ &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(new_target,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ Ldr(x10,
+ FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset));
+ __ Cmp(constructor, x10);
+ __ B(ne, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x10;
+ Register new_obj = x4;
+ Register next_obj = obj_size; // May overlap.
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ __ Allocate(obj_size, new_obj, next_obj, x11, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // NB. the object pointer is not tagged, so MemOperand is used.
+ Register write_address = x5;
+ Register empty = x7;
+ __ Mov(write_address, new_obj);
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ Str(init_map, MemOperand(write_address, kPointerSize, PostIndex));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ Stp(empty, empty,
+ MemOperand(write_address, 2 * kPointerSize, PostIndex));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register filler = x7;
+ __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ Register construction_count = x14;
+ MemOperand bit_field3 =
+ FieldMemOperand(init_map, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ Ldr(x11, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(construction_count, x11);
+ __ Cmp(construction_count, Operand(Map::kSlackTrackingCounterEnd));
+ __ B(lt, &no_inobject_slack_tracking);
+ // Decrease generous allocation count.
+ __ Subs(x11, x11, Operand(1 << Map::ConstructionCounter::kShift));
+ __ Str(x11, bit_field3);
+
+ // Allocate object with a slack.
+ Register unused_props = x11;
+ __ Ldr(unused_props,
+ FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
+ __ Ubfx(unused_props, unused_props,
+ Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
+
+ Register end_of_pre_allocated = x11;
+ __ Sub(end_of_pre_allocated, next_obj,
+ Operand(unused_props, LSL, kPointerSizeLog2));
+ unused_props = NoReg;
+
+ if (FLAG_debug_code) {
+ __ Cmp(write_address, end_of_pre_allocated);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+
+ // Fill the pre-allocated fields with undef.
+ __ InitializeFieldsWithFiller(write_address, end_of_pre_allocated,
+ filler);
+
+ // Fill the remaining fields with one pointer filler map.
+ __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(write_address, next_obj, filler);
+
+ __ Cmp(construction_count, Operand(Map::kSlackTrackingCounterEnd));
+ __ B(ne, &allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(constructor, new_target, new_obj, init_map);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(new_obj, new_target, constructor);
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- // Fill the remaining fields with one pointer filler map.
- __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
- __ Sub(prop_fields, prop_fields, prealloc_fields);
+ __ InitializeFieldsWithFiller(write_address, next_obj, filler);
- __ bind(&no_inobject_slack_tracking);
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
}
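The slack-tracking branch above is dense; its net effect is captured by the following minimal C++ sketch. The struct fields are hypothetical stand-ins for the map state the stub decodes, not V8's real layout.

```cpp
#include <vector>

// Hypothetical stand-ins for the map state the stub decodes; not V8's types.
struct MapInfo {
  int construction_counter;  // > 0 while slack tracking is still active
  int instance_slots;        // total in-object property slots
  int unused_slots;          // pre-allocated slots not yet observed in use
};

void InitializeFields(std::vector<void*>& fields, const MapInfo& m,
                      void* undefined_value, void* one_pointer_filler) {
  int pre_allocated = m.instance_slots - m.unused_slots;
  for (int i = 0; i < m.instance_slots; ++i) {
    // While tracking, the unused tail gets the one-pointer filler map so the
    // heap stays iterable but the slots can still be trimmed later;
    // everything else starts out as undefined.
    bool fill_with_filler = m.construction_counter > 0 && i >= pre_allocated;
    fields[i] = fill_with_filler ? one_pointer_filler : undefined_value;
  }
}
```

Filling the unused tail with a filler map rather than undefined is what lets kFinalizeInstanceSize shrink the object later without leaving unparseable heap words behind.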
- // Fill all of the property fields with undef.
- __ FillFields(first_prop, prop_fields, filler);
- first_prop = NoReg;
- prop_fields = NoReg;
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Add(new_obj, new_obj, kHeapObjectTag);
-
- // Continue with JSObject being successfully allocated.
- __ B(&allocated);
-
- // Reload the original constructor and fall-through.
- __ Bind(&rt_call_reload_new_target);
- __ Peek(x3, 0 * kXRegSize);
+ // Allocate the new receiver object using the runtime call.
+ // x1: constructor function
+ // x3: new target
+ __ Bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(constructor, new_target, constructor, new_target);
+ __ CallRuntime(Runtime::kNewObject);
+ __ Mov(x4, x0);
+ __ Pop(new_target, constructor);
+
+ // Receiver for constructor call allocated.
+ // x1: constructor function
+ // x3: new target
+ // x4: JSObject
+ __ Bind(&allocated);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: number of arguments (smi-tagged)
+ __ Peek(argc, 0); // Load number of arguments.
}
- // Allocate the new receiver object using the runtime call.
- // x1: constructor function
- // x3: original constructor
- __ Bind(&rt_call);
- __ Push(constructor, original_constructor); // arguments 1-2
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Mov(x4, x0);
-
- // Receiver for constructor call allocated.
- // x4: JSObject
- __ Bind(&allocated);
-
- // Restore the parameters.
- __ Pop(original_constructor);
- __ Pop(constructor);
-
- // Reload the number of arguments from the stack.
- // Set it up in x0 for the function call below.
- // jssp[0]: number of arguments (smi-tagged)
- __ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
- __ Push(original_constructor, x4, x4);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies because
+ // we may have to return the original one, and the calling conventions
+ // dictate that the called function pops the receiver.
+ __ Push(x4, x4);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -534,19 +616,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x0: number of arguments
// x1: constructor function
// x2: address of last argument (caller sp)
+ // x3: new target
// jssp[0]: receiver
// jssp[1]: receiver
- // jssp[2]: new.target
- // jssp[3]: number of arguments (smi-tagged)
+ // jssp[2]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
- __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ __ Add(x4, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
__ B(&entry);
__ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
__ Push(x11, x10);
__ Bind(&entry);
- __ Cmp(x3, x2);
+ __ Cmp(x4, x2);
__ B(gt, &loop);
// Because we copied values 2 by 2 we may have copied one extra value.
// Drop it if that is the case.
@@ -557,6 +639,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
// x0: number of arguments
// x1: constructor function
+ // x3: new target
if (is_api_function) {
__ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
Handle<Code> code =
@@ -564,156 +647,84 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(argc);
- __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
- // jssp[1]: new.target
- // jssp[2]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // x0: result
- // jssp[0]: receiver (newly allocated object)
// jssp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(x0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ Bind(&use_receiver);
- __ Peek(x0, 0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Remove the receiver from the stack, remove caller arguments, and
- // return.
- __ Bind(&exit);
- // x0: result
- // jssp[0]: receiver (newly allocated object)
- // jssp[1]: new.target (original constructor)
- // jssp[2]: number of arguments (smi-tagged)
- __ Peek(x1, 2 * kXRegSize);
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_JS_RECEIVER_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: number of arguments (smi-tagged)
+ __ Peek(x1, 1 * kXRegSize);
+ } else {
+ __ Peek(x1, 0);
+ }
// Leave construct frame.
}
__ DropBySMI(x1);
__ Drop(1);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ }
__ Ret();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- x2 : allocation site or undefined
- // -- x3 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_JSConstructStubForDerived");
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(x2, x10);
- __ Mov(x4, x0);
- __ SmiTag(x4);
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Push(x2, x4, x3, x10);
- // sp[0]: receiver (the hole)
- // sp[1]: new.target
- // sp[2]: number of arguments
- // sp[3]: allocation site
-
- // Set up pointer to last argument.
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
-
- // Copy arguments and receiver to the expression stack.
- // Copy 2 values every loop to use ldp/stp.
- // x0: number of arguments
- // x1: constructor function
- // x2: address of last argument (caller sp)
- // jssp[0]: receiver
- // jssp[1]: new.target
- // jssp[2]: number of arguments (smi-tagged)
- // Compute the start address of the copy in x4.
- __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
- Label loop, entry, done_copying_arguments;
- __ B(&entry);
- __ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
- __ Push(x11, x10);
- __ Bind(&entry);
- __ Cmp(x4, x2);
- __ B(gt, &loop);
- // Because we copied values 2 by 2 we may have copied one extra value.
- // Drop it if that is the case.
- __ B(eq, &done_copying_arguments);
- __ Drop(1);
- __ Bind(&done_copying_arguments);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ Mov(x2, Operand(debug_step_in_fp));
- __ Ldr(x2, MemOperand(x2));
- __ Cbz(x2, &skip_step_in);
-
- __ Push(x0, x1, x1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(x1, x0);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // x0: number of arguments
- // x1: constructor function
- ParameterCount actual(x0);
- __ InvokeFunction(x1, actual, CALL_FUNCTION, NullCallWrapper());
-
-
- // Restore the context from the frame.
- // x0: result
- // jssp[0]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Load number of arguments (smi), skipping over new.target.
- __ Peek(x1, kPointerSize);
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame
- }
- __ DropBySMI(x1);
- __ Drop(1);
- __ Ret();
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -740,7 +751,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Cmp(x10, Operand(argc, LSL, kPointerSizeLog2));
}
__ B(gt, &enough_stack_space);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
// We should never return from the APPLY_OVERFLOW builtin.
if (__ emit_debug_code()) {
__ Unreachable();
@@ -860,6 +871,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// - x1: the JS function object being called.
+// - x3: the new target
// - cp: our context.
// - fp: our caller's frame pointer.
// - jssp: stack pointer.
@@ -876,6 +888,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Push(x3);
+
+ // Push zero for bytecode array offset.
+ __ Mov(x0, Operand(0));
+ __ Push(x0);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeArrayRegister.
@@ -904,7 +921,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sub(x10, jssp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -924,36 +941,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ Push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ Pop(kInterpreterBytecodeArrayRegister);
__ Bind(&ok);
}
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Sub(kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -993,36 +998,103 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ Add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ Ldr(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ Ldr(x1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+ kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ Ldr(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
+ __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip0);
}
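The tail of the helper re-creates the interpreter's dispatch sequence: fetch the byte at the target offset, use it to index the dispatch table, and jump to the handler. Roughly, as a C++ sketch with a hypothetical handler type:

```cpp
#include <cstddef>
#include <cstdint>

using BytecodeHandler = void (*)();  // hypothetical handler entry point

// Sketch of the Ldrb/Ldr/Jump sequence above: the bytecode at the target
// offset indexes a table of handler code entry points.
void DispatchToBytecode(const uint8_t* bytecode_array, size_t offset,
                        BytecodeHandler* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];  // __ Ldrb(...)
  dispatch_table[bytecode]();                 // __ Ldr(ip0, ...); __ Jump(ip0)
}
```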
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register function = x1;
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- // Preserve function. At the same time, push arguments for
- // kCompileOptimized.
- __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
- __ Push(function, function, x10);
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
- // Restore receiver.
- __ Pop(function);
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1038,16 +1110,17 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// calling through to the runtime:
// x0 - The address from which to resume execution.
// x1 - isolate
+ // x3 - new target
// lr - The return address for the JSFunction itself. It has not yet been
// preserved on the stack because the frame setup code was replaced
// with a call to this stub, to handle code ageing.
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0, x1, fp, lr);
+ __ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ Pop(lr, fp, x1, x0);
+ __ Pop(lr, fp, x3, x1, x0);
}
// The calling function has been made young again, so return to execute the
@@ -1078,17 +1151,18 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// calling through to the runtime:
// x0 - The address from which to resume execution.
// x1 - isolate
+ // x3 - new target
// lr - The return address for the JSFunction itself. It has not yet been
// preserved on the stack because the frame setup code was replaced
// with a call to this stub, to handle code ageing.
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0, x1, fp, lr);
+ __ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
- __ Pop(lr, fp, x1, x0);
+ __ Pop(lr, fp, x3, x1, x0);
// Perform prologue operations usually performed by the young code stub.
__ EmitFrameSetupForCodeAgePatching(masm);
@@ -1123,7 +1197,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// preserve the registers with parameters.
__ PushXRegList(kSafepointSavedRegisters);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ PopXRegList(kSafepointSavedRegisters);
}
@@ -1153,7 +1227,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Mov(x0, Smi::FromInt(static_cast<int>(type)));
__ Push(x0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it.
@@ -1195,6 +1269,109 @@ void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
}
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Register scratch2,
+ Label* receiver_check_failed) {
+ Register signature = scratch0;
+ Register map = scratch1;
+ Register constructor = scratch2;
+
+ // If there is no signature, return the holder.
+ __ Ldr(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ B(eq, &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ Bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, x16, x16);
+ __ Cmp(x16, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ B(ne, &next_prototype);
+ Register type = constructor;
+ __ Ldr(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ Bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ Cmp(signature, type);
+ __ B(eq, &receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, x16, x17, FUNCTION_TEMPLATE_INFO_TYPE);
+ __ B(ne, &next_prototype);
+
+ // Otherwise load the parent function template and iterate.
+ __ Ldr(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ B(&function_template_loop);
+
+ // Load the next prototype.
+ __ Bind(&next_prototype);
+ __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Stop walking if the prototype is null or is not a hidden prototype.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ B(eq, receiver_check_failed);
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldr(x16, FieldMemOperand(map, Map::kBitField3Offset));
+ __ Tst(x16, Operand(Map::IsHiddenPrototype::kMask));
+ __ B(eq, receiver_check_failed);
+ // Iterate.
+ __ B(&prototype_loop_start);
+
+ __ Bind(&receiver_check_passed);
+}
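The nested loops above are easier to follow stripped of register assignments. A hedged C++ sketch with hypothetical accessors (the real objects are tagged V8 heap values, and a constructor's function data may be something other than a FunctionTemplateInfo, which the sketch approximates with null):

```cpp
struct TemplateInfo {
  const TemplateInfo* parent;  // the parent function template, if any
};

// Hypothetical flattened view of a heap object and its map.
struct ObjectView {
  const TemplateInfo* constructor_template;  // null unless the map's
                                             // constructor is a JSFunction
  const ObjectView* prototype;               // null at the end of the chain
  bool hidden_prototype;                     // Map::IsHiddenPrototype bit
};

bool IsCompatibleReceiver(const ObjectView* receiver,
                          const TemplateInfo* signature) {
  if (signature == nullptr) return true;  // no signature: anything passes
  for (const ObjectView* obj = receiver; obj != nullptr;) {
    // Walk the chain of inheriting function templates on this constructor.
    for (const TemplateInfo* t = obj->constructor_template; t != nullptr;
         t = t->parent) {
      if (t == signature) return true;
    }
    // Continue only through hidden prototypes, as the stub does.
    const ObjectView* proto = obj->prototype;
    if (proto == nullptr || !proto->hidden_prototype) return false;
    obj = proto;
  }
  return false;
}
```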
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments excluding receiver
+ // -- x1 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[8 * (argc - 1)] : first argument
+ // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+ CompatibleReceiverCheck(masm, x2, x3, x4, x5, x6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ Ldr(x4, FieldMemOperand(x3, FunctionTemplateInfo::kCallCodeOffset));
+ __ Ldr(x4, FieldMemOperand(x4, CallHandlerInfo::kFastHandlerOffset));
+ __ Add(x4, x4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(x4);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ Bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver).
+ __ Add(x0, x0, Operand(1));
+ __ Drop(x0);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
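Note the stack arithmetic in the failure path: with x0 holding argc excluding the receiver, the receiver sits argc slots below the top, so a full unwind drops argc + 1 slots. In C++ terms (illustrative only):

```cpp
#include <cstddef>

// Slot 0 is the top of the stack; argc excludes the receiver.
inline void* ReceiverSlot(void* const* sp, size_t argc) { return sp[argc]; }
inline size_t SlotsToDrop(size_t argc) { return argc + 1; }
```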
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1202,7 +1379,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ Push(x0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1238,7 +1415,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ B(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1248,13 +1425,149 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- jssp[0] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_DatePrototype_GetField");
+
+ // 1. Pop receiver into x0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(x0);
+ __ JumpIfSmi(x0, &receiver_not_date);
+ __ JumpIfNotObjectType(x0, x1, x2, JS_DATE_TYPE, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ Ldr(x0, FieldMemOperand(x0, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ Mov(x1, ExternalReference::date_cache_stamp(masm->isolate()));
+ __ Ldr(x1, MemOperand(x1));
+ __ Ldr(x2, FieldMemOperand(x0, JSDate::kCacheStampOffset));
+ __ Cmp(x1, x2);
+ __ B(ne, &stamp_mismatch);
+ __ Ldr(x0, FieldMemOperand(
+ x0, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ Bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Mov(x1, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ Bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
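The stamp check above is a simple cache-invalidation scheme: cached date fields stay valid only while the JSDate's recorded stamp matches the isolate-wide date cache stamp, which changes when, for example, the time zone is updated. A minimal sketch with hypothetical types:

```cpp
struct DateCache {
  int stamp;  // bumped whenever cached date components become stale
};

struct JSDateView {
  int cache_stamp;         // stamp at the time the fields were computed
  double cached_field[8];  // year, month, day, ... (illustrative layout)
};

double GetCachedDateField(const JSDateView& date, const DateCache& cache,
                          int field_index,
                          double (*runtime_fallback)(const JSDateView&, int)) {
  if (date.cache_stamp == cache.stamp) {
    return date.cached_field[field_index];  // fast path: cache still valid
  }
  return runtime_fallback(date, field_index);  // stamp mismatch: recompute
}
```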
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argArray (if argc == 2)
+ // -- jssp[8] : thisArg (if argc >= 1)
+ // -- jssp[16] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
+
+ Register argc = x0;
+ Register arg_array = x0;
+ Register receiver = x1;
+ Register this_arg = x2;
+ Register undefined_value = x3;
+ Register null_value = x4;
+
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+
+ // 1. Load receiver into x1, argArray into x0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+ __ Claim(2);
+ __ Drop(argc);
+
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argArray (dummy value if argc <= 1)
+ // -- jssp[8] : thisArg (dummy value if argc == 0)
+ // -- jssp[16] : receiver
+ // -----------------------------------
+ __ Cmp(argc, 1);
+ __ Pop(arg_array, this_arg); // Overwrites argc.
+ __ CmovX(this_arg, undefined_value, lo); // undefined if argc == 0.
+ __ CmovX(arg_array, undefined_value, ls); // undefined if argc <= 1.
+
+ __ Peek(receiver, 0);
+ __ Poke(this_arg, 0);
+ }
+
+ // ----------- S t a t e -------------
+ // -- x0 : argArray
+ // -- x1 : receiver
+ // -- x3 : undefined root value
+ // -- jssp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(receiver, &receiver_not_callable);
+ __ Ldr(x10, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(w10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable,
+ &receiver_not_callable);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ Cmp(arg_array, null_value);
+ __ Ccmp(arg_array, undefined_value, ZFlag, ne);
+ __ B(eq, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target in x3).
+ DCHECK(undefined_value.Is(x3));
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ Bind(&no_arguments);
+ {
+ __ Mov(x0, 0);
+ DCHECK(receiver.Is(x1));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ Bind(&receiver_not_callable);
+ {
+ __ Poke(receiver, 0);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
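The Claim/Drop plus conditional-move sequence above normalizes a variable argument count to a fixed layout without branching. Equivalent logic in plain C++, with slot 0 being the top of the stack:

```cpp
struct ApplyArgs {
  void* this_arg;
  void* arg_array;
};

// Mirror of the CmovX selection: missing arguments become undefined.
ApplyArgs NormalizeApplyArgs(void* const* stack, int argc, void* undefined) {
  ApplyArgs a;
  a.this_arg = argc >= 1 ? stack[argc - 1] : undefined;   // deeper slot
  a.arg_array = argc >= 2 ? stack[argc - 2] : undefined;  // top of stack
  return a;
}
```

The stub gets the same effect branch-free by padding the stack to exactly two slots, popping both, and patching in undefined with conditional selects keyed off the comparison with argc.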
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register argc = x0;
Register function = x1;
Register scratch1 = x10;
Register scratch2 = x11;
- ASM_LOCATION("Builtins::Generate_FunctionCall");
+ ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
+
// 1. Make sure we have at least one argument.
{
Label done;
@@ -1293,205 +1606,161 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Ldr(key, MemOperand(fp, indexOffset));
- __ B(&entry);
-
- // Load the current argument from the arguments array.
- __ Bind(&loop);
- __ Ldr(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
- __ Ldr(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ Push(x0);
-
- __ Ldr(key, MemOperand(fp, indexOffset));
- __ Add(key, key, Smi::FromInt(1));
- __ Str(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ Bind(&entry);
- __ Ldr(x1, MemOperand(fp, limitOffset));
- __ Cmp(key, x1);
- __ B(ne, &loop);
-
- // On exit, the pushed arguments count is in x0, untagged
- __ Mov(x0, key);
- __ SmiUntag(x0);
-}
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argumentsList (if argc == 3)
+ // -- jssp[8] : thisArgument (if argc >= 2)
+ // -- jssp[16] : target (if argc >= 1)
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ReflectApply");
+ Register argc = x0;
+ Register arguments_list = x0;
+ Register target = x1;
+ Register this_argument = x2;
+ Register undefined_value = x3;
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
-
- Register args = x12;
- Register receiver = x14;
- Register function = x15;
- Register apply_function = x1;
-
- // Push the vector.
- __ Ldr(
- apply_function,
- FieldMemOperand(apply_function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(apply_function,
- FieldMemOperand(apply_function,
- SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(apply_function);
-
- // Get the length of the arguments via a builtin call.
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- __ Ldr(args, MemOperand(fp, kArgumentsOffset));
- __ Push(function, args);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
- Register argc = x0;
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+ __ Claim(3);
+ __ Drop(argc);
- Generate_CheckStackOverflow(masm, argc, kArgcIsSmiTagged);
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argumentsList (dummy value if argc <= 2)
+ // -- jssp[8] : thisArgument (dummy value if argc <= 1)
+ // -- jssp[16] : target (dummy value if argc == 0)
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ __ Adds(x10, argc, 0); // Preserve argc, and set the Z flag if it is zero.
+ __ Pop(arguments_list, this_argument, target); // Overwrites argc.
+ __ CmovX(target, undefined_value, eq); // undefined if argc == 0.
+ __ Cmp(x10, 2);
+ __ CmovX(this_argument, undefined_value, lo); // undefined if argc <= 1.
+ __ CmovX(arguments_list, undefined_value, ls); // undefined if argc <= 2.
+
+ __ Poke(this_argument, 0); // Overwrite receiver.
+ }
- // Push current limit, index and receiver.
- __ Mov(x1, 0); // Initial index.
- __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
- __ Push(argc, x1, receiver);
+ // ----------- S t a t e -------------
+ // -- x0 : argumentsList
+ // -- x1 : target
+ // -- jssp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(target, &target_not_callable);
+ __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ Ldrb(w10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable, &target_not_callable);
- // At the end of the loop, the number of arguments is stored in x0, untagged
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target in x3).
+ DCHECK(undefined_value.Is(x3));
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ Ldr(x1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ Bind(&target_not_callable);
+ {
+ __ Poke(target, 0);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ Drop(kStackSize);
- __ Ret();
}
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
-
- // Is x11 safe to use?
- Register newTarget = x11;
- Register args = x12;
- Register function = x15;
- Register construct_function = x1;
-
- // Push the vector.
- __ Ldr(construct_function,
- FieldMemOperand(construct_function,
- JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(construct_function,
- FieldMemOperand(construct_function,
- SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(construct_function);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ Ldr(x0, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
- __ B(ne, &validate_arguments);
- __ Ldr(x0, MemOperand(fp, kFunctionOffset));
- __ Str(x0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ Bind(&validate_arguments);
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- __ Ldr(args, MemOperand(fp, kArgumentsOffset));
- __ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
- __ Push(function, args, newTarget);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- Register argc = x0;
-
- Generate_CheckStackOverflow(masm, argc, kArgcIsSmiTagged);
-
- // Push current limit and index & constructor function as callee.
- __ Mov(x1, 0); // Initial index.
- __ Push(argc, x1, function);
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : new.target (optional)
+ // -- jssp[8] : argumentsList
+ // -- jssp[16] : target
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ReflectConstruct");
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ Register argc = x0;
+ Register arguments_list = x0;
+ Register target = x1;
+ Register new_target = x3;
+ Register undefined_value = x4;
- // Use undefined feedback vector
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ Ldr(x1, MemOperand(fp, kFunctionOffset));
- __ Ldr(x4, MemOperand(fp, kNewTargetOffset));
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // new.target into x3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+ __ Claim(3);
+ __ Drop(argc);
- // Leave internal frame.
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : new.target (dummy value if argc <= 2)
+ // -- jssp[8] : argumentsList (dummy value if argc <= 1)
+ // -- jssp[16] : target (dummy value if argc == 0)
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ __ Adds(x10, argc, 0); // Preserve argc, and set the Z flag if it is zero.
+ __ Pop(new_target, arguments_list, target); // Overwrites argc.
+ __ CmovX(target, undefined_value, eq); // undefined if argc == 0.
+ __ Cmp(x10, 2);
+ __ CmovX(arguments_list, undefined_value, lo); // undefined if argc <= 1.
+ __ CmovX(new_target, target, ls); // target if argc <= 2.
+
+ __ Poke(undefined_value, 0); // Overwrite receiver.
}
- __ Drop(kStackSize);
- __ Ret();
-}
+ // ----------- S t a t e -------------
+ // -- x0 : argumentsList
+ // -- x1 : target
+ // -- x3 : new.target
+ // -- jssp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_FunctionApply");
- Generate_ApplyHelper(masm, false);
-}
-
-
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_ReflectApply");
- Generate_ApplyHelper(masm, true);
-}
-
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(target, &target_not_constructor);
+ __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
+ &target_not_constructor);
+
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(new_target, &new_target_not_constructor);
+ __ Ldr(x10, FieldMemOperand(new_target, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
+ &new_target_not_constructor);
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ Bind(&target_not_constructor);
+ {
+ __ Poke(target, 0);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_ReflectConstruct");
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ Bind(&new_target_not_constructor);
+ {
+ __ Poke(new_target, 0);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
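Reflect.construct gets the same normalization treatment, with one twist visible in the CmovX chain: when new.target is absent (argc <= 2) it defaults to the target itself, per the spec. As a sketch (slot 0 = top of stack):

```cpp
struct ConstructArgs {
  void* target;
  void* arguments_list;
  void* new_target;
};

// Mirrors the selection above: absent values become undefined, except
// new.target, which falls back to target.
ConstructArgs NormalizeConstructArgs(void* const* stack, int argc,
                                     void* undefined) {
  ConstructArgs a;
  a.target = argc >= 1 ? stack[argc - 1] : undefined;
  a.arguments_list = argc >= 2 ? stack[argc - 2] : undefined;
  a.new_target = argc >= 3 ? stack[argc - 3] : a.target;
  return a;
}
```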
@@ -1501,6 +1770,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- x0 : actual number of arguments
// -- x1 : function (passed through to callee)
// -- x2 : expected number of arguments
+ // -- x3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
@@ -1542,69 +1812,227 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argumentsList
+ // -- x1 : target
+ // -- x3 : new.target (checked to be constructor or undefined)
+ // -- jssp[0] : thisArgument
+ // -----------------------------------
+
+ Register arguments_list = x0;
+ Register target = x1;
+ Register new_target = x3;
+
+ Register args = x0;
+ Register len = x2;
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(arguments_list, &create_runtime);
+
+ // Load native context.
+ Register native_context = x4;
+ __ Ldr(native_context, NativeContextMemOperand());
+
+ // Load the map of argumentsList.
+ Register arguments_list_map = x2;
+ __ Ldr(arguments_list_map,
+ FieldMemOperand(arguments_list, HeapObject::kMapOffset));
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ Ldr(x10, ContextMemOperand(native_context,
+ Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Ldr(x11, ContextMemOperand(native_context,
+ Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Cmp(arguments_list_map, x10);
+ __ Ccmp(arguments_list_map, x11, ZFlag, ne);
+ __ B(eq, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(arguments_list_map, native_context, JS_ARRAY_TYPE);
+ __ B(eq, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ Bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target, new_target, arguments_list);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(new_target, target);
+ __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
+ FixedArray::kLengthOffset));
+ }
+ __ B(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ Bind(&create_arguments);
+ __ Ldrsw(len, UntagSmiFieldMemOperand(
+ arguments_list,
+ JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+ __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+ __ CompareAndBranch(len, x11, ne, &create_runtime);
+ __ Mov(args, x10);
+ __ B(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ Bind(&create_array);
+ __ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(x10);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ // Branch for anything that's not FAST_{SMI_}ELEMENTS.
+ __ TestAndBranchIfAnySet(x10, ~FAST_ELEMENTS, &create_runtime);
+ __ Ldrsw(len,
+ UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+
+ __ Bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ __ Sub(x10, masm->StackPointer(), x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ B(gt, &done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Bind(&done);
+ }
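The overflow test computes the remaining stack as a signed difference, so it stays correct even when the stack pointer is already below the real limit. In C++:

```cpp
#include <cstdint>

// Signed remaining-space check, as in the block above: space_left may be
// negative if the stack is already overflowed, and the signed comparison
// then fails as required. kPointerSize is 8 on arm64.
bool ArgsFitOnStack(intptr_t sp, intptr_t real_stack_limit, intptr_t len) {
  intptr_t space_left = sp - real_stack_limit;
  return space_left > len * 8;
}
```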
+
+ // ----------- S t a t e -------------
+ // -- x0 : args (a FixedArray built from argumentsList)
+ // -- x1 : target
+ // -- x2 : len (number of elements to push from args)
+ // -- x3 : new.target (checked to be constructor or undefined)
+ // -- jssp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label done, loop;
+ Register src = x4;
+
+ __ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Mov(x0, len); // The 'len' argument for Call() or Construct().
+ __ Cbz(len, &done);
+ __ Claim(len);
+ __ Bind(&loop);
+ __ Sub(len, len, 1);
+ __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
+ __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ Cbnz(len, &loop);
+ __ Bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (len)
+ // -- x1 : target
+ // -- x3 : new.target (checked to be constructor or undefined)
+ // -- jssp[0] : args[len-1]
+ // -- jssp[8] : args[len-2]
+ // ... : ...
+ // -- jssp[8*(len-2)] : args[1]
+ // -- jssp[8*(len-1)] : args[0]
+ // -----------------------------------
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(new_target, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
+ ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(x1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(
+ w3, (1 << SharedFunctionInfo::kIsDefaultConstructor) |
+ (1 << SharedFunctionInfo::kIsSubclassConstructor) |
+ (1 << SharedFunctionInfo::kIsBaseConstructor),
+ &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
- __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ Label done_convert;
__ TestAndBranchIfAnySet(w3,
(1 << SharedFunctionInfo::kNative) |
(1 << SharedFunctionInfo::kStrictModeFunction),
&done_convert);
{
- __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
-
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
- // -- x3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(x3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
- __ B(hs, &done_convert);
- __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
- __ Bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(x3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
+ __ JumpIfSmi(x3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
+ __ B(hs, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ Bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(x3);
+ }
+ __ B(&convert_receiver);
+ }
+ __ Bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(x0);
+ __ Push(x0, x1);
+ __ Mov(x0, x3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Mov(x3, x0);
+ __ Pop(x1, x0);
+ __ SmiUntag(x0);
+ }
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Bind(&convert_receiver);
}
- __ B(&convert_receiver);
- __ Bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(x0);
- __ Push(x0, x1);
- __ Mov(x0, x3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Mov(x3, x0);
- __ Pop(x1, x0);
- __ SmiUntag(x0);
- }
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Bind(&convert_receiver);
__ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
}
__ Bind(&done_convert);
@@ -1618,15 +2046,126 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
ParameterCount actual(x0);
ParameterCount expected(x2);
- __ InvokeCode(x3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(x1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
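The mode-specialized conversion above collapses to fairly simple logic. A hedged C++ sketch, with hypothetical predicates standing in for the map checks and stub calls (declared but deliberately left undefined):

```cpp
enum class ReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

struct Value;                      // opaque stand-in for a tagged JS value
bool IsJSReceiver(Value* v);       // hypothetical: the instance-type check
bool IsNullOrUndefined(Value* v);  // hypothetical: the root comparisons
Value* GlobalProxy();              // hypothetical: __ LoadGlobalProxy
Value* ToObject(Value* v);         // hypothetical: the ToObjectStub call

// Applies only to sloppy, non-native callees; strict and native functions
// skip conversion entirely (the done_convert fast path).
Value* ConvertReceiver(Value* receiver, ReceiverMode mode) {
  if (mode == ReceiverMode::kNullOrUndefined) return GlobalProxy();
  if (IsJSReceiver(receiver)) return receiver;
  if (mode != ReceiverMode::kNotNullOrUndefined && IsNullOrUndefined(receiver))
    return GlobalProxy();
  return ToObject(receiver);
}
```

The kNullOrUndefined mode never even peeks at the receiver slot, which is why the stub can patch in the global proxy unconditionally on that path.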
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : target (checked to be a JSBoundFunction)
+ // -- x3 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into x2 and length of that into x4.
+ Label no_bound_arguments;
+ __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
+ __ Cmp(x4, 0);
+ __ B(eq, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : target (checked to be a JSBoundFunction)
+ // -- x2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- x3 : new.target (only in case of [[Construct]])
+ // -- x4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ Claim(x4);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ __ B(gt, &done); // Signed comparison.
+ // Restore the stack pointer.
+ __ Drop(x4);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ Bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ Mov(x5, 0);
+ __ Bind(&loop);
+ __ Cmp(x5, x0);
+ __ B(gt, &done_loop);
+ __ Peek(x10, Operand(x4, LSL, kPointerSizeLog2));
+ __ Poke(x10, Operand(x5, LSL, kPointerSizeLog2));
+ __ Add(x4, x4, 1);
+ __ Add(x5, x5, 1);
+ __ B(&loop);
+ __ Bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
+ __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Bind(&loop);
+ __ Sub(x4, x4, 1);
+ __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
+ __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x0, x0, 1);
+ __ Cmp(x4, 0);
+ __ B(gt, &loop);
+ }
+ }
+ __ Bind(&no_bound_arguments);
}
+} // namespace
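Seen at the level of whole stack slots, Generate_PushBoundArguments performs an insert: the pushed arguments slide down by the number of bound arguments, and the bound arguments land just above the receiver. A sketch, assuming slot 0 is the top of the downward-growing stack and the extra room has already been claimed:

```cpp
#include <cstddef>
#include <cstring>

// slots points at the new stack top after claiming m extra slots; the n
// original arguments (plus the receiver at slots[n + m]) start at slots + m.
void PushBoundArgs(void** slots, size_t n, void* const* bound, size_t m) {
  std::memmove(slots, slots + m, n * sizeof(void*));  // relocate arguments
  for (size_t i = 0; i < m; ++i) {
    // bound[0] must end up deepest (closest to the receiver), matching the
    // downward copy in the stub.
    slots[n + i] = bound[m - 1 - i];
  }
}
```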
+
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(x1);
+
+ // Patch the receiver to [[BoundThis]].
+ __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+ __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Mov(x10,
+ ExternalReference(Builtins::kCall_ReceiverIsAny, masm->isolate()));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x12);
+}
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the target to call (can be any Object).
@@ -1636,16 +2175,22 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq);
- __ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq);
+ __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ Cmp(x5, JS_PROXY_TYPE);
__ B(ne, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ Ldr(x1, FieldMemOperand(x1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(x1);
- __ B(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(x1);
+  // Increase the argument count to include the pushed function and the
+ // existing receiver on the stack.
+ __ Add(x0, x0, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1656,15 +2201,17 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
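
Summarized as a host-side dispatch table (the type tags are assumed; the handler names are the ones wired up above):

    #include <string>

    enum class InstanceType { JS_FUNCTION, JS_BOUND_FUNCTION, JS_PROXY, OTHER };

    // Which handler the generic Call builtin tail-calls into.
    std::string DispatchCall(InstanceType type, bool is_callable) {
      switch (type) {
        case InstanceType::JS_FUNCTION:       return "CallFunction(mode)";
        case InstanceType::JS_BOUND_FUNCTION: return "CallBoundFunction";
        case InstanceType::JS_PROXY:          return "Runtime::kJSProxyCall";
        case InstanceType::OTHER:
          return is_callable ? "CallFunction via CALL_AS_FUNCTION_DELEGATE"
                             : "Runtime::kThrowCalledNonCallable";
      }
      return "";  // unreachable
    }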
@@ -1674,10 +2221,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (checked to be a JSFunction)
- // -- x3 : the original constructor (checked to be a JSFunction)
+ // -- x3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(x1);
- __ AssertFunction(x3);
  // Calling convention for function-specific ConstructStubs requires
// x2 to contain either an AllocationSite or undefined.
@@ -1693,17 +2239,53 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSBoundFunction)
+ // -- x3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(x1);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ Cmp(x1, x3);
+ __ B(ne, &done);
+ __ Ldr(x3,
+ FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Mov(x10, ExternalReference(Builtins::kConstruct, masm->isolate()));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x12);
+}
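
The new.target fix-up above, isolated as a sketch (opaque pointers stand in for tagged values):

    // If new.target is the bound function itself (i.e. `new bound_fn(...)`),
    // unwrap it so the target constructor never observes the wrapper.
    const void* PatchNewTarget(const void* target, const void* new_target,
                               const void* bound_target_function) {
      return new_target == target ? bound_target_function : new_target;
    }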
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
- // -- x1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- x3 : the original constructor (either the same as the constructor or
+ // -- x1 : the constructor to call (checked to be a JSProxy)
+ // -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ Ldr(x1, FieldMemOperand(x1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(x1);
+ __ Push(x3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ Add(x0, x0, 3);
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1712,22 +2294,31 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (can be any Object)
- // -- x3 : the original constructor (either the same as the constructor or
+ // -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(x1, &non_constructor);
- __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
- __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
// Dispatch based on instance type.
- __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
+ __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq);
- __ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
+
+ // Check if target has a [[Construct]] internal method.
+ __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ Cmp(x5, JS_PROXY_TYPE);
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1736,7 +2327,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1744,22 +2335,20 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
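
The Construct dispatch, sketched with the same illustrative InstanceType tags (and <string>) as the Call sketch earlier. Note the ordering: plain JSFunctions are dispatched immediately, while everything else must pass the constructor-bit test first:

    std::string DispatchConstruct(InstanceType type, bool is_constructor) {
      if (type == InstanceType::JS_FUNCTION) return "ConstructFunction";
      if (!is_constructor)                   return "ConstructedNonConstructable";
      if (type == InstanceType::JS_BOUND_FUNCTION)
        return "ConstructBoundFunction";
      if (type == InstanceType::JS_PROXY)    return "ConstructProxy";
      return "CallFunction via CALL_AS_CONSTRUCTOR_DELEGATE";
    }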
// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -- x1 : the target to call (can be any Object).
+ // -----------------------------------
// Find the address of the last argument.
__ add(x3, x0, Operand(1)); // Add one for receiver.
@@ -1784,26 +2373,60 @@ void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (not including receiver)
+ // -- x3 : new target
+ // -- x1 : constructor to call
+ // -- x2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(x5, x0, Operand(1)); // Add one for receiver (to be constructed).
+ __ lsl(x5, x5, kPointerSizeLog2);
+
+ // Set stack pointer and where to stop.
+ __ Mov(x6, jssp);
+ __ Claim(x5, 1);
+ __ sub(x4, x6, x5);
+
+ // Push a slot for the receiver.
+ __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+
+ Label loop_header, loop_check;
+ // Push the arguments.
+ __ B(&loop_check);
+ __ Bind(&loop_header);
+ // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+ __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
+ __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
+ __ Bind(&loop_check);
+ __ Cmp(x6, x4);
+ __ B(gt, &loop_header);
+
+ // Call the constructor with x0, x1, and x3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
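
A host-side model (plain C++, not V8 code) of the push loop: the receiver slot is reserved first, then argc values are copied downward from the interpreter's register file, which preserves their original order on the stack:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint64_t> PushArgsForConstruct(const uint64_t* first_arg,
                                               size_t argc) {
      std::vector<uint64_t> stack;    // stack[0] is the deepest slot
      stack.push_back(0);             // receiver hole: __ Str(xzr, ...)
      for (size_t i = 0; i < argc; ++i)
        // Mirrors the PostIndex decrement of x2: arguments live at
        // descending addresses in the register file.
        stack.push_back(first_arg[-static_cast<std::ptrdiff_t>(i)]);
      return stack;                   // x0, x1 and x3 reach Construct unmodified
    }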
+
+
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
// -- x0 : actual number of arguments
// -- x1 : function (passed through to callee)
// -- x2 : expected number of arguments
+ // -- x3 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
-
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register code_entry = x3;
+ Register code_entry = x10;
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Cmp(argc_actual, argc_expected);
__ B(lt, &too_few);
__ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1811,6 +2434,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
Register copy_start = x10;
Register copy_end = x11;
@@ -1876,11 +2500,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
__ Lsl(scratch2, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
@@ -1930,6 +2555,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Mov(argc_actual, argc_expected);
// x0 : expected number of arguments
// x1 : function (passed through to callee)
+ // x3 : new target (passed through to callee)
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Call(code_entry);
// Store offset of return address for deoptimizer.
@@ -1941,13 +2568,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point without adapting the arguments.
__ Bind(&dont_adapt_arguments);
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Jump(code_entry);
__ Bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ Unreachable();
}
}
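
What the adaptor achieves, sketched in plain C++ (kDontAdaptArgumentsSentinel is a placeholder value here, and the strong-mode too-few-arguments throw is omitted):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kDontAdaptArgumentsSentinel = static_cast<size_t>(-1);

    std::vector<uint64_t> AdaptArguments(std::vector<uint64_t> actual,
                                         size_t expected, uint64_t undefined) {
      if (expected == kDontAdaptArgumentsSentinel) return actual;   // dont_adapt
      while (actual.size() < expected) actual.push_back(undefined); // too_few
      if (actual.size() > expected) actual.resize(expected);        // enough
      return actual;  // the callee always sees exactly 'expected' arguments
    }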
diff --git a/chromium/v8/src/arm64/code-stubs-arm64.cc b/chromium/v8/src/arm64/code-stubs-arm64.cc
index e39e08831a0..a1e920755dd 100644
--- a/chromium/v8/src/arm64/code-stubs-arm64.cc
+++ b/chromium/v8/src/arm64/code-stubs-arm64.cc
@@ -223,7 +223,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
// Call runtime on identical JSObjects. Otherwise return equal.
- __ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
+ __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
slow, ge);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
@@ -245,7 +245,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
@@ -336,10 +336,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are not
// equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label right_non_object;
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, &right_non_object);
// Return non-zero - x0 already contains a non-zero pointer.
@@ -356,9 +356,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If right is not ODDBALL, test left. Otherwise, set eq condition.
__ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
- // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
// Otherwise, right or left is ODDBALL, so set a ge condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+ __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);
__ B(ge, &return_not_equal);
@@ -471,11 +471,11 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Bind(&object_test);
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
- // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
- // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+ // If right >= FIRST_JS_RECEIVER_TYPE, test left.
+ // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);
__ B(lt, not_both_strings);
@@ -653,8 +653,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
  // Figure out which native to call and set up the arguments.
if (cond == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
@@ -668,9 +667,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ Bind(&miss);
@@ -966,7 +964,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Bind(&call_runtime);
// Put the arguments back on the stack.
__ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// Return.
__ Bind(&done);
@@ -1067,6 +1065,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Register parameters:
// x0: argc (including receiver, untagged)
// x1: target
+ // If argv_in_register():
+ // x11: argv (pointer to first argument)
//
// The stack on entry holds the arguments and the receiver, with the receiver
// at the highest address:
@@ -1098,9 +1098,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// (arg[argc-2]), or just below the receiver in case there are no arguments.
// - Adjust for the arg[] array.
Register temp_argv = x11;
- __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
- // - Adjust for the receiver.
- __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ if (!argv_in_register()) {
+ __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
+ // - Adjust for the receiver.
+ __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
+ }
// Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
// registers.
@@ -1204,12 +1206,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ LeaveExitFrame(save_doubles(), x10, true);
DCHECK(jssp.Is(__ StackPointer()));
- // Pop or drop the remaining stack slots and return from the stub.
- // jssp[24]: Arguments array (of size argc), including receiver.
- // jssp[16]: Preserved x23 (used for target).
- // jssp[8]: Preserved x22 (used for argc).
- // jssp[0]: Preserved x21 (used for argv).
- __ Drop(x11);
+ if (!argv_in_register()) {
+ // Drop the remaining stack slots and return from the stub.
+ __ Drop(x11);
+ }
__ AssertFPCRState();
__ Ret();
@@ -1548,17 +1548,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
__ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- Register const scratch_w = scratch.W();
- __ Ldr(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // On 64-bit platforms, compiler hints field is not a smi. See definition of
- // kCompilerHintsOffset in src/objects.h.
- __ Ldr(scratch_w, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(scratch_w, SharedFunctionInfo::kBoundFunction, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ Ldr(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1583,27 +1572,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(x0, Heap::kTrueValueRootIndex);
+ Register const result = x0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ Bind(&loop);
- __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
+ &fast_runtime_fallback);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ B(eq, &fast_runtime_fallback);
+
+ __ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Cmp(object, function_prototype);
__ B(eq, &done);
- __ Cmp(object_prototype, null);
- __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ Cmp(object, null);
+ __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ B(ne, &loop);
- __ LoadRoot(x0, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ Bind(&done);
- __ StoreRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+  // Found Proxy or access check needed: call the runtime.
+ __ Bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ Move(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
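
A host-side sketch of the new chain walk (a deliberately minimal object model; real maps carry far more state):

    struct ObjectModel;
    struct MapModel {
      bool access_check_needed;       // Map::kIsAccessCheckNeeded
      bool is_proxy;                  // instance type == JS_PROXY_TYPE
      const ObjectModel* prototype;   // Map::kPrototypeOffset; nullptr = null
    };
    struct ObjectModel { const MapModel* map; };

    enum class InstanceOfResult { kTrue, kFalse, kRuntimeFallback };

    InstanceOfResult HasInPrototypeChain(const ObjectModel* object,
                                         const ObjectModel* function_prototype) {
      for (const MapModel* map = object->map;;) {
        if (map->access_check_needed || map->is_proxy)
          return InstanceOfResult::kRuntimeFallback;  // kHasInPrototypeChain
        const ObjectModel* proto = map->prototype;
        if (proto == function_prototype) return InstanceOfResult::kTrue;
        if (proto == nullptr) return InstanceOfResult::kFalse;
        map = proto->map;
      }
    }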
@@ -1654,7 +1663,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// the runtime system.
__ Bind(&slow);
__ Push(key);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1685,7 +1694,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ Bind(&runtime);
__ Push(x1, x3, x2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1799,13 +1808,10 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x11 sloppy_args_map offset to args (or aliased args) map (uninit)
// x14 arg_count number of function arguments
- Register global_object = x10;
Register global_ctx = x10;
Register sloppy_args_map = x11;
Register aliased_args_map = x10;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
+ __ Ldr(global_ctx, NativeContextMemOperand());
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
@@ -1963,7 +1969,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1979,7 +1985,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ Push(receiver, key);
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2045,14 +2051,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (native) context.
- Register global_object = x10;
- Register global_ctx = x10;
Register strict_args_map = x4;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx, FieldMemOperand(global_object,
- GlobalObject::kNativeContextOffset));
- __ Ldr(strict_args_map,
- ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
+ strict_args_map);
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
@@ -2116,13 +2117,61 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
+ // x4 : rest parameter index (tagged)
+ //
+ // Returns pointer to result object in x0.
+
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
+ DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register rest_index_smi = x4;
+ Register param_count_smi = x2;
+ Register params = x3;
+ Register param_count = x13;
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx,
+ MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &runtime);
+
+ // x4 rest_index_smi index of rest parameter
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ __ Bind(&runtime);
+ __ Push(param_count_smi, params, rest_index_smi);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
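
The frame inspection above, modeled with an assumed minimal frame layout (field names are illustrative, not V8's):

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    struct FrameModel {
      const FrameModel* caller;
      bool is_arguments_adaptor;  // context slot == ARGUMENTS_ADAPTOR marker
      size_t adapted_length;      // actual argc recorded by the adaptor
      uint64_t* adapted_params;   // caller-SP-relative parameter pointer
    };

    // The (count, pointer) pair handed to Runtime::kNewRestParam.
    std::pair<size_t, uint64_t*> RestParamInputs(const FrameModel* fp,
                                                 size_t param_count,
                                                 uint64_t* params) {
      const FrameModel* caller = fp->caller;
      if (caller->is_arguments_adaptor)
        return {caller->adapted_length, caller->adapted_params};  // patched
      return {param_count, params};  // formal parameter count is accurate
    }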
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2563,7 +2612,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ B(eq, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
@@ -2572,7 +2621,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2620,25 +2669,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector, Register index,
- Register orig_construct, bool is_super) {
+ Register new_target) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
- if (is_super) {
- __ Push(argc, function, feedback_vector, index, orig_construct);
- } else {
- __ Push(argc, function, feedback_vector, index);
- }
+ __ Push(argc, function, feedback_vector, index);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
- if (is_super) {
- __ Pop(orig_construct, index, feedback_vector, function, argc);
- } else {
- __ Pop(index, feedback_vector, function, argc);
- }
+ __ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
@@ -2646,19 +2687,17 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
- Register orig_construct, Register scratch1,
- Register scratch2, Register scratch3,
- bool is_super) {
+ Register new_target, Register scratch1,
+ Register scratch2, Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
- feedback_vector, index, orig_construct));
+ feedback_vector, index, new_target));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
- // orig_construct : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2701,7 +2740,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
__ B(&done);
@@ -2725,7 +2764,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ Bind(&initialize);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &not_array_function);
@@ -2734,119 +2773,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index, orig_construct, is_super);
+ feedback_vector, index, new_target);
__ B(&done);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
- feedback_vector, index, orig_construct, is_super);
+ feedback_vector, index, new_target);
__ Bind(&done);
}
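
The cache states named in the comment above, as a small transition sketch (the representation is assumed; the real slot stores a WeakCell, an AllocationSite, or the megamorphic sentinel):

    enum class FeedbackState {
      kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic
    };

    FeedbackState RecordCallTarget(FeedbackState state, bool same_target,
                                   bool target_is_array_function) {
      switch (state) {
        case FeedbackState::kUninitialized:   // first call initializes the slot
          return target_is_array_function ? FeedbackState::kAllocationSite
                                          : FeedbackState::kMonomorphic;
        case FeedbackState::kMonomorphic:
        case FeedbackState::kAllocationSite:  // a mismatch goes megamorphic
          return same_target ? state : FeedbackState::kMegamorphic;
        case FeedbackState::kMegamorphic:     // terminal state
          return state;
      }
      return state;
    }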
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, cont);
-
- // Do not transform the receiver for native (Compilerhints already in x3).
- __ Tbnz(w4, SharedFunctionInfo::kNative, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ Mov(x0, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
- __ Mov(x0, x3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Pop(x1);
- }
- __ Poke(x0, argc * kPointerSize);
- __ B(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // x1 function the function to call
- Register function = x1;
- Register type = x4;
- Label slow, wrap, cont;
-
- // TODO(jbramley): This function has a lot of unnamed registers. Name them,
- // and tidy things up a bit.
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &slow);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
- }
-
- // Fast-case: Invoke the function now.
- // x1 function pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ Peek(x3, argc * kPointerSize);
-
- if (needs_checks) {
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
- } else {
- __ B(&wrap);
- }
-
- __ Bind(&cont);
- }
-
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ Bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ Bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- ASM_LOCATION("CallFunctionStub::Generate");
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallConstructStub::Generate");
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
- // x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
Label non_function;
@@ -2857,28 +2800,21 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
&non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
- IsSuperConstructorCall());
-
- __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into x2, or undefined.
- __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
- __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
- __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
- &feedback_register_initialized);
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
-
- __ AssertUndefinedOrAllocationSite(x2, x5);
- }
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
- if (IsSuperConstructorCall()) {
- __ Mov(x3, x4);
- } else {
- __ Mov(x3, function);
- }
+ __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+
+ __ AssertUndefinedOrAllocationSite(x2, x5);
+
+ __ Mov(x3, function);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2904,7 +2840,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
Register allocation_site = x4;
Register scratch = x5;
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
__ Cmp(function, scratch);
__ B(ne, miss);
@@ -2921,9 +2857,9 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Set up arguments for the array constructor stub.
Register allocation_site_arg = feedback_vector;
- Register original_constructor_arg = index;
+ Register new_target_arg = index;
__ Mov(allocation_site_arg, allocation_site);
- __ Mov(original_constructor_arg, function);
+ __ Mov(new_target_arg, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
}
@@ -2935,20 +2871,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id (Smi)
// x2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
Register function = x1;
Register feedback_vector = x2;
Register index = x3;
- Register type = x4;
// The checks. First, does x1 match the recorded monomorphic target?
__ Add(x4, feedback_vector,
@@ -2986,36 +2915,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Compute the receiver in sloppy mode.
- __ Peek(x3, argc * kPointerSize);
-
- __ JumpIfSmi(x3, &wrap);
- __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
-
- __ Bind(&cont);
- }
-
- __ InvokeFunction(function,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ Bind(&call_function);
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
- __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
+ __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
__ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
__ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
@@ -3040,14 +2948,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Subs(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ B(&slow_start);
+
+ __ Bind(&call);
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -3059,14 +2964,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
__ Cmp(function, x5);
__ B(eq, &miss);
- // Update stats.
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
+ __ Ldr(x5, NativeContextMemOperand());
+ __ Cmp(x4, x5);
+ __ B(ne, &miss);
// Initialize the call counter.
__ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
@@ -3086,22 +2993,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(function);
}
- __ B(&have_js_function);
+ __ B(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &slow);
-
- // Goto slow case if we do not have a function.
- __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
- __ B(&have_js_function);
+ __ B(&call);
}
@@ -3114,7 +3013,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
  // Move the result to x1 and exit the internal frame.
__ Mov(x1, x0);
@@ -3172,11 +3071,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3204,7 +3103,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3235,7 +3134,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3253,7 +3152,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
@@ -3507,9 +3406,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ Bind(&runtime);
__ Push(lhs, rhs);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ Bind(&miss);
@@ -3517,9 +3416,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
- ASM_LOCATION("CompareICStub[Objects]");
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
+ ASM_LOCATION("CompareICStub[Receivers]");
Label miss;
@@ -3529,10 +3428,11 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
__ JumpIfEitherSmi(rhs, lhs, &miss);
- __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
- __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
+ __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Sub(result, rhs, lhs);
__ Ret();
@@ -3541,8 +3441,8 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("CompareICStub[KnownObjects]");
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[KnownReceivers]");
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
@@ -3568,7 +3468,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Sub(result, rhs, lhs);
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
Register ncr = x2;
if (op() == Token::LT || op() == Token::LTE) {
@@ -3577,7 +3477,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Mov(ncr, Smi::FromInt(LESS));
}
__ Push(lhs, rhs, ncr);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ Bind(&miss);
@@ -3605,7 +3505,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@@ -3851,7 +3751,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// x1: result_length
@@ -3897,7 +3797,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&slow_string);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ Bind(&not_string);
Label not_oddball;
@@ -3908,7 +3808,22 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in x0.
+ Label not_smi;
+ __ JumpIfNotSmi(x0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Tst(x0, x0);
+ __ Csel(x0, x0, Operand(0), ge);
+ __ Ret();
+ __ Bind(&not_smi);
+
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3940,7 +3855,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -4084,7 +3999,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
__ Bind(&runtime);
__ Push(x1, x0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -4226,12 +4141,12 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(val,
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- regs_.scratch2(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(val,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm); // Restore the extra scratch registers we used.
@@ -4281,76 +4196,6 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // x0 value element value to store
- // x3 index_smi element index as smi
- // sp[0] array_index_smi array literal index in function as smi
- // sp[1] array array literal
-
- Register value = x0;
- Register index_smi = x3;
-
- Register array = x1;
- Register array_map = x2;
- Register array_index_smi = x4;
- __ PeekPair(array_index_smi, array, 0);
- __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
-
- Label double_elements, smi_element, fast_elements, slow_elements;
- Register bitfield2 = x10;
- __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
-
- // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
- // FAST_HOLEY_ELEMENTS.
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
- __ B(hi, &double_elements);
-
- __ JumpIfSmi(value, &smi_element);
-
- // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
- __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
- &fast_elements);
-
- // Store into the array literal requires an elements transition. Call into
- // the runtime.
- __ Bind(&slow_elements);
- __ Push(array, index_smi, value);
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- __ Push(x11, array_index_smi);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ Bind(&fast_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(value, MemOperand(x11));
- // Update the write barrier for the array store.
- __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ Bind(&smi_element);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Ret();
-
- __ Bind(&double_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
- &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5341,12 +5186,12 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
- // -- x3 : original constructor
+ // -- x3 : new target
// -- sp[0] : last argument
// -----------------------------------
Register constructor = x1;
Register allocation_site = x2;
- Register original_constructor = x3;
+ Register new_target = x3;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
@@ -5368,8 +5213,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
}
+ // Enter the context of the Array function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
Label subclassing;
- __ Cmp(original_constructor, constructor);
+ __ Cmp(new_target, constructor);
__ B(ne, &subclassing);
Register kind = x3;
@@ -5388,22 +5236,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing support.
__ Bind(&subclassing);
- __ Push(constructor, original_constructor);
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(x0, x0, Operand(2));
+ __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x0, x0, Operand(3));
break;
case NONE:
- __ Mov(x0, Operand(2));
+ __ Poke(constructor, 0 * kPointerSize);
+ __ Mov(x0, Operand(3));
break;
case ONE:
- __ Mov(x0, Operand(3));
+ __ Poke(constructor, 1 * kPointerSize);
+ __ Mov(x0, Operand(4));
break;
}
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(new_target, allocation_site);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
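
The stack repack for the subclassing path, modeled for the ANY/MORE_THAN_ONE case (stack[0] is the top slot; values are opaque words here):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint64_t> RepackForNewArray(std::vector<uint64_t> stack,
                                            size_t argc, uint64_t constructor,
                                            uint64_t new_target, uint64_t site) {
      stack[argc] = constructor;  // receiver slot now holds the constructor
      // __ Push(new_target, allocation_site): the site ends up on top.
      stack.insert(stack.begin(), {site, new_target});
      return stack;               // Runtime::kNewArray sees argc + 3 values
    }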
@@ -5519,7 +5368,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Bind(&slow_case);
__ SmiTag(slot);
__ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5639,8 +5488,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5793,7 +5641,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ Bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ Bind(&delete_allocated_handles);
diff --git a/chromium/v8/src/arm64/code-stubs-arm64.h b/chromium/v8/src/arm64/code-stubs-arm64.h
index 1b64a625f99..4b56b5468f9 100644
--- a/chromium/v8/src/arm64/code-stubs-arm64.h
+++ b/chromium/v8/src/arm64/code-stubs-arm64.h
@@ -131,6 +131,7 @@ class RecordWriteStub: public PlatformCodeStub {
static void Patch(Code* stub, Mode mode) {
  // We are going to patch the first two instructions of the stub.
PatchingAssembler patcher(
+ stub->GetIsolate(),
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
@@ -384,6 +385,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_CODE_STUBS_ARM64_H_
diff --git a/chromium/v8/src/arm64/codegen-arm64.cc b/chromium/v8/src/arm64/codegen-arm64.cc
index 8e927bfd905..c2073f1f4b6 100644
--- a/chromium/v8/src/arm64/codegen-arm64.cc
+++ b/chromium/v8/src/arm64/codegen-arm64.cc
@@ -16,9 +16,9 @@ namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
-byte* fast_exp_arm64_machine_code = NULL;
-double fast_exp_simulator(double x) {
- Simulator * simulator = Simulator::current(Isolate::Current());
+byte* fast_exp_arm64_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ Simulator * simulator = Simulator::current(isolate);
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
@@ -28,19 +28,18 @@ double fast_exp_simulator(double x) {
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
-
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
@@ -64,11 +63,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
@@ -76,8 +75,8 @@ UnaryMathFunction CreateExpFunction() {
}
-UnaryMathFunction CreateSqrtFunction() {
- return &std::sqrt;
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+ return nullptr;
}
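
Note the changed contract: the factories now return nullptr instead of silently falling back to the C library, so callers must supply the fallback themselves. A hedged sketch (the typedef is assumed to match the new signature):

    #include <cmath>

    struct Isolate;  // opaque here
    using UnaryMathFunctionWithIsolate = double (*)(double x, Isolate* isolate);

    double FastExpOrFallback(UnaryMathFunctionWithIsolate fast_exp,
                             Isolate* isolate, double x) {
      // CreateExpFunction() may return nullptr (e.g. buffer allocation failed,
      // or no fast path exists, as CreateSqrtFunction() now documents).
      return fast_exp != nullptr ? fast_exp(x, isolate) : std::exp(x);
    }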
@@ -368,12 +367,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
}
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
- PatchingAssembler patcher(young_sequence_.start(),
+ PatchingAssembler patcher(isolate, young_sequence_.start(),
young_sequence_.length() / kInstructionSize);
// The young sequence is the frame setup code for FUNCTION code types. It is
// generated by FullCodeGenerator::Generate.
@@ -382,7 +382,7 @@ CodeAgingHelper::CodeAgingHelper() {
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
- PatchingAssembler patcher_old(old_sequence_.start(), length);
+ PatchingAssembler patcher_old(isolate, old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}
@@ -417,7 +417,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- PatchingAssembler patcher(sequence,
+ PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
diff --git a/chromium/v8/src/arm64/codegen-arm64.h b/chromium/v8/src/arm64/codegen-arm64.h
index 2f01c510de7..573f6fe159c 100644
--- a/chromium/v8/src/arm64/codegen-arm64.h
+++ b/chromium/v8/src/arm64/codegen-arm64.h
@@ -5,7 +5,7 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -43,6 +43,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_CODEGEN_ARM64_H_
diff --git a/chromium/v8/src/arm64/constants-arm64.h b/chromium/v8/src/arm64/constants-arm64.h
index 1529c647ff7..00b24e93759 100644
--- a/chromium/v8/src/arm64/constants-arm64.h
+++ b/chromium/v8/src/arm64/constants-arm64.h
@@ -9,11 +9,11 @@
#include "src/globals.h"
// Assert that this is an LP64 system.
-STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
-STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t));
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t));
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
// Get the standard printf format macros for C99 stdint types.
@@ -32,8 +32,8 @@ const unsigned kInstructionSizeLog2 = 2;
const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
-const unsigned kNumberOfRegisters = 32;
-const unsigned kNumberOfFPRegisters = 32;
+const int kNumberOfRegisters = 32;
+const int kNumberOfFPRegisters = 32;
// Callee saved registers are x19-x30(lr).
const int kNumberOfCalleeSavedRegisters = 11;
const int kFirstCalleeSavedRegisterIndex = 19;
@@ -42,23 +42,22 @@ const int kNumberOfCalleeSavedFPRegisters = 8;
const int kFirstCalleeSavedFPRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
const unsigned kJSCalleeSavedRegList = 0x03f80000;
-// TODO(all): k<Y>RegSize should probably be k<Y>RegSizeInBits.
-const unsigned kWRegSizeInBits = 32;
-const unsigned kWRegSizeInBitsLog2 = 5;
-const unsigned kWRegSize = kWRegSizeInBits >> 3;
-const unsigned kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
-const unsigned kXRegSizeInBits = 64;
-const unsigned kXRegSizeInBitsLog2 = 6;
-const unsigned kXRegSize = kXRegSizeInBits >> 3;
-const unsigned kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
-const unsigned kSRegSizeInBits = 32;
-const unsigned kSRegSizeInBitsLog2 = 5;
-const unsigned kSRegSize = kSRegSizeInBits >> 3;
-const unsigned kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
-const unsigned kDRegSizeInBits = 64;
-const unsigned kDRegSizeInBitsLog2 = 6;
-const unsigned kDRegSize = kDRegSizeInBits >> 3;
-const unsigned kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
+const int kWRegSizeInBits = 32;
+const int kWRegSizeInBitsLog2 = 5;
+const int kWRegSize = kWRegSizeInBits >> 3;
+const int kWRegSizeLog2 = kWRegSizeInBitsLog2 - 3;
+const int kXRegSizeInBits = 64;
+const int kXRegSizeInBitsLog2 = 6;
+const int kXRegSize = kXRegSizeInBits >> 3;
+const int kXRegSizeLog2 = kXRegSizeInBitsLog2 - 3;
+const int kSRegSizeInBits = 32;
+const int kSRegSizeInBitsLog2 = 5;
+const int kSRegSize = kSRegSizeInBits >> 3;
+const int kSRegSizeLog2 = kSRegSizeInBitsLog2 - 3;
+const int kDRegSizeInBits = 64;
+const int kDRegSizeInBitsLog2 = 6;
+const int kDRegSize = kDRegSizeInBits >> 3;
+const int kDRegSizeLog2 = kDRegSizeInBitsLog2 - 3;
const int64_t kWRegMask = 0x00000000ffffffffL;
const int64_t kXRegMask = 0xffffffffffffffffL;
const int64_t kSRegMask = 0x00000000ffffffffL;
@@ -86,13 +85,13 @@ const int64_t kXMaxInt = 0x7fffffffffffffffL;
const int64_t kXMinInt = 0x8000000000000000L;
const int32_t kWMaxInt = 0x7fffffff;
const int32_t kWMinInt = 0x80000000;
-const unsigned kIp0Code = 16;
-const unsigned kIp1Code = 17;
-const unsigned kFramePointerRegCode = 29;
-const unsigned kLinkRegCode = 30;
-const unsigned kZeroRegCode = 31;
-const unsigned kJSSPCode = 28;
-const unsigned kSPRegInternalCode = 63;
+const int kIp0Code = 16;
+const int kIp1Code = 17;
+const int kFramePointerRegCode = 29;
+const int kLinkRegCode = 30;
+const int kZeroRegCode = 31;
+const int kJSSPCode = 28;
+const int kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kShiftAmountWRegMask = 0x1f;
const unsigned kShiftAmountXRegMask = 0x3f;
@@ -118,12 +117,6 @@ const unsigned kDoubleExponentBias = 1023;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
V_(Rd, 4, 0, Bits) /* Destination register. */ \
@@ -1237,6 +1230,7 @@ enum UnallocatedOp {
UnallocatedFMask = 0x00000000
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_CONSTANTS_ARM64_H_
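
Dropping unsigned here lets these counts and sizes compare cleanly against
signed loop indices. As a hedged sanity check (not part of the patch), the
bit/byte/log2 relationships the constants encode can be spelled out in the
file's own STATIC_ASSERT style:

  STATIC_ASSERT(kXRegSize == kXRegSizeInBits / 8);        // X regs: 8 bytes
  STATIC_ASSERT(kWRegSizeInBits == 1 << kWRegSizeInBitsLog2);
  STATIC_ASSERT(kDRegSize == 1 << kDRegSizeLog2);         // D regs: 8 bytes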
diff --git a/chromium/v8/src/arm64/decoder-arm64-inl.h b/chromium/v8/src/arm64/decoder-arm64-inl.h
index c29f2d3c5ed..e00105e7bc1 100644
--- a/chromium/v8/src/arm64/decoder-arm64-inl.h
+++ b/chromium/v8/src/arm64/decoder-arm64-inl.h
@@ -644,6 +644,7 @@ void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_DECODER_ARM64_INL_H_
diff --git a/chromium/v8/src/arm64/decoder-arm64.h b/chromium/v8/src/arm64/decoder-arm64.h
index 6140bc28180..b1ef41f1a2f 100644
--- a/chromium/v8/src/arm64/decoder-arm64.h
+++ b/chromium/v8/src/arm64/decoder-arm64.h
@@ -181,6 +181,7 @@ class Decoder : public V {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_DECODER_ARM64_H_
diff --git a/chromium/v8/src/arm64/deoptimizer-arm64.cc b/chromium/v8/src/arm64/deoptimizer-arm64.cc
index 65fb93e53cf..118c5dfa8dc 100644
--- a/chromium/v8/src/arm64/deoptimizer-arm64.cc
+++ b/chromium/v8/src/arm64/deoptimizer-arm64.cc
@@ -6,6 +6,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -48,7 +49,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
+ PatchingAssembler patcher(isolate, call_address,
+ patch_size() / kInstructionSize);
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
@@ -75,7 +77,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -122,8 +124,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// in the input frame.
// Save all allocatable floating point registers.
- CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
- FPRegister::kAllocatableFPRegisters);
+ CPURegList saved_fp_registers(
+ CPURegister::kFPRegister, kDRegSizeInBits,
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask());
__ PushCPURegList(saved_fp_registers);
 // We save all the registers except jssp, sp and lr.
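
The saved FP set is now derived from the shared RegisterConfiguration rather
than a hard-coded FPRegister mask. A hedged sketch of the query pattern the
hunk above relies on (condensed, illustrative only):

  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  // Each set bit in the mask names one allocatable double register by code.
  CPURegList saved_fp(CPURegister::kFPRegister, kDRegSizeInBits,
                      config->allocatable_double_codes_mask());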
diff --git a/chromium/v8/src/arm64/disasm-arm64.cc b/chromium/v8/src/arm64/disasm-arm64.cc
index fb3b692d08f..00c3ec25d6a 100644
--- a/chromium/v8/src/arm64/disasm-arm64.cc
+++ b/chromium/v8/src/arm64/disasm-arm64.cc
@@ -19,7 +19,7 @@ namespace v8 {
namespace internal {
-Disassembler::Disassembler() {
+DisassemblingDecoder::DisassemblingDecoder() {
buffer_size_ = 256;
buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
buffer_pos_ = 0;
@@ -27,7 +27,7 @@ Disassembler::Disassembler() {
}
-Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+DisassemblingDecoder::DisassemblingDecoder(char* text_buffer, int buffer_size) {
buffer_size_ = buffer_size;
buffer_ = text_buffer;
buffer_pos_ = 0;
@@ -35,19 +35,17 @@ Disassembler::Disassembler(char* text_buffer, int buffer_size) {
}
-Disassembler::~Disassembler() {
+DisassemblingDecoder::~DisassemblingDecoder() {
if (own_buffer_) {
free(buffer_);
}
}
-char* Disassembler::GetOutput() {
- return buffer_;
-}
+char* DisassemblingDecoder::GetOutput() { return buffer_; }
-void Disassembler::VisitAddSubImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubImmediate(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
(instr->ImmAddSub() == 0) ? true : false;
@@ -92,7 +90,7 @@ void Disassembler::VisitAddSubImmediate(Instruction* instr) {
}
-void Disassembler::VisitAddSubShifted(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
@@ -139,7 +137,7 @@ void Disassembler::VisitAddSubShifted(Instruction* instr) {
}
-void Disassembler::VisitAddSubExtended(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubExtended(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
const char *mnemonic = "";
Extend mode = static_cast<Extend>(instr->ExtendMode());
@@ -177,7 +175,7 @@ void Disassembler::VisitAddSubExtended(Instruction* instr) {
}
-void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
+void DisassemblingDecoder::VisitAddSubWithCarry(Instruction* instr) {
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
const char *form = "'Rd, 'Rn, 'Rm";
@@ -212,7 +210,7 @@ void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
}
-void Disassembler::VisitLogicalImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
@@ -255,7 +253,7 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
}
-bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
DCHECK((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
@@ -284,7 +282,7 @@ bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
}
-void Disassembler::VisitLogicalShifted(Instruction* instr) {
+void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
const char *mnemonic = "";
@@ -335,7 +333,7 @@ void Disassembler::VisitLogicalShifted(Instruction* instr) {
}
-void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalCompareRegister(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
@@ -350,7 +348,8 @@ void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
}
-void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalCompareImmediate(
+ Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
@@ -365,7 +364,7 @@ void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
}
-void Disassembler::VisitConditionalSelect(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalSelect(Instruction* instr) {
bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
bool rn_is_rm = (instr->Rn() == instr->Rm());
const char *mnemonic = "";
@@ -418,7 +417,7 @@ void Disassembler::VisitConditionalSelect(Instruction* instr) {
}
-void Disassembler::VisitBitfield(Instruction* instr) {
+void DisassemblingDecoder::VisitBitfield(Instruction* instr) {
unsigned s = instr->ImmS();
unsigned r = instr->ImmR();
unsigned rd_size_minus_1 =
@@ -496,7 +495,7 @@ void Disassembler::VisitBitfield(Instruction* instr) {
}
-void Disassembler::VisitExtract(Instruction* instr) {
+void DisassemblingDecoder::VisitExtract(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
@@ -517,7 +516,7 @@ void Disassembler::VisitExtract(Instruction* instr) {
}
-void Disassembler::VisitPCRelAddressing(Instruction* instr) {
+void DisassemblingDecoder::VisitPCRelAddressing(Instruction* instr) {
switch (instr->Mask(PCRelAddressingMask)) {
case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
// ADRP is not implemented.
@@ -526,7 +525,7 @@ void Disassembler::VisitPCRelAddressing(Instruction* instr) {
}
-void Disassembler::VisitConditionalBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitConditionalBranch(Instruction* instr) {
switch (instr->Mask(ConditionalBranchMask)) {
case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
default: UNREACHABLE();
@@ -534,7 +533,8 @@ void Disassembler::VisitConditionalBranch(Instruction* instr) {
}
-void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
+void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
+ Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Xn";
@@ -554,7 +554,7 @@ void Disassembler::VisitUnconditionalBranchToRegister(Instruction* instr) {
}
-void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitUnconditionalBranch(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'BImmUncn";
@@ -567,7 +567,7 @@ void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
}
-void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
+void DisassemblingDecoder::VisitDataProcessing1Source(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'Rn";
@@ -588,7 +588,7 @@ void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
}
-void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
+void DisassemblingDecoder::VisitDataProcessing2Source(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Rd, 'Rn, 'Rm";
@@ -609,7 +609,7 @@ void Disassembler::VisitDataProcessing2Source(Instruction* instr) {
}
-void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
+void DisassemblingDecoder::VisitDataProcessing3Source(Instruction* instr) {
bool ra_is_zr = RaIsZROrSP(instr);
const char *mnemonic = "";
const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
@@ -687,7 +687,7 @@ void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
}
-void Disassembler::VisitCompareBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitCompareBranch(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rt, 'BImmCmpa";
@@ -702,7 +702,7 @@ void Disassembler::VisitCompareBranch(Instruction* instr) {
}
-void Disassembler::VisitTestBranch(Instruction* instr) {
+void DisassemblingDecoder::VisitTestBranch(Instruction* instr) {
const char *mnemonic = "";
// If the top bit of the immediate is clear, the tested register is
// disassembled as Wt, otherwise Xt. As the top bit of the immediate is
@@ -719,7 +719,7 @@ void Disassembler::VisitTestBranch(Instruction* instr) {
}
-void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitMoveWideImmediate(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'IMoveImm";
@@ -758,7 +758,7 @@ void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
V(LDR_s, "ldr", "'St") \
V(LDR_d, "ldr", "'Dt")
-void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePreIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePreIndex)";
@@ -772,7 +772,7 @@ void Disassembler::VisitLoadStorePreIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePostIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePostIndex)";
@@ -786,7 +786,7 @@ void Disassembler::VisitLoadStorePostIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStoreUnsignedOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStoreUnsignedOffset)";
@@ -801,7 +801,7 @@ void Disassembler::VisitLoadStoreUnsignedOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStoreRegisterOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStoreRegisterOffset)";
@@ -816,7 +816,7 @@ void Disassembler::VisitLoadStoreRegisterOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStoreUnscaledOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Wt, ['Xns'ILS]";
const char *form_x = "'Xt, ['Xns'ILS]";
@@ -847,7 +847,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(Instruction* instr) {
}
-void Disassembler::VisitLoadLiteral(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) {
const char *mnemonic = "ldr";
const char *form = "(LoadLiteral)";
@@ -873,7 +873,7 @@ void Disassembler::VisitLoadLiteral(Instruction* instr) {
V(STP_d, "stp", "'Dt, 'Dt2", "8") \
V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
-void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePairPostIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePairPostIndex)";
@@ -887,7 +887,7 @@ void Disassembler::VisitLoadStorePairPostIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePairPreIndex(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePairPreIndex)";
@@ -901,7 +901,7 @@ void Disassembler::VisitLoadStorePairPreIndex(Instruction* instr) {
}
-void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
+void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(LoadStorePairOffset)";
@@ -915,7 +915,7 @@ void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
}
-void Disassembler::VisitFPCompare(Instruction* instr) {
+void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm";
const char *form_zero = "'Fn, #0.0";
@@ -931,7 +931,7 @@ void Disassembler::VisitFPCompare(Instruction* instr) {
}
-void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
+void DisassemblingDecoder::VisitFPConditionalCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
@@ -946,7 +946,7 @@ void Disassembler::VisitFPConditionalCompare(Instruction* instr) {
}
-void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
+void DisassemblingDecoder::VisitFPConditionalSelect(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
@@ -959,7 +959,7 @@ void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
}
-void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
+void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fd, 'Fn";
@@ -987,7 +987,7 @@ void Disassembler::VisitFPDataProcessing1Source(Instruction* instr) {
}
-void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
+void DisassemblingDecoder::VisitFPDataProcessing2Source(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Fd, 'Fn, 'Fm";
@@ -1011,7 +1011,7 @@ void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
}
-void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
+void DisassemblingDecoder::VisitFPDataProcessing3Source(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
@@ -1030,7 +1030,7 @@ void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
}
-void Disassembler::VisitFPImmediate(Instruction* instr) {
+void DisassemblingDecoder::VisitFPImmediate(Instruction* instr) {
const char *mnemonic = "";
const char *form = "(FPImmediate)";
@@ -1043,7 +1043,7 @@ void Disassembler::VisitFPImmediate(Instruction* instr) {
}
-void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
+void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "(FPIntegerConvert)";
const char *form_rf = "'Rd, 'Fn";
@@ -1099,7 +1099,7 @@ void Disassembler::VisitFPIntegerConvert(Instruction* instr) {
}
-void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
+void DisassemblingDecoder::VisitFPFixedPointConvert(Instruction* instr) {
const char *mnemonic = "";
const char *form = "'Rd, 'Fn, 'IFPFBits";
const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
@@ -1126,7 +1126,7 @@ void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
}
-void Disassembler::VisitSystem(Instruction* instr) {
+void DisassemblingDecoder::VisitSystem(Instruction* instr) {
// Some system instructions hijack their Op and Cp fields to represent a
// range of immediates instead of indicating a different instruction. This
// makes the decoding tricky.
@@ -1187,7 +1187,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
-void Disassembler::VisitException(Instruction* instr) {
+void DisassemblingDecoder::VisitException(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'IDebug";
@@ -1206,23 +1206,23 @@ void Disassembler::VisitException(Instruction* instr) {
}
-void Disassembler::VisitUnimplemented(Instruction* instr) {
+void DisassemblingDecoder::VisitUnimplemented(Instruction* instr) {
Format(instr, "unimplemented", "(Unimplemented)");
}
-void Disassembler::VisitUnallocated(Instruction* instr) {
+void DisassemblingDecoder::VisitUnallocated(Instruction* instr) {
Format(instr, "unallocated", "(Unallocated)");
}
-void Disassembler::ProcessOutput(Instruction* /*instr*/) {
+void DisassemblingDecoder::ProcessOutput(Instruction* /*instr*/) {
// The base disasm does nothing more than disassembling into a buffer.
}
-void Disassembler::Format(Instruction* instr, const char* mnemonic,
- const char* format) {
+void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic,
+ const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
DCHECK(mnemonic != NULL);
@@ -1237,7 +1237,7 @@ void Disassembler::Format(Instruction* instr, const char* mnemonic,
}
-void Disassembler::Substitute(Instruction* instr, const char* string) {
+void DisassemblingDecoder::Substitute(Instruction* instr, const char* string) {
char chr = *string++;
while (chr != '\0') {
if (chr == '\'') {
@@ -1250,7 +1250,8 @@ void Disassembler::Substitute(Instruction* instr, const char* string) {
}
-int Disassembler::SubstituteField(Instruction* instr, const char* format) {
+int DisassemblingDecoder::SubstituteField(Instruction* instr,
+ const char* format) {
switch (format[0]) {
case 'R': // Register. X or W, selected by sf bit.
case 'F': // FP Register. S or D, selected by type field.
@@ -1276,8 +1277,8 @@ int Disassembler::SubstituteField(Instruction* instr, const char* format) {
}
-int Disassembler::SubstituteRegisterField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
+ const char* format) {
unsigned reg_num = 0;
unsigned field_len = 2;
switch (format[1]) {
@@ -1341,8 +1342,8 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
}
-int Disassembler::SubstituteImmediateField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'I');
switch (format[1]) {
@@ -1452,8 +1453,8 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
}
-int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
+ const char* format) {
DCHECK((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();
@@ -1488,8 +1489,8 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
}
-int Disassembler::SubstituteLiteralField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "LValue", 6) == 0);
USE(format);
@@ -1507,7 +1508,8 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
}
-int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
+int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'H');
DCHECK(instr->ShiftDP() <= 0x3);
@@ -1530,8 +1532,8 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
}
-int Disassembler::SubstituteConditionField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
@@ -1551,8 +1553,8 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
}
-int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
+ const char* format) {
USE(format);
DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
@@ -1572,8 +1574,8 @@ int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
}
-int Disassembler::SubstituteBranchTargetField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "BImm", 4) == 0);
int64_t offset = 0;
@@ -1599,8 +1601,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
}
-int Disassembler::SubstituteExtendField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "Ext", 3) == 0);
DCHECK(instr->ExtendMode() <= 7);
USE(format);
@@ -1626,8 +1628,8 @@ int Disassembler::SubstituteExtendField(Instruction* instr,
}
-int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
+ const char* format) {
DCHECK(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
@@ -1655,8 +1657,8 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
}
-int Disassembler::SubstitutePrefetchField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'P');
USE(format);
@@ -1670,8 +1672,8 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,
return 6;
}
-int Disassembler::SubstituteBarrierField(Instruction* instr,
- const char* format) {
+int DisassemblingDecoder::SubstituteBarrierField(Instruction* instr,
+ const char* format) {
DCHECK(format[0] == 'M');
USE(format);
@@ -1689,13 +1691,13 @@ int Disassembler::SubstituteBarrierField(Instruction* instr,
}
-void Disassembler::ResetOutput() {
+void DisassemblingDecoder::ResetOutput() {
buffer_pos_ = 0;
buffer_[buffer_pos_] = 0;
}
-void Disassembler::AppendToOutput(const char* format, ...) {
+void DisassemblingDecoder::AppendToOutput(const char* format, ...) {
va_list args;
va_start(args, format);
buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_, format, args);
@@ -1761,7 +1763,7 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-class BufferDisassembler : public v8::internal::Disassembler {
+class BufferDisassembler : public v8::internal::DisassemblingDecoder {
public:
explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
: out_buffer_(out_buffer) { }
diff --git a/chromium/v8/src/arm64/disasm-arm64.h b/chromium/v8/src/arm64/disasm-arm64.h
index c6b189bf971..4b477bc438e 100644
--- a/chromium/v8/src/arm64/disasm-arm64.h
+++ b/chromium/v8/src/arm64/disasm-arm64.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
-class Disassembler: public DecoderVisitor {
+class DisassemblingDecoder : public DecoderVisitor {
public:
- Disassembler();
- Disassembler(char* text_buffer, int buffer_size);
- virtual ~Disassembler();
+ DisassemblingDecoder();
+ DisassemblingDecoder(char* text_buffer, int buffer_size);
+ virtual ~DisassemblingDecoder();
char* GetOutput();
// Declare all Visitor functions.
@@ -73,7 +73,7 @@ class Disassembler: public DecoderVisitor {
};
-class PrintDisassembler: public Disassembler {
+class PrintDisassembler : public DisassemblingDecoder {
public:
explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
~PrintDisassembler() { }
@@ -85,6 +85,7 @@ class PrintDisassembler: public Disassembler {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_DISASM_ARM64_H
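
Renaming Disassembler to DisassemblingDecoder frees the Disassembler name and
makes the visitor role explicit. A hedged driving sketch (the
Decoder/DispatchingDecoderVisitor plumbing follows decoder-arm64.h; pc and
the buffer size are assumptions):

  char buf[128];
  DisassemblingDecoder disasm(buf, sizeof(buf));
  Decoder<DispatchingDecoderVisitor> decoder;
  decoder.AppendVisitor(&disasm);  // visit each decoded instruction
  decoder.Decode(reinterpret_cast<Instruction*>(pc));
  printf("%s\n", disasm.GetOutput());  // formatted text for one instruction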
diff --git a/chromium/v8/src/arm64/frames-arm64.h b/chromium/v8/src/arm64/frames-arm64.h
index 9e6551783da..783514437f2 100644
--- a/chromium/v8/src/arm64/frames-arm64.h
+++ b/chromium/v8/src/arm64/frames-arm64.h
@@ -63,6 +63,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/chromium/v8/src/arm64/instructions-arm64.cc b/chromium/v8/src/arm64/instructions-arm64.cc
index 60243d8306d..d23533d8bc7 100644
--- a/chromium/v8/src/arm64/instructions-arm64.cc
+++ b/chromium/v8/src/arm64/instructions-arm64.cc
@@ -219,13 +219,13 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
}
-void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) {
if (IsPCRelAddressing()) {
- SetPCRelImmTarget(target);
+ SetPCRelImmTarget(isolate, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
- SetUnresolvedInternalReferenceImmTarget(target);
+ SetUnresolvedInternalReferenceImmTarget(isolate, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
@@ -233,7 +233,7 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
}
-void Instruction::SetPCRelImmTarget(Instruction* target) {
+void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@@ -243,7 +243,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
- PatchingAssembler patcher(this,
+ PatchingAssembler patcher(isolate, this,
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@@ -284,7 +284,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
-void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
+void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
+ Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@@ -293,7 +294,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
- PatchingAssembler patcher(this, 2);
+ PatchingAssembler patcher(isolate, this, 2);
patcher.brk(high16);
patcher.brk(low16);
}
diff --git a/chromium/v8/src/arm64/instructions-arm64.h b/chromium/v8/src/arm64/instructions-arm64.h
index 145a7c9053a..db4e3d03a81 100644
--- a/chromium/v8/src/arm64/instructions-arm64.h
+++ b/chromium/v8/src/arm64/instructions-arm64.h
@@ -373,8 +373,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
- void SetImmPCOffsetTarget(Instruction* target);
- void SetUnresolvedInternalReferenceImmTarget(Instruction* target);
+ void SetImmPCOffsetTarget(Isolate* isolate, Instruction* target);
+ void SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
+ Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@@ -410,7 +411,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
- void SetPCRelImmTarget(Instruction* target);
+ void SetPCRelImmTarget(Isolate* isolate, Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
@@ -532,7 +533,8 @@ enum DebugParameters {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
diff --git a/chromium/v8/src/arm64/instrument-arm64.h b/chromium/v8/src/arm64/instrument-arm64.h
index 86ddfcbbc1e..02816e943e2 100644
--- a/chromium/v8/src/arm64/instrument-arm64.h
+++ b/chromium/v8/src/arm64/instrument-arm64.h
@@ -80,6 +80,7 @@ class Instrument: public DecoderVisitor {
uint64_t sample_period_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_INSTRUMENT_ARM64_H_
diff --git a/chromium/v8/src/arm64/interface-descriptors-arm64.cc b/chromium/v8/src/arm64/interface-descriptors-arm64.cc
index 3dac70e7844..485aa780e39 100644
--- a/chromium/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/chromium/v8/src/arm64/interface-descriptors-arm64.cc
@@ -65,6 +65,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; }
+const Register RestParamAccessDescriptor::parameter_count() { return x2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return x3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return x4; }
+
+
const Register ApiGetterDescriptor::function_address() { return x2; }
@@ -78,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
@@ -111,6 +108,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return x0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return x0; }
@@ -133,9 +134,20 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: closure
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ Register registers[] = {x3, x2, x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x3: array literals array
+ // x3: closure
// x2: array literal index
// x1: constant elements
Register registers[] = {x3, x2, x1};
@@ -145,7 +157,7 @@ void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x3: object literals array
+ // x3: closure
// x2: object literal index
// x1: constant properties
// x0: object literal flags
@@ -208,7 +220,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
- // x4 : original constructor (for IsSuperConstructorCall)
+ // x4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x4, x2};
@@ -225,6 +237,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ // x2: allocation site or undefined
+ Register registers[] = {x1, x3, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ Register registers[] = {x1, x3, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: length
@@ -250,6 +283,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
@@ -384,6 +424,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x1, // JSFunction
+ x3, // the new target
x0, // actual number of arguments
x2, // expected number of arguments
};
@@ -425,37 +466,40 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- x1, // math rounding function
- x3, // vector slot id
+ x0, // argument count (not including receiver)
+ x2, // address of first argument
+ x1 // the target callable to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- x1, // math rounding function
- x3, // vector slot id
- x4, // type vector
+ x0, // argument count (not including receiver)
+ x3, // new target
+ x1, // constructor to call
+ x2 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- x0, // argument count (including receiver)
- x2, // address of first argument
- x1 // the target callable to be call
+ x0, // argument count (argc)
+ x11, // address of first argument (argv)
+ x1 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
} // namespace internal
} // namespace v8
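
Each InitializePlatformSpecific override above just pins a calling
convention to fixed arm64 registers. A hedged template for a hypothetical
new descriptor (MyNewDescriptor is a placeholder, not from this patch):

  void MyNewDescriptor::InitializePlatformSpecific(
      CallInterfaceDescriptorData* data) {
    Register registers[] = {x1,   // target
                            x0};  // argument count
    data->InitializePlatformSpecific(arraysize(registers), registers);
  }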
diff --git a/chromium/v8/src/arm64/interface-descriptors-arm64.h b/chromium/v8/src/arm64/interface-descriptors-arm64.h
index 76def88326a..20ab8cb6124 100644
--- a/chromium/v8/src/arm64/interface-descriptors-arm64.h
+++ b/chromium/v8/src/arm64/interface-descriptors-arm64.h
@@ -20,7 +20,7 @@ class PlatformInterfaceDescriptor {
private:
TargetAddressStorageMode storage_mode_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
diff --git a/chromium/v8/src/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/arm64/macro-assembler-arm64-inl.h
index 445513bf5ab..60418ad8394 100644
--- a/chromium/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/chromium/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1434,32 +1434,6 @@ void MacroAssembler::IsObjectNameType(Register object,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // If cmp result is lt, the following ccmp will clear all flags.
- // Z == 0, N == V implies gt condition.
- Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
-
- // If we didn't get a valid label object just fall through and leave the
- // flags updated.
- if (fail != NULL) {
- B(gt, fail);
- }
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
Label* not_string,
@@ -1488,7 +1462,8 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
-void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
+ DCHECK(count >= 0);
uint64_t size = count * unit_size;
if (size == 0) {
@@ -1516,6 +1491,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
return;
}
+ AssertPositiveOrZero(count);
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
@@ -1543,7 +1519,8 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
}
-void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+void MacroAssembler::Drop(int64_t count, uint64_t unit_size) {
+ DCHECK(count >= 0);
uint64_t size = count * unit_size;
if (size == 0) {
@@ -1574,6 +1551,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
return;
}
+ AssertPositiveOrZero(count);
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
@@ -1683,6 +1661,7 @@ void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
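
Claim and Drop now take a signed immediate count, and the register variants
gain AssertPositiveOrZero, so a negative count fails a debug check instead of
silently wrapping as uint64_t. A hedged illustration (assuming the default
unit_size of kXRegSize):

  masm.Claim(2);      // reserve 2 * kXRegSize bytes of stack
  masm.Drop(2);       // release them again
  // masm.Claim(-1);  // previously wrapped to a huge unsigned size;
  //                  // now DCHECK(count >= 0) fires in debug builds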
diff --git a/chromium/v8/src/arm64/macro-assembler-arm64.cc b/chromium/v8/src/arm64/macro-assembler-arm64.cc
index 5e8abe72157..fbf459db46c 100644
--- a/chromium/v8/src/arm64/macro-assembler-arm64.cc
+++ b/chromium/v8/src/arm64/macro-assembler-arm64.cc
@@ -9,6 +9,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/arm64/frames-arm64.h"
@@ -21,9 +22,9 @@ namespace internal {
#define __
-MacroAssembler::MacroAssembler(Isolate* arg_isolate,
- byte * buffer,
- unsigned buffer_size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
+ unsigned buffer_size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, buffer_size),
generating_stub_(false),
#if DEBUG
@@ -34,9 +35,9 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate,
sp_(jssp),
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
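
CodeObjectRequired makes the old "NULL isolate means no code object"
convention explicit at every construction site. A hedged sketch of the two
modes (buffer and buffer_size are placeholders):

  // Stub/builtin generation: a code object handle is needed later.
  MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
  // Raw machine-code snippets such as CreateExpFunction() above:
  MacroAssembler raw(isolate, buffer, buffer_size, CodeObjectRequired::kNo);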
@@ -208,7 +209,7 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
// halfword, and movk for subsequent halfwords.
DCHECK((reg_size % 16) == 0);
bool first_mov_done = false;
- for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+ for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
if (imm16 != ignored_halfword) {
if (!first_mov_done) {
@@ -1342,6 +1343,8 @@ void MacroAssembler::AssertStackConsistency() {
// Avoid generating AssertStackConsistency checks for the Push in Abort.
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
+ // Restore StackPointer().
+ sub(StackPointer(), csp, StackPointer());
Abort(kTheCurrentStackPointerIsBelowCsp);
}
@@ -1625,6 +1628,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -1653,6 +1669,17 @@ void MacroAssembler::AssertString(Register object) {
}
+void MacroAssembler::AssertPositiveOrZero(Register value) {
+ if (emit_debug_code()) {
+ Label done;
+ int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
+ Tbz(value, sign_bit, &done);
+ Abort(kUnexpectedNegativeValue);
+ Bind(&done);
+ }
+}
+
+
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -1700,62 +1727,30 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- Ldr(target, GlobalObjectMemOperand());
- Ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- Ldr(target, ContextMemOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Register function,
- int native_context_index) {
- DCHECK(!AreAliased(target, function));
- GetBuiltinFunction(function, native_context_index);
- // Load the code entry point from the builtins object.
- Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Get the builtin entry in x2 and setup the function object in x1.
- GetBuiltinEntry(x2, x1, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(x2));
- Call(x2);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(x2);
- }
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Mov(x0, num_arguments);
- JumpToExternalReference(ext);
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, x1);
+ InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
}
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
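
Call sites simplify in step: the argument count now comes from
Runtime::FunctionForId(fid)->nargs and a single result is asserted. A hedged
before/after sketch (kRegExpExec is only an example id):

  // before: TailCallRuntime(Runtime::kRegExpExec, 4, 1);
  // after:  nargs is looked up from the runtime function table.
  TailCallRuntime(Runtime::kRegExpExec);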
@@ -2152,152 +2147,6 @@ void MacroAssembler::ClampDoubleToUint8(Register output,
}
-void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in a tight loop.
- DCHECK(!AreAliased(dst, src,
- scratch1, scratch2, scratch3, scratch4, scratch5));
- DCHECK(count >= 2);
-
- const Register& remaining = scratch3;
- Mov(remaining, count / 2);
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- Sub(dst_untagged, dst, kHeapObjectTag);
- Sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields in pairs.
- Label loop;
- Bind(&loop);
- Ldp(scratch4, scratch5,
- MemOperand(src_untagged, kXRegSize* 2, PostIndex));
- Stp(scratch4, scratch5,
- MemOperand(dst_untagged, kXRegSize* 2, PostIndex));
- Sub(remaining, remaining, 1);
- Cbnz(remaining, &loop);
-
- // Handle the leftovers.
- if (count & 1) {
- Ldr(scratch4, MemOperand(src_untagged));
- Str(scratch4, MemOperand(dst_untagged));
- }
-}
-
-
-void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- sub(dst_untagged, dst, kHeapObjectTag);
- sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields in pairs.
- for (unsigned i = 0; i < count / 2; i++) {
- Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
- Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
- }
-
- // Handle the leftovers.
- if (count & 1) {
- Ldr(scratch3, MemOperand(src_untagged));
- Str(scratch3, MemOperand(dst_untagged));
- }
-}
-
-
-void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- Sub(dst_untagged, dst, kHeapObjectTag);
- Sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields one by one.
- for (unsigned i = 0; i < count; i++) {
- Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
- Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
- }
-}
-
-
-void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
- unsigned count) {
- // One of two methods is used:
- //
- // For high 'count' values where many scratch registers are available:
- // Untag src and dst into scratch registers.
- // Copy src->dst in a tight loop.
- //
- // For low 'count' values or where few scratch registers are available:
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- //
- // In both cases, fields are copied in pairs if possible, and left-overs are
- // handled separately.
- DCHECK(!AreAliased(dst, src));
- DCHECK(!temps.IncludesAliasOf(dst));
- DCHECK(!temps.IncludesAliasOf(src));
- DCHECK(!temps.IncludesAliasOf(xzr));
-
- if (emit_debug_code()) {
- Cmp(dst, src);
- Check(ne, kTheSourceAndDestinationAreTheSame);
- }
-
- // The value of 'count' at which a loop will be generated (if there are
- // enough scratch registers).
- static const unsigned kLoopThreshold = 8;
-
- UseScratchRegisterScope masm_temps(this);
- if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
- CopyFieldsLoopPairsHelper(dst, src, count,
- Register(temps.PopLowestIndex()),
- Register(temps.PopLowestIndex()),
- Register(temps.PopLowestIndex()),
- masm_temps.AcquireX(),
- masm_temps.AcquireX());
- } else if (temps.Count() >= 2) {
- CopyFieldsUnrolledPairsHelper(dst, src, count,
- Register(temps.PopLowestIndex()),
- Register(temps.PopLowestIndex()),
- masm_temps.AcquireX(),
- masm_temps.AcquireX());
- } else if (temps.Count() == 1) {
- CopyFieldsUnrolledHelper(dst, src, count,
- Register(temps.PopLowestIndex()),
- masm_temps.AcquireX(),
- masm_temps.AcquireX());
- } else {
- UNREACHABLE();
- }
-}
-
-
void MacroAssembler::CopyBytes(Register dst,
Register src,
Register length,
@@ -2353,38 +2202,35 @@ void MacroAssembler::CopyBytes(Register dst,
}
-void MacroAssembler::FillFields(Register dst,
- Register field_count,
- Register filler) {
- DCHECK(!dst.Is(csp));
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
+ Register filler) {
+ DCHECK(!current_address.Is(csp));
UseScratchRegisterScope temps(this);
- Register field_ptr = temps.AcquireX();
- Register counter = temps.AcquireX();
+ Register distance_in_words = temps.AcquireX();
Label done;
- // Decrement count. If the result < zero, count was zero, and there's nothing
- // to do. If count was one, flags are set to fail the gt condition at the end
- // of the pairs loop.
- Subs(counter, field_count, 1);
- B(lt, &done);
+ // Calculate the distance. If it's <= zero then there's nothing to do.
+ Subs(distance_in_words, end_address, current_address);
+ B(le, &done);
// There's at least one field to fill, so do this unconditionally.
- Str(filler, MemOperand(dst, kPointerSize, PostIndex));
+ Str(filler, MemOperand(current_address));
- // If the bottom bit of counter is set, there are an even number of fields to
- // fill, so pull the start pointer back by one field, allowing the pairs loop
- // to overwrite the field that was stored above.
- And(field_ptr, counter, 1);
- Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
+ // If distance_in_words is an odd number of words, advance current_address
+ // by one word; otherwise the pairs loop will overwrite the field that was
+ // stored above.
+ And(distance_in_words, distance_in_words, kPointerSize);
+ Add(current_address, current_address, distance_in_words);
// Store filler to memory in pairs.
- Label entry, loop;
+ Label loop, entry;
B(&entry);
Bind(&loop);
- Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
- Subs(counter, counter, 2);
+ Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
Bind(&entry);
- B(gt, &loop);
+ Cmp(current_address, end_address);
+ B(lo, &loop);
Bind(&done);
}
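
A plain C++ sketch of the fill strategy above (illustrative only, assuming
word-aligned pointers): store one word unconditionally, skip it when the
word count is odd, then finish with paired stores.

  void FillWords(uintptr_t* cur, uintptr_t* end, uintptr_t filler) {
    if (cur >= end) return;      // distance <= 0: nothing to do
    *cur = filler;               // at least one field to fill
    if ((end - cur) & 1) ++cur;  // odd count: keep the stored word
    while (cur < end) {          // even remainder: fill in pairs
      cur[0] = filler;
      cur[1] = filler;
      cur += 2;
    }
  }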
@@ -2423,9 +2269,10 @@ void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Label* failure) {
DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2));
- static const int kFlatOneByteStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch1, first, kFlatOneByteStringMask);
And(scratch2, second, kFlatOneByteStringMask);
Cmp(scratch1, kFlatOneByteStringTag);
@@ -2479,8 +2326,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
@@ -2500,7 +2345,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(x0));
DCHECK(expected.is_immediate() || expected.reg().is(x2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -2535,11 +2379,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// If the argument counts may mismatch, generate a call to the argument
// adaptor.
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- Mov(x3, Operand(code_constant));
- Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -2548,7 +2387,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
// If the arg counts don't match, no extra code is emitted by
- // MAsm::InvokeCode and we can just fall through.
+ // MAsm::InvokeFunctionCode and we can just fall through.
B(done);
}
} else {
@@ -2559,24 +2398,80 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ Mov(x4, Operand(step_in_enabled));
+ ldrb(x4, MemOperand(x4));
+ CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(x1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
- Label done;
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+ }
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- &definitely_mismatches, call_wrapper);
+ InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
+ call_wrapper);
// If we are certain that actual != expected, then we know InvokePrologue will
// have handled the call through the argument adaptor mechanism.
// The called function expects the call kind in x5.
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = x4;
+ Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -2594,6 +2489,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2605,7 +2501,6 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(x1));
Register expected_reg = x2;
- Register code_reg = x3;
Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
@@ -2616,11 +2511,10 @@ void MacroAssembler::InvokeFunction(Register function,
Ldrsw(expected_reg,
FieldMemOperand(expected_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- Ldr(code_reg,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, new_target, expected, actual, flag,
+ call_wrapper);
}
@@ -2636,16 +2530,10 @@ void MacroAssembler::InvokeFunction(Register function,
// (See FullCodeGenerator::Generate().)
DCHECK(function.Is(x1));
- Register code_reg = x3;
-
// Set up the context.
Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
}
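
The split between InvokePrologue and InvokeFunctionCode boils down to one decision: call through the code entry directly, or route through the adaptor. A hedged C++ sketch of that decision, with hypothetical helper names and the -1 sentinel noted in the comment above:

#include <cstdio>

constexpr int kDontAdaptArgumentsSentinel = -1;  // the -1 marker noted above

void CallThroughCodeEntry() {  // hypothetical stand-in for the direct call
  std::puts("call via JSFunction::kCodeEntryOffset");
}

void CallArgumentsAdaptor() {  // hypothetical stand-in for the trampoline
  std::puts("call ArgumentsAdaptorTrampoline");
}

void Invoke(int expected, int actual) {
  if (expected == kDontAdaptArgumentsSentinel || expected == actual) {
    // "Definitely matches": InvokePrologue emits nothing extra and
    // InvokeFunctionCode calls through the code entry directly.
    CallThroughCodeEntry();
  } else {
    // Possible mismatch: route the call through the arguments adaptor.
    CallArgumentsAdaptor();
  }
}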
@@ -2758,14 +2646,13 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
void MacroAssembler::StubPrologue() {
- DCHECK(StackPointer().Is(jssp));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
__ Mov(temp, Smi::FromInt(StackFrame::STUB));
// Compiled stubs don't age, and so they don't need the predictable code
// ageing sequence.
__ Push(lr, fp, cp, temp);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}
@@ -2998,12 +2885,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- Ldr(dst, GlobalObjectMemOperand());
- Ldr(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
@@ -3082,23 +2963,24 @@ void MacroAssembler::Allocate(int object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
+ // Set up allocation top address and allocation limit registers.
Register top_address = scratch1;
- Register allocation_limit = scratch2;
+ Register alloc_limit = scratch2;
+ Register result_end = scratch3;
Mov(top_address, Operand(heap_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and the allocation limit.
- Ldp(result, allocation_limit, MemOperand(top_address));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- Ldr(scratch3, MemOperand(top_address));
- Cmp(result, scratch3);
+ Ldr(alloc_limit, MemOperand(top_address));
+ Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load the allocation limit. 'result' already contains the allocation top.
- Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ Ldr(alloc_limit, MemOperand(top_address, limit - top));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -3106,10 +2988,10 @@ void MacroAssembler::Allocate(int object_size,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted.
- Adds(scratch3, result, object_size);
- Ccmp(scratch3, allocation_limit, CFlag, cc);
+ Adds(result_end, result, object_size);
+ Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
- Str(scratch3, MemOperand(top_address));
+ Str(result_end, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3118,30 +3000,29 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
// We apply salt to the original zap value to easily spot the values.
Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
- Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
- Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
}
B(gc_required);
return;
}
UseScratchRegisterScope temps(this);
- Register scratch3 = temps.AcquireX();
+ Register scratch2 = temps.AcquireX();
- DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
- DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
- scratch1.Is64Bits() && scratch2.Is64Bits());
+  // |object_size| and |result_end| may overlap; other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, scratch2));
+ DCHECK(!AreAliased(result_end, result, scratch, scratch2));
+ DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
+ result_end.Is64Bits());
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDP.
@@ -3153,23 +3034,23 @@ void MacroAssembler::Allocate(Register object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register top_address = scratch1;
- Register allocation_limit = scratch2;
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
+ Register alloc_limit = scratch2;
Mov(top_address, heap_allocation_top);
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and the allocation limit.
- Ldp(result, allocation_limit, MemOperand(top_address));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- Ldr(scratch3, MemOperand(top_address));
- Cmp(result, scratch3);
+ Ldr(alloc_limit, MemOperand(top_address));
+ Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load the allocation limit. 'result' already contains the allocation top.
- Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ Ldr(alloc_limit, MemOperand(top_address, limit - top));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -3178,19 +3059,19 @@ void MacroAssembler::Allocate(Register object_size,
// Calculate new top and bail out if new space is exhausted
if ((flags & SIZE_IN_WORDS) != 0) {
- Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
+ Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
- Adds(scratch3, result, object_size);
+ Adds(result_end, result, object_size);
}
if (emit_debug_code()) {
- Tst(scratch3, kObjectAlignmentMask);
+ Tst(result_end, kObjectAlignmentMask);
Check(eq, kUnalignedAllocationInNewSpace);
}
- Ccmp(scratch3, allocation_limit, CFlag, cc);
+ Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
- Str(scratch3, MemOperand(top_address));
+ Str(result_end, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3388,6 +3269,28 @@ void MacroAssembler::JumpIfObjectType(Register object,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ Str(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
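
AllocateJSValue rides on the Allocate fast path rewritten earlier in this diff. As a rough model, that fast path is bump-pointer allocation over a top/limit pair kept adjacent in memory, which is what makes the single Ldp load legal. A plain-C++ sketch under those assumptions; the overflow handling via the carry flag (the Ccmp/CFlag step) is omitted:

#include <cstddef>
#include <cstdint>

struct NewSpace {
  uintptr_t top;    // next free address; loaded into |result|
  uintptr_t limit;  // end of the area; loaded into |alloc_limit|
};

// Returns false where the generated code branches to |gc_required|.
bool TryAllocate(NewSpace* space, size_t object_size, uintptr_t* result) {
  uintptr_t result_end = space->top + object_size;  // Adds(result_end, ...)
  if (result_end > space->limit) return false;      // B(hi, gc_required)
  *result = space->top;                             // result holds old top
  space->top = result_end;                          // Str to the top address
  return true;
}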
void MacroAssembler::JumpIfNotObjectType(Register object,
Register map,
Register type_reg,
@@ -3570,6 +3473,14 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
}
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ LoadRoot(temp, index);
+ Push(temp);
+}
+
+
void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
@@ -3769,10 +3680,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- Ldr(scratch1, FieldMemOperand(scratch1, offset));
- Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+ Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -3984,14 +3892,18 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
- FPRegister::kAllocatableFPRegisters));
+ PushCPURegList(CPURegList(
+ CPURegister::kFPRegister, kDRegSizeInBits,
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask()));
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
- FPRegister::kAllocatableFPRegisters));
+ PopCPURegList(CPURegList(
+ CPURegister::kFPRegister, kDRegSizeInBits,
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask()));
PopSafepointRegisters();
}
@@ -4299,8 +4211,8 @@ void MacroAssembler::HasColor(Register object,
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
// Check for the color.
if (first_bit == 0) {
@@ -4328,8 +4240,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
}
@@ -4365,21 +4277,18 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register shift_scratch,
- Register load_scratch,
- Register length_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register shift_scratch, Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@@ -4390,71 +4299,7 @@ void MacroAssembler::EnsureNotWhite(
// If the value is black or grey we don't need to do anything.
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- Label done;
- Tbnz(load_scratch, 0, &done);
-
- // Value is white. We check whether it is data that doesn't need scanning.
- Register map = load_scratch; // Holds map while checking type.
- Label is_data_object;
-
- // Check for heap-number.
- Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- Mov(length_scratch, HeapNumber::kSize);
- JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- TestAndBranchIfAnySet(instance_type,
- kIsIndirectStringMask | kIsNotStringMask,
- value_is_white_and_not_data);
-
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- Mov(length_scratch, ExternalString::kSize);
- TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
- String::kLengthOffset));
- Tst(instance_type, kStringEncodingMask);
- Cset(load_scratch, eq);
- Lsl(length_scratch, length_scratch, load_scratch);
- Add(length_scratch,
- length_scratch,
- SeqString::kHeaderSize + kObjectAlignmentMask);
- Bic(length_scratch, length_scratch, kObjectAlignmentMask);
-
- Bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- Register mask = shift_scratch;
- Mov(load_scratch, 1);
- Lsl(mask, load_scratch, shift_scratch);
-
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Orr(load_scratch, load_scratch, mask);
- Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Add(load_scratch, load_scratch, length_scratch);
- Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- Bind(&done);
+ Tbz(load_scratch, 0, value_is_white);
}
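
The DCHECKed bit patterns are written least-significant bit first, which is why a single Tbz on bit 0 now suffices: white is the only colour whose first bit is clear. A small C++ model of the same test; the enum and helper are illustrative only:

enum class Color { kWhite, kGrey, kBlack };

// |bits| holds the two mark bits, bit 0 first. Patterns as asserted above,
// LSB-first: white 00, grey 10, black 11.
Color ColorFromMarkBits(unsigned bits) {
  if ((bits & 1) == 0) return Color::kWhite;         // the Tbz test
  return (bits & 2) ? Color::kBlack : Color::kGrey;  // grey vs. black
}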
@@ -4600,31 +4445,25 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch1,
Register scratch2,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- Ldr(scratch1, GlobalObjectMemOperand());
- Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
- int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
- Ldr(scratch2, FieldMemOperand(scratch1, offset));
+ Ldr(scratch1, NativeContextMemOperand());
+ Ldr(scratch2,
+ ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
// Use the transitioned cached map.
- offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
- Ldr(map_in_out, FieldMemOperand(scratch1, offset));
+ Ldr(map_in_out,
+ ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- Ldr(function, GlobalObjectMemOperand());
- // Load the native context from the global or builtins object.
- Ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- Ldr(function, ContextMemOperand(function, index));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ Ldr(dst, NativeContextMemOperand());
+ Ldr(dst, ContextMemOperand(dst, index));
}
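
The new LoadNativeContextSlot is two dependent loads: current context to native context, then native context to slot. A C-level sketch treating contexts as arrays of tagged words; the types here are hypothetical:

#include <cstdint>

using Tagged = uintptr_t;

Tagged LoadNativeContextSlot(const Tagged* current_context,
                             int native_context_index, int slot_index) {
  // Ldr(dst, NativeContextMemOperand()):
  const Tagged* native_context =
      reinterpret_cast<const Tagged*>(current_context[native_context_index]);
  // Ldr(dst, ContextMemOperand(dst, index)):
  return native_context[slot_index];
}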
diff --git a/chromium/v8/src/arm64/macro-assembler-arm64.h b/chromium/v8/src/arm64/macro-assembler-arm64.h
index 769140d9170..78997d6d020 100644
--- a/chromium/v8/src/arm64/macro-assembler-arm64.h
+++ b/chromium/v8/src/arm64/macro-assembler-arm64.h
@@ -44,6 +44,8 @@ namespace internal {
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
+#define kJavaScriptCallArgCountRegister x0
+#define kJavaScriptCallNewTargetRegister x3
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@@ -144,7 +146,8 @@ enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
class MacroAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+ MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
+ CodeObjectRequired create_code_object);
inline Handle<Object> CodeObject();
@@ -721,10 +724,10 @@ class MacroAssembler : public Assembler {
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
- inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
inline void Claim(const Register& count,
uint64_t unit_size = kXRegSize);
- inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count,
uint64_t unit_size = kXRegSize);
@@ -892,6 +895,7 @@ class MacroAssembler : public Assembler {
  // This is required for compatibility with architecture-independent code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
+ inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void LoadInstanceDescriptors(Register map,
Register descriptors);
@@ -962,6 +966,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -969,6 +977,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
+  // Abort execution if argument is not a non-negative integer, enabled via
+  // --debug-code.
+ void AssertPositiveOrZero(Register value);
+
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@@ -1026,22 +1038,11 @@ class MacroAssembler : public Assembler {
// ---- Object Utilities ----
- // Copy fields from 'src' to 'dst', where both are tagged objects.
- // The 'temps' list is a list of X registers which can be used for scratch
- // values. The temps list must include at least one register.
- //
- // Currently, CopyFields cannot make use of more than three registers from
- // the 'temps' list.
- //
- // CopyFields expects to be able to take at least two registers from
- // MacroAssembler::TmpList().
- void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
-
- // Starting at address in dst, initialize field_count 64-bit fields with
- // 64-bit value in register filler. Register dst is corrupted.
- void FillFields(Register dst,
- Register field_count,
- Register filler);
+  // Initialize fields with filler values. Fields from |current_address| up to,
+  // but not including, |end_address| are overwritten with the value in
+  // |filler|. At the end of the loop, |current_address| equals |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// Copies a number of bytes from src to dst. All passed registers are
// clobbered. On exit src and dst will point to the place just after where the
@@ -1093,20 +1094,25 @@ class MacroAssembler : public Assembler {
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
+
+  // Convenience function: Same as above, but deduces nargs from the fid.
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
}
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int ActivationFrameAlignment();
@@ -1126,12 +1132,8 @@ class MacroAssembler : public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
+
+ // Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
@@ -1140,14 +1142,6 @@ class MacroAssembler : public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in the function register.
- void GetBuiltinEntry(Register target, Register function,
- int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -1178,20 +1172,21 @@ class MacroAssembler : public Assembler {
// 'call_kind' must be x5.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1296,12 +1291,8 @@ class MacroAssembler : public Assembler {
// If the new space is exhausted control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void Allocate(int object_size,
Register result,
@@ -1349,6 +1340,12 @@ class MacroAssembler : public Assembler {
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -1461,6 +1458,9 @@ class MacroAssembler : public Assembler {
// register.
void LoadElementsKindFromMap(Register result, Register map);
+ // Load the value from the root list and push it onto the stack.
+ void PushRoot(Heap::RootListIndex index);
+
// Compare the object in a register to a value from the root list.
void CompareRoot(const Register& obj, Heap::RootListIndex index);
@@ -1481,20 +1481,6 @@ class MacroAssembler : public Assembler {
// Fall-through if the object was a string and jump on fail otherwise.
inline void IsObjectNameType(Register object, Register type, Label* fail);
- inline void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // Check the instance type in the given map to see if it corresponds to a
- // JS object type. Jump to the fail label if this is not the case and fall
- // through otherwise. However if fail label is NULL, no branch will be
- // performed and the flag will be updated. You can test the flag for "le"
- // condition to test if it is a valid JS object type.
- inline void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
@@ -1684,8 +1670,15 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Emit code for a truncating division by a constant. The dividend register is
// unchanged. Dividend and result must be different.
@@ -1821,23 +1814,10 @@ class MacroAssembler : public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4, Label* value_is_white);
// Helper for finding the mark bits for an address.
// Note that the behaviour slightly differs from other architectures.
@@ -1907,7 +1887,7 @@ class MacroAssembler : public Assembler {
Register scratch2,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
// map can be the same, function is then overwritten.
@@ -2009,19 +1989,6 @@ class MacroAssembler : public Assembler {
void PopPostamble(int count, int size) { PopPostamble(count * size); }
private:
- // Helpers for CopyFields.
- // These each implement CopyFields in a different way.
- void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3, Register scratch4,
- Register scratch5);
- void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3, Register scratch4);
- void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3);
-
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
// (Push|Pop)CPURegList to bundle together run-time assertions for a large
@@ -2225,8 +2192,8 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectMemOperand() {
- return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -2278,7 +2245,8 @@ class InlineSmiCheckInfo {
class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
diff --git a/chromium/v8/src/arm64/simulator-arm64.cc b/chromium/v8/src/arm64/simulator-arm64.cc
index 4e6a9d91e1a..8f72669f499 100644
--- a/chromium/v8/src/arm64/simulator-arm64.cc
+++ b/chromium/v8/src/arm64/simulator-arm64.cc
@@ -462,13 +462,11 @@ void Simulator::RunFrom(Instruction* start) {
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- type_(type),
- next_(NULL) {
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
+ : external_function_(external_function), type_(type), next_(NULL) {
redirect_call_.SetInstructionBits(
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
// TODO(all): Simulator flush I cache
isolate->set_simulator_redirection(this);
@@ -483,9 +481,8 @@ class Redirection {
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -493,7 +490,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromHltInstruction(Instruction* redirect_call) {
@@ -748,9 +745,10 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_redirect_call();
}
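
The substance of the Redirection change is visible in Get: the per-isolate redirection list is now reached through an explicit Isolate* rather than the thread-local Isolate::Current(). A stripped-down C++ model; the struct layouts are hypothetical:

struct Redirection;

struct Isolate {
  Redirection* simulator_redirection = nullptr;
};

struct Redirection {
  void* external_function;
  Redirection* next;
};

Redirection* Get(Isolate* isolate, void* external_function) {
  // Reuse an existing redirect for this external function, if any.
  for (Redirection* r = isolate->simulator_redirection; r != nullptr;
       r = r->next) {
    if (r->external_function == external_function) return r;
  }
  // Otherwise prepend a new entry to the per-isolate list.
  auto* entry =
      new Redirection{external_function, isolate->simulator_redirection};
  isolate->simulator_redirection = entry;
  return entry;
}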
@@ -2761,7 +2759,7 @@ double Simulator::FPRoundInt(double value, FPRounding round_mode) {
// If the error is greater than 0.5, or is equal to 0.5 and the integer
// result is odd, round up.
} else if ((error > 0.5) ||
- ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+ ((error == 0.5) && (modulo(int_result, 2) != 0))) {
int_result++;
}
break;
@@ -3107,7 +3105,8 @@ T Simulator::FPSqrt(T op) {
} else if (op < 0.0) {
return FPDefaultNaN<T>();
} else {
- return fast_sqrt(op);
+ lazily_initialize_fast_sqrt(isolate_);
+ return fast_sqrt(op, isolate_);
}
}
@@ -3510,7 +3509,7 @@ void Simulator::Debug() {
reinterpret_cast<uint64_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
diff --git a/chromium/v8/src/arm64/simulator-arm64.h b/chromium/v8/src/arm64/simulator-arm64.h
index e4d9a81ffdb..724c767ab72 100644
--- a/chromium/v8/src/arm64/simulator-arm64.h
+++ b/chromium/v8/src/arm64/simulator-arm64.h
@@ -17,12 +17,6 @@
#include "src/globals.h"
#include "src/utils.h"
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
namespace v8 {
namespace internal {
@@ -30,7 +24,7 @@ namespace internal {
// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm64_regexp_matcher)(String* input,
@@ -48,24 +42,29 @@ typedef int (*arm64_regexp_matcher)(String* input,
// should act as a function matching the type arm64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ NULL, p8))
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
+ uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
- static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static void UnregisterCTryCatch() { }
+ static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
#else // !defined(USE_SIMULATOR)
@@ -278,7 +277,8 @@ class Simulator : public DecoderVisitor {
void ResetState();
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type);
void DoRuntimeCall(Instruction* instr);
@@ -877,15 +877,14 @@ class Simulator : public DecoderVisitor {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
- FUNCTION_ADDR(entry), \
- p0, p1, p2, p3, p4))
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->CallJS( \
+ FUNCTION_ADDR(entry), p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>( \
- Simulator::current(Isolate::Current()) \
- ->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ static_cast<int>(Simulator::current(isolate)->CallRegExp( \
+ entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -899,18 +898,20 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
#endif // !defined(USE_SIMULATOR)
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_SIMULATOR_ARM64_H_
diff --git a/chromium/v8/src/arm64/utils-arm64.h b/chromium/v8/src/arm64/utils-arm64.h
index da91fd5d600..1e1c0a33c24 100644
--- a/chromium/v8/src/arm64/utils-arm64.h
+++ b/chromium/v8/src/arm64/utils-arm64.h
@@ -9,12 +9,6 @@
#include "src/arm64/constants-arm64.h"
-#define REGISTER_CODE_LIST(R) \
-R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
-R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
-R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
-R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
-
namespace v8 {
namespace internal {
@@ -151,6 +145,7 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM64_UTILS_ARM64_H_
diff --git a/chromium/v8/src/assembler.cc b/chromium/v8/src/assembler.cc
index dd05a07750d..4aac08d5416 100644
--- a/chromium/v8/src/assembler.cc
+++ b/chromium/v8/src/assembler.cc
@@ -46,17 +46,20 @@
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
+#include "src/disassembler.h"
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/ostreams.h"
+#include "src/parsing/token.h"
#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serialize.h"
-#include "src/token.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h" // NOLINT
@@ -105,6 +108,39 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
+// Common register code.
+
+const char* Register::ToString() {
+ // This is the mapping of allocation indices to registers.
+ DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
+ return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->GetGeneralRegisterName(reg_code);
+}
+
+
+bool Register::IsAllocatable() const {
+ return ((1 << reg_code) &
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_general_codes_mask()) != 0;
+}
+
+
+const char* DoubleRegister::ToString() {
+ // This is the mapping of allocation indices to registers.
+ DCHECK(reg_code >= 0 && reg_code < kMaxNumRegisters);
+ return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->GetDoubleRegisterName(reg_code);
+}
+
+
+bool DoubleRegister::IsAllocatable() const {
+ return ((1 << reg_code) &
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes_mask()) != 0;
+}
+
+
+// -----------------------------------------------------------------------------
// Common double constants.
struct DoubleConstant BASE_EMBEDDED {
@@ -137,7 +173,8 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
// We may use the assembler without an isolate.
serializer_enabled_(isolate && isolate->serializer_enabled()),
constant_pool_available_(false) {
- if (FLAG_mask_constants_with_cookie && isolate != NULL) {
+ DCHECK_NOT_NULL(isolate);
+ if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
own_buffer_ = buffer == NULL;
@@ -168,16 +205,9 @@ void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
}
-void AssemblerBase::FlushICacheWithoutIsolate(void* start, size_t size) {
- // Ideally we would just call Isolate::Current() here. However, this flushes
- // out issues because we usually only need the isolate when in the simulator.
- Isolate* isolate;
-#if defined(USE_SIMULATOR)
- isolate = Isolate::Current();
-#else
- isolate = nullptr;
-#endif // USE_SIMULATOR
- FlushICache(isolate, start, size);
+void AssemblerBase::Print() {
+ OFStream os(stdout);
+ v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
}
@@ -478,8 +508,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode) ||
- RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
+ RelocInfo::IsVeneerPool(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@@ -670,8 +699,7 @@ void RelocIterator::next() {
Advance(kIntSize);
}
} else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode) ||
- RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
+ RelocInfo::IsVeneerPool(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -696,7 +724,8 @@ void RelocIterator::next() {
}
-RelocIterator::RelocIterator(Code* code, int mode_mask) {
+RelocIterator::RelocIterator(Code* code, int mode_mask)
+ : rinfo_(code->map()->GetIsolate()) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@@ -721,7 +750,8 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
}
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
+ : rinfo_(desc.origin->isolate()) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
// Relocation info is read backwards.
@@ -765,8 +795,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc 64";
case EMBEDDED_OBJECT:
return "embedded object";
- case CONSTRUCT_CALL:
- return "code target (js construct call)";
case DEBUGGER_STATEMENT:
return "debugger statement";
case CODE_TARGET:
@@ -801,8 +829,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
- case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
- return "debug break slot at construct call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
@@ -867,7 +893,6 @@ void RelocInfo::Verify(Isolate* isolate) {
Object::VerifyPointer(target_cell());
break;
case DEBUGGER_STATEMENT:
- case CONSTRUCT_CALL:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@@ -900,7 +925,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
- case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
case GENERATOR_CONTINUATION:
case NONE32:
case NONE64:
@@ -917,12 +941,6 @@ void RelocInfo::Verify(Isolate* isolate) {
#endif // VERIFY_HEAP
-int RelocInfo::DebugBreakCallArgumentsCount(intptr_t data) {
- return static_cast<int>(data);
-}
-
-
-// -----------------------------------------------------------------------------
// Implementation of ExternalReference
void ExternalReference::SetUp() {
@@ -1399,31 +1417,38 @@ ExternalReference ExternalReference::debug_after_break_target_address(
}
-ExternalReference
- ExternalReference::debug_restarter_frame_function_pointer_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->debug()->restarter_frame_function_pointer_address());
+ExternalReference ExternalReference::virtual_handler_register(
+ Isolate* isolate) {
+ return ExternalReference(isolate->virtual_handler_register_address());
+}
+
+
+ExternalReference ExternalReference::virtual_slot_register(Isolate* isolate) {
+ return ExternalReference(isolate->virtual_slot_register_address());
}
-ExternalReference ExternalReference::vector_store_virtual_register(
+ExternalReference ExternalReference::runtime_function_table_address(
Isolate* isolate) {
- return ExternalReference(isolate->vector_store_virtual_register_address());
+ return ExternalReference(
+ const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
-double power_helper(double x, double y) {
+double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1 if exponent is 0.
}
if (y == 0.5) {
+ lazily_initialize_fast_sqrt(isolate);
return (std::isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0); // Convert -0 to +0.
+ : fast_sqrt(x + 0.0, isolate); // Convert -0 to +0.
}
if (y == -0.5) {
- return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
+ lazily_initialize_fast_sqrt(isolate);
+ return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0,
+ isolate); // Convert -0 to +0.
}
return power_double_double(x, y);
}
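
The "x + 0.0" in both sqrt branches is load-bearing: under IEEE 754, sqrt(-0) is -0, so without the addition 1.0 / fast_sqrt(-0.0) would come out as -inf. A standalone check, with std::sqrt standing in for fast_sqrt:

#include <cmath>
#include <cstdio>

int main() {
  double x = -0.0;
  std::printf("%g\n", 1.0 / std::sqrt(x));        // -inf: sqrt(-0) == -0
  std::printf("%g\n", 1.0 / std::sqrt(x + 0.0));  // +inf: -0 + 0.0 == +0
  return 0;
}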
@@ -1521,9 +1546,9 @@ ExternalReference ExternalReference::mod_two_doubles_operation(
}
-ExternalReference ExternalReference::debug_step_in_fp_address(
+ExternalReference ExternalReference::debug_step_in_enabled_address(
Isolate* isolate) {
- return ExternalReference(isolate->debug()->step_in_fp_addr());
+ return ExternalReference(isolate->debug()->step_in_enabled_address());
}
@@ -1837,11 +1862,10 @@ void Assembler::RecordGeneratorContinuation() {
}
-void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode, int call_argc) {
+void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
- intptr_t data = static_cast<intptr_t>(call_argc);
- RecordRelocInfo(mode, data);
+ RecordRelocInfo(mode);
}
diff --git a/chromium/v8/src/assembler.h b/chromium/v8/src/assembler.h
index 433b9b8456d..08c6b38541c 100644
--- a/chromium/v8/src/assembler.h
+++ b/chromium/v8/src/assembler.h
@@ -38,8 +38,8 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
-#include "src/token.h"
namespace v8 {
@@ -49,11 +49,15 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class SourcePosition;
class StatsCounter;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
+enum class CodeObjectRequired { kNo, kYes };
+
+
class AssemblerBase: public Malloced {
public:
AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
@@ -99,13 +103,13 @@ class AssemblerBase: public Malloced {
// the assembler could clean up internal data structures.
virtual void AbortedCodeGeneration() { }
+ // Debugging
+ void Print();
+
static const int kMinimalBufferSize = 4*KB;
static void FlushICache(Isolate* isolate, void* start, size_t size);
- // TODO(all): Help get rid of this one.
- static void FlushICacheWithoutIsolate(void* start, size_t size);
-
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
@@ -229,17 +233,18 @@ class CpuFeatures : public AllStatic {
static void PrintTarget();
static void PrintFeatures();
+ private:
+ friend class ExternalReference;
+ friend class AssemblerBase;
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
- private:
// Platform-dependent implementation.
static void ProbeImpl(bool cross_compile);
static unsigned supported_;
static unsigned cache_line_size_;
static bool initialized_;
- friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -319,6 +324,8 @@ class Label {
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+enum ArgvMode { kArgvOnStack, kArgvInRegister };
+
// Specifies whether to perform icache flush operations on RelocInfo updates.
// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be
@@ -370,7 +377,6 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
- CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
@@ -385,7 +391,6 @@ class RelocInfo {
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
- DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -422,19 +427,19 @@ class RelocInfo {
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
- RelocInfo() {}
+ explicit RelocInfo(Isolate* isolate) : isolate_(isolate) {
+ DCHECK_NOT_NULL(isolate);
+ }
- RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
- : pc_(pc), rmode_(rmode), data_(data), host_(host) {
+ RelocInfo(Isolate* isolate, byte* pc, Mode rmode, intptr_t data, Code* host)
+ : isolate_(isolate), pc_(pc), rmode_(rmode), data_(data), host_(host) {
+ DCHECK_NOT_NULL(isolate);
}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
- static inline bool IsConstructCall(Mode mode) {
- return mode == CONSTRUCT_CALL;
- }
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
}
@@ -478,8 +483,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
- IsDebugBreakSlotAtCall(mode) ||
- IsDebugBreakSlotAtConstructCall(mode);
+ IsDebugBreakSlotAtCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@@ -490,9 +494,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
- static inline bool IsDebugBreakSlotAtConstructCall(Mode mode) {
- return mode == DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
- }
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUGGER_STATEMENT;
}
@@ -508,6 +509,7 @@ class RelocInfo {
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
+ Isolate* isolate() const { return isolate_; }
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
@@ -530,9 +532,6 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
- static int DebugBreakCallArgumentsCount(intptr_t data);
-
- // Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
@@ -615,9 +614,6 @@ class RelocInfo {
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
- // Patch the code with a call.
- void PatchCodeWithCall(Address target, int guard_bytes);
-
// Check whether this return sequence has been patched
// with a call to the debugger.
INLINE(bool IsPatchedReturnSequence());
@@ -645,12 +641,13 @@ class RelocInfo {
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
static const int kDataMask =
(1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
- static const int kDebugBreakSlotMask =
- 1 << DEBUG_BREAK_SLOT_AT_POSITION | 1 << DEBUG_BREAK_SLOT_AT_RETURN |
- 1 << DEBUG_BREAK_SLOT_AT_CALL | 1 << DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
+ static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
+ 1 << DEBUG_BREAK_SLOT_AT_RETURN |
+ 1 << DEBUG_BREAK_SLOT_AT_CALL;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
+ Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
// to be relocated and not the address of the instruction
// referencing the constant pool entry (except when rmode_ ==
@@ -659,11 +656,6 @@ class RelocInfo {
Mode rmode_;
intptr_t data_;
Code* host_;
- // External-reference pointers are also split across instruction-pairs
- // on some platforms, but are accessed via indirect pointers. This location
- // provides a place for that pointer to exist naturally. Its address
- // is returned by RelocInfo::target_reference_address().
- Address reconstructed_adr_ptr_;
friend class RelocIterator;
};
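
The upshot of these RelocInfo hunks: every relocation record now carries its
Isolate, validated at construction, so downstream consumers (icache flushes,
redirection) no longer need a global or thread-local lookup. A minimal
standalone sketch of the pattern, with hypothetical names rather than V8's
actual API:

#include <cassert>
#include <cstdint>

struct Isolate;  // opaque context object

class RelocRecord {  // hypothetical stand-in for RelocInfo
 public:
  RelocRecord(Isolate* isolate, uint8_t* pc) : isolate_(isolate), pc_(pc) {
    assert(isolate != nullptr);  // mirrors DCHECK_NOT_NULL(isolate)
  }
  Isolate* isolate() const { return isolate_; }  // the new accessor
  uint8_t* pc() const { return pc_; }

 private:
  Isolate* isolate_;  // the field this patch adds
  uint8_t* pc_;
};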
@@ -865,7 +857,8 @@ class ExternalReference BASE_EMBEDDED {
static void InitializeMathExpData();
static void TearDownMathExpData();
- typedef void* ExternalReferenceRedirector(void* original, Type type);
+ typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
+ Type type);
ExternalReference() : address_(NULL) {}
@@ -983,19 +976,20 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
- static ExternalReference debug_restarter_frame_function_pointer_address(
- Isolate* isolate);
static ExternalReference is_profiling_address(Isolate* isolate);
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
- static ExternalReference vector_store_virtual_register(Isolate* isolate);
+ static ExternalReference virtual_handler_register(Isolate* isolate);
+ static ExternalReference virtual_slot_register(Isolate* isolate);
+
+ static ExternalReference runtime_function_table_address(Isolate* isolate);
Address address() const { return reinterpret_cast<Address>(address_); }
// Used to check if single stepping is enabled in generated code.
- static ExternalReference debug_step_in_fp_address(Isolate* isolate);
+ static ExternalReference debug_step_in_enabled_address(Isolate* isolate);
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
@@ -1039,9 +1033,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector == NULL) ?
- address :
- (*redirector)(address, type);
+ void* answer =
+ (redirector == NULL) ? address : (*redirector)(isolate, address, type);
return answer;
}
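
For orientation: the redirector is the hook simulator builds use to reroute
calls to C functions through a trampoline, and this change threads the
Isolate into it. A simplified sketch of the dispatch above, with Type
collapsed to an int and the Isolate layout invented for the example:

struct Isolate {
  void* external_reference_redirector = nullptr;
};

typedef void* Redirector(Isolate* isolate, void* original, int type);

void* Redirect(Isolate* isolate, void* address, int type) {
  Redirector* redirector =
      reinterpret_cast<Redirector*>(isolate->external_reference_redirector);
  // Native builds install no redirector, so addresses pass through.
  return (redirector == nullptr) ? address
                                 : (*redirector)(isolate, address, type);
}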
@@ -1130,7 +1123,7 @@ inline int NumberOfBitsSet(uint32_t x) {
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_helper(double x, double y);
+double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
@@ -1146,8 +1139,11 @@ class CallWrapper {
virtual void BeforeCall(int call_size) const = 0;
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
+ // Returns whether the call needs to check for debug stepping.
+ virtual bool NeedsDebugStepCheck() const { return false; }
};
+
class NullCallWrapper : public CallWrapper {
public:
NullCallWrapper() { }
@@ -1157,6 +1153,16 @@ class NullCallWrapper : public CallWrapper {
};
+class CheckDebugStepCallWrapper : public CallWrapper {
+ public:
+ CheckDebugStepCallWrapper() {}
+ virtual ~CheckDebugStepCallWrapper() {}
+ virtual void BeforeCall(int call_size) const {}
+ virtual void AfterCall() const {}
+ virtual bool NeedsDebugStepCheck() const { return true; }
+};
+
+
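
A sketch of how a code generator is expected to consult the new hook; the
emitter function and call size are hypothetical, but the interface is the
one declared above:

void EmitCall(const CallWrapper& wrapper) {
  if (wrapper.NeedsDebugStepCheck()) {
    // ... emit a check for single-stepping before the call ...
  }
  wrapper.BeforeCall(/*call_size=*/4);
  // ... emit the call instruction itself ...
  wrapper.AfterCall();
}

With this, call sites pass CheckDebugStepCallWrapper() where the debugger
may be stepping, and NullCallWrapper() everywhere else.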
// -----------------------------------------------------------------------------
// Constant pool support
@@ -1276,7 +1282,6 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
-
-} } // namespace v8::internal
-
+} // namespace internal
+} // namespace v8
#endif // V8_ASSEMBLER_H_
diff --git a/chromium/v8/src/assert-scope.h b/chromium/v8/src/assert-scope.h
index 8757a329100..84e6990b04b 100644
--- a/chromium/v8/src/assert-scope.h
+++ b/chromium/v8/src/assert-scope.h
@@ -170,6 +170,7 @@ typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
// Scope to introduce an exception to DisallowCompilation.
typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
AllowCompilation;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ASSERT_SCOPE_H_
diff --git a/chromium/v8/src/ast/OWNERS b/chromium/v8/src/ast/OWNERS
new file mode 100644
index 00000000000..4fdc3f95404
--- /dev/null
+++ b/chromium/v8/src/ast/OWNERS
@@ -0,0 +1,8 @@
+set noparent
+
+adamk@chromium.org
+bmeurer@chromium.org
+littledan@chromium.org
+mstarzinger@chromium.org
+rossberg@chromium.org
+
diff --git a/chromium/v8/src/ast/ast-expression-rewriter.cc b/chromium/v8/src/ast/ast-expression-rewriter.cc
new file mode 100644
index 00000000000..49cc7f6ff4d
--- /dev/null
+++ b/chromium/v8/src/ast/ast-expression-rewriter.cc
@@ -0,0 +1,409 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-rewriter.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Implementation of AstExpressionRewriter
+// The AST is traversed but no actual rewriting takes place, unless the
+// Visit methods are overridden in subclasses.
+
+#define REWRITE_THIS(node) \
+ do { \
+ if (!RewriteExpression(node)) return; \
+ } while (false)
+#define NOTHING() DCHECK_NULL(replacement_)
+
+
+void AstExpressionRewriter::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ for (int i = 0; i < declarations->length(); i++) {
+ AST_REWRITE_LIST_ELEMENT(Declaration, declarations, i);
+ }
+}
+
+
+void AstExpressionRewriter::VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ AST_REWRITE_LIST_ELEMENT(Statement, statements, i);
+ // Not stopping when a jump statement is found.
+ }
+}
+
+
+void AstExpressionRewriter::VisitExpressions(
+ ZoneList<Expression*>* expressions) {
+ for (int i = 0; i < expressions->length(); i++) {
+ // The variable statement visiting code may pass NULL expressions
+ // to this code. Maybe this should be handled by introducing an
+ // undefined expression or literal? Revisit this code if this
+ // changes.
+ if (expressions->at(i) != nullptr) {
+ AST_REWRITE_LIST_ELEMENT(Expression, expressions, i);
+ }
+ }
+}
+
+
+void AstExpressionRewriter::VisitVariableDeclaration(
+ VariableDeclaration* node) {
+ // Not visiting `proxy_`.
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitFunctionDeclaration(
+ FunctionDeclaration* node) {
+ // Not visiting `proxy_`.
+ AST_REWRITE_PROPERTY(FunctionLiteral, node, fun);
+}
+
+
+void AstExpressionRewriter::VisitImportDeclaration(ImportDeclaration* node) {
+ // Not visiting `proxy_`.
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitExportDeclaration(ExportDeclaration* node) {
+ // Not visiting `proxy_`.
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitBlock(Block* node) {
+ VisitStatements(node->statements());
+}
+
+
+void AstExpressionRewriter::VisitExpressionStatement(
+ ExpressionStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitEmptyStatement(EmptyStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ AST_REWRITE_PROPERTY(Statement, node, statement);
+}
+
+
+void AstExpressionRewriter::VisitIfStatement(IfStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, condition);
+ AST_REWRITE_PROPERTY(Statement, node, then_statement);
+ AST_REWRITE_PROPERTY(Statement, node, else_statement);
+}
+
+
+void AstExpressionRewriter::VisitContinueStatement(ContinueStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitBreakStatement(BreakStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitReturnStatement(ReturnStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+ AST_REWRITE_PROPERTY(Statement, node, statement);
+}
+
+
+void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, tag);
+ ZoneList<CaseClause*>* clauses = node->cases();
+ for (int i = 0; i < clauses->length(); i++) {
+ AST_REWRITE_LIST_ELEMENT(CaseClause, clauses, i);
+ }
+}
+
+
+void AstExpressionRewriter::VisitDoWhileStatement(DoWhileStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, cond);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitWhileStatement(WhileStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, cond);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitForStatement(ForStatement* node) {
+ if (node->init() != nullptr) {
+ AST_REWRITE_PROPERTY(Statement, node, init);
+ }
+ if (node->cond() != nullptr) {
+ AST_REWRITE_PROPERTY(Expression, node, cond);
+ }
+ if (node->next() != nullptr) {
+ AST_REWRITE_PROPERTY(Statement, node, next);
+ }
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitForInStatement(ForInStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, each);
+ AST_REWRITE_PROPERTY(Expression, node, subject);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, each);
+ AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
+ AST_REWRITE_PROPERTY(Expression, node, next_result);
+ AST_REWRITE_PROPERTY(Expression, node, result_done);
+ AST_REWRITE_PROPERTY(Expression, node, assign_each);
+ AST_REWRITE_PROPERTY(Expression, node, subject);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitTryCatchStatement(TryCatchStatement* node) {
+ AST_REWRITE_PROPERTY(Block, node, try_block);
+ // Not visiting the variable.
+ AST_REWRITE_PROPERTY(Block, node, catch_block);
+}
+
+
+void AstExpressionRewriter::VisitTryFinallyStatement(
+ TryFinallyStatement* node) {
+ AST_REWRITE_PROPERTY(Block, node, try_block);
+ AST_REWRITE_PROPERTY(Block, node, finally_block);
+}
+
+
+void AstExpressionRewriter::VisitDebuggerStatement(DebuggerStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitFunctionLiteral(FunctionLiteral* node) {
+ REWRITE_THIS(node);
+ VisitDeclarations(node->scope()->declarations());
+ ZoneList<Statement*>* body = node->body();
+ if (body != nullptr) VisitStatements(body);
+}
+
+
+void AstExpressionRewriter::VisitClassLiteral(ClassLiteral* node) {
+ REWRITE_THIS(node);
+ // Not visiting `class_variable_proxy_`.
+ if (node->extends() != nullptr) {
+ AST_REWRITE_PROPERTY(Expression, node, extends);
+ }
+ AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
+ ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
+ for (int i = 0; i < properties->length(); i++) {
+ VisitObjectLiteralProperty(properties->at(i));
+ }
+}
+
+
+void AstExpressionRewriter::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitConditional(Conditional* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, condition);
+ AST_REWRITE_PROPERTY(Expression, node, then_expression);
+ AST_REWRITE_PROPERTY(Expression, node, else_expression);
+}
+
+
+void AstExpressionRewriter::VisitVariableProxy(VariableProxy* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitLiteral(Literal* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitRegExpLiteral(RegExpLiteral* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitObjectLiteral(ObjectLiteral* node) {
+ REWRITE_THIS(node);
+ ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
+ for (int i = 0; i < properties->length(); i++) {
+ VisitObjectLiteralProperty(properties->at(i));
+ }
+}
+
+
+void AstExpressionRewriter::VisitObjectLiteralProperty(
+ ObjectLiteralProperty* property) {
+ if (property == nullptr) return;
+ AST_REWRITE_PROPERTY(Expression, property, key);
+ AST_REWRITE_PROPERTY(Expression, property, value);
+}
+
+
+void AstExpressionRewriter::VisitArrayLiteral(ArrayLiteral* node) {
+ REWRITE_THIS(node);
+ VisitExpressions(node->values());
+}
+
+
+void AstExpressionRewriter::VisitAssignment(Assignment* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, target);
+ AST_REWRITE_PROPERTY(Expression, node, value);
+}
+
+
+void AstExpressionRewriter::VisitYield(Yield* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, generator_object);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitThrow(Throw* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, exception);
+}
+
+
+void AstExpressionRewriter::VisitProperty(Property* node) {
+ if (node == nullptr) return;
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, obj);
+ AST_REWRITE_PROPERTY(Expression, node, key);
+}
+
+
+void AstExpressionRewriter::VisitCall(Call* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+ VisitExpressions(node->arguments());
+}
+
+
+void AstExpressionRewriter::VisitCallNew(CallNew* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+ VisitExpressions(node->arguments());
+}
+
+
+void AstExpressionRewriter::VisitCallRuntime(CallRuntime* node) {
+ REWRITE_THIS(node);
+ VisitExpressions(node->arguments());
+}
+
+
+void AstExpressionRewriter::VisitUnaryOperation(UnaryOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitCountOperation(CountOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitBinaryOperation(BinaryOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, left);
+ AST_REWRITE_PROPERTY(Expression, node, right);
+}
+
+
+void AstExpressionRewriter::VisitCompareOperation(CompareOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, left);
+ AST_REWRITE_PROPERTY(Expression, node, right);
+}
+
+
+void AstExpressionRewriter::VisitSpread(Spread* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitThisFunction(ThisFunction* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitSuperPropertyReference(
+ SuperPropertyReference* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
+ AST_REWRITE_PROPERTY(Expression, node, home_object);
+}
+
+
+void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
+ AST_REWRITE_PROPERTY(VariableProxy, node, new_target_var);
+ AST_REWRITE_PROPERTY(VariableProxy, node, this_function_var);
+}
+
+
+void AstExpressionRewriter::VisitCaseClause(CaseClause* node) {
+ if (!node->is_default()) {
+ AST_REWRITE_PROPERTY(Expression, node, label);
+ }
+ VisitStatements(node->statements());
+}
+
+
+void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Block, node, block);
+ AST_REWRITE_PROPERTY(VariableProxy, node, result);
+}
+
+
+void AstExpressionRewriter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/ast/ast-expression-rewriter.h b/chromium/v8/src/ast/ast-expression-rewriter.h
new file mode 100644
index 00000000000..916842ab20e
--- /dev/null
+++ b/chromium/v8/src/ast/ast-expression-rewriter.h
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_EXPRESSION_REWRITER_H_
+#define V8_AST_AST_EXPRESSION_REWRITER_H_
+
+#include "src/allocation.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/effects.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// A rewriting Visitor over a CompilationInfo's AST that invokes
+// RewriteExpression on each expression node.
+
+class AstExpressionRewriter : public AstVisitor {
+ public:
+ explicit AstExpressionRewriter(Isolate* isolate) : AstVisitor() {
+ InitializeAstRewriter(isolate);
+ }
+ explicit AstExpressionRewriter(uintptr_t stack_limit) : AstVisitor() {
+ InitializeAstRewriter(stack_limit);
+ }
+ ~AstExpressionRewriter() override {}
+
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statements) override;
+ void VisitExpressions(ZoneList<Expression*>* expressions) override;
+
+ virtual void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+
+ protected:
+ virtual bool RewriteExpression(Expression* expr) = 0;
+
+ private:
+ DEFINE_AST_REWRITER_SUBCLASS_MEMBERS();
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(AstExpressionRewriter);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_EXPRESSION_REWRITER_H_
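
A hypothetical subclass, compilable only inside the V8 tree, showing the
intended use: the base class walks the whole AST and calls RewriteExpression
on each expression, and returning false skips that node's subtree (see the
REWRITE_THIS macro in the .cc file above).

class MarkingRewriter final : public AstExpressionRewriter {
 public:
  explicit MarkingRewriter(uintptr_t stack_limit)
      : AstExpressionRewriter(stack_limit) {}

 protected:
  bool RewriteExpression(Expression* expr) override {
    // Inspect or replace expr here; return true to keep descending.
    return true;
  }
};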
diff --git a/chromium/v8/src/ast-expression-visitor.cc b/chromium/v8/src/ast/ast-expression-visitor.cc
index 782d4bbca64..6b2550c5418 100644
--- a/chromium/v8/src/ast-expression-visitor.cc
+++ b/chromium/v8/src/ast/ast-expression-visitor.cc
@@ -4,11 +4,11 @@
#include "src/v8.h"
-#include "src/ast-expression-visitor.h"
+#include "src/ast/ast-expression-visitor.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -32,14 +32,20 @@ namespace internal {
} while (false)
-AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Zone* zone,
- FunctionLiteral* root)
+AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Expression* root)
: root_(root), depth_(0) {
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
-void AstExpressionVisitor::Run() { RECURSE(VisitFunctionLiteral(root_)); }
+AstExpressionVisitor::AstExpressionVisitor(uintptr_t stack_limit,
+ Expression* root)
+ : root_(root), depth_(0) {
+ InitializeAstVisitor(stack_limit);
+}
+
+
+void AstExpressionVisitor::Run() { RECURSE(Visit(root_)); }
void AstExpressionVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {
@@ -165,6 +171,11 @@ void AstExpressionVisitor::VisitForInStatement(ForInStatement* stmt) {
void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
RECURSE(Visit(stmt->iterable()));
+ RECURSE(Visit(stmt->each()));
+ RECURSE(Visit(stmt->assign_iterator()));
+ RECURSE(Visit(stmt->next_result()));
+ RECURSE(Visit(stmt->result_done()));
+ RECURSE(Visit(stmt->assign_each()));
RECURSE(Visit(stmt->body()));
}
@@ -196,10 +207,17 @@ void AstExpressionVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {}
+void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
+ RECURSE(VisitBlock(expr->block()));
+ RECURSE(VisitVariableProxy(expr->result()));
+}
+
+
void AstExpressionVisitor::VisitConditional(Conditional* expr) {
- RECURSE(Visit(expr->condition()));
- RECURSE(Visit(expr->then_expression()));
- RECURSE(Visit(expr->else_expression()));
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->condition()));
+ RECURSE_EXPRESSION(Visit(expr->then_expression()));
+ RECURSE_EXPRESSION(Visit(expr->else_expression()));
}
@@ -223,6 +241,9 @@ void AstExpressionVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
ZoneList<ObjectLiteralProperty*>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ RECURSE_EXPRESSION(Visit(prop->key()));
+ }
RECURSE_EXPRESSION(Visit(prop->value()));
}
}
@@ -336,21 +357,54 @@ void AstExpressionVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
}
-void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {}
+void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {
+ VisitExpression(expr);
+ if (expr->extends() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->extends()));
+ }
+ RECURSE_EXPRESSION(Visit(expr->constructor()));
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ RECURSE_EXPRESSION(Visit(prop->key()));
+ }
+ RECURSE_EXPRESSION(Visit(prop->value()));
+ }
+}
-void AstExpressionVisitor::VisitSpread(Spread* expr) {}
+void AstExpressionVisitor::VisitSpread(Spread* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
void AstExpressionVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
void AstExpressionVisitor::VisitSuperPropertyReference(
- SuperPropertyReference* expr) {}
+ SuperPropertyReference* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+ RECURSE_EXPRESSION(Visit(expr->home_object()));
+}
-void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {}
+void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
+}
+
+
+void AstExpressionVisitor::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ VisitExpression(expr);
+ RECURSE(Visit(expr->expression()));
}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/ast-expression-visitor.h b/chromium/v8/src/ast/ast-expression-visitor.h
index 43b34bac796..cda624d5b7a 100644
--- a/chromium/v8/src/ast-expression-visitor.h
+++ b/chromium/v8/src/ast/ast-expression-visitor.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_EXPRESSION_VISITOR_H_
-#define V8_AST_EXPRESSION_VISITOR_H_
+#ifndef V8_AST_AST_EXPRESSION_VISITOR_H_
+#define V8_AST_AST_EXPRESSION_VISITOR_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/effects.h"
-#include "src/scopes.h"
#include "src/type-info.h"
#include "src/types.h"
#include "src/zone.h"
@@ -21,7 +21,8 @@ namespace internal {
class AstExpressionVisitor : public AstVisitor {
public:
- AstExpressionVisitor(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ AstExpressionVisitor(Isolate* isolate, Expression* root);
+ AstExpressionVisitor(uintptr_t stack_limit, Expression* root);
void Run();
protected:
@@ -34,16 +35,16 @@ class AstExpressionVisitor : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- FunctionLiteral* root_;
+ Expression* root_;
int depth_;
DISALLOW_COPY_AND_ASSIGN(AstExpressionVisitor);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_AST_EXPRESSION_VISITOR_H_
+#endif // V8_AST_AST_EXPRESSION_VISITOR_H_
diff --git a/chromium/v8/src/ast-literal-reindexer.cc b/chromium/v8/src/ast/ast-literal-reindexer.cc
index e5729c7818a..fce33e70b8f 100644
--- a/chromium/v8/src/ast-literal-reindexer.cc
+++ b/chromium/v8/src/ast/ast-literal-reindexer.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast-literal-reindexer.h"
+#include "src/ast/ast-literal-reindexer.h"
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -43,6 +43,11 @@ void AstLiteralReindexer::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {}
+void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
+ // TODO(caitp): literals in do expressions need re-indexing too.
+}
+
+
void AstLiteralReindexer::VisitLiteral(Literal* node) {}
@@ -71,6 +76,12 @@ void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
}
+void AstLiteralReindexer::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
void AstLiteralReindexer::VisitImportDeclaration(ImportDeclaration* node) {
VisitVariableProxy(node->proxy());
}
@@ -316,5 +327,5 @@ void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
void AstLiteralReindexer::Reindex(Expression* pattern) {
pattern->Accept(this);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/ast-literal-reindexer.h b/chromium/v8/src/ast/ast-literal-reindexer.h
index 2fe920b7c46..e2a71d3c471 100644
--- a/chromium/v8/src/ast-literal-reindexer.h
+++ b/chromium/v8/src/ast/ast-literal-reindexer.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_LITERAL_REINDEXER
-#define V8_AST_LITERAL_REINDEXER
+#ifndef V8_AST_AST_LITERAL_REINDEXER
+#define V8_AST_AST_LITERAL_REINDEXER
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -17,10 +17,9 @@ class AstLiteralReindexer final : public AstVisitor {
int count() const { return next_index_; }
void Reindex(Expression* pattern);
- int NextIndex() { return next_index_++; }
private:
-#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DEFINE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
@@ -39,7 +38,7 @@ class AstLiteralReindexer final : public AstVisitor {
DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_AST_LITERAL_REINDEXER
+#endif // V8_AST_AST_LITERAL_REINDEXER
diff --git a/chromium/v8/src/ast-numbering.cc b/chromium/v8/src/ast/ast-numbering.cc
index 55eaacd1f5e..6c2b696a5df 100644
--- a/chromium/v8/src/ast-numbering.cc
+++ b/chromium/v8/src/ast/ast-numbering.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -14,18 +14,20 @@ class AstNumberingVisitor final : public AstVisitor {
public:
AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
+ isolate_(isolate),
+ zone_(zone),
next_id_(BailoutId::FirstUsable().ToInt()),
properties_(zone),
- ic_slot_cache_(zone),
+ slot_cache_(zone),
dont_optimize_reason_(kNoReason) {
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
bool Renumber(FunctionLiteral* node);
private:
// AST node visitor interface.
-#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DEFINE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
@@ -65,16 +67,18 @@ class AstNumberingVisitor final : public AstVisitor {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
- node->AssignFeedbackVectorSlots(isolate(), properties_.get_spec(),
- &ic_slot_cache_);
+ node->AssignFeedbackVectorSlots(isolate_, properties_.get_spec(),
+ &slot_cache_);
}
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
+ Isolate* isolate_;
+ Zone* zone_;
int next_id_;
AstProperties properties_;
- // The slot cache allows us to reuse certain vector IC slots.
- ICSlotCache ic_slot_cache_;
+ // The slot cache allows us to reuse certain feedback vector slots.
+ FeedbackVectorSlotCache slot_cache_;
BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -132,6 +136,15 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
}
+void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
+ IncrementNodeCount();
+ DisableCrankshaft(kDoExpression);
+ node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
+ Visit(node->block());
+ Visit(node->result());
+}
+
+
void AstNumberingVisitor::VisitLiteral(Literal* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Literal::num_ids()));
@@ -168,7 +181,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
- DisableOptimization(kSuperReference);
+ DisableCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
Visit(node->this_var());
Visit(node->home_object());
@@ -177,7 +190,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
- DisableOptimization(kSuperReference);
+ DisableCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
Visit(node->this_var());
Visit(node->new_target_var());
@@ -335,6 +348,7 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
+
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
VisitReference(node->target());
Visit(node->value());
@@ -360,7 +374,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
- DisableOptimization(kSpread);
+ DisableCrankshaft(kSpread);
Visit(node->expression());
}
@@ -466,11 +480,11 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
}
- node->BuildConstantProperties(isolate());
+ node->BuildConstantProperties(isolate_);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code will be emitted.
- node->CalculateEmitStore(zone());
+ node->CalculateEmitStore(zone_);
ReserveFeedbackSlots(node);
}
@@ -489,6 +503,8 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
+ node->BuildConstantElements(isolate_);
+ ReserveFeedbackSlots(node);
}
@@ -541,6 +557,14 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
}
+void AstNumberingVisitor::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ IncrementNodeCount();
+ node->set_base_id(ReserveIdRange(RewritableAssignmentExpression::num_ids()));
+ Visit(node->expression());
+}
+
+
bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
@@ -556,11 +580,17 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DisableOptimization(kFunctionWithIllegalRedeclaration);
return Finish(node);
}
+ if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
DisableCrankshaft(kContextAllocatedArguments);
}
+ int rest_index;
+ if (scope->rest_parameter(&rest_index)) {
+ DisableCrankshaft(kRestParameter);
+ }
+
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
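
A pattern worth noting across this file: several bailouts are narrowed from
DisableOptimization, which blocks every optimizing compiler, to
DisableCrankshaft, which presumably still lets TurboFan handle the function.
Super references, spread, rest parameters, and functions using new.target
all move to the weaker form here.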
diff --git a/chromium/v8/src/ast-numbering.h b/chromium/v8/src/ast/ast-numbering.h
index 57c750cf640..0ac1ef21343 100644
--- a/chromium/v8/src/ast-numbering.h
+++ b/chromium/v8/src/ast/ast-numbering.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_NUMBERING_H_
-#define V8_AST_NUMBERING_H_
+#ifndef V8_AST_AST_NUMBERING_H_
+#define V8_AST_AST_NUMBERING_H_
namespace v8 {
namespace internal {
@@ -22,4 +22,4 @@ bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
} // namespace internal
} // namespace v8
-#endif // V8_AST_NUMBERING_H_
+#endif // V8_AST_AST_NUMBERING_H_
diff --git a/chromium/v8/src/ast-value-factory.cc b/chromium/v8/src/ast/ast-value-factory.cc
index fbcde8b4570..2e17fbcfafd 100644
--- a/chromium/v8/src/ast-value-factory.cc
+++ b/chromium/v8/src/ast/ast-value-factory.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
#include "src/api.h"
#include "src/objects.h"
@@ -50,7 +50,7 @@ class OneByteStringStream {
int pos_;
};
-}
+} // namespace
class AstRawStringInternalizationKey : public HashTableKey {
public:
diff --git a/chromium/v8/src/ast-value-factory.h b/chromium/v8/src/ast/ast-value-factory.h
index 69fc6cc2f45..4ae912ea82d 100644
--- a/chromium/v8/src/ast-value-factory.h
+++ b/chromium/v8/src/ast/ast-value-factory.h
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_AST_VALUE_FACTORY_H_
-#define V8_AST_VALUE_FACTORY_H_
+#ifndef V8_AST_AST_VALUE_FACTORY_H_
+#define V8_AST_AST_VALUE_FACTORY_H_
#include "src/api.h"
#include "src/hashmap.h"
@@ -62,7 +62,7 @@ class AstString : public ZoneObject {
};
-class AstRawString : public AstString {
+class AstRawString final : public AstString {
public:
int length() const override {
if (is_one_byte_)
@@ -115,19 +115,17 @@ class AstRawString : public AstString {
};
-class AstConsString : public AstString {
+class AstConsString final : public AstString {
public:
AstConsString(const AstString* left, const AstString* right)
- : left_(left),
- right_(right) {}
+ : length_(left->length() + right->length()), left_(left), right_(right) {}
- int length() const override { return left_->length() + right_->length(); }
+ int length() const override { return length_; }
void Internalize(Isolate* isolate) override;
private:
- friend class AstValueFactory;
-
+ const int length_;
const AstString* left_;
const AstString* right_;
};
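
The point of the new length_ member: for nested concatenations the old
virtual length() recursed over the whole tree on every query, while the
constructor-time sum makes it O(1). A self-contained toy (no AstString
hierarchy) illustrating the same choice:

#include <cassert>

struct ConsStr {
  const int length;  // computed once, as in AstConsString now
  const ConsStr* left;
  const ConsStr* right;
  explicit ConsStr(int leaf_len)
      : length(leaf_len), left(nullptr), right(nullptr) {}
  ConsStr(const ConsStr* l, const ConsStr* r)
      : length(l->length + r->length), left(l), right(r) {}
};

int main() {
  ConsStr a(3), b(4);
  ConsStr ab(&a, &b);
  assert(ab.length == 7);  // no recursive walk at query time
  return 0;
}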
@@ -252,11 +250,12 @@ class AstValue : public ZoneObject {
F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
- F(dot_module, ".module") \
F(dot_result, ".result") \
F(dot_switch_tag, ".switch_tag") \
+ F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
+ F(get_space, "get ") \
F(let, "let") \
F(native, "native") \
F(new_target, ".new.target") \
@@ -264,6 +263,7 @@ class AstValue : public ZoneObject {
F(proto, "__proto__") \
F(prototype, "prototype") \
F(rest_parameter, ".rest_parameter") \
+ F(set_space, "set ") \
F(this, "this") \
F(this_function, ".this_function") \
F(undefined, "undefined") \
@@ -366,9 +366,10 @@ class AstValueFactory {
OTHER_CONSTANTS(F)
#undef F
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#undef STRING_CONSTANTS
#undef OTHER_CONSTANTS
-#endif // V8_AST_VALUE_FACTORY_H_
+#endif // V8_AST_AST_VALUE_FACTORY_H_
diff --git a/chromium/v8/src/ast.cc b/chromium/v8/src/ast/ast.cc
index 3292b1d50bd..69e7351a7d3 100644
--- a/chromium/v8/src/ast.cc
+++ b/chromium/v8/src/ast/ast.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include <cmath> // For isfinite.
+#include "src/ast/scopes.h"
#include "src/builtins.h"
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
#include "src/hashmap.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/property.h"
#include "src/property-details.h"
-#include "src/scopes.h"
#include "src/string-stream.h"
#include "src/type-info.h"
@@ -71,7 +71,6 @@ VariableProxy::VariableProxy(Zone* zone, Variable* var, int start_position,
bit_field_(IsThisField::encode(var->is_this()) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
- variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(var->raw_name()),
end_position_(end_position) {
BindTo(var);
@@ -85,7 +84,6 @@ VariableProxy::VariableProxy(Zone* zone, const AstRawString* name,
bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
- variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(name),
end_position_(end_position) {}
@@ -100,14 +98,14 @@ void VariableProxy::BindTo(Variable* var) {
void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
if (var()->IsUnallocated()) {
ZoneHashMap::Entry* entry = cache->Get(var());
if (entry != NULL) {
- variable_feedback_slot_ = FeedbackVectorICSlot(
+ variable_feedback_slot_ = FeedbackVectorSlot(
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
return;
}
@@ -121,26 +119,28 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
- FeedbackVectorICSlot* out_slot) {
- if (FLAG_vector_stores) {
- Property* property = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
- if ((assign_type == VARIABLE &&
- expr->AsVariableProxy()->var()->IsUnallocated()) ||
- assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
- // TODO(ishell): consider using ICSlotCache for variables here.
- FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
- ? FeedbackVectorSlotKind::KEYED_STORE_IC
- : FeedbackVectorSlotKind::STORE_IC;
- *out_slot = spec->AddSlot(kind);
- }
- }
-}
-
-
-void ForEachStatement::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlot* out_slot) {
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+ if ((assign_type == VARIABLE &&
+ expr->AsVariableProxy()->var()->IsUnallocated()) ||
+ assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
+ // TODO(ishell): consider using ICSlotCache for variables here.
+ FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
+ ? FeedbackVectorSlotKind::KEYED_STORE_IC
+ : FeedbackVectorSlotKind::STORE_IC;
+ *out_slot = spec->AddSlot(kind);
+ }
+}
+
+
+void ForEachStatement::AssignFeedbackVectorSlots(
+ Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ // TODO(adamk): for-of statements do not make use of this feedback slot.
+ // The each_slot_ should be specific to ForInStatement, and this work moved
+ // there.
+ if (IsForOfStatement()) return;
AssignVectorSlots(each(), spec, &each_slot_);
}
@@ -153,20 +153,19 @@ Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
target_(target),
value_(value),
- binary_operation_(NULL),
- slot_(FeedbackVectorICSlot::Invalid()) {}
+ binary_operation_(NULL) {}
void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
AssignVectorSlots(target(), spec, &slot_);
}
void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
AssignVectorSlots(expression(), spec, &slot_);
}
@@ -227,7 +226,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
bool is_computed_name)
: key_(key),
value_(value),
- slot_(FeedbackVectorICSlot::Invalid()),
kind_(kind),
emit_store_(true),
is_static_(is_static),
@@ -240,7 +238,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
bool is_computed_name)
: key_(key),
value_(value),
- slot_(FeedbackVectorICSlot::Invalid()),
emit_store_(true),
is_static_(is_static),
is_computed_name_(is_computed_name) {
@@ -260,9 +257,7 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
- if (!FLAG_vector_stores) return;
-
+ FeedbackVectorSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitClassLiteral.
if (NeedsProxySlot()) {
@@ -273,7 +268,7 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
ObjectLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
}
}
@@ -298,9 +293,7 @@ bool ObjectLiteral::Property::emit_store() {
void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
- if (!FLAG_vector_stores) return;
-
+ FeedbackVectorSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitObjectLiteral.
int property_index = 0;
@@ -321,27 +314,27 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
if (FunctionLiteral::NeedsHomeObject(value)) {
- spec->AddStoreICSlot();
+ property->SetSlot(spec->AddStoreICSlot(), 1);
}
}
break;
}
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
break;
case ObjectLiteral::Property::PROTOTYPE:
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
break;
}
@@ -353,7 +346,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
Expression* value = property->value();
if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
if (FunctionLiteral::NeedsHomeObject(value)) {
- property->set_slot(spec->AddStoreICSlot());
+ property->SetSlot(spec->AddStoreICSlot());
}
}
}
@@ -552,6 +545,25 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
}
+void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ // This logic that computes the number of slots needed for vector store
+ // ICs must mirror FullCodeGenerator::VisitArrayLiteral.
+ int array_index = 0;
+ for (; array_index < values()->length(); array_index++) {
+ Expression* subexpr = values()->at(array_index);
+ if (subexpr->IsSpread()) break;
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ // We'll reuse the same literal slot for all of the non-constant
+ // subexpressions that use a keyed store IC.
+ literal_slot_ = spec->AddKeyedStoreICSlot();
+ return;
+ }
+}
+
+
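
A toy restatement of the loop above, with invented types: scan the
subexpressions, stop at the first spread, skip compile-time constants, and
allocate a single keyed-store slot that all remaining elements share.

#include <vector>

struct Elem { bool is_spread; bool is_constant; };
struct SlotSpec {
  int next_slot = 0;
  int AddKeyedStoreICSlot() { return next_slot++; }
};

// Returns the shared slot index, or -1 when no element needs one.
int AssignSharedLiteralSlot(const std::vector<Elem>& values, SlotSpec* spec) {
  for (const Elem& e : values) {
    if (e.is_spread) break;       // spreads end the region handled here
    if (e.is_constant) continue;  // constants need no store IC
    return spec->AddKeyedStoreICSlot();
  }
  return -1;
}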
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
Isolate* isolate) {
if (expression->IsLiteral()) {
@@ -705,9 +717,6 @@ bool Call::IsUsingCallFeedbackICSlot(Isolate* isolate) const {
if (call_type == POSSIBLY_EVAL_CALL) {
return false;
}
- if (call_type == SUPER_CALL && !FLAG_vector_stores) {
- return false;
- }
return true;
}
@@ -720,12 +729,12 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) {
+ FeedbackVectorSlotCache* cache) {
if (IsUsingCallFeedbackICSlot(isolate)) {
ic_slot_ = spec->AddCallICSlot();
}
if (IsUsingCallFeedbackSlot(isolate)) {
- slot_ = spec->AddStubSlot();
+ stub_slot_ = spec->AddGeneralSlot();
}
}
@@ -745,7 +754,16 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
if (expression()->IsSuperCallReference()) return SUPER_CALL;
Property* property = expression()->AsProperty();
- return property != NULL ? PROPERTY_CALL : OTHER_CALL;
+ if (property != nullptr) {
+ bool is_super = property->IsSuperAccess();
+ if (property->key()->IsPropertyName()) {
+ return is_super ? NAMED_SUPER_PROPERTY_CALL : NAMED_PROPERTY_CALL;
+ } else {
+ return is_super ? KEYED_SUPER_PROPERTY_CALL : KEYED_PROPERTY_CALL;
+ }
+ }
+
+ return OTHER_CALL;
}
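
For orientation, the refined classification corresponds to JavaScript call
forms roughly as follows (the eval/global/lookup cases are decided by
earlier checks in GetCallType that this hunk does not show):

//   o.f()      -> NAMED_PROPERTY_CALL
//   o[k]()     -> KEYED_PROPERTY_CALL
//   super.f()  -> NAMED_SUPER_PROPERTY_CALL
//   super[k]() -> KEYED_SUPER_PROPERTY_CALL
//   super()    -> SUPER_CALL
//   f()        -> OTHER_CALL (after the earlier checks fall through)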
@@ -780,335 +798,6 @@ void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
}
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-#define MAKE_ACCEPT(Name) \
- void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) { \
- return visitor->Visit##Name(this, data); \
- }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
-#undef MAKE_ACCEPT
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExpTree::As##Name() { \
- return NULL; \
- } \
- bool RegExpTree::Is##Name() { return false; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExp##Name::As##Name() { \
- return this; \
- } \
- bool RegExp##Name::Is##Name() { return true; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-
-static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
- Interval result = Interval::Empty();
- for (int i = 0; i < children->length(); i++)
- result = result.Union(children->at(i)->CaptureRegisters());
- return result;
-}
-
-
-Interval RegExpAlternative::CaptureRegisters() {
- return ListCaptureRegisters(nodes());
-}
-
-
-Interval RegExpDisjunction::CaptureRegisters() {
- return ListCaptureRegisters(alternatives());
-}
-
-
-Interval RegExpLookahead::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-Interval RegExpCapture::CaptureRegisters() {
- Interval self(StartRegister(index()), EndRegister(index()));
- return self.Union(body()->CaptureRegisters());
-}
-
-
-Interval RegExpQuantifier::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-bool RegExpAssertion::IsAnchoredAtStart() {
- return assertion_type() == RegExpAssertion::START_OF_INPUT;
-}
-
-
-bool RegExpAssertion::IsAnchoredAtEnd() {
- return assertion_type() == RegExpAssertion::END_OF_INPUT;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtStart()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = nodes->length() - 1; i >= 0; i--) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtEnd()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtStart())
- return false;
- }
- return true;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtEnd())
- return false;
- }
- return true;
-}
-
-
-bool RegExpLookahead::IsAnchoredAtStart() {
- return is_positive() && body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtStart() {
- return body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtEnd() {
- return body()->IsAnchoredAtEnd();
-}
-
-
-// Convert regular expression trees to a simple sexp representation.
-// This representation should be different from the input grammar
-// in as many cases as possible, to make it more difficult for incorrect
-// parses to look as correct ones which is likely if the input and
-// output formats are alike.
-class RegExpUnparser final : public RegExpVisitor {
- public:
- RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
- void VisitCharacterRange(CharacterRange that);
-#define MAKE_CASE(Name) \
- virtual void* Visit##Name(RegExp##Name*, void* data) override;
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
- private:
- std::ostream& os_;
- Zone* zone_;
-};
-
-
-void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
- os_ << "(|";
- for (int i = 0; i < that->alternatives()->length(); i++) {
- os_ << " ";
- that->alternatives()->at(i)->Accept(this, data);
- }
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
- os_ << "(:";
- for (int i = 0; i < that->nodes()->length(); i++) {
- os_ << " ";
- that->nodes()->at(i)->Accept(this, data);
- }
- os_ << ")";
- return NULL;
-}
-
-
-void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
- os_ << AsUC16(that.from());
- if (!that.IsSingleton()) {
- os_ << "-" << AsUC16(that.to());
- }
-}
-
-
-
-void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
- void* data) {
- if (that->is_negated()) os_ << "^";
- os_ << "[";
- for (int i = 0; i < that->ranges(zone_)->length(); i++) {
- if (i > 0) os_ << " ";
- VisitCharacterRange(that->ranges(zone_)->at(i));
- }
- os_ << "]";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
- switch (that->assertion_type()) {
- case RegExpAssertion::START_OF_INPUT:
- os_ << "@^i";
- break;
- case RegExpAssertion::END_OF_INPUT:
- os_ << "@$i";
- break;
- case RegExpAssertion::START_OF_LINE:
- os_ << "@^l";
- break;
- case RegExpAssertion::END_OF_LINE:
- os_ << "@$l";
- break;
- case RegExpAssertion::BOUNDARY:
- os_ << "@b";
- break;
- case RegExpAssertion::NON_BOUNDARY:
- os_ << "@B";
- break;
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
- os_ << "'";
- Vector<const uc16> chardata = that->data();
- for (int i = 0; i < chardata.length(); i++) {
- os_ << AsUC16(chardata[i]);
- }
- os_ << "'";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
- if (that->elements()->length() == 1) {
- that->elements()->at(0).tree()->Accept(this, data);
- } else {
- os_ << "(!";
- for (int i = 0; i < that->elements()->length(); i++) {
- os_ << " ";
- that->elements()->at(i).tree()->Accept(this, data);
- }
- os_ << ")";
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
- os_ << "(# " << that->min() << " ";
- if (that->max() == RegExpTree::kInfinity) {
- os_ << "- ";
- } else {
- os_ << that->max() << " ";
- }
- os_ << (that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
- that->body()->Accept(this, data);
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
- os_ << "(^ ";
- that->body()->Accept(this, data);
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) {
- os_ << "(-> " << (that->is_positive() ? "+ " : "- ");
- that->body()->Accept(this, data);
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
- void* data) {
- os_ << "(<- " << that->index() << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
- os_ << '%';
- return NULL;
-}
-
-
-std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
- RegExpUnparser unparser(os, zone);
- Accept(&unparser, NULL);
- return os;
-}
-
-
-RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
- : alternatives_(alternatives) {
- DCHECK(alternatives->length() > 1);
- RegExpTree* first_alternative = alternatives->at(0);
- min_match_ = first_alternative->min_match();
- max_match_ = first_alternative->max_match();
- for (int i = 1; i < alternatives->length(); i++) {
- RegExpTree* alternative = alternatives->at(i);
- min_match_ = Min(min_match_, alternative->min_match());
- max_match_ = Max(max_match_, alternative->max_match());
- }
-}
-
-
-static int IncreaseBy(int previous, int increase) {
- if (RegExpTree::kInfinity - previous < increase) {
- return RegExpTree::kInfinity;
- } else {
- return previous + increase;
- }
-}
-
-RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
- : nodes_(nodes) {
- DCHECK(nodes->length() > 1);
- min_match_ = 0;
- max_match_ = 0;
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- int node_min_match = node->min_match();
- min_match_ = IncreaseBy(min_match_, node_min_match);
- int node_max_match = node->max_match();
- max_match_ = IncreaseBy(max_match_, node_max_match);
- }
-}
-
-
CaseClause::CaseClause(Zone* zone, Expression* label,
ZoneList<Statement*>* statements, int pos)
: Expression(zone, pos),
diff --git a/chromium/v8/src/ast.h b/chromium/v8/src/ast/ast.h
index 4764918849d..7f00955a644 100644
--- a/chromium/v8/src/ast.h
+++ b/chromium/v8/src/ast/ast.h
@@ -2,25 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_H_
-#define V8_AST_H_
+#ifndef V8_AST_AST_H_
+#define V8_AST_AST_H_
#include "src/assembler.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/modules.h"
+#include "src/ast/variables.h"
#include "src/bailout-reason.h"
#include "src/base/flags.h"
#include "src/base/smart-pointers.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list.h"
-#include "src/modules.h"
-#include "src/regexp/jsregexp.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
-#include "src/token.h"
#include "src/types.h"
#include "src/utils.h"
-#include "src/variables.h"
namespace v8 {
namespace internal {
@@ -90,7 +89,9 @@ namespace internal {
V(SuperPropertyReference) \
V(SuperCallReference) \
V(CaseClause) \
- V(EmptyParentheses)
+ V(EmptyParentheses) \
+ V(DoExpression) \
+ V(RewritableAssignmentExpression)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@@ -109,19 +110,6 @@ class MaterializedLiteral;
class Statement;
class TypeFeedbackOracle;
-class RegExpAlternative;
-class RegExpAssertion;
-class RegExpAtom;
-class RegExpBackReference;
-class RegExpCapture;
-class RegExpCharacterClass;
-class RegExpCompiler;
-class RegExpDisjunction;
-class RegExpEmpty;
-class RegExpLookahead;
-class RegExpQuantifier;
-class RegExpText;
-
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@@ -138,14 +126,14 @@ typedef ZoneList<Handle<Object>> ZoneObjectList;
friend class AstNodeFactory;
-class ICSlotCache {
+class FeedbackVectorSlotCache {
public:
- explicit ICSlotCache(Zone* zone)
+ explicit FeedbackVectorSlotCache(Zone* zone)
: zone_(zone),
hash_map_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)) {}
- void Put(Variable* variable, FeedbackVectorICSlot slot) {
+ void Put(Variable* variable, FeedbackVectorSlot slot) {
ZoneHashMap::Entry* entry = hash_map_.LookupOrInsert(
variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
entry->value = reinterpret_cast<void*>(slot.ToInt());
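The renamed FeedbackVectorSlotCache maps a Variable's pointer identity to the FeedbackVectorSlot already assigned for it, so later references to the same variable can reuse that slot instead of allocating another. A minimal sketch of the pattern, using the standard library where V8 uses its zone-allocated ZoneHashMap:

    #include <unordered_map>
    struct Slot { int index = -1; };
    class SlotCache {
     public:
      void Put(const void* variable, Slot slot) { map_[variable] = slot; }
      bool Get(const void* variable, Slot* out) const {
        auto it = map_.find(variable);
        if (it == map_.end()) return false;  // no slot assigned yet
        *out = it->second;
        return true;
      }
     private:
      std::unordered_map<const void*, Slot> map_;  // keyed by pointer identity
    };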
@@ -230,7 +218,7 @@ class AstNode: public ZoneObject {
// vtable entry per node, something we don't want for space reasons.
virtual void AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
- ICSlotCache* cache) {}
+ FeedbackVectorSlotCache* cache) {}
private:
// Hidden to prevent accidental usage. It would have to load the
@@ -249,6 +237,7 @@ class Statement : public AstNode {
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
+ virtual void MarkTail() {}
};
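The MarkTail hooks introduced here propagate a tail-position mark down the tree: each compound node forwards the mark to the child whose value becomes its own result, and the mark bottoms out at Call, which records it in a bit field (see IsTailField later in this diff). A toy sketch of the propagation pattern, deliberately not using V8's types:

    #include <vector>
    struct Node {
      virtual ~Node() {}
      virtual void MarkTail() {}  // default: nothing to mark
    };
    struct CallNode : Node {
      bool is_tail = false;
      void MarkTail() override { is_tail = true; }  // the mark lands here
    };
    struct BlockNode : Node {
      std::vector<Node*> statements;
      void MarkTail() override {
        // Only the last statement of a block can be in tail position.
        if (!statements.empty()) statements.back()->MarkTail();
      }
    };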
@@ -313,6 +302,9 @@ class Expression : public AstNode {
kTest
};
+ // Mark this expression as being in tail position.
+ virtual void MarkTail() {}
+
// True iff the expression is a valid reference expression.
virtual bool IsValidReferenceExpression() const { return false; }
@@ -373,6 +365,14 @@ class Expression : public AstNode {
BailoutId id() const { return BailoutId(local_id(0)); }
TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
+ // Parenthesized expressions in the form `( Expression )`.
+ void set_is_parenthesized() {
+ bit_field_ = ParenthesizedField::update(bit_field_, true);
+ }
+ bool is_parenthesized() const {
+ return ParenthesizedField::decode(bit_field_);
+ }
+
protected:
Expression(Zone* zone, int pos)
: AstNode(pos),
@@ -395,6 +395,8 @@ class Expression : public AstNode {
int base_id_;
Bounds bounds_;
class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
+ class ParenthesizedField
+ : public BitField16<bool, ToBooleanTypesField::kNext, 1> {};
uint16_t bit_field_;
// Ends with 16-bit field; deriving classes in turn begin with
// 16-bit fields for optimum packing efficiency.
@@ -458,10 +460,6 @@ class Block final : public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
- void AddStatement(Statement* statement, Zone* zone) {
- statements_.Add(statement, zone);
- }
-
ZoneList<Statement*>* statements() { return &statements_; }
bool ignore_completion_value() const { return ignore_completion_value_; }
@@ -473,6 +471,10 @@ class Block final : public BreakableStatement {
&& labels() == NULL; // Good enough as an approximation...
}
+ void MarkTail() override {
+ if (!statements_.is_empty()) statements_.last()->MarkTail();
+ }
+
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -494,6 +496,33 @@ class Block final : public BreakableStatement {
};
+class DoExpression final : public Expression {
+ public:
+ DECLARE_NODE_TYPE(DoExpression)
+
+ Block* block() { return block_; }
+ void set_block(Block* b) { block_ = b; }
+ VariableProxy* result() { return result_; }
+ void set_result(VariableProxy* v) { result_ = v; }
+
+ void MarkTail() override { block_->MarkTail(); }
+
+ protected:
+ DoExpression(Zone* zone, Block* block, VariableProxy* result, int pos)
+ : Expression(zone, pos), block_(block), result_(result) {
+ DCHECK_NOT_NULL(block_);
+ DCHECK_NOT_NULL(result_);
+ }
+ static int parent_num_ids() { return Expression::num_ids(); }
+
+ private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ Block* block_;
+ VariableProxy* result_;
+};
+
+
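DoExpression is the AST node for do-expressions, a block used in expression position (roughly `var x = do { ... };` in the proposal syntax): the parser is expected to desugar the body into a Block that writes its completion value to a synthetic variable, which the `result` proxy then reads. A toy model of that evaluation order, assuming exactly this desugaring:

    #include <functional>
    template <typename T>
    T EvalDoExpression(const std::function<void(T*)>& block) {
      T result{};      // stands in for the synthetic result variable
      block(&result);  // run the block; it stores the completion value
      return result;   // the do-expression yields that value
    }
    // Usage: int x = EvalDoExpression<int>([](int* r) { *r = 2 + 3; });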
class Declaration : public AstNode {
public:
VariableProxy* proxy() const { return proxy_; }
@@ -552,6 +581,7 @@ class FunctionDeclaration final : public Declaration {
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
+ void set_fun(FunctionLiteral* f) { fun_ = f; }
InitializationFlag initialization() const override {
return kCreatedInitialized;
}
@@ -640,6 +670,7 @@ class IterationStatement : public BreakableStatement {
IterationStatement* AsIterationStatement() final { return this; }
Statement* body() const { return body_; }
+ void set_body(Statement* s) { body_ = s; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId OsrEntryId() const { return BailoutId(local_id(0)); }
@@ -674,6 +705,7 @@ class DoWhileStatement final : public IterationStatement {
}
Expression* cond() const { return cond_; }
+ void set_cond(Expression* e) { cond_ = e; }
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ContinueId() const override { return BailoutId(local_id(0)); }
@@ -702,6 +734,7 @@ class WhileStatement final : public IterationStatement {
}
Expression* cond() const { return cond_; }
+ void set_cond(Expression* e) { cond_ = e; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId ContinueId() const override { return EntryId(); }
@@ -738,6 +771,10 @@ class ForStatement final : public IterationStatement {
Expression* cond() const { return cond_; }
Statement* next() const { return next_; }
+ void set_init(Statement* s) { init_ = s; }
+ void set_cond(Expression* e) { cond_ = e; }
+ void set_next(Statement* s) { next_ = s; }
+
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ContinueId() const override { return BailoutId(local_id(0)); }
BailoutId StackCheckId() const override { return BodyId(); }
@@ -776,21 +813,21 @@ class ForEachStatement : public IterationStatement {
Expression* each() const { return each_; }
Expression* subject() const { return subject_; }
+ void set_each(Expression* e) { each_ = e; }
+ void set_subject(Expression* e) { subject_ = e; }
+
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
- FeedbackVectorICSlot EachFeedbackSlot() const { return each_slot_; }
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
protected:
ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos),
- each_(NULL),
- subject_(NULL),
- each_slot_(FeedbackVectorICSlot::Invalid()) {}
+ : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
private:
Expression* each_;
Expression* subject_;
- FeedbackVectorICSlot each_slot_;
+ FeedbackVectorSlot each_slot_;
};
@@ -804,9 +841,9 @@ class ForInStatement final : public ForEachStatement {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) override {
ForEachStatement::AssignFeedbackVectorSlots(isolate, spec, cache);
- for_in_feedback_slot_ = spec->AddStubSlot();
+ for_in_feedback_slot_ = spec->AddGeneralSlot();
}
FeedbackVectorSlot ForInFeedbackSlot() {
@@ -830,9 +867,7 @@ class ForInStatement final : public ForEachStatement {
protected:
ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : ForEachStatement(zone, labels, pos),
- for_in_type_(SLOW_FOR_IN),
- for_in_feedback_slot_(FeedbackVectorSlot::Invalid()) {}
+ : ForEachStatement(zone, labels, pos), for_in_type_(SLOW_FOR_IN) {}
static int parent_num_ids() { return ForEachStatement::num_ids(); }
private:
@@ -885,6 +920,11 @@ class ForOfStatement final : public ForEachStatement {
return assign_each_;
}
+ void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
+ void set_next_result(Expression* e) { next_result_ = e; }
+ void set_result_done(Expression* e) { result_done_ = e; }
+ void set_assign_each(Expression* e) { assign_each_ = e; }
+
BailoutId ContinueId() const override { return EntryId(); }
BailoutId StackCheckId() const override { return BackEdgeId(); }
@@ -917,6 +957,7 @@ class ExpressionStatement final : public Statement {
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
bool IsJump() const override { return expression_->IsThrow(); }
+ void MarkTail() override { expression_->MarkTail(); }
protected:
ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -972,6 +1013,8 @@ class ReturnStatement final : public JumpStatement {
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
+
protected:
explicit ReturnStatement(Zone* zone, Expression* expression, int pos)
: JumpStatement(zone, pos), expression_(expression) { }
@@ -987,11 +1030,16 @@ class WithStatement final : public Statement {
Scope* scope() { return scope_; }
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
Statement* statement() const { return statement_; }
+ void set_statement(Statement* s) { statement_ = s; }
void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId EntryId() const { return BailoutId(local_id(0)); }
+ static int num_ids() { return parent_num_ids() + 2; }
+ BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
+ BailoutId EntryId() const { return BailoutId(local_id(1)); }
+
+ void MarkTail() override { statement_->MarkTail(); }
protected:
WithStatement(Zone* zone, Scope* scope, Expression* expression,
@@ -1027,6 +1075,7 @@ class CaseClause final : public Expression {
CHECK(!is_default());
return label_;
}
+ void set_label(Expression* e) { label_ = e; }
Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
@@ -1034,6 +1083,10 @@ class CaseClause final : public Expression {
BailoutId EntryId() const { return BailoutId(local_id(0)); }
TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
+ void MarkTail() override {
+ if (!statements_->is_empty()) statements_->last()->MarkTail();
+ }
+
Type* compare_type() { return compare_type_; }
void set_compare_type(Type* type) { compare_type_ = type; }
@@ -1064,6 +1117,12 @@ class SwitchStatement final : public BreakableStatement {
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
+ void set_tag(Expression* t) { tag_ = t; }
+
+ void MarkTail() override {
+ if (!cases_->is_empty()) cases_->last()->MarkTail();
+ }
+
protected:
SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
: BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
@@ -1092,11 +1151,20 @@ class IfStatement final : public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ void set_condition(Expression* e) { condition_ = e; }
+ void set_then_statement(Statement* s) { then_statement_ = s; }
+ void set_else_statement(Statement* s) { else_statement_ = s; }
+
bool IsJump() const override {
return HasThenStatement() && then_statement()->IsJump()
&& HasElseStatement() && else_statement()->IsJump();
}
+ void MarkTail() override {
+ then_statement_->MarkTail();
+ else_statement_->MarkTail();
+ }
+
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 3; }
BailoutId IfId() const { return BailoutId(local_id(0)); }
@@ -1131,6 +1199,7 @@ class IfStatement final : public Statement {
class TryStatement : public Statement {
public:
Block* try_block() const { return try_block_; }
+ void set_try_block(Block* b) { try_block_ = b; }
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 1; }
@@ -1163,6 +1232,9 @@ class TryCatchStatement final : public TryStatement {
Scope* scope() { return scope_; }
Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
+ void set_catch_block(Block* b) { catch_block_ = b; }
+
+ void MarkTail() override { catch_block_->MarkTail(); }
protected:
TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
@@ -1184,6 +1256,9 @@ class TryFinallyStatement final : public TryStatement {
DECLARE_NODE_TYPE(TryFinallyStatement)
Block* finally_block() const { return finally_block_; }
+ void set_finally_block(Block* b) { finally_block_ = b; }
+
+ void MarkTail() override { finally_block_->MarkTail(); }
protected:
TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
@@ -1301,7 +1376,7 @@ class AstLiteralReindexer;
// Base class for literals that needs space in the corresponding JSFunction.
class MaterializedLiteral : public Expression {
public:
- virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
+ MaterializedLiteral* AsMaterializedLiteral() final { return this; }
int literal_index() { return literal_index_; }
@@ -1371,6 +1446,9 @@ class ObjectLiteralProperty final : public ZoneObject {
Expression* value() { return value_; }
Kind kind() { return kind_; }
+ void set_key(Expression* e) { key_ = e; }
+ void set_value(Expression* e) { value_ = e; }
+
// Type feedback information.
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
@@ -1383,13 +1461,14 @@ class ObjectLiteralProperty final : public ZoneObject {
bool is_static() const { return is_static_; }
bool is_computed_name() const { return is_computed_name_; }
- FeedbackVectorICSlot GetSlot(int offset = 0) const {
- if (slot_.IsInvalid()) return slot_;
- int slot = slot_.ToInt();
- return FeedbackVectorICSlot(slot + offset);
+ FeedbackVectorSlot GetSlot(int offset = 0) const {
+ DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+ return slots_[offset];
+ }
+ void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
+ DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
+ slots_[offset] = slot;
}
- FeedbackVectorICSlot slot() const { return slot_; }
- void set_slot(FeedbackVectorICSlot slot) { slot_ = slot; }
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
@@ -1405,7 +1484,7 @@ class ObjectLiteralProperty final : public ZoneObject {
private:
Expression* key_;
Expression* value_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slots_[2];
Kind kind_;
bool emit_store_;
bool is_static_;
@@ -1477,16 +1556,23 @@ class ObjectLiteral final : public MaterializedLiteral {
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
// Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 1)); }
+ BailoutId GetIdForPropertyName(int i) {
+ return BailoutId(local_id(2 * i + 1));
+ }
+ BailoutId GetIdForPropertySet(int i) {
+ return BailoutId(local_id(2 * i + 2));
+ }
// Unlike other AST nodes, this number of bailout IDs allocated for an
// ObjectLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
+ int num_ids() const {
+ return parent_num_ids() + 1 + 2 * properties()->length();
+ }
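The change above reserves two bailout ids per property instead of one; judging by the accessor names, one covers the property name computation and one the store. The implied id layout:

    // local_id(0)        -> CreateLiteralId()
    // local_id(2*i + 1)  -> GetIdForPropertyName(i)
    // local_id(2*i + 2)  -> GetIdForPropertySet(i)
    // so num_ids() == parent_num_ids() + 1 + 2 * properties()->length()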
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
@@ -1498,9 +1584,7 @@ class ObjectLiteral final : public MaterializedLiteral {
fast_elements_(false),
has_elements_(false),
may_store_doubles_(false),
- has_function_(has_function),
- slot_(FeedbackVectorICSlot::Invalid()) {
- }
+ has_function_(has_function) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@@ -1512,7 +1596,28 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_elements_;
bool may_store_doubles_;
bool has_function_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
+};
+
+
+// A map from property names to getter/setter pairs allocated in the zone.
+class AccessorTable : public TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy> {
+ public:
+ explicit AccessorTable(Zone* zone)
+ : TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy>(Literal::Match,
+ ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
+
+ Iterator lookup(Literal* literal) {
+ Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
+ if (it->second == NULL) it->second = new (zone_) ObjectLiteral::Accessors();
+ return it;
+ }
+
+ private:
+ Zone* zone_;
};
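AccessorTable, added here, is a lookup-or-create map: the first lookup of a literal key zone-allocates an empty ObjectLiteral::Accessors pair, and later lookups return the same entry, so a getter and a setter for one property name end up paired. A standalone analogue using the standard library; the Accessors fields below are illustrative placeholders:

    #include <map>
    #include <string>
    struct Accessors {
      const void* getter = nullptr;  // placeholder for the getter expression
      const void* setter = nullptr;  // placeholder for the setter expression
    };
    Accessors& LookupOrCreate(std::map<std::string, Accessors>& table,
                              const std::string& name) {
      return table[name];  // operator[] default-constructs on first use
    }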
@@ -1522,12 +1627,11 @@ class RegExpLiteral final : public MaterializedLiteral {
DECLARE_NODE_TYPE(RegExpLiteral)
Handle<String> pattern() const { return pattern_->string(); }
- Handle<String> flags() const { return flags_->string(); }
+ int flags() const { return flags_; }
protected:
- RegExpLiteral(Zone* zone, const AstRawString* pattern,
- const AstRawString* flags, int literal_index, bool is_strong,
- int pos)
+ RegExpLiteral(Zone* zone, const AstRawString* pattern, int flags,
+ int literal_index, bool is_strong, int pos)
: MaterializedLiteral(zone, literal_index, is_strong, pos),
pattern_(pattern),
flags_(flags) {
@@ -1535,8 +1639,8 @@ class RegExpLiteral final : public MaterializedLiteral {
}
private:
- const AstRawString* pattern_;
- const AstRawString* flags_;
+ const AstRawString* const pattern_;
+ int const flags_;
};
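RegExpLiteral now carries its flags pre-parsed as an int bit set rather than as a raw AstRawString to be validated later. A sketch of parsing a flag string into bits; the bit assignments are illustrative only, not V8's actual JSRegExp::Flags values:

    int ParseRegExpFlags(const char* s) {
      int flags = 0;
      for (; *s != '\0'; ++s) {
        switch (*s) {
          case 'g': flags |= 1 << 0; break;  // global
          case 'i': flags |= 1 << 1; break;  // ignore case
          case 'm': flags |= 1 << 2; break;  // multiline
          case 'y': flags |= 1 << 3; break;  // sticky
          default: return -1;                // unknown flag letter
        }
      }
      return flags;  // a real parser would also reject duplicate flags
    }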
@@ -1586,6 +1690,10 @@ class ArrayLiteral final : public MaterializedLiteral {
kIsStrong = 1 << 2
};
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
+
protected:
ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
int first_spread_index, int literal_index, bool is_strong,
@@ -1601,6 +1709,7 @@ class ArrayLiteral final : public MaterializedLiteral {
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
int first_spread_index_;
+ FeedbackVectorSlot literal_slot_;
};
@@ -1656,11 +1765,9 @@ class VariableProxy final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
- FeedbackVectorICSlot VariableFeedbackSlot() {
- return variable_feedback_slot_;
- }
+ FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BeforeId() const { return BailoutId(local_id(0)); }
@@ -1683,7 +1790,7 @@ class VariableProxy final : public Expression {
// Start with 16-bit (or smaller) field, which should get packed together
// with Expression's trailing 16-bit field.
uint8_t bit_field_;
- FeedbackVectorICSlot variable_feedback_slot_;
+ FeedbackVectorSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
@@ -1715,6 +1822,9 @@ class Property final : public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
+ void set_obj(Expression* e) { obj_ = e; }
+ void set_key(Expression* e) { key_ = e; }
+
static int num_ids() { return parent_num_ids() + 1; }
BailoutId LoadId() const { return BailoutId(local_id(0)); }
@@ -1755,14 +1865,14 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) override {
FeedbackVectorSlotKind kind = key()->IsPropertyName()
? FeedbackVectorSlotKind::LOAD_IC
: FeedbackVectorSlotKind::KEYED_LOAD_IC;
property_feedback_slot_ = spec->AddSlot(kind);
}
- FeedbackVectorICSlot PropertyFeedbackSlot() const {
+ FeedbackVectorSlot PropertyFeedbackSlot() const {
return property_feedback_slot_;
}
@@ -1780,7 +1890,6 @@ class Property final : public Expression {
bit_field_(IsForCallField::encode(false) |
IsStringAccessField::encode(false) |
InlineCacheStateField::encode(UNINITIALIZED)),
- property_feedback_slot_(FeedbackVectorICSlot::Invalid()),
obj_(obj),
key_(key) {}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -1793,7 +1902,7 @@ class Property final : public Expression {
class KeyTypeField : public BitField8<IcCheckType, 2, 1> {};
class InlineCacheStateField : public BitField8<InlineCacheState, 3, 4> {};
uint8_t bit_field_;
- FeedbackVectorICSlot property_feedback_slot_;
+ FeedbackVectorSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
SmallMapList receiver_types_;
@@ -1807,13 +1916,15 @@ class Call final : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
+ void set_expression(Expression* e) { expression_ = e; }
+
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
- FeedbackVectorSlot CallFeedbackSlot() const { return slot_; }
+ FeedbackVectorSlot CallFeedbackSlot() const { return stub_slot_; }
- FeedbackVectorICSlot CallFeedbackICSlot() const { return ic_slot_; }
+ FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
SmallMapList* GetReceiverTypes() override {
if (expression()->IsProperty()) {
@@ -1851,10 +1962,11 @@ class Call final : public Expression {
allocation_site_ = site;
}
- static int num_ids() { return parent_num_ids() + 3; }
+ static int num_ids() { return parent_num_ids() + 4; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
BailoutId EvalId() const { return BailoutId(local_id(1)); }
BailoutId LookupId() const { return BailoutId(local_id(2)); }
+ BailoutId CallId() const { return BailoutId(local_id(3)); }
bool is_uninitialized() const {
return IsUninitializedField::decode(bit_field_);
@@ -1863,11 +1975,19 @@ class Call final : public Expression {
bit_field_ = IsUninitializedField::update(bit_field_, b);
}
+ bool is_tail() const { return IsTailField::decode(bit_field_); }
+ void MarkTail() override {
+ bit_field_ = IsTailField::update(bit_field_, true);
+ }
+
enum CallType {
POSSIBLY_EVAL_CALL,
GLOBAL_CALL,
LOOKUP_SLOT_CALL,
- PROPERTY_CALL,
+ NAMED_PROPERTY_CALL,
+ KEYED_PROPERTY_CALL,
+ NAMED_SUPER_PROPERTY_CALL,
+ KEYED_SUPER_PROPERTY_CALL,
SUPER_CALL,
OTHER_CALL
};
@@ -1886,8 +2006,6 @@ class Call final : public Expression {
Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
int pos)
: Expression(zone, pos),
- ic_slot_(FeedbackVectorICSlot::Invalid()),
- slot_(FeedbackVectorSlot::Invalid()),
expression_(expression),
arguments_(arguments),
bit_field_(IsUninitializedField::encode(false)) {
@@ -1900,13 +2018,14 @@ class Call final : public Expression {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- FeedbackVectorICSlot ic_slot_;
- FeedbackVectorSlot slot_;
+ FeedbackVectorSlot ic_slot_;
+ FeedbackVectorSlot stub_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
Handle<AllocationSite> allocation_site_;
class IsUninitializedField : public BitField8<bool, 0, 1> {};
+ class IsTailField : public BitField8<bool, 1, 1> {};
uint8_t bit_field_;
};
@@ -1918,10 +2037,12 @@ class CallNew final : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
+ void set_expression(Expression* e) { expression_ = e; }
+
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
- callnew_feedback_slot_ = spec->AddStubSlot();
+ FeedbackVectorSlotCache* cache) override {
+ callnew_feedback_slot_ = spec->AddGeneralSlot();
}
FeedbackVectorSlot CallNewFeedbackSlot() {
@@ -1955,8 +2076,7 @@ class CallNew final : public Expression {
: Expression(zone, pos),
expression_(expression),
arguments_(arguments),
- is_monomorphic_(false),
- callnew_feedback_slot_(FeedbackVectorSlot::Invalid()) {}
+ is_monomorphic_(false) {}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -2028,6 +2148,7 @@ class UnaryOperation final : public Expression {
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
@@ -2035,7 +2156,7 @@ class UnaryOperation final : public Expression {
BailoutId MaterializeTrueId() const { return BailoutId(local_id(0)); }
BailoutId MaterializeFalseId() const { return BailoutId(local_id(1)); }
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
protected:
UnaryOperation(Zone* zone, Token::Value op, Expression* expression, int pos)
@@ -2058,12 +2179,25 @@ class BinaryOperation final : public Expression {
Token::Value op() const { return static_cast<Token::Value>(op_); }
Expression* left() const { return left_; }
+ void set_left(Expression* e) { left_ = e; }
Expression* right() const { return right_; }
+ void set_right(Expression* e) { right_ = e; }
Handle<AllocationSite> allocation_site() const { return allocation_site_; }
void set_allocation_site(Handle<AllocationSite> allocation_site) {
allocation_site_ = allocation_site;
}
+ void MarkTail() override {
+ switch (op()) {
+ case Token::COMMA:
+ case Token::AND:
+ case Token::OR:
+ right_->MarkTail();
+ default:
+ break;
+ }
+ }
+
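Only comma, logical AND, and logical OR forward the mark, because those are the operators whose overall value can be exactly the value of the right operand; a call there may therefore be a tail call, while arithmetic operators still have work to do after the operand is evaluated. The same dispatch in isolation:

    enum class Tok { kComma, kAnd, kOr, kAdd, kSub };
    bool RightOperandInTailPosition(Tok op) {
      switch (op) {
        case Tok::kComma:  // (a, b) yields b
        case Tok::kAnd:    // a && b yields b when a is truthy
        case Tok::kOr:     // a || b yields b when a is falsy
          return true;
        default:
          return false;    // e.g. a + b consumes b's value afterwards
      }
    }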
// The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
static int num_ids() { return parent_num_ids() + 2; }
@@ -2080,7 +2214,7 @@ class BinaryOperation final : public Expression {
if (arg.IsJust()) fixed_right_arg_value_ = arg.FromJust();
}
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
protected:
BinaryOperation(Zone* zone, Token::Value op, Expression* left,
@@ -2122,6 +2256,7 @@ class CountOperation final : public Expression {
}
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
bool IsMonomorphic() override { return receiver_types_.length() == 1; }
SmallMapList* GetReceiverTypes() override { return &receiver_types_; }
@@ -2151,8 +2286,8 @@ class CountOperation final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
- FeedbackVectorICSlot CountSlot() const { return slot_; }
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot CountSlot() const { return slot_; }
protected:
CountOperation(Zone* zone, Token::Value op, bool is_prefix, Expression* expr,
@@ -2162,8 +2297,7 @@ class CountOperation final : public Expression {
IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
type_(NULL),
- expression_(expr),
- slot_(FeedbackVectorICSlot::Invalid()) {}
+ expression_(expr) {}
static int parent_num_ids() { return Expression::num_ids(); }
private:
@@ -2180,7 +2314,7 @@ class CountOperation final : public Expression {
Type* type_;
Expression* expression_;
SmallMapList receiver_types_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
@@ -2192,6 +2326,9 @@ class CompareOperation final : public Expression {
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ void set_left(Expression* e) { left_ = e; }
+ void set_right(Expression* e) { right_ = e; }
+
// Type feedback information.
static int num_ids() { return parent_num_ids() + 1; }
TypeFeedbackId CompareOperationFeedbackId() const {
@@ -2233,6 +2370,7 @@ class Spread final : public Expression {
DECLARE_NODE_TYPE(Spread)
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
static int num_ids() { return parent_num_ids(); }
@@ -2256,6 +2394,15 @@ class Conditional final : public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
+ void set_condition(Expression* e) { condition_ = e; }
+ void set_then_expression(Expression* e) { then_expression_ = e; }
+ void set_else_expression(Expression* e) { else_expression_ = e; }
+
+ void MarkTail() override {
+ then_expression_->MarkTail();
+ else_expression_->MarkTail();
+ }
+
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ThenId() const { return BailoutId(local_id(0)); }
BailoutId ElseId() const { return BailoutId(local_id(1)); }
@@ -2289,6 +2436,10 @@ class Assignment final : public Expression {
Token::Value op() const { return TokenField::decode(bit_field_); }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
+
+ void set_target(Expression* e) { target_ = e; }
+ void set_value(Expression* e) { value_ = e; }
+
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -2324,8 +2475,8 @@ class Assignment final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
- FeedbackVectorICSlot AssignmentSlot() const { return slot_; }
+ FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlot AssignmentSlot() const { return slot_; }
protected:
Assignment(Zone* zone, Token::Value op, Expression* target, Expression* value,
@@ -2336,9 +2487,12 @@ class Assignment final : public Expression {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
class IsUninitializedField : public BitField16<bool, 0, 1> {};
- class KeyTypeField : public BitField16<IcCheckType, 1, 1> {};
- class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 3> {};
- class TokenField : public BitField16<Token::Value, 5, 8> {};
+ class KeyTypeField
+ : public BitField16<IcCheckType, IsUninitializedField::kNext, 1> {};
+ class StoreModeField
+ : public BitField16<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
+ class TokenField : public BitField16<Token::Value, StoreModeField::kNext, 8> {
+ };
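The bit fields above now chain on the previous field's kNext instead of hand-written offsets (1, 2, 5), so inserting or resizing one field renumbers those after it automatically. A simplified sketch of the idiom; the real BitField16 also provides update and bounds checks:

    #include <cstdint>
    template <class T, int kShift, int kSize>
    struct BitField16 {
      static const int kNext = kShift + kSize;  // first bit after this field
      static uint16_t encode(T value) {
        return static_cast<uint16_t>(static_cast<uint16_t>(value) << kShift);
      }
      static T decode(uint16_t packed) {
        return static_cast<T>((packed >> kShift) & ((1 << kSize) - 1));
      }
    };
    // Fields pack back to back without manual offsets:
    using A = BitField16<bool, 0, 1>;
    using B = BitField16<int, A::kNext, 3>;  // starts at bit 1
    using C = BitField16<int, B::kNext, 8>;  // starts at bit 4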
// Starts with 16-bit field, which should get packed together with
// Expression's trailing 16-bit field.
@@ -2347,7 +2501,39 @@ class Assignment final : public Expression {
Expression* value_;
BinaryOperation* binary_operation_;
SmallMapList receiver_types_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
+};
+
+
+class RewritableAssignmentExpression : public Expression {
+ public:
+ DECLARE_NODE_TYPE(RewritableAssignmentExpression)
+
+ Expression* expression() { return expr_; }
+ bool is_rewritten() const { return is_rewritten_; }
+
+ void set_expression(Expression* e) { expr_ = e; }
+
+ void Rewrite(Expression* new_expression) {
+ DCHECK(!is_rewritten());
+ DCHECK_NOT_NULL(new_expression);
+ expr_ = new_expression;
+ is_rewritten_ = true;
+ }
+
+ static int num_ids() { return parent_num_ids(); }
+
+ protected:
+ RewritableAssignmentExpression(Zone* zone, Expression* expression)
+ : Expression(zone, expression->position()),
+ is_rewritten_(false),
+ expr_(expression) {}
+
+ private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ bool is_rewritten_;
+ Expression* expr_;
};
@@ -2366,26 +2552,28 @@ class Yield final : public Expression {
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
+ void set_generator_object(Expression* e) { generator_object_ = e; }
+ void set_expression(Expression* e) { expression_ = e; }
+
// Type feedback information.
bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) override {
if (HasFeedbackSlots()) {
yield_first_feedback_slot_ = spec->AddKeyedLoadICSlot();
- spec->AddLoadICSlots(2);
+ keyed_load_feedback_slot_ = spec->AddLoadICSlot();
+ done_feedback_slot_ = spec->AddLoadICSlot();
}
}
- FeedbackVectorICSlot KeyedLoadFeedbackSlot() {
+ FeedbackVectorSlot KeyedLoadFeedbackSlot() {
DCHECK(!HasFeedbackSlots() || !yield_first_feedback_slot_.IsInvalid());
return yield_first_feedback_slot_;
}
- FeedbackVectorICSlot DoneFeedbackSlot() {
- return KeyedLoadFeedbackSlot().next();
- }
+ FeedbackVectorSlot DoneFeedbackSlot() { return keyed_load_feedback_slot_; }
- FeedbackVectorICSlot ValueFeedbackSlot() { return DoneFeedbackSlot().next(); }
+ FeedbackVectorSlot ValueFeedbackSlot() { return done_feedback_slot_; }
protected:
Yield(Zone* zone, Expression* generator_object, Expression* expression,
@@ -2393,14 +2581,15 @@ class Yield final : public Expression {
: Expression(zone, pos),
generator_object_(generator_object),
expression_(expression),
- yield_kind_(yield_kind),
- yield_first_feedback_slot_(FeedbackVectorICSlot::Invalid()) {}
+ yield_kind_(yield_kind) {}
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
- FeedbackVectorICSlot yield_first_feedback_slot_;
+ FeedbackVectorSlot yield_first_feedback_slot_;
+ FeedbackVectorSlot keyed_load_feedback_slot_;
+ FeedbackVectorSlot done_feedback_slot_;
};
@@ -2409,6 +2598,7 @@ class Throw final : public Expression {
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
+ void set_exception(Expression* e) { exception_ = e; }
protected:
Throw(Zone* zone, Expression* exception, int pos)
@@ -2422,35 +2612,23 @@ class Throw final : public Expression {
class FunctionLiteral final : public Expression {
public:
enum FunctionType {
- ANONYMOUS_EXPRESSION,
- NAMED_EXPRESSION,
- DECLARATION
- };
-
- enum ParameterFlag {
- kNoDuplicateParameters = 0,
- kHasDuplicateParameters = 1
+ kAnonymousExpression,
+ kNamedExpression,
+ kDeclaration,
+ kGlobalOrEval
};
- enum IsFunctionFlag {
- kGlobalOrEval,
- kIsFunction
- };
+ enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
- enum ShouldBeUsedOnceHint { kShouldBeUsedOnce, kDontKnowIfShouldBeUsedOnce };
-
- enum ArityRestriction {
- NORMAL_ARITY,
- GETTER_ARITY,
- SETTER_ARITY
- };
+ enum ArityRestriction { kNormalArity, kGetterArity, kSetterArity };
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return raw_name_->string(); }
- const AstRawString* raw_name() const { return raw_name_; }
+ const AstString* raw_name() const { return raw_name_; }
+ void set_raw_name(const AstString* name) { raw_name_ = name; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
@@ -2505,14 +2683,14 @@ class FunctionLiteral final : public Expression {
inferred_name_ = Handle<String>();
}
- bool pretenure() { return Pretenure::decode(bitfield_); }
- void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
+ bool pretenure() const { return Pretenure::decode(bitfield_); }
+ void set_pretenure() { bitfield_ = Pretenure::update(bitfield_, true); }
- bool has_duplicate_parameters() {
+ bool has_duplicate_parameters() const {
return HasDuplicateParameters::decode(bitfield_);
}
- bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
+ bool is_function() const { return IsFunction::decode(bitfield_); }
// This is used as a heuristic on when to eagerly compile a function
// literal. We consider the following constructs as hints that the
@@ -2520,19 +2698,19 @@ class FunctionLiteral final : public Expression {
// - (function() { ... })();
// - var x = function() { ... }();
bool should_eager_compile() const {
- return EagerCompileHintBit::decode(bitfield_) == kShouldEagerCompile;
+ return ShouldEagerCompile::decode(bitfield_);
}
void set_should_eager_compile() {
- bitfield_ = EagerCompileHintBit::update(bitfield_, kShouldEagerCompile);
+ bitfield_ = ShouldEagerCompile::update(bitfield_, true);
}
// A hint that we expect this function to be called (exactly) once,
// i.e. we suspect it's an initialization function.
bool should_be_used_once_hint() const {
- return ShouldBeUsedOnceHintBit::decode(bitfield_) == kShouldBeUsedOnce;
+ return ShouldBeUsedOnceHint::decode(bitfield_);
}
void set_should_be_used_once_hint() {
- bitfield_ = ShouldBeUsedOnceHintBit::update(bitfield_, kShouldBeUsedOnce);
+ bitfield_ = ShouldBeUsedOnceHint::update(bitfield_, true);
}
FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
@@ -2552,13 +2730,12 @@ class FunctionLiteral final : public Expression {
}
protected:
- FunctionLiteral(Zone* zone, const AstRawString* name,
+ FunctionLiteral(Zone* zone, const AstString* name,
AstValueFactory* ast_value_factory, Scope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
int expected_property_count, int parameter_count,
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
- IsFunctionFlag is_function,
EagerCompileHint eager_compile_hint, FunctionKind kind,
int position)
: Expression(zone, position),
@@ -2572,20 +2749,33 @@ class FunctionLiteral final : public Expression {
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
- bitfield_ = IsExpression::encode(function_type != DECLARATION) |
- IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
- Pretenure::encode(false) |
- HasDuplicateParameters::encode(has_duplicate_parameters) |
- IsFunction::encode(is_function) |
- EagerCompileHintBit::encode(eager_compile_hint) |
- FunctionKindBits::encode(kind) |
- ShouldBeUsedOnceHintBit::encode(kDontKnowIfShouldBeUsedOnce);
+ bitfield_ =
+ IsExpression::encode(function_type != kDeclaration) |
+ IsAnonymous::encode(function_type == kAnonymousExpression) |
+ Pretenure::encode(false) |
+ HasDuplicateParameters::encode(has_duplicate_parameters ==
+ kHasDuplicateParameters) |
+ IsFunction::encode(function_type != kGlobalOrEval) |
+ ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
+ FunctionKindBits::encode(kind) | ShouldBeUsedOnceHint::encode(false);
DCHECK(IsValidFunctionKind(kind));
}
private:
- const AstRawString* raw_name_;
- Handle<String> name_;
+ class IsExpression : public BitField16<bool, 0, 1> {};
+ class IsAnonymous : public BitField16<bool, 1, 1> {};
+ class Pretenure : public BitField16<bool, 2, 1> {};
+ class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
+ class IsFunction : public BitField16<bool, 4, 1> {};
+ class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
+ class FunctionKindBits : public BitField16<FunctionKind, 6, 8> {};
+ class ShouldBeUsedOnceHint : public BitField16<bool, 15, 1> {};
+
+ // Start with 16-bit field, which should get packed together
+ // with Expression's trailing 16-bit field.
+ uint16_t bitfield_;
+
+ const AstString* raw_name_;
Scope* scope_;
ZoneList<Statement*>* body_;
const AstString* raw_inferred_name_;
@@ -2597,17 +2787,6 @@ class FunctionLiteral final : public Expression {
int expected_property_count_;
int parameter_count_;
int function_token_position_;
-
- unsigned bitfield_;
- class IsExpression : public BitField<bool, 0, 1> {};
- class IsAnonymous : public BitField<bool, 1, 1> {};
- class Pretenure : public BitField<bool, 2, 1> {};
- class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
- class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
- class EagerCompileHintBit : public BitField<EagerCompileHint, 5, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 6, 8> {};
- class ShouldBeUsedOnceHintBit : public BitField<ShouldBeUsedOnceHint, 15, 1> {
- };
};
@@ -2619,10 +2798,17 @@ class ClassLiteral final : public Expression {
Handle<String> name() const { return raw_name_->string(); }
const AstRawString* raw_name() const { return raw_name_; }
+ void set_raw_name(const AstRawString* name) {
+ DCHECK_NULL(raw_name_);
+ raw_name_ = name;
+ }
+
Scope* scope() const { return scope_; }
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
+ void set_extends(Expression* e) { extends_ = e; }
FunctionLiteral* constructor() const { return constructor_; }
+ void set_constructor(FunctionLiteral* f) { constructor_ = f; }
ZoneList<Property*>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
@@ -2642,14 +2828,14 @@ class ClassLiteral final : public Expression {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- ICSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache) override;
bool NeedsProxySlot() const {
- return FLAG_vector_stores && scope() != NULL &&
+ return class_variable_proxy() != nullptr &&
class_variable_proxy()->var()->IsUnallocated();
}
- FeedbackVectorICSlot ProxySlot() const { return slot_; }
+ FeedbackVectorSlot ProxySlot() const { return slot_; }
protected:
ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
@@ -2663,9 +2849,7 @@ class ClassLiteral final : public Expression {
extends_(extends),
constructor_(constructor),
properties_(properties),
- end_position_(end_position),
- slot_(FeedbackVectorICSlot::Invalid()) {
- }
+ end_position_(end_position) {}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -2679,7 +2863,7 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
int end_position_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
@@ -2715,7 +2899,9 @@ class SuperPropertyReference final : public Expression {
DECLARE_NODE_TYPE(SuperPropertyReference)
VariableProxy* this_var() const { return this_var_; }
+ void set_this_var(VariableProxy* v) { this_var_ = v; }
Expression* home_object() const { return home_object_; }
+ void set_home_object(Expression* e) { home_object_ = e; }
protected:
SuperPropertyReference(Zone* zone, VariableProxy* this_var,
@@ -2736,8 +2922,11 @@ class SuperCallReference final : public Expression {
DECLARE_NODE_TYPE(SuperCallReference)
VariableProxy* this_var() const { return this_var_; }
+ void set_this_var(VariableProxy* v) { this_var_ = v; }
VariableProxy* new_target_var() const { return new_target_var_; }
+ void set_new_target_var(VariableProxy* v) { new_target_var_ = v; }
VariableProxy* this_function_var() const { return this_function_var_; }
+ void set_this_function_var(VariableProxy* v) { this_function_var_ = v; }
protected:
SuperCallReference(Zone* zone, VariableProxy* this_var,
@@ -2774,363 +2963,6 @@ class EmptyParentheses final : public Expression {
// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-class RegExpVisitor BASE_EMBEDDED {
- public:
- virtual ~RegExpVisitor() { }
-#define MAKE_CASE(Name) \
- virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
-};
-
-
-class RegExpTree : public ZoneObject {
- public:
- static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() {}
- virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) = 0;
- virtual bool IsTextElement() { return false; }
- virtual bool IsAnchoredAtStart() { return false; }
- virtual bool IsAnchoredAtEnd() { return false; }
- virtual int min_match() = 0;
- virtual int max_match() = 0;
- // Returns the interval of registers used for captures within this
- // expression.
- virtual Interval CaptureRegisters() { return Interval::Empty(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
- std::ostream& Print(std::ostream& os, Zone* zone); // NOLINT
-#define MAKE_ASTYPE(Name) \
- virtual RegExp##Name* As##Name(); \
- virtual bool Is##Name();
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
-#undef MAKE_ASTYPE
-};
-
-
-class RegExpDisjunction final : public RegExpTree {
- public:
- explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpDisjunction* AsDisjunction() override;
- Interval CaptureRegisters() override;
- bool IsDisjunction() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- int min_match() override { return min_match_; }
- int max_match() override { return max_match_; }
- ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
- private:
- bool SortConsecutiveAtoms(RegExpCompiler* compiler);
- void RationalizeConsecutiveAtoms(RegExpCompiler* compiler);
- void FixSingleCharacterDisjunctions(RegExpCompiler* compiler);
- ZoneList<RegExpTree*>* alternatives_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAlternative final : public RegExpTree {
- public:
- explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpAlternative* AsAlternative() override;
- Interval CaptureRegisters() override;
- bool IsAlternative() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- int min_match() override { return min_match_; }
- int max_match() override { return max_match_; }
- ZoneList<RegExpTree*>* nodes() { return nodes_; }
- private:
- ZoneList<RegExpTree*>* nodes_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAssertion final : public RegExpTree {
- public:
- enum AssertionType {
- START_OF_LINE,
- START_OF_INPUT,
- END_OF_LINE,
- END_OF_INPUT,
- BOUNDARY,
- NON_BOUNDARY
- };
- explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpAssertion* AsAssertion() override;
- bool IsAssertion() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- int min_match() override { return 0; }
- int max_match() override { return 0; }
- AssertionType assertion_type() { return assertion_type_; }
- private:
- AssertionType assertion_type_;
-};
-
-
-class CharacterSet final BASE_EMBEDDED {
- public:
- explicit CharacterSet(uc16 standard_set_type)
- : ranges_(NULL),
- standard_set_type_(standard_set_type) {}
- explicit CharacterSet(ZoneList<CharacterRange>* ranges)
- : ranges_(ranges),
- standard_set_type_(0) {}
- ZoneList<CharacterRange>* ranges(Zone* zone);
- uc16 standard_set_type() { return standard_set_type_; }
- void set_standard_set_type(uc16 special_set_type) {
- standard_set_type_ = special_set_type;
- }
- bool is_standard() { return standard_set_type_ != 0; }
- void Canonicalize();
- private:
- ZoneList<CharacterRange>* ranges_;
- // If non-zero, the value represents a standard set (e.g., all whitespace
- // characters) without having to expand the ranges.
- uc16 standard_set_type_;
-};
-
-
-class RegExpCharacterClass final : public RegExpTree {
- public:
- RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
- : set_(ranges),
- is_negated_(is_negated) { }
- explicit RegExpCharacterClass(uc16 type)
- : set_(type),
- is_negated_(false) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpCharacterClass* AsCharacterClass() override;
- bool IsCharacterClass() override;
- bool IsTextElement() override { return true; }
- int min_match() override { return 1; }
- int max_match() override { return 1; }
- void AppendToText(RegExpText* text, Zone* zone) override;
- CharacterSet character_set() { return set_; }
- // TODO(lrn): Remove need for complex version if is_standard that
- // recognizes a mangled standard set and just do { return set_.is_special(); }
- bool is_standard(Zone* zone);
- // Returns a value representing the standard character set if is_standard()
- // returns true.
- // Currently used values are:
- // s : unicode whitespace
- // S : unicode non-whitespace
- // w : ASCII word character (digit, letter, underscore)
- // W : non-ASCII word character
- // d : ASCII digit
- // D : non-ASCII digit
- // . : non-unicode non-newline
- // * : All characters
- uc16 standard_type() { return set_.standard_set_type(); }
- ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
- bool is_negated() { return is_negated_; }
-
- private:
- CharacterSet set_;
- bool is_negated_;
-};
-
-
-class RegExpAtom final : public RegExpTree {
- public:
- explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpAtom* AsAtom() override;
- bool IsAtom() override;
- bool IsTextElement() override { return true; }
- int min_match() override { return data_.length(); }
- int max_match() override { return data_.length(); }
- void AppendToText(RegExpText* text, Zone* zone) override;
- Vector<const uc16> data() { return data_; }
- int length() { return data_.length(); }
- private:
- Vector<const uc16> data_;
-};
-
-
-class RegExpText final : public RegExpTree {
- public:
- explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpText* AsText() override;
- bool IsText() override;
- bool IsTextElement() override { return true; }
- int min_match() override { return length_; }
- int max_match() override { return length_; }
- void AppendToText(RegExpText* text, Zone* zone) override;
- void AddElement(TextElement elm, Zone* zone) {
- elements_.Add(elm, zone);
- length_ += elm.length();
- }
- ZoneList<TextElement>* elements() { return &elements_; }
- private:
- ZoneList<TextElement> elements_;
- int length_;
-};
-
-
-class RegExpQuantifier final : public RegExpTree {
- public:
- enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
- RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
- : body_(body),
- min_(min),
- max_(max),
- min_match_(min * body->min_match()),
- quantifier_type_(type) {
- if (max > 0 && body->max_match() > kInfinity / max) {
- max_match_ = kInfinity;
- } else {
- max_match_ = max * body->max_match();
- }
- }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- static RegExpNode* ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start = false);
- RegExpQuantifier* AsQuantifier() override;
- Interval CaptureRegisters() override;
- bool IsQuantifier() override;
- int min_match() override { return min_match_; }
- int max_match() override { return max_match_; }
- int min() { return min_; }
- int max() { return max_; }
- bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
- bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
- bool is_greedy() { return quantifier_type_ == GREEDY; }
- RegExpTree* body() { return body_; }
-
- private:
- RegExpTree* body_;
- int min_;
- int max_;
- int min_match_;
- int max_match_;
- QuantifierType quantifier_type_;
-};
-
-
-class RegExpCapture final : public RegExpTree {
- public:
- explicit RegExpCapture(RegExpTree* body, int index)
- : body_(body), index_(index) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- static RegExpNode* ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success);
- RegExpCapture* AsCapture() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- Interval CaptureRegisters() override;
- bool IsCapture() override;
- int min_match() override { return body_->min_match(); }
- int max_match() override { return body_->max_match(); }
- RegExpTree* body() { return body_; }
- int index() { return index_; }
- static int StartRegister(int index) { return index * 2; }
- static int EndRegister(int index) { return index * 2 + 1; }
-
- private:
- RegExpTree* body_;
- int index_;
-};
-
-
-class RegExpLookahead final : public RegExpTree {
- public:
- RegExpLookahead(RegExpTree* body,
- bool is_positive,
- int capture_count,
- int capture_from)
- : body_(body),
- is_positive_(is_positive),
- capture_count_(capture_count),
- capture_from_(capture_from) { }
-
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpLookahead* AsLookahead() override;
- Interval CaptureRegisters() override;
- bool IsLookahead() override;
- bool IsAnchoredAtStart() override;
- int min_match() override { return 0; }
- int max_match() override { return 0; }
- RegExpTree* body() { return body_; }
- bool is_positive() { return is_positive_; }
- int capture_count() { return capture_count_; }
- int capture_from() { return capture_from_; }
-
- private:
- RegExpTree* body_;
- bool is_positive_;
- int capture_count_;
- int capture_from_;
-};
-
-
-class RegExpBackReference final : public RegExpTree {
- public:
- explicit RegExpBackReference(RegExpCapture* capture)
- : capture_(capture) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpBackReference* AsBackReference() override;
- bool IsBackReference() override;
- int min_match() override { return 0; }
- int max_match() override { return capture_->max_match(); }
- int index() { return capture_->index(); }
- RegExpCapture* capture() { return capture_; }
- private:
- RegExpCapture* capture_;
-};
-
-
-class RegExpEmpty final : public RegExpTree {
- public:
- RegExpEmpty() { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) override;
- RegExpEmpty* AsEmpty() override;
- bool IsEmpty() override;
- int min_match() override { return 0; }
- int max_match() override { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
// Basic visitor
// - leaf node visitors are abstract.
@@ -3154,7 +2986,6 @@ class AstVisitor BASE_EMBEDDED {
#undef DEF_VISIT
};
-
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
void Visit(AstNode* node) final { \
@@ -3167,25 +2998,90 @@ class AstVisitor BASE_EMBEDDED {
\
bool CheckStackOverflow() { \
if (stack_overflow_) return true; \
- StackLimitCheck check(isolate_); \
- if (!check.HasOverflowed()) return false; \
- stack_overflow_ = true; \
- return true; \
+ if (GetCurrentStackPosition() < stack_limit_) { \
+ stack_overflow_ = true; \
+ return true; \
+ } \
+ return false; \
} \
\
private: \
- void InitializeAstVisitor(Isolate* isolate, Zone* zone) { \
- isolate_ = isolate; \
- zone_ = zone; \
+ void InitializeAstVisitor(Isolate* isolate) { \
+ stack_limit_ = isolate->stack_guard()->real_climit(); \
stack_overflow_ = false; \
} \
- Zone* zone() { return zone_; } \
- Isolate* isolate() { return isolate_; } \
\
- Isolate* isolate_; \
- Zone* zone_; \
+ void InitializeAstVisitor(uintptr_t stack_limit) { \
+ stack_limit_ = stack_limit; \
+ stack_overflow_ = false; \
+ } \
+ \
+ uintptr_t stack_limit_; \
bool stack_overflow_
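CheckStackOverflow above now compares the current stack position against a cached limit instead of going through the Isolate's StackLimitCheck, and the new uintptr_t overload of InitializeAstVisitor lets a visitor run where only a raw limit is available, with no Isolate in hand. A sketch of the underlying check, assuming a downward-growing stack and approximating the stack position with a local's address:

    #include <cstdint>
    inline uintptr_t CurrentStackPositionApprox() {
      char marker;
      return reinterpret_cast<uintptr_t>(&marker);  // address of a fresh local
    }
    inline bool HasOverflowed(uintptr_t stack_limit) {
      // Stacks grow toward lower addresses here, so falling below the
      // limit means the guard zone has been reached.
      return CurrentStackPositionApprox() < stack_limit;
    }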
+#define DEFINE_AST_REWRITER_SUBCLASS_MEMBERS() \
+ public: \
+ AstNode* Rewrite(AstNode* node) { \
+ DCHECK_NULL(replacement_); \
+ DCHECK_NOT_NULL(node); \
+ Visit(node); \
+ if (HasStackOverflow()) return node; \
+ if (replacement_ == nullptr) return node; \
+ AstNode* result = replacement_; \
+ replacement_ = nullptr; \
+ return result; \
+ } \
+ \
+ private: \
+ void InitializeAstRewriter(Isolate* isolate) { \
+ InitializeAstVisitor(isolate); \
+ replacement_ = nullptr; \
+ } \
+ \
+ void InitializeAstRewriter(uintptr_t stack_limit) { \
+ InitializeAstVisitor(stack_limit); \
+ replacement_ = nullptr; \
+ } \
+ \
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); \
+ \
+ protected: \
+ AstNode* replacement_
+
+// Generic macro for rewriting things; `GET` is the expression to be
+// rewritten; `SET` is a command that should do the rewriting, i.e.
+// something sensible with the variable called `replacement`.
+#define AST_REWRITE(Type, GET, SET) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ DCHECK_NULL(replacement_); \
+ Visit(GET); \
+ if (HasStackOverflow()) return; \
+ if (replacement_ == nullptr) break; \
+ Type* replacement = reinterpret_cast<Type*>(replacement_); \
+ do { \
+ SET; \
+ } while (false); \
+ replacement_ = nullptr; \
+ } while (false)
+
+// Macro for rewriting object properties; it assumes that `object` has
+// `property` with a public getter and setter.
+#define AST_REWRITE_PROPERTY(Type, object, property) \
+ do { \
+ auto _obj = (object); \
+ AST_REWRITE(Type, _obj->property(), _obj->set_##property(replacement)); \
+ } while (false)
+
+// Macro for rewriting list elements; it assumes that `list` has methods
+// `at` and `Set`.
+#define AST_REWRITE_LIST_ELEMENT(Type, list, index) \
+ do { \
+ auto _list = (list); \
+ auto _index = (index); \
+ AST_REWRITE(Type, _list->at(_index), _list->Set(_index, replacement)); \
+ } while (false)
+
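A sketch of how a rewriter subclass built from these macros might drive AST_REWRITE_PROPERTY; the ExampleRewriter class and the set_left/set_right setters are illustrative assumptions rather than actual V8 call sites (the macro only requires a public getter/setter pair of that shape, and must be used inside a void function because it may `return;` on stack overflow):

    // Hypothetical rewriter method, for illustration only.
    void ExampleRewriter::VisitBinaryOperation(BinaryOperation* node) {
      // Each line visits the child; if the visit produced a non-null
      // replacement_, it is installed via node->set_left(replacement)
      // (resp. set_right) and replacement_ is cleared again.
      AST_REWRITE_PROPERTY(Expression, node, left);
      AST_REWRITE_PROPERTY(Expression, node, right);
    }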
// ----------------------------------------------------------------------------
// AstNode factory
@@ -3197,6 +3093,8 @@ class AstNodeFactory final BASE_EMBEDDED {
parser_zone_(ast_value_factory->zone()),
ast_value_factory_(ast_value_factory) {}
+ AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
+
VariableDeclaration* NewVariableDeclaration(
VariableProxy* proxy, VariableMode mode, Scope* scope, int pos,
bool is_class_declaration = false, int declaration_group_start = -1) {
@@ -3315,8 +3213,8 @@ class AstNodeFactory final BASE_EMBEDDED {
SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
Statement* statement, Scope* scope) {
- return new (local_zone_)
- SloppyBlockFunctionStatement(local_zone_, statement, scope);
+ return new (parser_zone_)
+ SloppyBlockFunctionStatement(parser_zone_, statement, scope);
}
CaseClause* NewCaseClause(
@@ -3392,11 +3290,8 @@ class AstNodeFactory final BASE_EMBEDDED {
ast_value_factory_, key, value, is_static, is_computed_name);
}
- RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern,
- const AstRawString* flags,
- int literal_index,
- bool is_strong,
- int pos) {
+ RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
+ int literal_index, bool is_strong, int pos) {
return new (local_zone_) RegExpLiteral(local_zone_, pattern, flags,
literal_index, is_strong, pos);
}
@@ -3506,6 +3401,14 @@ class AstNodeFactory final BASE_EMBEDDED {
local_zone_, condition, then_expression, else_expression, position);
}
+ RewritableAssignmentExpression* NewRewritableAssignmentExpression(
+ Expression* expression) {
+ DCHECK_NOT_NULL(expression);
+ DCHECK(expression->IsAssignment());
+ return new (local_zone_)
+ RewritableAssignmentExpression(local_zone_, expression);
+ }
+
Assignment* NewAssignment(Token::Value op,
Expression* target,
Expression* value,
@@ -3535,19 +3438,18 @@ class AstNodeFactory final BASE_EMBEDDED {
}
FunctionLiteral* NewFunctionLiteral(
- const AstRawString* name, AstValueFactory* ast_value_factory,
- Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int parameter_count,
+ const AstRawString* name, Scope* scope, ZoneList<Statement*>* body,
+ int materialized_literal_count, int expected_property_count,
+ int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
int position) {
return new (parser_zone_) FunctionLiteral(
- parser_zone_, name, ast_value_factory, scope, body,
+ parser_zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
- function_type, has_duplicate_parameters, is_function,
- eager_compile_hint, kind, position);
+ function_type, has_duplicate_parameters, eager_compile_hint, kind,
+ position);
}
ClassLiteral* NewClassLiteral(const AstRawString* name, Scope* scope,
@@ -3567,6 +3469,11 @@ class AstNodeFactory final BASE_EMBEDDED {
NativeFunctionLiteral(parser_zone_, name, extension, pos);
}
+ DoExpression* NewDoExpression(Block* block, Variable* result_var, int pos) {
+ VariableProxy* result = NewVariableProxy(result_var, pos);
+ return new (parser_zone_) DoExpression(parser_zone_, block, result, pos);
+ }
+
ThisFunction* NewThisFunction(int pos) {
return new (local_zone_) ThisFunction(local_zone_, pos);
}
@@ -3622,6 +3529,7 @@ class AstNodeFactory final BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_AST_H_
+#endif // V8_AST_AST_H_
diff --git a/chromium/v8/src/modules.cc b/chromium/v8/src/ast/modules.cc
index f72693cd66d..225cd8d62cd 100644
--- a/chromium/v8/src/modules.cc
+++ b/chromium/v8/src/ast/modules.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/modules.h"
+#include "src/ast/modules.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/modules.h b/chromium/v8/src/ast/modules.h
index 33afd6128d3..e3c66dce94d 100644
--- a/chromium/v8/src/modules.h
+++ b/chromium/v8/src/ast/modules.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MODULES_H_
-#define V8_MODULES_H_
+#ifndef V8_AST_MODULES_H_
+#define V8_AST_MODULES_H_
#include "src/zone.h"
@@ -115,6 +115,7 @@ class ModuleDescriptor : public ZoneObject {
int index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MODULES_H_
+#endif // V8_AST_MODULES_H_
diff --git a/chromium/v8/src/prettyprinter.cc b/chromium/v8/src/ast/prettyprinter.cc
index 59db57ac7e0..1f6b8c31deb 100644
--- a/chromium/v8/src/prettyprinter.cc
+++ b/chromium/v8/src/ast/prettyprinter.cc
@@ -2,25 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/prettyprinter.h"
+#include "src/ast/prettyprinter.h"
#include <stdarg.h>
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/scopes.h"
#include "src/base/platform/platform.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
-CallPrinter::CallPrinter(Isolate* isolate, Zone* zone) {
+CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin) {
output_ = NULL;
size_ = 0;
pos_ = 0;
position_ = 0;
found_ = false;
done_ = false;
- InitializeAstVisitor(isolate, zone);
+ is_builtin_ = is_builtin;
+ InitializeAstVisitor(isolate);
}
@@ -192,8 +193,9 @@ void CallPrinter::VisitForInStatement(ForInStatement* node) {
void CallPrinter::VisitForOfStatement(ForOfStatement* node) {
Find(node->each());
- Find(node->iterable());
+ Find(node->assign_iterator());
Find(node->body());
+ Find(node->next_result());
}
@@ -228,6 +230,9 @@ void CallPrinter::VisitClassLiteral(ClassLiteral* node) {
void CallPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {}
+void CallPrinter::VisitDoExpression(DoExpression* node) { Find(node->block()); }
+
+
void CallPrinter::VisitConditional(Conditional* node) {
Find(node->condition());
Find(node->then_expression());
@@ -236,15 +241,19 @@ void CallPrinter::VisitConditional(Conditional* node) {
void CallPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->value(), true);
+ PrintLiteral(*node->value(), true);
}
void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
- PrintLiteral(node->pattern(), false);
+ PrintLiteral(*node->pattern(), false);
Print("/");
- PrintLiteral(node->flags(), false);
+ if (node->flags() & RegExp::kGlobal) Print("g");
+ if (node->flags() & RegExp::kIgnoreCase) Print("i");
+ if (node->flags() & RegExp::kMultiline) Print("m");
+ if (node->flags() & RegExp::kUnicode) Print("u");
+ if (node->flags() & RegExp::kSticky) Print("y");
}
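RegExpLiteral now carries its flags as an int bitfield instead of an AstRawString, so each printer decodes the bits in source order (g, i, m, u, y). A self-contained sketch of that decoding; the mask values are assumptions standing in for RegExp::kGlobal and friends, which the tests above treat as single-bit masks:

    #include <string>

    // Sketch: recover the literal's flag suffix from the bitfield,
    // e.g. global|multiline -> "gm" under this assumed bit assignment.
    std::string RegExpFlagsToString(int flags) {
      std::string out;
      if (flags & 1) out += 'g';   // stands in for RegExp::kGlobal
      if (flags & 2) out += 'i';   // RegExp::kIgnoreCase
      if (flags & 4) out += 'm';   // RegExp::kMultiline
      if (flags & 8) out += 'u';   // RegExp::kUnicode
      if (flags & 16) out += 'y';  // RegExp::kSticky
      return out;
    }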
@@ -266,7 +275,12 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void CallPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteral(node->name(), false);
+ if (is_builtin_) {
+ // Variable names of builtins are meaningless due to minification.
+ Print("(var)");
+ } else {
+ PrintLiteral(*node->name(), false);
+ }
}
@@ -288,7 +302,7 @@ void CallPrinter::VisitProperty(Property* node) {
if (literal != NULL && literal->value()->IsInternalizedString()) {
Find(node->obj(), true);
Print(".");
- PrintLiteral(literal->value(), false);
+ PrintLiteral(*literal->value(), false);
} else {
Find(node->obj(), true);
Print("[");
@@ -300,7 +314,15 @@ void CallPrinter::VisitProperty(Property* node) {
void CallPrinter::VisitCall(Call* node) {
bool was_found = !found_ && node->position() == position_;
- if (was_found) found_ = true;
+ if (was_found) {
+ // Bail out if the error is caused by a direct call to a variable in builtin
+ // code. The variable name is meaningless due to minification.
+ if (is_builtin_ && node->expression()->IsVariableProxy()) {
+ done_ = true;
+ return;
+ }
+ found_ = true;
+ }
Find(node->expression(), true);
if (!was_found) Print("(...)");
FindArguments(node->arguments());
@@ -310,7 +332,15 @@ void CallPrinter::VisitCall(Call* node) {
void CallPrinter::VisitCallNew(CallNew* node) {
bool was_found = !found_ && node->position() == position_;
- if (was_found) found_ = true;
+ if (was_found) {
+ // Bail out if the error is caused by a direct call to a variable in builtin
+ // code. The variable name is meaningless due to minification.
+ if (is_builtin_ && node->expression()->IsVariableProxy()) {
+ done_ = true;
+ return;
+ }
+ found_ = true;
+ }
Find(node->expression(), was_found);
FindArguments(node->arguments());
if (was_found) done_ = true;
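VisitCall and VisitCallNew now apply the same early exit: inside minified builtin code a call whose callee is a bare variable has no printable name, so the printer stops rather than emit noise. The shared test, extracted here as a sketch (the helper name is an assumption; the fields match the code above):

    // Sketch of the bailout condition duplicated in VisitCall/VisitCallNew.
    bool BailOutOnDirectBuiltinCall(bool is_builtin, Expression* callee) {
      // Variable names in builtins are meaningless after minification.
      return is_builtin && callee->IsVariableProxy();
    }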
@@ -377,7 +407,15 @@ void CallPrinter::VisitThisFunction(ThisFunction* node) {}
void CallPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {}
-void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {}
+void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {
+ Print("super");
+}
+
+
+void CallPrinter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Find(node->expression());
+}
void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
@@ -396,14 +434,11 @@ void CallPrinter::FindArguments(ZoneList<Expression*>* arguments) {
}
-void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
- Object* object = *value;
+void CallPrinter::PrintLiteral(Object* value, bool quote) {
+ Object* object = value;
if (object->IsString()) {
- String* string = String::cast(object);
if (quote) Print("\"");
- for (int i = 0; i < string->length(); i++) {
- Print("%c", string->Get(i));
- }
+ Print("%s", String::cast(object)->ToCString().get());
if (quote) Print("\"");
} else if (object->IsNull()) {
Print("null");
@@ -415,12 +450,15 @@ void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
Print("undefined");
} else if (object->IsNumber()) {
Print("%g", object->Number());
+ } else if (object->IsSymbol()) {
+ // Symbols can only occur as literals if they were inserted by the parser.
+ PrintLiteral(Symbol::cast(object)->name(), false);
}
}
void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
- PrintLiteral(value->string(), quote);
+ PrintLiteral(*value->string(), quote);
}
@@ -429,22 +467,22 @@ void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
#ifdef DEBUG
-// A helper for ast nodes that use FeedbackVectorICSlots.
-static int FormatICSlotNode(Vector<char>* buf, Expression* node,
- const char* node_name, FeedbackVectorICSlot slot) {
+// A helper for ast nodes that use FeedbackVectorSlots.
+static int FormatSlotNode(Vector<char>* buf, Expression* node,
+ const char* node_name, FeedbackVectorSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
- pos = SNPrintF(*buf + pos, " ICSlot(%d)", slot.ToInt());
+ pos = SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
}
return pos;
}
-PrettyPrinter::PrettyPrinter(Isolate* isolate, Zone* zone) {
+PrettyPrinter::PrettyPrinter(Isolate* isolate) {
output_ = NULL;
size_ = 0;
pos_ = 0;
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
@@ -701,6 +739,13 @@ void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
}
+void PrettyPrinter::VisitDoExpression(DoExpression* node) {
+ Print("(do {");
+ PrintStatements(node->block()->statements());
+ Print("})");
+}
+
+
void PrettyPrinter::VisitConditional(Conditional* node) {
Visit(node->condition());
Print(" ? ");
@@ -719,7 +764,11 @@ void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print(" RegExp(");
PrintLiteral(node->pattern(), false);
Print(",");
- PrintLiteral(node->flags(), false);
+ if (node->flags() & RegExp::kGlobal) Print("g");
+ if (node->flags() & RegExp::kIgnoreCase) Print("i");
+ if (node->flags() & RegExp::kMultiline) Print("m");
+ if (node->flags() & RegExp::kUnicode) Print("u");
+ if (node->flags() & RegExp::kSticky) Print("y");
Print(") ");
}
@@ -880,6 +929,12 @@ void PrettyPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
+void PrettyPrinter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
const char* PrettyPrinter::Print(AstNode* node) {
Init();
Visit(node);
@@ -904,8 +959,8 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}
-void PrettyPrinter::PrintOut(Isolate* isolate, Zone* zone, AstNode* node) {
- PrettyPrinter printer(isolate, zone);
+void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+ PrettyPrinter printer(isolate);
PrintF("%s\n", printer.Print(node));
}
@@ -1061,6 +1116,13 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->inc_indent();
}
+ IndentedScope(AstPrinter* printer, const char* txt, int pos)
+ : ast_printer_(printer) {
+ ast_printer_->PrintIndented(txt);
+ ast_printer_->Print(" at %d\n", pos);
+ ast_printer_->inc_indent();
+ }
+
virtual ~IndentedScope() {
ast_printer_->dec_indent();
}
@@ -1073,8 +1135,7 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-AstPrinter::AstPrinter(Isolate* isolate, Zone* zone)
- : PrettyPrinter(isolate, zone), indent_(0) {}
+AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {}
AstPrinter::~AstPrinter() {
@@ -1124,14 +1185,14 @@ void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) {
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s);
+ IndentedScope indent(this, s, node->position());
Visit(node);
}
const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
Init();
- { IndentedScope indent(this, "FUNC");
+ { IndentedScope indent(this, "FUNC", program->position());
PrintLiteralIndented("NAME", program->name(), true);
PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
PrintParameters(program->scope());
@@ -1180,7 +1241,7 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
void AstPrinter::VisitBlock(Block* node) {
const char* block_txt =
node->ignore_completion_value() ? "BLOCK NOCOMPLETIONS" : "BLOCK";
- IndentedScope indent(this, block_txt);
+ IndentedScope indent(this, block_txt, node->position());
PrintStatements(node->statements());
}
@@ -1204,26 +1265,26 @@ void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- IndentedScope indent(this, "IMPORT");
+ IndentedScope indent(this, "IMPORT", node->position());
PrintLiteralIndented("NAME", node->proxy()->name(), true);
PrintLiteralIndented("FROM", node->module_specifier()->string(), true);
}
void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
- IndentedScope indent(this, "EXPORT ");
+ IndentedScope indent(this, "EXPORT", node->position());
PrintLiteral(node->proxy()->name(), true);
}
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- IndentedScope indent(this, "EXPRESSION STATEMENT");
+ IndentedScope indent(this, "EXPRESSION STATEMENT", node->position());
Visit(node->expression());
}
void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- IndentedScope indent(this, "EMPTY");
+ IndentedScope indent(this, "EMPTY", node->position());
}
@@ -1234,7 +1295,7 @@ void AstPrinter::VisitSloppyBlockFunctionStatement(
void AstPrinter::VisitIfStatement(IfStatement* node) {
- IndentedScope indent(this, "IF");
+ IndentedScope indent(this, "IF", node->position());
PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_statement());
if (node->HasElseStatement()) {
@@ -1244,32 +1305,32 @@ void AstPrinter::VisitIfStatement(IfStatement* node) {
void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- IndentedScope indent(this, "CONTINUE");
+ IndentedScope indent(this, "CONTINUE", node->position());
PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- IndentedScope indent(this, "BREAK");
+ IndentedScope indent(this, "BREAK", node->position());
PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- IndentedScope indent(this, "RETURN");
+ IndentedScope indent(this, "RETURN", node->position());
Visit(node->expression());
}
void AstPrinter::VisitWithStatement(WithStatement* node) {
- IndentedScope indent(this, "WITH");
+ IndentedScope indent(this, "WITH", node->position());
PrintIndentedVisit("OBJECT", node->expression());
PrintIndentedVisit("BODY", node->statement());
}
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
- IndentedScope indent(this, "SWITCH");
+ IndentedScope indent(this, "SWITCH", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
@@ -1280,10 +1341,10 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitCaseClause(CaseClause* clause) {
if (clause->is_default()) {
- IndentedScope indent(this, "DEFAULT");
+ IndentedScope indent(this, "DEFAULT", clause->position());
PrintStatements(clause->statements());
} else {
- IndentedScope indent(this, "CASE");
+ IndentedScope indent(this, "CASE", clause->position());
Visit(clause->label());
PrintStatements(clause->statements());
}
@@ -1291,7 +1352,7 @@ void AstPrinter::VisitCaseClause(CaseClause* clause) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- IndentedScope indent(this, "DO");
+ IndentedScope indent(this, "DO", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@@ -1299,7 +1360,7 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
- IndentedScope indent(this, "WHILE");
+ IndentedScope indent(this, "WHILE", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -1307,7 +1368,7 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
- IndentedScope indent(this, "FOR");
+ IndentedScope indent(this, "FOR", node->position());
PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -1317,7 +1378,7 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
- IndentedScope indent(this, "FOR IN");
+ IndentedScope indent(this, "FOR IN", node->position());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -1325,7 +1386,7 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
- IndentedScope indent(this, "FOR OF");
+ IndentedScope indent(this, "FOR OF", node->position());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("OF", node->iterable());
PrintIndentedVisit("BODY", node->body());
@@ -1333,7 +1394,7 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- IndentedScope indent(this, "TRY CATCH");
+ IndentedScope indent(this, "TRY CATCH", node->position());
PrintIndentedVisit("TRY", node->try_block());
PrintLiteralWithModeIndented("CATCHVAR",
node->variable(),
@@ -1343,19 +1404,19 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- IndentedScope indent(this, "TRY finalLY");
+ IndentedScope indent(this, "TRY FINALLY", node->position());
PrintIndentedVisit("TRY", node->try_block());
PrintIndentedVisit("FINALLY", node->finally_block());
}
void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- IndentedScope indent(this, "DEBUGGER");
+ IndentedScope indent(this, "DEBUGGER", node->position());
}
void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- IndentedScope indent(this, "FUNC LITERAL");
+ IndentedScope indent(this, "FUNC LITERAL", node->position());
PrintLiteralIndented("NAME", node->name(), false);
PrintLiteralIndented("INFERRED NAME", node->inferred_name(), false);
PrintParameters(node->scope());
@@ -1367,7 +1428,7 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
- IndentedScope indent(this, "CLASS LITERAL");
+ IndentedScope indent(this, "CLASS LITERAL", node->position());
if (node->raw_name() != nullptr) {
PrintLiteralIndented("NAME", node->name(), false);
}
@@ -1414,13 +1475,19 @@ void AstPrinter::PrintProperties(
void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
- IndentedScope indent(this, "NATIVE FUNC LITERAL");
+ IndentedScope indent(this, "NATIVE FUNC LITERAL", node->position());
PrintLiteralIndented("NAME", node->name(), false);
}
+void AstPrinter::VisitDoExpression(DoExpression* node) {
+ IndentedScope indent(this, "DO EXPRESSION", node->position());
+ PrintStatements(node->block()->statements());
+}
+
+
void AstPrinter::VisitConditional(Conditional* node) {
- IndentedScope indent(this, "CONDITIONAL");
+ IndentedScope indent(this, "CONDITIONAL", node->position());
PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_expression());
PrintIndentedVisit("ELSE", node->else_expression());
@@ -1434,17 +1501,26 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- IndentedScope indent(this, "REGEXP LITERAL");
+ IndentedScope indent(this, "REGEXP LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
PrintLiteralIndented("PATTERN", node->pattern(), false);
- PrintLiteralIndented("FLAGS", node->flags(), false);
+ int i = 0;
+ if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
+ if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
+ if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
+ if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
+ if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
+ buf[i] = '\0';
+ PrintIndented("FLAGS ");
+ Print(buf.start());
+ Print("\n");
}
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- IndentedScope indent(this, "OBJ LITERAL");
+ IndentedScope indent(this, "OBJ LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
@@ -1453,13 +1529,13 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- IndentedScope indent(this, "ARRAY LITERAL");
+ IndentedScope indent(this, "ARRAY LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
if (node->values()->length() > 0) {
- IndentedScope indent(this, "VALUES");
+ IndentedScope indent(this, "VALUES", node->position());
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
@@ -1471,7 +1547,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
int pos =
- FormatICSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
+ FormatSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
switch (var->location()) {
case VariableLocation::UNALLOCATED:
@@ -1497,28 +1573,28 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->target());
Visit(node->value());
}
void AstPrinter::VisitYield(Yield* node) {
- IndentedScope indent(this, "YIELD");
+ IndentedScope indent(this, "YIELD", node->position());
Visit(node->expression());
}
void AstPrinter::VisitThrow(Throw* node) {
- IndentedScope indent(this, "THROW");
+ IndentedScope indent(this, "THROW", node->position());
Visit(node->exception());
}
void AstPrinter::VisitProperty(Property* node) {
EmbeddedVector<char, 128> buf;
- FormatICSlotNode(&buf, node, "PROPERTY", node->PropertyFeedbackSlot());
- IndentedScope indent(this, buf.start());
+ FormatSlotNode(&buf, node, "PROPERTY", node->PropertyFeedbackSlot());
+ IndentedScope indent(this, buf.start(), node->position());
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
@@ -1532,7 +1608,7 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
- FormatICSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
+ FormatSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
IndentedScope indent(this, buf.start());
Visit(node->expression());
@@ -1541,7 +1617,7 @@ void AstPrinter::VisitCall(Call* node) {
void AstPrinter::VisitCallNew(CallNew* node) {
- IndentedScope indent(this, "CALL NEW");
+ IndentedScope indent(this, "CALL NEW", node->position());
Visit(node->expression());
PrintArguments(node->arguments());
}
@@ -1550,13 +1626,13 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
- IndentedScope indent(this, buf.start());
+ IndentedScope indent(this, buf.start(), node->position());
PrintArguments(node->arguments());
}
void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->expression());
}
@@ -1565,48 +1641,54 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
- IndentedScope indent(this, buf.start());
+ IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()));
+ IndentedScope indent(this, Token::Name(node->op()), node->position());
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitSpread(Spread* node) {
- IndentedScope indent(this, "...");
+ IndentedScope indent(this, "...", node->position());
Visit(node->expression());
}
void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
- IndentedScope indent(this, "()");
+ IndentedScope indent(this, "()", node->position());
}
void AstPrinter::VisitThisFunction(ThisFunction* node) {
- IndentedScope indent(this, "THIS-FUNCTION");
+ IndentedScope indent(this, "THIS-FUNCTION", node->position());
}
void AstPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {
- IndentedScope indent(this, "SUPER-PROPERTY-REFERENCE");
+ IndentedScope indent(this, "SUPER-PROPERTY-REFERENCE", node->position());
}
void AstPrinter::VisitSuperCallReference(SuperCallReference* node) {
- IndentedScope indent(this, "SUPER-CALL-REFERENCE");
+ IndentedScope indent(this, "SUPER-CALL-REFERENCE", node->position());
+}
+
+
+void AstPrinter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
}
diff --git a/chromium/v8/src/prettyprinter.h b/chromium/v8/src/ast/prettyprinter.h
index 1971cfe8399..7e4dcdc804c 100644
--- a/chromium/v8/src/prettyprinter.h
+++ b/chromium/v8/src/ast/prettyprinter.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PRETTYPRINTER_H_
-#define V8_PRETTYPRINTER_H_
+#ifndef V8_AST_PRETTYPRINTER_H_
+#define V8_AST_PRETTYPRINTER_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
namespace v8 {
namespace internal {
class CallPrinter : public AstVisitor {
public:
- CallPrinter(Isolate* isolate, Zone* zone);
+ explicit CallPrinter(Isolate* isolate, bool is_builtin);
virtual ~CallPrinter();
// The following routine prints the node with position |position| into a
@@ -37,11 +37,12 @@ class CallPrinter : public AstVisitor {
int position_; // position of ast node to print
bool found_;
bool done_;
+ bool is_builtin_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:
- void PrintLiteral(Handle<Object> value, bool quote);
+ void PrintLiteral(Object* value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
void FindStatements(ZoneList<Statement*>* statements);
void FindArguments(ZoneList<Expression*>* arguments);
@@ -52,7 +53,7 @@ class CallPrinter : public AstVisitor {
class PrettyPrinter: public AstVisitor {
public:
- PrettyPrinter(Isolate* isolate, Zone* zone);
+ explicit PrettyPrinter(Isolate* isolate);
virtual ~PrettyPrinter();
// The following routines print a node into a string.
@@ -64,7 +65,7 @@ class PrettyPrinter: public AstVisitor {
void Print(const char* format, ...);
// Print a node to stdout.
- static void PrintOut(Isolate* isolate, Zone* zone, AstNode* node);
+ static void PrintOut(Isolate* isolate, AstNode* node);
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node) override;
@@ -98,7 +99,7 @@ class PrettyPrinter: public AstVisitor {
// Prints the AST structure
class AstPrinter: public PrettyPrinter {
public:
- AstPrinter(Isolate* isolate, Zone* zone);
+ explicit AstPrinter(Isolate* isolate);
virtual ~AstPrinter();
const char* PrintProgram(FunctionLiteral* program);
@@ -133,6 +134,7 @@ class AstPrinter: public PrettyPrinter {
#endif // DEBUG
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PRETTYPRINTER_H_
+#endif // V8_AST_PRETTYPRINTER_H_
diff --git a/chromium/v8/src/scopeinfo.cc b/chromium/v8/src/ast/scopeinfo.cc
index 732908a9e64..668879fe512 100644
--- a/chromium/v8/src/scopeinfo.cc
+++ b/chromium/v8/src/ast/scopeinfo.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/scopeinfo.h"
+#include "src/ast/scopeinfo.h"
#include <stdlib.h>
+#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -49,6 +49,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
receiver_info = NONE;
}
+ bool has_new_target = scope->new_target_var() != nullptr;
+
// Determine use and location of the function variable if it is present.
VariableAllocationInfo function_name_info;
VariableMode function_variable_mode;
@@ -90,6 +92,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
LanguageModeField::encode(scope->language_mode()) |
DeclarationScopeField::encode(scope->is_declaration_scope()) |
ReceiverVariableField::encode(receiver_info) |
+ HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(scope->asm_module()) |
@@ -343,7 +346,6 @@ int ScopeInfo::ContextLength() {
scope_type() == WITH_SCOPE ||
(scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
is_declaration_scope()) ||
- (scope_type() == ARROW_SCOPE && CallsSloppyEval()) ||
(scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
scope_type() == MODULE_SCOPE;
@@ -375,6 +377,9 @@ bool ScopeInfo::HasAllocatedReceiver() {
}
+bool ScopeInfo::HasNewTarget() { return HasNewTargetField::decode(Flags()); }
+
+
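HasNewTargetField follows V8's BitField pattern: one flags word packs several small fields, each exposing static encode/decode helpers. A reduced single-bit sketch; the shift position is illustrative, since the real field layout comes from the BitField template:

    #include <cstdint>

    struct HasNewTargetFieldSketch {
      static const int kShift = 7;  // illustrative position only

      static uint32_t encode(bool value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static bool decode(uint32_t flags) { return (flags >> kShift) & 1u; }
    };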
bool ScopeInfo::HasFunctionName() {
if (length() > 0) {
return NONE != FunctionVariableField::decode(Flags());
diff --git a/chromium/v8/src/scopeinfo.h b/chromium/v8/src/ast/scopeinfo.h
index 70a17cd7d4a..489a672ed8d 100644
--- a/chromium/v8/src/scopeinfo.h
+++ b/chromium/v8/src/ast/scopeinfo.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SCOPEINFO_H_
-#define V8_SCOPEINFO_H_
+#ifndef V8_AST_SCOPEINFO_H_
+#define V8_AST_SCOPEINFO_H_
#include "src/allocation.h"
-#include "src/modules.h"
-#include "src/variables.h"
+#include "src/ast/modules.h"
+#include "src/ast/variables.h"
namespace v8 {
namespace internal {
@@ -169,6 +169,7 @@ class ModuleInfo: public FixedArray {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_SCOPEINFO_H_
+#endif // V8_AST_SCOPEINFO_H_
diff --git a/chromium/v8/src/scopes.cc b/chromium/v8/src/ast/scopes.cc
index a611d7364ce..c2b05b7c040 100644
--- a/chromium/v8/src/scopes.cc
+++ b/chromium/v8/src/ast/scopes.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/scopes.h"
+#include "src/ast/scopes.h"
#include "src/accessors.h"
+#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/scopeinfo.h"
+#include "src/parsing/parser.h" // for ParseInfo
namespace v8 {
namespace internal {
@@ -194,7 +194,6 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
language_mode_ = outer_scope != NULL ? outer_scope->language_mode_ : SLOPPY;
outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
- inner_scope_uses_arguments_ = false;
scope_nonlinear_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
@@ -203,8 +202,6 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
num_stack_slots_ = 0;
num_heap_slots_ = 0;
num_global_slots_ = 0;
- num_modules_ = 0;
- module_var_ = NULL;
arity_ = 0;
has_simple_parameters_ = true;
rest_parameter_ = NULL;
@@ -309,14 +306,10 @@ bool Scope::Analyze(ParseInfo* info) {
}
#ifdef DEBUG
- bool native = info->isolate()->bootstrapper()->IsActive();
- if (!info->shared_info().is_null()) {
- Object* script = info->shared_info()->script();
- native = script->IsScript() &&
- Script::cast(script)->type() == Script::TYPE_NATIVE;
+ if (info->script_is_native() ? FLAG_print_builtin_scopes
+ : FLAG_print_scopes) {
+ scope->Print();
}
-
- if (native ? FLAG_print_builtin_scopes : FLAG_print_scopes) scope->Print();
#endif
info->set_scope(scope);
@@ -325,7 +318,6 @@ bool Scope::Analyze(ParseInfo* info) {
void Scope::Initialize() {
- bool subclass_constructor = IsSubclassConstructor(function_kind_);
DCHECK(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
@@ -338,6 +330,7 @@ void Scope::Initialize() {
// Declare convenience variables and the receiver.
if (is_declaration_scope() && has_this_declaration()) {
+ bool subclass_constructor = IsSubclassConstructor(function_kind_);
Variable* var = variables_.Declare(
this, ast_value_factory_->this_string(),
subclass_constructor ? CONST : VAR, Variable::THIS,
@@ -352,10 +345,8 @@ void Scope::Initialize() {
variables_.Declare(this, ast_value_factory_->arguments_string(), VAR,
Variable::ARGUMENTS, kCreatedInitialized);
- if (subclass_constructor || FLAG_harmony_new_target) {
- variables_.Declare(this, ast_value_factory_->new_target_string(), CONST,
- Variable::NORMAL, kCreatedInitialized);
- }
+ variables_.Declare(this, ast_value_factory_->new_target_string(), CONST,
+ Variable::NORMAL, kCreatedInitialized);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
@@ -377,12 +368,7 @@ Scope* Scope::FinalizeBlockScope() {
}
// Remove this scope from outer scope.
- for (int i = 0; i < outer_scope_->inner_scopes_.length(); i++) {
- if (outer_scope_->inner_scopes_[i] == this) {
- outer_scope_->inner_scopes_.Remove(i);
- break;
- }
- }
+ outer_scope()->RemoveInnerScope(this);
// Reparent inner scopes.
for (int i = 0; i < inner_scopes_.length(); i++) {
@@ -394,15 +380,35 @@ Scope* Scope::FinalizeBlockScope() {
outer_scope()->unresolved_.Add(unresolved_[i], zone());
}
- // Propagate usage flags to outer scope.
- if (uses_arguments()) outer_scope_->RecordArgumentsUsage();
- if (uses_super_property()) outer_scope_->RecordSuperPropertyUsage();
- if (scope_calls_eval_) outer_scope_->RecordEvalCall();
+ PropagateUsageFlagsToScope(outer_scope_);
return NULL;
}
+void Scope::ReplaceOuterScope(Scope* outer) {
+ DCHECK_NOT_NULL(outer);
+ DCHECK_NOT_NULL(outer_scope_);
+ DCHECK(!already_resolved());
+ DCHECK(!outer->already_resolved());
+ DCHECK(!outer_scope_->already_resolved());
+ outer_scope_->RemoveInnerScope(this);
+ outer->AddInnerScope(this);
+ outer_scope_ = outer;
+}
+
+
+void Scope::PropagateUsageFlagsToScope(Scope* other) {
+ DCHECK_NOT_NULL(other);
+ DCHECK(!already_resolved());
+ DCHECK(!other->already_resolved());
+ if (uses_arguments()) other->RecordArgumentsUsage();
+ if (uses_super_property()) other->RecordSuperPropertyUsage();
+ if (calls_eval()) other->RecordEvalCall();
+ if (scope_contains_with_) other->RecordWithStatement();
+}
+
+
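Taken together, PropagateUsageFlagsToScope and ReplaceOuterScope let the parser relink a scope under a new parent without losing eagerly recorded usage facts. An illustrative splice; the helper name and the ordering of the two calls are assumptions, not a V8 call site:

    // Hypothetical helper: move `scope` under `new_outer`, carrying its
    // arguments/super/eval/with flags so the new parent stays conservative.
    void SpliceUnderNewOuter(Scope* scope, Scope* new_outer) {
      scope->PropagateUsageFlagsToScope(new_outer);
      scope->ReplaceOuterScope(new_outer);
    }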
Variable* Scope::LookupLocal(const AstRawString* name) {
Variable* result = variables_.Lookup(name);
if (result != NULL || scope_info_.is_null()) {
@@ -548,15 +554,16 @@ Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) {
}
-void Scope::RemoveUnresolved(VariableProxy* var) {
+bool Scope::RemoveUnresolved(VariableProxy* var) {
// Most likely (always?) any variable we want to remove
// was just added before, so we search backwards.
for (int i = unresolved_.length(); i-- > 0;) {
if (unresolved_[i] == var) {
unresolved_.Remove(i);
- return;
+ return true;
}
}
+ return false;
}
@@ -568,11 +575,24 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
TEMPORARY,
Variable::NORMAL,
kCreatedInitialized);
- scope->temps_.Add(var, zone());
+ scope->AddTemporary(var);
return var;
}
+bool Scope::RemoveTemporary(Variable* var) {
+ // Most likely (always?) any temporary variable we want to remove
+ // was just added before, so we search backwards.
+ for (int i = temps_.length(); i-- > 0;) {
+ if (temps_[i] == var) {
+ temps_.Remove(i);
+ return true;
+ }
+ }
+ return false;
+}
+
+
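RemoveUnresolved (which now reports success via its bool return) and the new RemoveTemporary share an idiom: scan backwards, since the element to remove was almost always appended last. A generic sketch of the pattern, assuming a ZoneList-like container with length(), at(int), and Remove(int):

    // Sketch only: last-occurrence removal via reverse scan.
    template <typename List, typename T>
    bool RemoveMostRecent(List* list, T value) {
      for (int i = list->length(); i-- > 0;) {
        if (list->at(i) == value) {
          list->Remove(i);
          return true;  // found and removed
        }
      }
      return false;  // absent; the caller decides whether that is an error
    }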
void Scope::AddDeclaration(Declaration* declaration) {
decls_.Add(declaration, zone());
}
@@ -597,30 +617,30 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
Declaration* decl = decls_[i];
- if (decl->mode() != VAR && !is_block_scope()) continue;
+ // We don't create a separate scope to hold the function name of a function
+ // expression, so we have to make sure not to consider it when checking for
+ // conflicts (since it's conceptually "outside" the declaration scope).
+ if (is_function_scope() && decl == function()) continue;
+ if (IsLexicalVariableMode(decl->mode()) && !is_block_scope()) continue;
const AstRawString* name = decl->proxy()->raw_name();
// Iterate through all scopes until and including the declaration scope.
- // If the declaration scope is a (declaration) block scope, also continue
- // (that is to handle the special inner scope of functions with
- // destructuring parameters, which may not shadow any variables from
- // the surrounding function scope).
Scope* previous = NULL;
Scope* current = decl->scope();
// Lexical vs lexical conflicts within the same scope have already been
// captured in Parser::Declare. The only conflicts we still need to check
// are lexical vs VAR, or any declarations within a declaration block scope
// vs lexical declarations in its surrounding (function) scope.
- if (decl->mode() != VAR) current = current->outer_scope_;
+ if (IsLexicalVariableMode(decl->mode())) current = current->outer_scope_;
do {
// There is a conflict if there exists a non-VAR binding.
Variable* other_var = current->variables_.Lookup(name);
- if (other_var != NULL && other_var->mode() != VAR) {
+ if (other_var != NULL && IsLexicalVariableMode(other_var->mode())) {
return decl;
}
previous = current;
current = current->outer_scope_;
- } while (!previous->is_declaration_scope() || previous->is_block_scope());
+ } while (!previous->is_declaration_scope());
}
return NULL;
}
@@ -705,16 +725,10 @@ bool Scope::AllocateVariables(ParseInfo* info, AstNodeFactory* factory) {
}
PropagateScopeInfo(outer_scope_calls_sloppy_eval);
- // 2) Allocate module instances.
- if (FLAG_harmony_modules && is_script_scope()) {
- DCHECK(num_modules_ == 0);
- AllocateModules();
- }
-
- // 3) Resolve variables.
+ // 2) Resolve variables.
if (!ResolveVariablesRecursively(info, factory)) return false;
- // 4) Allocate variables.
+ // 3) Allocate variables.
AllocateVariablesRecursively(info->isolate());
return true;
@@ -769,15 +783,26 @@ int Scope::ContextChainLength(Scope* scope) {
int n = 0;
for (Scope* s = this; s != scope; s = s->outer_scope_) {
DCHECK(s != NULL); // scope must be in the scope chain
- if (s->is_with_scope() || s->num_heap_slots() > 0) n++;
- // Catch and module scopes always have heap slots.
- DCHECK(!s->is_catch_scope() || s->num_heap_slots() > 0);
- DCHECK(!s->is_module_scope() || s->num_heap_slots() > 0);
+ if (s->NeedsContext()) n++;
}
return n;
}
+int Scope::MaxNestedContextChainLength() {
+ int max_context_chain_length = 0;
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* scope = inner_scopes_[i];
+ max_context_chain_length = std::max(scope->MaxNestedContextChainLength(),
+ max_context_chain_length);
+ }
+ if (NeedsContext()) {
+ max_context_chain_length += 1;
+ }
+ return max_context_chain_length;
+}
+
+
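MaxNestedContextChainLength computes, bottom-up, the deepest run of context-allocating scopes at or below this one. The same recurrence in isolation over a plain tree, for reference; Node and needs_context are stand-ins for Scope and NeedsContext():

    #include <algorithm>
    #include <vector>

    struct Node {
      bool needs_context;
      std::vector<Node*> children;
    };

    // depth(n) = max over children's depths, plus one if n needs a context.
    int MaxContextDepth(const Node* n) {
      int max_child = 0;
      for (const Node* c : n->children)
        max_child = std::max(max_child, MaxContextDepth(c));
      return max_child + (n->needs_context ? 1 : 0);
    }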
Scope* Scope::DeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
@@ -832,6 +857,24 @@ void Scope::GetNestedScopeChain(Isolate* isolate,
}
+void Scope::CollectNonLocals(HashMap* non_locals) {
+ // Collect non-local variables referenced in the scope.
+ // TODO(yangguo): store non-local variables explicitly if we can no longer
+ // rely on unresolved_ to find them.
+ for (int i = 0; i < unresolved_.length(); i++) {
+ VariableProxy* proxy = unresolved_[i];
+ if (proxy->is_resolved() && proxy->var()->IsStackAllocated()) continue;
+ Handle<String> name = proxy->name();
+ void* key = reinterpret_cast<void*>(name.location());
+ HashMap::Entry* entry = non_locals->LookupOrInsert(key, name->Hash());
+ entry->value = key;
+ }
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ inner_scopes_[i]->CollectNonLocals(non_locals);
+ }
+}
+
+
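CollectNonLocals uses the HashMap as a set, keying each entry on the string handle's location and recursing into inner scopes. The same shape with standard containers, as an analogue only (it drops the handle/hash details and the resolved-and-stack-allocated skip):

    #include <string>
    #include <unordered_set>
    #include <vector>

    struct ScopeSketch {
      std::vector<std::string> unresolved_names;
      std::vector<ScopeSketch*> inner_scopes;
    };

    void CollectNonLocals(const ScopeSketch* s,
                          std::unordered_set<std::string>* out) {
      for (const std::string& name : s->unresolved_names) out->insert(name);
      for (const ScopeSketch* inner : s->inner_scopes)
        CollectNonLocals(inner, out);
    }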
void Scope::ReportMessage(int start_position, int end_position,
MessageTemplate::Template message,
const AstRawString* arg) {
@@ -848,16 +891,18 @@ void Scope::ReportMessage(int start_position, int end_position,
#ifdef DEBUG
-static const char* Header(ScopeType scope_type, bool is_declaration_scope) {
+static const char* Header(ScopeType scope_type, FunctionKind function_kind,
+ bool is_declaration_scope) {
switch (scope_type) {
case EVAL_SCOPE: return "eval";
- case FUNCTION_SCOPE: return "function";
+ // TODO(adamk): Should we print concise method scopes specially?
+ case FUNCTION_SCOPE:
+ return IsArrowFunction(function_kind) ? "arrow" : "function";
case MODULE_SCOPE: return "module";
case SCRIPT_SCOPE: return "global";
case CATCH_SCOPE: return "catch";
case BLOCK_SCOPE: return is_declaration_scope ? "varblock" : "block";
case WITH_SCOPE: return "with";
- case ARROW_SCOPE: return "arrow";
}
UNREACHABLE();
return NULL;
@@ -939,8 +984,8 @@ void Scope::Print(int n) {
int n1 = n0 + 2; // indentation
// Print header.
- Indent(n0, Header(scope_type_, is_declaration_scope()));
- if (!scope_name_->IsEmpty()) {
+ Indent(n0, Header(scope_type_, function_kind_, is_declaration_scope()));
+ if (scope_name_ != nullptr && !scope_name_->IsEmpty()) {
PrintF(" ");
PrintName(scope_name_);
}
@@ -983,9 +1028,6 @@ void Scope::Print(int n) {
if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
if (scope_uses_super_property_)
Indent(n1, "// scope uses 'super' property\n");
- if (inner_scope_uses_arguments_) {
- Indent(n1, "// inner scope uses 'arguments'\n");
- }
if (outer_scope_calls_sloppy_eval_) {
Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
@@ -1111,7 +1153,8 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned();
*binding_kind = DYNAMIC_LOOKUP;
return NULL;
- } else if (calls_sloppy_eval() && name_can_be_shadowed) {
+ } else if (calls_sloppy_eval() && !is_script_scope() &&
+ name_can_be_shadowed) {
// A variable binding may have been found in an outer scope, but the current
// scope makes a sloppy 'eval' call, so the found variable may not be
// the correct one (the 'eval' may introduce a binding with the same name).
@@ -1137,6 +1180,28 @@ bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
// Otherwise, try to resolve the variable.
BindingKind binding_kind;
Variable* var = LookupRecursive(proxy, &binding_kind, factory);
+
+#ifdef DEBUG
+ if (info->script_is_native()) {
+ // To avoid polluting the global object in native scripts
+ // - Variables must not be allocated to the global scope.
+ CHECK_NOT_NULL(outer_scope());
+ // - Variables must be bound locally or unallocated.
+ if (BOUND != binding_kind) {
+ // The following variable name may be minified. If so, disable
+ // minification in js2c.py for better output.
+ Handle<String> name = proxy->raw_name()->string();
+ V8_Fatal(__FILE__, __LINE__, "Unbound variable: '%s' in native script.",
+ name->ToCString().get());
+ }
+ VariableLocation location = var->location();
+ CHECK(location == VariableLocation::LOCAL ||
+ location == VariableLocation::CONTEXT ||
+ location == VariableLocation::PARAMETER ||
+ location == VariableLocation::UNALLOCATED);
+ }
+#endif
+
switch (binding_kind) {
case BOUND:
// We found a variable binding.
@@ -1332,14 +1397,6 @@ void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
if (inner->scope_calls_eval_ || inner->inner_scope_calls_eval_) {
inner_scope_calls_eval_ = true;
}
- // If the inner scope is an arrow function, propagate the flags tracking
- // usage of arguments/super/this, but do not propagate them out from normal
- // functions.
- if (!inner->is_function_scope() || inner->is_arrow_scope()) {
- if (inner->scope_uses_arguments_ || inner->inner_scope_uses_arguments_) {
- inner_scope_uses_arguments_ = true;
- }
- }
if (inner->force_eager_compilation_) {
force_eager_compilation_ = true;
}
@@ -1620,23 +1677,6 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
}
-void Scope::AllocateModules() {
- DCHECK(is_script_scope());
- DCHECK(!already_resolved());
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* scope = inner_scopes_.at(i);
- if (scope->is_module_scope()) {
- DCHECK(!scope->already_resolved());
- DCHECK(scope->module_descriptor_->IsFrozen());
- DCHECK_NULL(scope->module_var_);
- scope->module_var_ =
- NewTemporary(ast_value_factory_->dot_module_string());
- ++num_modules_;
- }
- }
-}
-
-
int Scope::StackLocalCount() const {
return num_stack_slots() -
(function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
diff --git a/chromium/v8/src/scopes.h b/chromium/v8/src/ast/scopes.h
index 61bf6338f7c..6c261f63c37 100644
--- a/chromium/v8/src/scopes.h
+++ b/chromium/v8/src/ast/scopes.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SCOPES_H_
-#define V8_SCOPES_H_
+#ifndef V8_AST_SCOPES_H_
+#define V8_AST_SCOPES_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/hashmap.h"
#include "src/pending-compilation-error-handler.h"
#include "src/zone.h"
@@ -112,6 +113,15 @@ class Scope: public ZoneObject {
// tree and its children are reparented.
Scope* FinalizeBlockScope();
+ // Inserts outer_scope into this scope's scope chain (and removes this
+ // from the current outer_scope_'s inner_scopes_).
+ // Assumes outer_scope_ is non-null.
+ void ReplaceOuterScope(Scope* outer_scope);
+
+ // Propagates any eagerly-gathered scope usage flags (such as calls_eval())
+ // to the passed-in scope.
+ void PropagateUsageFlagsToScope(Scope* other);
+
Zone* zone() const { return zone_; }
// ---------------------------------------------------------------------------
@@ -178,13 +188,19 @@ class Scope: public ZoneObject {
return proxy;
}
+ void AddUnresolved(VariableProxy* proxy) {
+ DCHECK(!already_resolved());
+ DCHECK(!proxy->is_resolved());
+ unresolved_.Add(proxy, zone_);
+ }
+
  // Remove an unresolved variable. During parsing, an unresolved variable
// may have been added optimistically, but then only the variable name
// was used (typically for labels). If the variable was not declared, the
// addition introduced a new unresolved variable which may end up being
// allocated globally as a "ghost" variable. RemoveUnresolved removes
// such a variable again if it was added; otherwise this is a no-op.
- void RemoveUnresolved(VariableProxy* var);
+ bool RemoveUnresolved(VariableProxy* var);
// Creates a new temporary variable in this scope's TemporaryScope. The
// name is only used for printing and cannot be used to find the variable.
@@ -193,6 +209,15 @@ class Scope: public ZoneObject {
// names.
Variable* NewTemporary(const AstRawString* name);
+ // Remove a temporary variable. This is for adjusting the scope of
+ // temporaries used when desugaring parameter initializers.
+ bool RemoveTemporary(Variable* var);
+
+ // Adds a temporary variable in this scope's TemporaryScope. This is for
+ // adjusting the scope of temporaries used when desugaring parameter
+ // initializers.
+ void AddTemporary(Variable* var) { temps_.Add(var, zone()); }
+
// Adds the specific declaration node to the list of declarations in
// this scope. The declarations are processed as part of entering
// the scope; see codegen.cc:ProcessDeclarations.
@@ -226,7 +251,7 @@ class Scope: public ZoneObject {
void RecordWithStatement() { scope_contains_with_ = true; }
// Inform the scope that the corresponding code contains an eval call.
- void RecordEvalCall() { if (!is_script_scope()) scope_calls_eval_ = true; }
+ void RecordEvalCall() { scope_calls_eval_ = true; }
// Inform the scope that the corresponding code uses "arguments".
void RecordArgumentsUsage() { scope_uses_arguments_ = true; }
@@ -302,15 +327,15 @@ class Scope: public ZoneObject {
// Specific scope types.
bool is_eval_scope() const { return scope_type_ == EVAL_SCOPE; }
- bool is_function_scope() const {
- return scope_type_ == FUNCTION_SCOPE || scope_type_ == ARROW_SCOPE;
- }
+ bool is_function_scope() const { return scope_type_ == FUNCTION_SCOPE; }
bool is_module_scope() const { return scope_type_ == MODULE_SCOPE; }
bool is_script_scope() const { return scope_type_ == SCRIPT_SCOPE; }
bool is_catch_scope() const { return scope_type_ == CATCH_SCOPE; }
bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
- bool is_arrow_scope() const { return scope_type_ == ARROW_SCOPE; }
+ bool is_arrow_scope() const {
+ return is_function_scope() && IsArrowFunction(function_kind_);
+ }
bool is_declaration_scope() const { return is_declaration_scope_; }
void set_is_declaration_scope() { is_declaration_scope_ = true; }
@@ -328,26 +353,28 @@ class Scope: public ZoneObject {
// Is this scope inside a with statement.
bool inside_with() const { return scope_inside_with_; }
- // Does this scope contain a with statement.
- bool contains_with() const { return scope_contains_with_; }
// Does this scope access "arguments".
bool uses_arguments() const { return scope_uses_arguments_; }
- // Does any inner scope access "arguments".
- bool inner_uses_arguments() const { return inner_scope_uses_arguments_; }
// Does this scope access "super" property (super.foo).
bool uses_super_property() const { return scope_uses_super_property_; }
// Does this scope have the potential to execute declarations non-linearly?
bool is_nonlinear() const { return scope_nonlinear_; }
// Whether this needs to be represented by a runtime context.
- bool NeedsContext() const { return num_heap_slots() > 0; }
+ bool NeedsContext() const {
+ // Catch and module scopes always have heap slots.
+ DCHECK(!is_catch_scope() || num_heap_slots() > 0);
+ DCHECK(!is_module_scope() || num_heap_slots() > 0);
+ return is_with_scope() || num_heap_slots() > 0;
+ }
bool NeedsHomeObject() const {
return scope_uses_super_property_ ||
- (scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
- IsAccessorFunction(function_kind()) ||
- IsClassConstructor(function_kind())));
+ ((scope_calls_eval_ || inner_scope_calls_eval_) &&
+ (IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()) ||
+ IsClassConstructor(function_kind())));
}
const Scope* NearestOuterEvalScope() const {
@@ -374,8 +401,6 @@ class Scope: public ZoneObject {
return receiver_;
}
- Variable* LookupThis() { return Lookup(ast_value_factory_->this_string()); }
-
// TODO(wingo): Add a GLOBAL_SCOPE scope type which will lexically allocate
// "this" (and no other variable) on the native context. Script scopes then
// will not have a "this" declaration.
@@ -412,9 +437,7 @@ class Scope: public ZoneObject {
return rest_parameter_;
}
- bool has_rest_parameter() const {
- return rest_index_ >= 0;
- }
+ bool has_rest_parameter() const { return rest_index_ >= 0; }
bool has_simple_parameters() const {
return has_simple_parameters_;
@@ -495,12 +518,6 @@ class Scope: public ZoneObject {
int ContextLocalCount() const;
int ContextGlobalCount() const;
- // For script scopes, the number of module literals (including nested ones).
- int num_modules() const { return num_modules_; }
-
- // For module scopes, the host scope's internal variable binding this module.
- Variable* module_var() const { return module_var_; }
-
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -519,6 +536,10 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
+ // The maximum number of nested contexts required for this scope and any inner
+ // scopes.
+ int MaxNestedContextChainLength();
+
// Find the first function, script, eval or (declaration) block scope. This is
// the scope where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
@@ -543,6 +564,8 @@ class Scope: public ZoneObject {
void GetNestedScopeChain(Isolate* isolate, List<Handle<ScopeInfo> >* chain,
int statement_position);
+ void CollectNonLocals(HashMap* non_locals);
+
// ---------------------------------------------------------------------------
// Strict mode support.
bool IsDeclared(const AstRawString* name) {
@@ -579,9 +602,7 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Implementation.
- protected:
- friend class ParserFactory;
-
+ private:
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
@@ -657,7 +678,6 @@ class Scope: public ZoneObject {
// Computed via PropagateScopeInfo.
bool outer_scope_calls_sloppy_eval_;
bool inner_scope_calls_eval_;
- bool inner_scope_uses_arguments_;
bool force_eager_compilation_;
bool force_context_allocation_;
@@ -676,12 +696,6 @@ class Scope: public ZoneObject {
int num_heap_slots_;
int num_global_slots_;
- // The number of modules (including nested ones).
- int num_modules_;
-
- // For module scopes, the host scope's temporary variable binding this module.
- Variable* module_var_;
-
// Info about the parameter list of a function.
int arity_;
bool has_simple_parameters_;
@@ -778,7 +792,6 @@ class Scope: public ZoneObject {
void AllocateVariablesRecursively(Isolate* isolate);
void AllocateParameter(Variable* var, int index);
void AllocateReceiver();
- void AllocateModules();
// Resolve and fill in the allocation information for all variables
// in this scope. Must be called *after* all scopes have been
@@ -791,7 +804,6 @@ class Scope: public ZoneObject {
MUST_USE_RESULT
bool AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
- private:
// Construct a scope based on the scope info.
Scope(Zone* zone, Scope* inner_scope, ScopeType type,
Handle<ScopeInfo> scope_info, AstValueFactory* value_factory);
@@ -807,6 +819,16 @@ class Scope: public ZoneObject {
}
}
+ void RemoveInnerScope(Scope* inner_scope) {
+ DCHECK_NOT_NULL(inner_scope);
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ if (inner_scopes_[i] == inner_scope) {
+ inner_scopes_.Remove(i);
+ break;
+ }
+ }
+ }
+
void SetDefaults(ScopeType type, Scope* outer_scope,
Handle<ScopeInfo> scope_info,
FunctionKind function_kind = kNormalFunction);
@@ -821,6 +843,7 @@ class Scope: public ZoneObject {
int class_declaration_group_start_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_SCOPES_H_
+#endif // V8_AST_SCOPES_H_
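The new MaxNestedContextChainLength() declared above has to walk the whole scope tree and count context-allocating scopes along the deepest path. A minimal sketch of that recursion, assuming a simplified Scope with only an inner_scopes_ list and a NeedsContext() predicate (the member names mirror the diff; everything else here is illustrative, not the V8 implementation):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Scope {
      std::vector<Scope*> inner_scopes_;
      bool needs_context = false;

      bool NeedsContext() const { return needs_context; }

      // Deepest chain of context-allocating scopes rooted at this scope.
      int MaxNestedContextChainLength() const {
        int max_inner = 0;
        for (const Scope* inner : inner_scopes_) {
          max_inner = std::max(max_inner, inner->MaxNestedContextChainLength());
        }
        return max_inner + (NeedsContext() ? 1 : 0);
      }
    };

    int main() {
      Scope leaf;  leaf.needs_context = true;
      Scope root;  root.needs_context = true;
      root.inner_scopes_.push_back(&leaf);
      std::printf("%d\n", root.MaxNestedContextChainLength());  // prints 2
    }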
diff --git a/chromium/v8/src/variables.cc b/chromium/v8/src/ast/variables.cc
index f4f7a7a9174..8e007823863 100644
--- a/chromium/v8/src/variables.cc
+++ b/chromium/v8/src/ast/variables.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/variables.h"
+#include "src/ast/variables.h"
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -44,6 +44,7 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
strong_mode_reference_start_position_(RelocInfo::kNoPosition),
strong_mode_reference_end_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
+ is_from_eval_(false),
force_context_allocation_(false),
is_used_(false),
initialization_flag_(initialization_flag),
diff --git a/chromium/v8/src/variables.h b/chromium/v8/src/ast/variables.h
index dcd2e6af6e0..ca5d1cdd403 100644
--- a/chromium/v8/src/variables.h
+++ b/chromium/v8/src/ast/variables.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VARIABLES_H_
-#define V8_VARIABLES_H_
+#ifndef V8_AST_VARIABLES_H_
+#define V8_AST_VARIABLES_H_
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
#include "src/zone.h"
namespace v8 {
@@ -37,6 +37,10 @@ class Variable: public ZoneObject {
// scope is only used to follow the context chain length.
Scope* scope() const { return scope_; }
+ // This is for adjusting the scope of temporaries used when desugaring
+ // parameter initializers.
+ void set_scope(Scope* scope) { scope_ = scope; }
+
Handle<String> name() const { return name_->string(); }
const AstRawString* raw_name() const { return name_; }
VariableMode mode() const { return mode_; }
@@ -124,6 +128,8 @@ class Variable: public ZoneObject {
index_ = index;
}
+ void SetFromEval() { is_from_eval_ = true; }
+
static int CompareIndex(Variable* const* v, Variable* const* w);
void RecordStrongModeReference(int start_position, int end_position) {
@@ -144,6 +150,16 @@ class Variable: public ZoneObject {
int strong_mode_reference_end_position() const {
return strong_mode_reference_end_position_;
}
+ PropertyAttributes DeclarationPropertyAttributes() const {
+ int property_attributes = NONE;
+ if (IsImmutableVariableMode(mode_)) {
+ property_attributes |= READ_ONLY;
+ }
+ if (is_from_eval_) {
+ property_attributes |= EVAL_DECLARED;
+ }
+ return static_cast<PropertyAttributes>(property_attributes);
+ }
private:
Scope* scope_;
@@ -165,6 +181,9 @@ class Variable: public ZoneObject {
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
+ // True if this variable is introduced by a sloppy eval
+ bool is_from_eval_;
+
// Usage info.
bool force_context_allocation_; // set by variable resolver
bool is_used_;
@@ -193,6 +212,7 @@ class ClassVariable : public Variable {
// checks for functions too.
int declaration_group_start_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_VARIABLES_H_
+#endif // V8_AST_VARIABLES_H_
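The new DeclarationPropertyAttributes() above folds independent facts about a variable into a single PropertyAttributes value by OR-ing flag bits. A sketch of that pattern with illustrative flag values (not V8's actual PropertyAttributes encoding):

    #include <cstdio>

    // Illustrative bit values only.
    enum PropertyAttributes { NONE = 0, READ_ONLY = 1 << 0, EVAL_DECLARED = 1 << 1 };

    PropertyAttributes DeclarationAttributes(bool is_immutable, bool is_from_eval) {
      int bits = NONE;
      if (is_immutable) bits |= READ_ONLY;    // immutable modes become read-only
      if (is_from_eval) bits |= EVAL_DECLARED;  // introduced by sloppy eval
      return static_cast<PropertyAttributes>(bits);
    }

    int main() {
      std::printf("%d\n", DeclarationAttributes(true, true));  // prints 3
    }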
diff --git a/chromium/v8/src/atomic-utils.h b/chromium/v8/src/atomic-utils.h
index 2aa78f8b5e2..34e1cb0269a 100644
--- a/chromium/v8/src/atomic-utils.h
+++ b/chromium/v8/src/atomic-utils.h
@@ -19,9 +19,10 @@ class AtomicNumber {
AtomicNumber() : value_(0) {}
explicit AtomicNumber(T initial) : value_(initial) {}
- V8_INLINE void Increment(T increment) {
- base::Barrier_AtomicIncrement(&value_,
- static_cast<base::AtomicWord>(increment));
+ // Returns the newly set value.
+ V8_INLINE T Increment(T increment) {
+ return static_cast<T>(base::Barrier_AtomicIncrement(
+ &value_, static_cast<base::AtomicWord>(increment)));
}
V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
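The patched Increment() now reports the freshly set value, fetch-and-add style, instead of returning void. A sketch of the same contract built on std::atomic rather than V8's base::Barrier_AtomicIncrement:

    #include <atomic>
    #include <cstdio>

    template <typename T>
    class AtomicNumber {
     public:
      explicit AtomicNumber(T initial) : value_(initial) {}
      // Returns the newly set value, matching the patched V8 signature.
      // fetch_add returns the old value, so add the increment back on.
      T Increment(T increment) { return value_.fetch_add(increment) + increment; }
      T Value() const { return value_.load(std::memory_order_acquire); }
     private:
      std::atomic<T> value_;
    };

    int main() {
      AtomicNumber<int> n(40);
      std::printf("%d\n", n.Increment(2));  // prints 42
    }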
diff --git a/chromium/v8/src/background-parsing-task.h b/chromium/v8/src/background-parsing-task.h
index e99916169cf..0f290fb7f05 100644
--- a/chromium/v8/src/background-parsing-task.h
+++ b/chromium/v8/src/background-parsing-task.h
@@ -9,7 +9,7 @@
#include "src/base/platform/semaphore.h"
#include "src/base/smart-pointers.h"
#include "src/compiler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -54,7 +54,7 @@ class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
StreamedSource* source_; // Not owned.
int stack_size_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BACKGROUND_PARSING_TASK_H_
diff --git a/chromium/v8/src/bailout-reason.h b/chromium/v8/src/bailout-reason.h
index b63c5fbfba9..83898d12bf7 100644
--- a/chromium/v8/src/bailout-reason.h
+++ b/chromium/v8/src/bailout-reason.h
@@ -29,7 +29,6 @@ namespace internal {
"Assignment to parameter, function uses arguments object") \
V(kAssignmentToParameterInArgumentsObject, \
"Assignment to parameter in arguments object") \
- V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
V(kBadValueContextForArgumentsObjectValue, \
"Bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
@@ -58,6 +57,7 @@ namespace internal {
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
+ V(kDoExpression, "Do expression encountered") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
@@ -105,14 +105,6 @@ namespace internal {
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
- V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
- "InstanceofStub unexpected call site cache (check)") \
- V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
- "InstanceofStub unexpected call site cache (cmp 1)") \
- V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
- "InstanceofStub unexpected call site cache (cmp 2)") \
- V(kInstanceofStubUnexpectedCallSiteCacheMov, \
- "InstanceofStub unexpected call site cache (mov)") \
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
@@ -154,11 +146,14 @@ namespace internal {
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmiAndNotABoundFunction, \
+ "Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotADate, "Operand is not a date") \
+ V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
@@ -182,8 +177,10 @@ namespace internal {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
- V(kScriptContext, "Allocation of script context") \
+ V(kSloppyFunctionExpectsJSReceiverReceiver, \
+ "Sloppy function expects JSReceiver as receiver.") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kSpread, "Spread in array literal") \
@@ -192,20 +189,6 @@ namespace internal {
V(kSuperReference, "Super reference") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
- V(kTheInstructionShouldBeALis, "The instruction should be a lis") \
- V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
- V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
- V(kTheInstructionShouldBeAnOris, "The instruction should be an oris") \
- V(kTheInstructionShouldBeALi, "The instruction should be a li") \
- V(kTheInstructionShouldBeASldi, "The instruction should be a sldi") \
- V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
- "The instruction to patch should be a ldr literal") \
- V(kTheInstructionToPatchShouldBeALis, \
- "The instruction to patch should be a lis") \
- V(kTheInstructionToPatchShouldBeALui, \
- "The instruction to patch should be a lui") \
- V(kTheInstructionToPatchShouldBeAnOri, \
- "The instruction to patch should be an ori") \
V(kTheSourceAndDestinationAreTheSame, \
"The source and destination are the same") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
@@ -220,7 +203,6 @@ namespace internal {
"ToOperand Unsupported double immediate") \
V(kTryCatchStatement, "TryCatchStatement") \
V(kTryFinallyStatement, "TryFinallyStatement") \
- V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
@@ -254,15 +236,11 @@ namespace internal {
"Unexpected number of pre-allocated property fields") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
- V(kUnexpectedStringFunction, "Unexpected String function") \
+ V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedStringType, "Unexpected string type") \
- V(kUnexpectedStringWrapperInstanceSize, \
- "Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
V(kUnexpectedValue, "Unexpected value") \
- V(kUnexpectedUnusedPropertiesOfStringWrapper, \
- "Unexpected unused properties of string wrapper") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
@@ -275,6 +253,8 @@ namespace internal {
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
+ V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
+ V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWeShouldNotHaveAnEmptyLexicalContext, \
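bailout-reason.h is an X-macro list: each V(kReason, "message") entry is expanded twice, once into an enum constant and once into a message table, so the two can never drift apart. A reduced sketch of the expansion using three reasons that appear in this diff:

    #include <cstdio>

    #define BAILOUT_REASON_LIST(V)                                   \
      V(kDoExpression, "Do expression encountered")                  \
      V(kRestParameter, "Rest parameters")                           \
      V(kUnsupportedSwitchStatement, "Unsupported switch statement")

    enum BailoutReason {
    #define DECLARE_ENUM(name, message) name,
      BAILOUT_REASON_LIST(DECLARE_ENUM)
    #undef DECLARE_ENUM
      kLastErrorMessage
    };

    const char* GetBailoutReason(BailoutReason reason) {
      // Same list, expanded a second time into the message table.
      static const char* messages[] = {
    #define DECLARE_MESSAGE(name, message) message,
          BAILOUT_REASON_LIST(DECLARE_MESSAGE)
    #undef DECLARE_MESSAGE
      };
      return messages[reason];
    }

    int main() { std::printf("%s\n", GetBailoutReason(kRestParameter)); }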
diff --git a/chromium/v8/src/base.isolate b/chromium/v8/src/base.isolate
index dceb4133399..8422ec7b60f 100644
--- a/chromium/v8/src/base.isolate
+++ b/chromium/v8/src/base.isolate
@@ -2,8 +2,19 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'includes': [
+ '../third_party/icu/icu.isolate',
+ '../build/config/win/msvs_dependencies.isolate',
+ ],
'conditions': [
- ['v8_use_external_startup_data==1', {
+ ['use_custom_libcxx==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/lib/libc++.so',
+ ],
+ },
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
'variables': {
'files': [
'<(PRODUCT_DIR)/natives_blob.bin',
@@ -11,5 +22,54 @@
],
},
}],
+ ['OS=="linux" and component=="shared_library" and target_arch=="ia32"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/lib/',
+ ],
+ },
+ }],
+ ['OS=="win" and component=="shared_library"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/icui18n.dll',
+ '<(PRODUCT_DIR)/icuuc.dll',
+ '<(PRODUCT_DIR)/v8.dll',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
+ ],
+ },
+ }],
+ ['tsan==1', {
+ 'variables': {
+ 'files': [
+ '../tools/sanitizers/tsan_suppressions.txt',
+ ],
+ },
+ }],
+ ['OS=="linux" and (asan==1 or cfi_vptr==1 or msan==1 or tsan==1)', {
+ 'variables': {
+ 'files': [
+ # For llvm-symbolizer.
+ '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
+ ],
+ },
+ }],
+ ['asan==1 or cfi_vptr==1 or msan==1 or tsan==1', {
+ 'variables': {
+ 'files': [
+ '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
+ ['asan==0 or cfi_vptr==0 or msan==0 or tsan==0', {
+ 'variables': {},
+ }],
],
-}
\ No newline at end of file
+}
diff --git a/chromium/v8/src/base/atomicops.h b/chromium/v8/src/base/atomicops.h
index e76b3d02d24..3e628fead91 100644
--- a/chromium/v8/src/base/atomicops.h
+++ b/chromium/v8/src/base/atomicops.h
@@ -133,7 +133,8 @@ Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
diff --git a/chromium/v8/src/base/atomicops_internals_arm64_gcc.h b/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
index b01783e6a7e..f24050a3e6e 100644
--- a/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
@@ -311,6 +311,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return *ptr;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_arm_gcc.h b/chromium/v8/src/base/atomicops_internals_arm_gcc.h
index e399657e13c..6c8b27ea24e 100644
--- a/chromium/v8/src/base/atomicops_internals_arm_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_arm_gcc.h
@@ -59,7 +59,8 @@ inline void MemoryBarrier() {
// variant of the target architecture is being used. This tests against
// any known ARMv6 or ARMv7 variant, where it is possible to directly
// use ldrex/strex instructions to implement fast atomic operations.
-#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+#if defined(__ARM_ARCH_8A__) || \
+ defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
@@ -296,6 +297,7 @@ inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_atomicword_compat.h b/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
index 0530ced2a44..4f758a72990 100644
--- a/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
+++ b/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -92,7 +92,8 @@ inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
reinterpret_cast<volatile const Atomic32*>(ptr));
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // !defined(V8_HOST_ARCH_64_BIT)
diff --git a/chromium/v8/src/base/atomicops_internals_mac.h b/chromium/v8/src/base/atomicops_internals_mac.h
index 84f9dbcd758..c112506238a 100644
--- a/chromium/v8/src/base/atomicops_internals_mac.h
+++ b/chromium/v8/src/base/atomicops_internals_mac.h
@@ -210,6 +210,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
#endif // defined(__LP64__)
#undef ATOMICOPS_COMPILER_BARRIER
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_mips64_gcc.h b/chromium/v8/src/base/atomicops_internals_mips64_gcc.h
index ccb973c0391..85b4e462b9d 100644
--- a/chromium/v8/src/base/atomicops_internals_mips64_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_mips64_gcc.h
@@ -302,6 +302,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return *ptr;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_mips_gcc.h b/chromium/v8/src/base/atomicops_internals_mips_gcc.h
index 442fdd0f96a..8d65db21277 100644
--- a/chromium/v8/src/base/atomicops_internals_mips_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_mips_gcc.h
@@ -155,6 +155,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_portable.h b/chromium/v8/src/base/atomicops_internals_portable.h
index a3a6e74c72e..bb999737862 100644
--- a/chromium/v8/src/base/atomicops_internals_portable.h
+++ b/chromium/v8/src/base/atomicops_internals_portable.h
@@ -132,7 +132,7 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
return __sync_add_and_fetch(ptr, 0);
}
-}
-} // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/chromium/v8/src/base/atomicops_internals_ppc_gcc.h b/chromium/v8/src/base/atomicops_internals_ppc_gcc.h
index daa27b4693d..0d16500d1ba 100644
--- a/chromium/v8/src/base/atomicops_internals_ppc_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_ppc_gcc.h
@@ -162,7 +162,7 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
}
#endif
-}
-} // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_x86_gcc.cc b/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
index ab7dd8d091b..c0310300a15 100644
--- a/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
+++ b/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
@@ -47,7 +47,8 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
#endif
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
namespace {
diff --git a/chromium/v8/src/base/atomicops_internals_x86_gcc.h b/chromium/v8/src/base/atomicops_internals_x86_gcc.h
index ec87c421212..55bc44cd8bd 100644
--- a/chromium/v8/src/base/atomicops_internals_x86_gcc.h
+++ b/chromium/v8/src/base/atomicops_internals_x86_gcc.h
@@ -267,7 +267,8 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(__x86_64__)
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
diff --git a/chromium/v8/src/base/atomicops_internals_x86_msvc.h b/chromium/v8/src/base/atomicops_internals_x86_msvc.h
index adc40318e92..c37bc78df6d 100644
--- a/chromium/v8/src/base/atomicops_internals_x86_msvc.h
+++ b/chromium/v8/src/base/atomicops_internals_x86_msvc.h
@@ -197,6 +197,7 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
#endif // defined(_WIN64)
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/chromium/v8/src/base/bits.h b/chromium/v8/src/base/bits.h
index f5710dfb5c9..4ba3c47ad96 100644
--- a/chromium/v8/src/base/bits.h
+++ b/chromium/v8/src/base/bits.h
@@ -212,6 +212,26 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
}
+// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed summation resulted in an overflow.
+inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+ uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
+ *val = bit_cast<int64_t>(res);
+ return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
+}
+
+
+// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed subtraction resulted in an overflow.
+inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+ uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
+ *val = bit_cast<int64_t>(res);
+ return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
+}
+
+
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
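The new 64-bit overflow predicates rely on the sign-bit trick: a signed addition overflows exactly when the result's sign differs from the signs of both operands. A self-contained check of that logic (memcpy stands in for V8's bit_cast):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
      // Unsigned addition has the intended wraparound semantics.
      uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
      std::memcpy(val, &res, sizeof(res));  // bit_cast without aliasing UB
      // Overflow iff the result's sign bit differs from both operands' sign bits.
      return ((res ^ static_cast<uint64_t>(lhs)) &
              (res ^ static_cast<uint64_t>(rhs)) & (1ULL << 63)) != 0;
    }

    int main() {
      int64_t out;
      std::printf("%d\n", SignedAddOverflow64(
          std::numeric_limits<int64_t>::max(), 1, &out));   // 1: overflow
      std::printf("%d\n", SignedAddOverflow64(1, 2, &out)); // 0: fine
    }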
diff --git a/chromium/v8/src/base/build_config.h b/chromium/v8/src/base/build_config.h
index 8016218e5cc..9637f657f92 100644
--- a/chromium/v8/src/base/build_config.h
+++ b/chromium/v8/src/base/build_config.h
@@ -55,6 +55,13 @@
#else
#define V8_HOST_ARCH_32_BIT 1
#endif
+#elif defined(__s390__) || defined(__s390x__)
+#define V8_HOST_ARCH_S390 1
+#if defined(__s390x__)
+#define V8_HOST_ARCH_64_BIT 1
+#else
+#define V8_HOST_ARCH_32_BIT 1
+#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
@@ -68,13 +75,17 @@
# endif
#endif
+#if defined(__ARM_ARCH_8A__)
+# define CAN_USE_ARMV8_INSTRUCTIONS 1
+#endif
+
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -117,6 +128,12 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
+#elif V8_TARGET_ARCH_S390
+#if V8_TARGET_ARCH_S390X
+#define V8_TARGET_ARCH_64_BIT 1
+#else
+#define V8_TARGET_ARCH_32_BIT 1
+#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
@@ -175,10 +192,23 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_BE
#define V8_TARGET_BIG_ENDIAN 1
+#elif V8_TARGET_ARCH_S390
+#if V8_TARGET_ARCH_S390_LE_SIM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#define V8_TARGET_BIG_ENDIAN 1
+#endif
#else
#error Unknown target architecture endianness
#endif
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) || \
+ defined(V8_TARGET_ARCH_X87)
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
+#else
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
+#endif
+
// Number of bits to represent the page size for paged spaces. The value of 20
// gives 1Mb bytes per page.
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
diff --git a/chromium/v8/src/base/cpu.cc b/chromium/v8/src/base/cpu.cc
index 4f587201fad..692494afcb4 100644
--- a/chromium/v8/src/base/cpu.cc
+++ b/chromium/v8/src/base/cpu.cc
@@ -691,4 +691,5 @@ CPU::CPU()
#endif // V8_HOST_ARCH_PPC
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/cpu.h b/chromium/v8/src/base/cpu.h
index 1dc0a91f650..ca108fa2bf9 100644
--- a/chromium/v8/src/base/cpu.h
+++ b/chromium/v8/src/base/cpu.h
@@ -145,6 +145,7 @@ class CPU final {
bool is_fp64_mode_;
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_CPU_H_
diff --git a/chromium/v8/src/base/flags.h b/chromium/v8/src/base/flags.h
index 467ecf67c95..6bdb69319d5 100644
--- a/chromium/v8/src/base/flags.h
+++ b/chromium/v8/src/base/flags.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_FLAGS_H_
#define V8_BASE_FLAGS_H_
+#include <cstddef>
+
#include "src/base/compiler-specific.h"
namespace v8 {
@@ -30,6 +32,13 @@ class Flags final {
: mask_(static_cast<S>(flag)) {}
explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
+ bool operator==(flag_type flag) const {
+ return mask_ == static_cast<S>(flag);
+ }
+ bool operator!=(flag_type flag) const {
+ return mask_ != static_cast<S>(flag);
+ }
+
Flags& operator&=(const Flags& flags) {
mask_ &= flags.mask_;
return *this;
@@ -60,6 +69,8 @@ class Flags final {
operator mask_type() const { return mask_; }
bool operator!() const { return !mask_; }
+ friend size_t hash_value(const Flags& flags) { return flags.mask_; }
+
private:
mask_type mask_;
};
@@ -97,13 +108,17 @@ class Flags final {
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) ^ rhs; \
- } inline Type operator^(Type::flag_type lhs, const Type& rhs) \
+ } inline Type \
+ operator^(Type::flag_type lhs, const Type& rhs) \
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
return rhs ^ lhs; \
- } inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
- ALLOW_UNUSED_TYPE; \
- inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
+ } inline void \
+ operator^(Type::flag_type lhs, Type::mask_type rhs) ALLOW_UNUSED_TYPE; \
+ inline void operator^(Type::flag_type lhs, Type::mask_type rhs) { \
+ } inline Type \
+ operator~(Type::flag_type val)ALLOW_UNUSED_TYPE; \
+ inline Type operator~(Type::flag_type val) { return ~Type(val); }
} // namespace base
} // namespace v8
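Flags gains equality comparison against a single flag and a hash_value friend for use in hash containers. A minimal sketch of the type-safe flags idiom, heavily simplified from base::Flags (ColorFlags and its enum are illustrative):

    #include <cstddef>
    #include <cstdio>

    enum Color { kRed = 1 << 0, kGreen = 1 << 1 };

    class ColorFlags {
     public:
      ColorFlags(Color flag) : mask_(flag) {}  // implicit, like V8's Flags
      explicit ColorFlags(int mask) : mask_(mask) {}
      // The newly added comparison: true only for exactly that one flag.
      bool operator==(Color flag) const { return mask_ == flag; }
      ColorFlags operator|(ColorFlags other) const {
        return ColorFlags(mask_ | other.mask_);
      }
      friend size_t hash_value(const ColorFlags& flags) { return flags.mask_; }
     private:
      int mask_;
    };

    int main() {
      ColorFlags f = ColorFlags(kRed) | ColorFlags(kGreen);
      std::printf("%d\n", f == kRed);  // 0: a composite mask is not one flag
    }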
diff --git a/chromium/v8/src/base/lazy-instance.h b/chromium/v8/src/base/lazy-instance.h
index a20689a16c4..4c0a3f897b7 100644
--- a/chromium/v8/src/base/lazy-instance.h
+++ b/chromium/v8/src/base/lazy-instance.h
@@ -232,6 +232,7 @@ struct LazyDynamicInstance {
CreateTrait, InitOnceTrait, DestroyTrait> type;
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_LAZY_INSTANCE_H_
diff --git a/chromium/v8/src/base/logging.h b/chromium/v8/src/base/logging.h
index 511ebf1e9c3..e4e3f49bfaf 100644
--- a/chromium/v8/src/base/logging.h
+++ b/chromium/v8/src/base/logging.h
@@ -11,7 +11,8 @@
#include "src/base/build_config.h"
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
+ const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -28,7 +29,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
V8_Fatal("", 0, "%s", (msg))
#define UNIMPLEMENTED() \
V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() ((void) 0)
+#define UNREACHABLE() V8_Fatal("", 0, "unreachable code")
#endif
@@ -153,6 +154,7 @@ void DumpBacktrace();
#define DCHECK(condition) CHECK(condition)
#define DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
#define DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
+#define DCHECK_GT(v1, v2) CHECK_GT(v1, v2)
#define DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
#define DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
#define DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
@@ -163,6 +165,7 @@ void DumpBacktrace();
#define DCHECK(condition) ((void) 0)
#define DCHECK_EQ(v1, v2) ((void) 0)
#define DCHECK_NE(v1, v2) ((void) 0)
+#define DCHECK_GT(v1, v2) ((void) 0)
#define DCHECK_GE(v1, v2) ((void) 0)
#define DCHECK_LT(v1, v2) ((void) 0)
#define DCHECK_LE(v1, v2) ((void) 0)
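Two related changes here: V8_Fatal is now declared V8_NORETURN, and release-mode UNREACHABLE() calls it instead of expanding to a no-op. Marking the handler noreturn is what lets UNREACHABLE() terminate a control path as far as the compiler is concerned. A sketch with the standard C++ attribute; Fatal and the macro body are stand-ins, not V8's actual definitions:

    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] void Fatal(const char* file, int line, const char* msg) {
      std::fprintf(stderr, "%s:%d: %s\n", file, line, msg);
      std::abort();
    }

    #define UNREACHABLE() Fatal(__FILE__, __LINE__, "unreachable code")

    int Classify(int x) {
      if (x > 0) return 1;
      if (x < 0) return -1;
      if (x == 0) return 0;
      UNREACHABLE();  // no "missing return" diagnostic: Fatal never returns
    }

    int main() { std::printf("%d\n", Classify(-5)); }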
diff --git a/chromium/v8/src/base/macros.h b/chromium/v8/src/base/macros.h
index 6dc96f4f3ba..10cab4b2bfc 100644
--- a/chromium/v8/src/base/macros.h
+++ b/chromium/v8/src/base/macros.h
@@ -102,66 +102,6 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif // V8_OS_NACL
-// The COMPILE_ASSERT macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
-// content_type_names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-#if V8_HAS_CXX11_STATIC_ASSERT
-
-// Under C++11, just use static_assert.
-#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
-
-#else
-
-template <bool>
-struct CompileAssert {};
-
-#define COMPILE_ASSERT(expr, msg) \
- typedef CompileAssert<static_cast<bool>(expr)> \
- msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED_TYPE
-
-// Implementation details of COMPILE_ASSERT:
-//
-// - COMPILE_ASSERT works by defining an array type that has -1
-// elements (and thus is invalid) when the expression is false.
-//
-// - The simpler definition
-//
-// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
-//
-// does not work, as gcc supports variable-length arrays whose sizes
-// are determined at run-time (this is gcc's extension and not part
-// of the C++ standard). As a result, gcc fails to reject the
-// following code with the simple definition:
-//
-// int foo;
-// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
-// // not a compile-time constant.
-//
-// - By using the type CompileAssert<static_cast<bool>(expr)>, we ensure that
-// expr is a compile-time constant. (Template arguments must be
-// determined at compile-time.)
-//
-// - The array size is (static_cast<bool>(expr) ? 1 : -1), instead of simply
-//
-// ((expr) ? 1 : -1).
-//
-// This is to avoid running into a bug in MS VC 7.1, which
-// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
-
-#endif
-
-
// bit_cast<Dest,Source> is a template function that implements the
// equivalent of "*reinterpret_cast<Dest*>(&source)". We need this in
// very low-level functions like the protobuf library and fast math
@@ -217,8 +157,8 @@ struct CompileAssert {};
// is likely to surprise you.
template <class Dest, class Source>
V8_INLINE Dest bit_cast(Source const& source) {
- COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
-
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "source and dest must be same size");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
@@ -278,32 +218,8 @@ V8_INLINE Dest bit_cast(Source const& source) {
#endif
-// Use C++11 static_assert if possible, which gives error
-// messages that are easier to understand on first sight.
-#if V8_HAS_CXX11_STATIC_ASSERT
+// TODO(all) Replace all uses of this macro with static_assert, remove macro.
#define STATIC_ASSERT(test) static_assert(test, #test)
-#else
-// This is inspired by the static assertion facility in boost. This
-// is pretty magical. If it causes you trouble on a platform you may
-// find a fix in the boost code.
-template <bool> class StaticAssertion;
-template <> class StaticAssertion<true> { };
-// This macro joins two tokens. If one of the tokens is a macro the
-// helper call causes it to be resolved before joining.
-#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
-#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
-// Causes an error during compilation of the condition is not
-// statically known to be true. It is formulated as a typedef so that
-// it can be used wherever a typedef can be used. Beware that this
-// actually causes each use to introduce a new defined type with a
-// name depending on the source line.
-template <int> class StaticAssertionHelper { };
-#define STATIC_ASSERT(test) \
- typedef StaticAssertionHelper< \
- sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED_TYPE
-
-#endif
// The USE(x) template is used to silence C++ compiler warnings
@@ -417,7 +333,8 @@ template <>
inline bool is_fundamental<uint8_t>() {
return true;
}
-}
-} // namespace v8::base
+
+} // namespace base
+} // namespace v8
#endif // V8_BASE_MACROS_H_
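With COMPILE_ASSERT and the pre-C++11 STATIC_ASSERT machinery gone, bit_cast checks its size precondition with a plain static_assert. A self-contained sketch of the idiom as it now reads in macros.h; memcpy does the actual type pun to avoid aliasing UB:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    template <class Dest, class Source>
    inline Dest bit_cast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source),
                    "source and dest must be same size");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));
      return dest;
    }

    int main() {
      double d = 1.0;
      // prints 0x3ff0000000000000, the IEEE-754 encoding of 1.0
      std::printf("0x%llx\n",
                  static_cast<unsigned long long>(bit_cast<uint64_t>(d)));
    }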
diff --git a/chromium/v8/src/base/once.cc b/chromium/v8/src/base/once.cc
index eaabf40d9a5..818a9f2e84b 100644
--- a/chromium/v8/src/base/once.cc
+++ b/chromium/v8/src/base/once.cc
@@ -50,4 +50,5 @@ void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
}
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/once.h b/chromium/v8/src/base/once.h
index 6bf741d38a9..790a8866e09 100644
--- a/chromium/v8/src/base/once.h
+++ b/chromium/v8/src/base/once.h
@@ -97,6 +97,7 @@ inline void CallOnce(OnceType* once,
}
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_ONCE_H_
diff --git a/chromium/v8/src/base/platform/condition-variable.cc b/chromium/v8/src/base/platform/condition-variable.cc
index e5b9bd0810e..fcd6cf7974d 100644
--- a/chromium/v8/src/base/platform/condition-variable.cc
+++ b/chromium/v8/src/base/platform/condition-variable.cc
@@ -313,4 +313,5 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
#endif // V8_OS_POSIX
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/condition-variable.h b/chromium/v8/src/base/platform/condition-variable.h
index 9cb706460fc..72d6f28507b 100644
--- a/chromium/v8/src/base/platform/condition-variable.h
+++ b/chromium/v8/src/base/platform/condition-variable.h
@@ -113,6 +113,7 @@ typedef LazyStaticInstance<
#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
diff --git a/chromium/v8/src/base/platform/elapsed-timer.h b/chromium/v8/src/base/platform/elapsed-timer.h
index 3fe7e8f7205..f9a9ef43619 100644
--- a/chromium/v8/src/base/platform/elapsed-timer.h
+++ b/chromium/v8/src/base/platform/elapsed-timer.h
@@ -92,6 +92,7 @@ class ElapsedTimer final {
#endif
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_ELAPSED_TIMER_H_
diff --git a/chromium/v8/src/base/platform/mutex.cc b/chromium/v8/src/base/platform/mutex.cc
index 8b1e305701f..14016058ae7 100644
--- a/chromium/v8/src/base/platform/mutex.cc
+++ b/chromium/v8/src/base/platform/mutex.cc
@@ -188,4 +188,5 @@ bool RecursiveMutex::TryLock() {
return true;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/mutex.h b/chromium/v8/src/base/platform/mutex.h
index 18e85de7bca..61df19d66a5 100644
--- a/chromium/v8/src/base/platform/mutex.h
+++ b/chromium/v8/src/base/platform/mutex.h
@@ -210,6 +210,7 @@ class LockGuard final {
DISALLOW_COPY_AND_ASSIGN(LockGuard);
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_MUTEX_H_
diff --git a/chromium/v8/src/base/platform/platform-aix.cc b/chromium/v8/src/base/platform/platform-aix.cc
index 03e9aa3717f..ea2824d8c32 100644
--- a/chromium/v8/src/base/platform/platform-aix.cc
+++ b/chromium/v8/src/base/platform/platform-aix.cc
@@ -239,5 +239,5 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
bool VirtualMemory::HasLazyCommits() { return true; }
-}
-} // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-cygwin.cc b/chromium/v8/src/base/platform/platform-cygwin.cc
index 18f151ac293..a49e28723d2 100644
--- a/chromium/v8/src/base/platform/platform-cygwin.cc
+++ b/chromium/v8/src/base/platform/platform-cygwin.cc
@@ -252,4 +252,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-freebsd.cc b/chromium/v8/src/base/platform/platform-freebsd.cc
index b279e0c9268..8b3398039fe 100644
--- a/chromium/v8/src/base/platform/platform-freebsd.cc
+++ b/chromium/v8/src/base/platform/platform-freebsd.cc
@@ -255,4 +255,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-linux.cc b/chromium/v8/src/base/platform/platform-linux.cc
index 2a2abfeb25c..a4b742adc71 100644
--- a/chromium/v8/src/base/platform/platform-linux.cc
+++ b/chromium/v8/src/base/platform/platform-linux.cc
@@ -388,4 +388,5 @@ bool VirtualMemory::HasLazyCommits() {
return true;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-macos.cc b/chromium/v8/src/base/platform/platform-macos.cc
index f16f329fc35..419281f6694 100644
--- a/chromium/v8/src/base/platform/platform-macos.cc
+++ b/chromium/v8/src/base/platform/platform-macos.cc
@@ -249,4 +249,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-openbsd.cc b/chromium/v8/src/base/platform/platform-openbsd.cc
index 369dd8e1a6d..af145e2fca1 100644
--- a/chromium/v8/src/base/platform/platform-openbsd.cc
+++ b/chromium/v8/src/base/platform/platform-openbsd.cc
@@ -286,4 +286,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-qnx.cc b/chromium/v8/src/base/platform/platform-qnx.cc
index b16652886e6..3c904676273 100644
--- a/chromium/v8/src/base/platform/platform-qnx.cc
+++ b/chromium/v8/src/base/platform/platform-qnx.cc
@@ -312,4 +312,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-solaris.cc b/chromium/v8/src/base/platform/platform-solaris.cc
index 7e07f1a1e29..a2ce2c13f63 100644
--- a/chromium/v8/src/base/platform/platform-solaris.cc
+++ b/chromium/v8/src/base/platform/platform-solaris.cc
@@ -208,4 +208,5 @@ bool VirtualMemory::HasLazyCommits() {
return false;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/platform-win32.cc b/chromium/v8/src/base/platform/platform-win32.cc
index a73dc523c41..6afa6f9c373 100644
--- a/chromium/v8/src/base/platform/platform-win32.cc
+++ b/chromium/v8/src/base/platform/platform-win32.cc
@@ -751,9 +751,19 @@ void* OS::GetRandomMmapAddr() {
static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
LPVOID base = NULL;
+ static BOOL use_aslr = -1;
+#ifdef V8_HOST_ARCH_32_BIT
+ // Don't bother randomizing on 32-bit hosts, because they lack the room and
+ // don't have viable ASLR anyway.
+ if (use_aslr == -1 && !IsWow64Process(GetCurrentProcess(), &use_aslr))
+ use_aslr = FALSE;
+#else
+ use_aslr = TRUE;
+#endif
- if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
- // For exectutable pages try and randomize the allocation address
+ if (use_aslr &&
+ (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS)) {
+ // For executable pages try and randomize the allocation address
for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
}
@@ -823,6 +833,9 @@ void OS::Abort() {
}
// Make the MSVCRT do a silent abort.
raise(SIGABRT);
+
+ // Make sure function doesn't return.
+ abort();
}
@@ -1133,9 +1146,9 @@ static std::vector<OS::SharedLibraryAddress> LoadSymbols(
WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
lib_name_length, NULL, NULL);
result.push_back(OS::SharedLibraryAddress(
- lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
- reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
- module_entry.modBaseSize)));
+ lib_name, reinterpret_cast<uintptr_t>(module_entry.modBaseAddr),
+ reinterpret_cast<uintptr_t>(module_entry.modBaseAddr +
+ module_entry.modBaseSize)));
cont = _Module32NextW(snapshot, &module_entry);
}
CloseHandle(snapshot);
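RandomizedVirtualAlloc now gates randomization on use_aslr but keeps the same retry shape: try a few random base addresses for executable pages, then fall back to letting the OS choose. A portable sketch of that retry pattern only; TryAllocateAt and RandomAddress are stand-ins, not Win32 or V8 API:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    void* TryAllocateAt(void* hint, size_t size) {
      (void)hint;  // a real implementation would pass the hint to VirtualAlloc
      return std::malloc(size);
    }

    void* RandomAddress() {
      return reinterpret_cast<void*>(static_cast<uintptr_t>(std::rand()) << 16);
    }

    void* RandomizedAlloc(size_t size) {
      void* base = nullptr;
      // A bounded number of attempts at randomized addresses, as in the diff.
      for (int attempts = 0; base == nullptr && attempts < 3; ++attempts) {
        base = TryAllocateAt(RandomAddress(), size);
      }
      if (base == nullptr) base = TryAllocateAt(nullptr, size);  // OS picks
      return base;
    }

    int main() {
      void* p = RandomizedAlloc(4096);
      std::printf("%p\n", p);
      std::free(p);
    }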
diff --git a/chromium/v8/src/base/platform/platform.h b/chromium/v8/src/base/platform/platform.h
index 2d08ecbd7f6..89d6225edeb 100644
--- a/chromium/v8/src/base/platform/platform.h
+++ b/chromium/v8/src/base/platform/platform.h
@@ -194,7 +194,7 @@ class OS {
static void Sleep(TimeDelta interval);
// Abort the current process.
- static void Abort();
+ V8_NORETURN static void Abort();
// Debug break.
static void DebugBreak();
@@ -479,6 +479,7 @@ class Thread {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_PLATFORM_H_
diff --git a/chromium/v8/src/base/platform/semaphore.cc b/chromium/v8/src/base/platform/semaphore.cc
index 0679c00d95e..9e7b59a1d2c 100644
--- a/chromium/v8/src/base/platform/semaphore.cc
+++ b/chromium/v8/src/base/platform/semaphore.cc
@@ -75,6 +75,10 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
Semaphore::Semaphore(int count) {
DCHECK(count >= 0);
+#if V8_LIBC_GLIBC
+ // sem_init in glibc prior to 2.1 does not zero out semaphores.
+ memset(&native_handle_, 0, sizeof(native_handle_));
+#endif
int result = sem_init(&native_handle_, 0, count);
DCHECK_EQ(0, result);
USE(result);
@@ -201,4 +205,5 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
#endif // V8_OS_MACOSX
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/semaphore.h b/chromium/v8/src/base/platform/semaphore.h
index fa131018b37..18700d1ba00 100644
--- a/chromium/v8/src/base/platform/semaphore.h
+++ b/chromium/v8/src/base/platform/semaphore.h
@@ -96,6 +96,7 @@ struct LazySemaphore {
#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_SEMAPHORE_H_
diff --git a/chromium/v8/src/base/platform/time.cc b/chromium/v8/src/base/platform/time.cc
index 5162182b7a3..e847d54de82 100644
--- a/chromium/v8/src/base/platform/time.cc
+++ b/chromium/v8/src/base/platform/time.cc
@@ -644,4 +644,5 @@ bool TimeTicks::KernelTimestampAvailable() {
#endif // V8_OS_WIN
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
diff --git a/chromium/v8/src/base/platform/time.h b/chromium/v8/src/base/platform/time.h
index 5fc01e88fd7..29300e54044 100644
--- a/chromium/v8/src/base/platform/time.h
+++ b/chromium/v8/src/base/platform/time.h
@@ -398,6 +398,7 @@ inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
return ticks + delta;
}
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_PLATFORM_TIME_H_
diff --git a/chromium/v8/src/base/utils/random-number-generator.cc b/chromium/v8/src/base/utils/random-number-generator.cc
index 29a48ffb05d..ff428402b6b 100644
--- a/chromium/v8/src/base/utils/random-number-generator.cc
+++ b/chromium/v8/src/base/utils/random-number-generator.cc
@@ -97,15 +97,14 @@ int RandomNumberGenerator::NextInt(int max) {
double RandomNumberGenerator::NextDouble() {
- return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
- static_cast<double>(static_cast<int64_t>(1) << 53);
+ XorShift128(&state0_, &state1_);
+ return ToDouble(state0_, state1_);
}
int64_t RandomNumberGenerator::NextInt64() {
- uint64_t lo = bit_cast<unsigned>(Next(32));
- uint64_t hi = bit_cast<unsigned>(Next(32));
- return lo | (hi << 32);
+ XorShift128(&state0_, &state1_);
+ return bit_cast<int64_t>(state0_ + state1_);
}
@@ -119,21 +118,27 @@ void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
int RandomNumberGenerator::Next(int bits) {
DCHECK_LT(0, bits);
DCHECK_GE(32, bits);
- // Do unsigned multiplication, which has the intended modulo semantics, while
- // signed multiplication would expose undefined behavior.
- uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
- // Assigning a uint64_t to an int64_t is implementation defined, but this
- // should be OK. Use a static_cast to explicitly state that we know what we're
- // doing. (Famous last words...)
- int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
- seed_ = seed;
- return static_cast<int>(seed >> (48 - bits));
+ XorShift128(&state0_, &state1_);
+ return static_cast<int>((state0_ + state1_) >> (64 - bits));
}
void RandomNumberGenerator::SetSeed(int64_t seed) {
+ if (seed == 0) seed = 1;
initial_seed_ = seed;
- seed_ = (seed ^ kMultiplier) & kMask;
+ state0_ = MurmurHash3(bit_cast<uint64_t>(seed));
+ state1_ = MurmurHash3(state0_);
}
-} } // namespace v8::base
+
+uint64_t RandomNumberGenerator::MurmurHash3(uint64_t h) {
+ h ^= h >> 33;
+ h *= V8_UINT64_C(0xFF51AFD7ED558CCD);
+ h ^= h >> 33;
+ h *= V8_UINT64_C(0xC4CEB9FE1A85EC53);
+ h ^= h >> 33;
+ return h;
+}
+
+} // namespace base
+} // namespace v8
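The generator switches from a 48-bit linear congruential core to xorshift128+, with the 64-bit seed expanded into two state words through a MurmurHash3 finalizer. A self-contained reproduction of the scheme exactly as it appears in this diff:

    #include <cstdint>
    #include <cstdio>

    uint64_t MurmurHash3(uint64_t h) {
      h ^= h >> 33;
      h *= 0xFF51AFD7ED558CCDULL;
      h ^= h >> 33;
      h *= 0xC4CEB9FE1A85EC53ULL;
      h ^= h >> 33;
      return h;
    }

    void XorShift128(uint64_t* state0, uint64_t* state1) {
      uint64_t s1 = *state0;
      uint64_t s0 = *state1;
      *state0 = s0;
      s1 ^= s1 << 23;
      s1 ^= s1 >> 17;
      s1 ^= s0;
      s1 ^= s0 >> 26;
      *state1 = s1;
    }

    int main() {
      uint64_t state0 = MurmurHash3(42);   // SetSeed(42): hash seed into state0
      uint64_t state1 = MurmurHash3(state0);  // then hash again for state1
      for (int i = 0; i < 3; ++i) {
        XorShift128(&state0, &state1);
        std::printf("%llu\n",
                    static_cast<unsigned long long>(state0 + state1));
      }
    }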
diff --git a/chromium/v8/src/base/utils/random-number-generator.h b/chromium/v8/src/base/utils/random-number-generator.h
index 62c6b27b1b4..cd3e6bfdc89 100644
--- a/chromium/v8/src/base/utils/random-number-generator.h
+++ b/chromium/v8/src/base/utils/random-number-generator.h
@@ -12,10 +12,16 @@ namespace base {
// -----------------------------------------------------------------------------
// RandomNumberGenerator
-//
-// This class is used to generate a stream of pseudorandom numbers. The class
-// uses a 48-bit seed, which is modified using a linear congruential formula.
-// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.)
+
+// This class is used to generate a stream of pseudo-random numbers. The class
+// uses a 64-bit seed, which is passed through MurmurHash3 to create two 64-bit
+// state values. This pair of state values is then used in xorshift128+.
+// The resulting stream of pseudo-random numbers has a period length of 2^128-1.
+// See Marsaglia: http://www.jstatsoft.org/v08/i14/paper
+// And Vigna: http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
+// NOTE: Any changes to the algorithm must be tested against TestU01.
+// Please find instructions for this in the internal repository.
+
// If two instances of RandomNumberGenerator are created with the same seed, and
// the same sequence of method calls is made for each, they will generate and
// return identical sequences of numbers.
@@ -83,6 +89,27 @@ class RandomNumberGenerator final {
int64_t initial_seed() const { return initial_seed_; }
+ // Static and exposed for external use.
+ static inline double ToDouble(uint64_t state0, uint64_t state1) {
+ // Exponent for double values for [1.0 .. 2.0)
+ static const uint64_t kExponentBits = V8_UINT64_C(0x3FF0000000000000);
+ static const uint64_t kMantissaMask = V8_UINT64_C(0x000FFFFFFFFFFFFF);
+ uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
+ return bit_cast<double>(random) - 1;
+ }
+
+ // Static and exposed for external use.
+ static inline void XorShift128(uint64_t* state0, uint64_t* state1) {
+ uint64_t s1 = *state0;
+ uint64_t s0 = *state1;
+ *state0 = s0;
+ s1 ^= s1 << 23;
+ s1 ^= s1 >> 17;
+ s1 ^= s0;
+ s1 ^= s0 >> 26;
+ *state1 = s1;
+ }
+
private:
static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
static const int64_t kAddend = 0xb;
@@ -90,10 +117,14 @@ class RandomNumberGenerator final {
int Next(int bits) WARN_UNUSED_RESULT;
+ static uint64_t MurmurHash3(uint64_t);
+
int64_t initial_seed_;
- int64_t seed_;
+ uint64_t state0_;
+ uint64_t state1_;
};
-} } // namespace v8::base
+} // namespace base
+} // namespace v8
#endif // V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
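ToDouble() builds a double by splicing 52 random mantissa bits under the fixed exponent for [1.0, 2.0), then subtracts 1 to map the result into [0.0, 1.0). A quick check of the bit trick (memcpy stands in for bit_cast):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double ToDouble(uint64_t state0, uint64_t state1) {
      const uint64_t kExponentBits = 0x3FF0000000000000ULL;  // exponent of 1.0
      const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;  // low 52 bits
      uint64_t bits = ((state0 + state1) & kMantissaMask) | kExponentBits;
      double d;
      std::memcpy(&d, &bits, sizeof(d));  // reinterpret bits as a double
      return d - 1.0;                     // [1.0, 2.0) -> [0.0, 1.0)
    }

    int main() {
      std::printf("%.17g\n", ToDouble(0xFFFFFFFFFFFFFFFFULL, 0));  // just below 1
      std::printf("%.17g\n", ToDouble(0, 0));                      // exactly 0
    }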
diff --git a/chromium/v8/src/bignum-dtoa.h b/chromium/v8/src/bignum-dtoa.h
index d42801bd69b..dab27badba8 100644
--- a/chromium/v8/src/bignum-dtoa.h
+++ b/chromium/v8/src/bignum-dtoa.h
@@ -55,6 +55,7 @@ enum BignumDtoaMode {
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BIGNUM_DTOA_H_
diff --git a/chromium/v8/src/bignum.cc b/chromium/v8/src/bignum.cc
index 9baf77e7f28..e7c6747665f 100644
--- a/chromium/v8/src/bignum.cc
+++ b/chromium/v8/src/bignum.cc
@@ -68,7 +68,9 @@ static uint64_t ReadUInt64(Vector<const char> buffer,
int from,
int digits_to_read) {
uint64_t result = 0;
- for (int i = from; i < from + digits_to_read; ++i) {
+ int to = from + digits_to_read;
+
+ for (int i = from; i < to; ++i) {
int digit = buffer[i] - '0';
DCHECK(0 <= digit && digit <= 9);
result = result * 10 + digit;
diff --git a/chromium/v8/src/bignum.h b/chromium/v8/src/bignum.h
index 7ebdae47bc1..167c1842da4 100644
--- a/chromium/v8/src/bignum.h
+++ b/chromium/v8/src/bignum.h
@@ -115,6 +115,7 @@ class Bignum {
DISALLOW_COPY_AND_ASSIGN(Bignum);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BIGNUM_H_
diff --git a/chromium/v8/src/bit-vector.cc b/chromium/v8/src/bit-vector.cc
index cdd00f89c4b..0fbb01811a3 100644
--- a/chromium/v8/src/bit-vector.cc
+++ b/chromium/v8/src/bit-vector.cc
@@ -5,7 +5,6 @@
#include "src/bit-vector.h"
#include "src/base/bits.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/bootstrapper.cc b/chromium/v8/src/bootstrapper.cc
index a8a5f976804..f68a12ab14c 100644
--- a/chromium/v8/src/bootstrapper.cc
+++ b/chromium/v8/src/bootstrapper.cc
@@ -6,7 +6,6 @@
#include "src/accessors.h"
#include "src/api-natives.h"
-#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
@@ -17,11 +16,7 @@
#include "src/isolate-inl.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
-#include "third_party/fdlibm/fdlibm.h"
-
-#if defined(V8_WASM)
#include "src/wasm/wasm-js.h"
-#endif
namespace v8 {
namespace internal {
@@ -60,7 +55,6 @@ template Handle<String> Bootstrapper::SourceLookup<ExperimentalNatives>(
template Handle<String> Bootstrapper::SourceLookup<ExperimentalExtraNatives>(
int index);
template Handle<String> Bootstrapper::SourceLookup<ExtraNatives>(int index);
-template Handle<String> Bootstrapper::SourceLookup<CodeStubNatives>(int index);
void Bootstrapper::Initialize(bool create_heap_objects) {
@@ -131,7 +125,6 @@ void Bootstrapper::TearDown() {
DeleteNativeSources(ExtraNatives::GetSourceCache(isolate_->heap()));
DeleteNativeSources(
ExperimentalExtraNatives::GetSourceCache(isolate_->heap()));
- DeleteNativeSources(CodeStubNatives::GetSourceCache(isolate_->heap()));
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -164,6 +157,7 @@ class Genesis BASE_EMBEDDED {
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
+ void CreateIteratorMaps();
// Make the "arguments" and "caller" properties throw a TypeError on access.
void AddRestrictedFunctionProperties(Handle<Map> map);
@@ -174,31 +168,29 @@ class Genesis BASE_EMBEDDED {
// but in the latter case we don't use the objects it produces directly, as
// we have to use the deserialized ones that are linked together with the
// rest of the context snapshot.
- Handle<GlobalObject> CreateNewGlobals(
+ Handle<JSGlobalObject> CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy);
// Hooks the given global proxy into the context. If the context was created
// by deserialization then this will unhook the global proxy that was
// deserialized, leaving the GC to pick it up.
- void HookUpGlobalProxy(Handle<GlobalObject> global_object,
+ void HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
Handle<JSGlobalProxy> global_proxy);
// Similarly, we want to use the global that has been created by the templates
// passed through the API. The global from the snapshot is detached from the
// other objects in the snapshot.
- void HookUpGlobalObject(Handle<GlobalObject> global_object,
- Handle<FixedArray> outdated_contexts);
+ void HookUpGlobalObject(Handle<JSGlobalObject> global_object);
// The native context has a ScriptContextTable that stores declarative bindings
// made in script scopes. Add a "this" binding to that table pointing to the
// global proxy.
void InstallGlobalThisBinding();
- void HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts);
// New context initialization. Used for creating a context from scratch.
- void InitializeGlobal(Handle<GlobalObject> global_object,
+ void InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
ContextType context_type);
void InitializeExperimentalGlobal();
- // Typed arrays are not serializable and have to initialized afterwards.
- void InitializeBuiltinTypedArrays();
+ // Depending on the situation, expose and/or get rid of the utils object.
+ void ConfigureUtilsObject(ContextType context_type);
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
void InitializeGlobal_##id();
@@ -206,8 +198,11 @@ class Genesis BASE_EMBEDDED {
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
+ DECLARE_FEATURE_INITIALIZATION(promise_extra, "")
#undef DECLARE_FEATURE_INITIALIZATION
+ Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
+ const char* name);
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
@@ -222,6 +217,7 @@ class Genesis BASE_EMBEDDED {
void InstallBuiltinFunctionIds();
void InstallExperimentalBuiltinFunctionIds();
void InitializeNormalizedMapCaches();
+ void InstallJSProxyMaps();
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
@@ -271,8 +267,7 @@ class Genesis BASE_EMBEDDED {
FUNCTION_WITH_WRITEABLE_PROTOTYPE,
FUNCTION_WITH_READONLY_PROTOTYPE,
// Without prototype.
- FUNCTION_WITHOUT_PROTOTYPE,
- BOUND_FUNCTION
+ FUNCTION_WITHOUT_PROTOTYPE
};
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
@@ -340,26 +335,6 @@ Handle<Context> Bootstrapper::CreateEnvironment(
}
-bool Bootstrapper::CreateCodeStubContext(Isolate* isolate) {
- HandleScope scope(isolate);
- SaveContext save_context(isolate);
- BootstrapperActive active(this);
-
- v8::ExtensionConfiguration no_extensions;
- Handle<Context> native_context = CreateEnvironment(
- MaybeHandle<JSGlobalProxy>(), v8::Local<v8::ObjectTemplate>(),
- &no_extensions, THIN_CONTEXT);
- isolate->heap()->SetRootCodeStubContext(*native_context);
- isolate->set_context(*native_context);
- Handle<JSObject> code_stub_exports =
- isolate->factory()->NewJSObject(isolate->object_function());
- JSObject::NormalizeProperties(code_stub_exports, CLEAR_INOBJECT_PROPERTIES, 2,
- "container to export to extra natives");
- isolate->heap()->SetRootCodeStubExportsObject(*code_stub_exports);
- return InstallCodeStubNatives(isolate);
-}
-
-
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Handle<Map> old_map = Handle<Map>(object->map());
@@ -370,6 +345,9 @@ static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
void Bootstrapper::DetachGlobal(Handle<Context> env) {
+ env->GetIsolate()->counters()->errors_thrown_per_context()->AddSample(
+ env->GetErrorsThrown());
+
Factory* factory = env->GetIsolate()->factory();
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
@@ -383,31 +361,55 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
namespace {
+void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
+ Handle<JSFunction> function, Handle<String> function_name,
+ PropertyAttributes attributes = DONT_ENUM) {
+ JSObject::AddProperty(target, property_name, function, attributes);
+ if (target->IsJSGlobalObject()) {
+ function->shared()->set_instance_class_name(*function_name);
+ }
+ function->shared()->set_native(true);
+}
+
+
+static void InstallFunction(Handle<JSObject> target,
+ Handle<JSFunction> function, Handle<Name> name,
+ PropertyAttributes attributes = DONT_ENUM) {
+ Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
+ InstallFunction(target, name, function, name_string, attributes);
+}
+
+
+static Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
+ InstanceType type, int instance_size,
+ MaybeHandle<JSObject> maybe_prototype,
+ Builtins::Name call,
+ bool strict_function_map = false) {
+ Factory* factory = isolate->factory();
+ Handle<Code> call_code(isolate->builtins()->builtin(call));
+ Handle<JSObject> prototype;
+ static const bool kReadOnlyPrototype = false;
+ static const bool kInstallConstructor = false;
+ return maybe_prototype.ToHandle(&prototype)
+ ? factory->NewFunction(name, call_code, prototype, type,
+ instance_size, kReadOnlyPrototype,
+ kInstallConstructor, strict_function_map)
+ : factory->NewFunctionWithoutPrototype(name, call_code,
+ strict_function_map);
+}
+
+
Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
InstanceType type, int instance_size,
MaybeHandle<JSObject> maybe_prototype,
Builtins::Name call,
PropertyAttributes attributes,
bool strict_function_map = false) {
- Isolate* isolate = target->GetIsolate();
- Factory* factory = isolate->factory();
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
- Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
- Handle<JSObject> prototype;
- static const bool kReadOnlyPrototype = false;
- static const bool kInstallConstructor = false;
Handle<JSFunction> function =
- maybe_prototype.ToHandle(&prototype)
- ? factory->NewFunction(name_string, call_code, prototype, type,
- instance_size, kReadOnlyPrototype,
- kInstallConstructor, strict_function_map)
- : factory->NewFunctionWithoutPrototype(name_string, call_code,
- strict_function_map);
- JSObject::AddProperty(target, name, function, attributes);
- if (target->IsJSGlobalObject()) {
- function->shared()->set_instance_class_name(*name_string);
- }
- function->shared()->set_native(true);
+ CreateFunction(target->GetIsolate(), name_string, type, instance_size,
+ maybe_prototype, call, strict_function_map);
+ InstallFunction(target, name, function, name_string, attributes);
return function;
}
@@ -418,13 +420,7 @@ Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
Builtins::Name call,
bool strict_function_map = false) {
Factory* const factory = target->GetIsolate()->factory();
- PropertyAttributes attributes;
- if (target->IsJSBuiltinsObject()) {
- attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- } else {
- attributes = DONT_ENUM;
- }
+ PropertyAttributes attributes = DONT_ENUM;
return InstallFunction(target, factory->InternalizeUtf8String(name), type,
instance_size, maybe_prototype, call, attributes,
strict_function_map);
@@ -487,7 +483,7 @@ void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
map->set_is_callable();
return map;
}
@@ -560,7 +556,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
}
// Allocate the empty function as the prototype for function - ES6 19.2.3
- Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
+ Handle<Code> code(isolate->builtins()->EmptyFunction());
Handle<JSFunction> empty_function =
factory->NewFunctionWithoutPrototype(factory->empty_string(), code);
@@ -600,7 +596,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode) {
- int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
+ int size = IsFunctionModeWithPrototype(function_mode) ? 3 : 2;
Map::EnsureDescriptorSlack(map, size);
PropertyAttributes rw_attribs =
@@ -610,35 +606,22 @@ void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- if (function_mode == BOUND_FUNCTION) {
- { // Add length.
- Handle<String> length_string = isolate()->factory()->length_string();
- DataDescriptor d(length_string, 0, roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
- { // Add name.
- Handle<String> name_string = isolate()->factory()->name_string();
- DataDescriptor d(name_string, 1, roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
- } else {
- DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
- function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
- function_mode == FUNCTION_WITHOUT_PROTOTYPE);
- { // Add length.
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, roc_attribs);
- map->AppendDescriptor(&d);
- }
- { // Add name.
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- roc_attribs);
- map->AppendDescriptor(&d);
- }
+ DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+ function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
+ function_mode == FUNCTION_WITHOUT_PROTOTYPE);
+ { // Add length.
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+ { // Add name.
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ roc_attribs);
+ map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
// Add prototype.
@@ -685,11 +668,13 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
Handle<Code> code(isolate()->builtins()->builtin(builtin_name));
Handle<JSFunction> function =
factory()->NewFunctionWithoutPrototype(name, code);
- function->set_map(native_context()->sloppy_function_map());
function->shared()->DontAdaptArguments();
// %ThrowTypeError% must not have a name property.
- JSReceiver::DeleteProperty(function, factory()->name_string()).Assert();
+ if (JSReceiver::DeleteProperty(function, factory()->name_string())
+ .IsNothing()) {
+ DCHECK(false);
+ }
// length needs to be non-configurable.
Handle<Object> value(Smi::FromInt(function->shared()->length()), isolate());
@@ -698,7 +683,10 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
.Assert();
- JSObject::PreventExtensions(function).Assert();
+ if (JSObject::PreventExtensions(function, Object::THROW_ON_ERROR)
+ .IsNothing()) {
+ DCHECK(false);
+ }
return function;
}
@@ -727,7 +715,7 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
FunctionMode function_mode, Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrictFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
map->set_is_callable();
Map::SetPrototype(map, empty_function);
return map;
@@ -738,7 +726,7 @@ Handle<Map> Genesis::CreateStrongFunctionMap(
Handle<JSFunction> empty_function, bool is_constructor) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrongFunctionInstanceDescriptor(map);
- map->set_is_constructor(is_constructor);
+ if (is_constructor) map->set_is_constructor();
Map::SetPrototype(map, empty_function);
map->set_is_callable();
map->set_is_extensible(is_constructor);
@@ -765,21 +753,6 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// This map is installed in MakeFunctionInstancePrototypeWritable.
strict_function_map_writable_prototype_ =
CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
-
- // Special map for non-constructor bound functions.
- // TODO(bmeurer): Bound functions should not be represented as JSFunctions.
- Handle<Map> bound_function_without_constructor_map =
- CreateStrictFunctionMap(BOUND_FUNCTION, empty);
- native_context()->set_bound_function_without_constructor_map(
- *bound_function_without_constructor_map);
-
- // Special map for constructor bound functions.
- // TODO(bmeurer): Bound functions should not be represented as JSFunctions.
- Handle<Map> bound_function_with_constructor_map =
- Map::Copy(bound_function_without_constructor_map, "IsConstructor");
- bound_function_with_constructor_map->set_is_constructor(true);
- native_context()->set_bound_function_with_constructor_map(
- *bound_function_with_constructor_map);
}
@@ -793,6 +766,57 @@ void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
}
+void Genesis::CreateIteratorMaps() {
+ // Create iterator-related meta-objects.
+ Handle<JSObject> iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSObject> generator_object_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSObject> generator_function_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SetObjectPrototype(generator_object_prototype, iterator_prototype);
+
+ JSObject::AddProperty(generator_function_prototype,
+ factory()->InternalizeUtf8String("prototype"),
+ generator_object_prototype,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Create maps for generator functions and their prototypes. Store those
+ // maps in the native context. The "prototype" property descriptor is
+ // writable, non-enumerable, and non-configurable (as per ES6 draft
+ // 04-14-15, section 25.2.4.3).
+ Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
+ // Generator functions do not have "caller" or "arguments" accessors.
+ Handle<Map> sloppy_generator_function_map =
+ Map::Copy(strict_function_map, "SloppyGeneratorFunction");
+ Map::SetPrototype(sloppy_generator_function_map,
+ generator_function_prototype);
+ native_context()->set_sloppy_generator_function_map(
+ *sloppy_generator_function_map);
+
+ Handle<Map> strict_generator_function_map =
+ Map::Copy(strict_function_map, "StrictGeneratorFunction");
+ Map::SetPrototype(strict_generator_function_map,
+ generator_function_prototype);
+ native_context()->set_strict_generator_function_map(
+ *strict_generator_function_map);
+
+ Handle<Map> strong_function_map(native_context()->strong_function_map());
+ Handle<Map> strong_generator_function_map =
+ Map::Copy(strong_function_map, "StrongGeneratorFunction");
+ Map::SetPrototype(strong_generator_function_map,
+ generator_function_prototype);
+ native_context()->set_strong_generator_function_map(
+ *strong_generator_function_map);
+
+ Handle<JSFunction> object_function(native_context()->object_function());
+ Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
+ Map::SetPrototype(generator_object_prototype_map, generator_object_prototype);
+ native_context()->set_generator_object_prototype_map(
+ *generator_object_prototype_map);
+}
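// Resulting prototype chain, sketched for orientation (illustrative only);
// ExportFromRuntime below recovers iterator_prototype by walking this chain
// back with a PrototypeIterator:
//
//   generator object --> generator_object_prototype
//       --> iterator_prototype --> %ObjectPrototype%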
+
+
static void ReplaceAccessors(Handle<Map> map,
Handle<String> name,
PropertyAttributes attributes,
@@ -871,24 +895,7 @@ void Genesis::InstallGlobalThisBinding() {
}
-void Genesis::HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts) {
- // One of these contexts should be the one that declares the global "this"
- // binding.
- for (int i = 0; i < outdated_contexts->length(); ++i) {
- Context* context = Context::cast(outdated_contexts->get(i));
- if (context->IsScriptContext()) {
- ScopeInfo* scope_info = context->scope_info();
- int slot = scope_info->ReceiverContextSlotIndex();
- if (slot >= 0) {
- DCHECK_EQ(slot, Context::MIN_CONTEXT_SLOTS);
- context->set(slot, native_context()->global_proxy());
- }
- }
- }
-}
-
-
-Handle<GlobalObject> Genesis::CreateNewGlobals(
+Handle<JSGlobalObject> Genesis::CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy) {
// The argument global_proxy_template aka data is an ObjectTemplateInfo.
@@ -924,8 +931,7 @@ Handle<GlobalObject> Genesis::CreateNewGlobals(
if (js_global_object_template.is_null()) {
Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
- Builtins::kIllegal));
+ Handle<Code> code = isolate()->builtins()->Illegal();
Handle<JSObject> prototype =
factory()->NewFunctionPrototype(isolate()->object_function());
js_global_object_function = factory()->NewFunction(
@@ -948,15 +954,14 @@ Handle<GlobalObject> Genesis::CreateNewGlobals(
js_global_object_function->initial_map()->set_is_prototype_map(true);
js_global_object_function->initial_map()->set_is_hidden_prototype();
js_global_object_function->initial_map()->set_dictionary_map(true);
- Handle<GlobalObject> global_object =
- factory()->NewGlobalObject(js_global_object_function);
+ Handle<JSGlobalObject> global_object =
+ factory()->NewJSGlobalObject(js_global_object_function);
// Step 2: (re)initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_proxy_template.IsEmpty()) {
Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
- Builtins::kIllegal));
+ Handle<Code> code = isolate()->builtins()->Illegal();
global_proxy_function = factory()->NewFunction(
name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
} else {
@@ -981,7 +986,7 @@ Handle<GlobalObject> Genesis::CreateNewGlobals(
}
-void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
+void Genesis::HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
Handle<JSGlobalProxy> global_proxy) {
// Set the native context for the global object.
global_object->set_native_context(*native_context());
@@ -995,38 +1000,68 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
}
-void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object,
- Handle<FixedArray> outdated_contexts) {
- Handle<GlobalObject> global_object_from_snapshot(
- GlobalObject::cast(native_context()->extension()));
- Handle<JSBuiltinsObject> builtins_global(native_context()->builtins());
+void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object) {
+ Handle<JSGlobalObject> global_object_from_snapshot(
+ JSGlobalObject::cast(native_context()->extension()));
native_context()->set_extension(*global_object);
native_context()->set_security_token(*global_object);
- // Replace outdated global objects in deserialized contexts.
- for (int i = 0; i < outdated_contexts->length(); ++i) {
- Context* context = Context::cast(outdated_contexts->get(i));
- // Assert that there is only one native context.
- DCHECK(!context->IsNativeContext() || context == *native_context());
- DCHECK_EQ(context->global_object(), *global_object_from_snapshot);
- context->set_global_object(*global_object);
- }
-
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- JSObject::SetOwnPropertyIgnoreAttributes(builtins_global,
- factory()->global_string(),
- global_object, attributes).Assert();
- // Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(*global_object)->set_builtins(*builtins_global);
TransferNamedProperties(global_object_from_snapshot, global_object);
TransferIndexedProperties(global_object_from_snapshot, global_object);
}
+static Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
+ Handle<String> name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Handle<JSFunction> fun =
+ CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), call);
+ if (adapt) {
+ fun->shared()->set_internal_formal_parameter_count(len);
+ } else {
+ fun->shared()->DontAdaptArguments();
+ }
+ fun->shared()->set_length(len);
+ return fun;
+}
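// Illustrative contrast (not part of the patch): with |adapt| == true the
// shared function info declares |len| formal parameters, so the arguments
// adaptor frame pads or truncates the actual arguments to exactly |len|;
// with |adapt| == false only the JS-visible "length" is set and the builtin
// must inspect the actual argument count itself. E.g., for a hypothetical
// builtin:
//
//   SimpleCreateFunction(isolate, name, Builtins::kExample, 2, true);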
+
+
+static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ Handle<String> name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Handle<JSFunction> fun =
+ SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
+ InstallFunction(base, fun, name, DONT_ENUM);
+ return fun;
+}
+
+
+static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ const char* name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Factory* const factory = base->GetIsolate()->factory();
+ return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
+ len, adapt);
+}
+
+
+static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
+ Handle<JSFunction> function,
+ int context_index) {
+ Handle<Smi> index(Smi::FromInt(context_index), isolate);
+ JSObject::AddProperty(
+ function, isolate->factory()->native_context_index_symbol(), index, NONE);
+ isolate->native_context()->set(context_index, *function);
+}
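// Sketch of the two effects above (illustrative only): the function is
// stored in its native context slot, and the slot index is recorded on the
// function under a private symbol, so cross-context construction can look
// up the intrinsic default prototype in the new target's own realm:
//
//   InstallWithIntrinsicDefaultProto(isolate, array_function,
//                                    Context::ARRAY_FUNCTION_INDEX);
//   // afterwards:
//   //   isolate->native_context()->get(Context::ARRAY_FUNCTION_INDEX)
//   //       == *array_function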
+
+
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
-void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
+void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
ContextType context_type) {
// --- N a t i v e C o n t e x t ---
@@ -1035,7 +1070,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
native_context()->set_previous(NULL);
// Set extension and global object.
native_context()->set_extension(*global_object);
- native_context()->set_global_object(*global_object);
// Security setup: Set the security token of the native context to the global
// object. This makes the security check between two different contexts fail
// by default even in case of global object reinitialization.
@@ -1043,24 +1077,77 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Isolate* isolate = global_object->GetIsolate();
Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
native_context()->set_script_context_table(*script_context_table);
InstallGlobalThisBinding();
- Handle<String> object_name = factory->Object_string();
- JSObject::AddProperty(
- global_object, object_name, isolate->object_function(), DONT_ENUM);
+ { // --- O b j e c t ---
+ Handle<String> object_name = factory->Object_string();
+ Handle<JSFunction> object_function = isolate->object_function();
+ JSObject::AddProperty(global_object, object_name, object_function,
+ DONT_ENUM);
+ SimpleInstallFunction(object_function, factory->assign_string(),
+ Builtins::kObjectAssign, 2, false);
+ SimpleInstallFunction(object_function, factory->create_string(),
+ Builtins::kObjectCreate, 2, false);
+ Handle<JSFunction> object_freeze = SimpleInstallFunction(
+ object_function, "freeze", Builtins::kObjectFreeze, 1, false);
+ native_context()->set_object_freeze(*object_freeze);
+ Handle<JSFunction> object_is_extensible =
+ SimpleInstallFunction(object_function, "isExtensible",
+ Builtins::kObjectIsExtensible, 1, false);
+ native_context()->set_object_is_extensible(*object_is_extensible);
+ Handle<JSFunction> object_is_frozen = SimpleInstallFunction(
+ object_function, "isFrozen", Builtins::kObjectIsFrozen, 1, false);
+ native_context()->set_object_is_frozen(*object_is_frozen);
+ Handle<JSFunction> object_is_sealed = SimpleInstallFunction(
+ object_function, "isSealed", Builtins::kObjectIsSealed, 1, false);
+ native_context()->set_object_is_sealed(*object_is_sealed);
+ Handle<JSFunction> object_keys = SimpleInstallFunction(
+ object_function, "keys", Builtins::kObjectKeys, 1, false);
+ native_context()->set_object_keys(*object_keys);
+ SimpleInstallFunction(object_function, "preventExtensions",
+ Builtins::kObjectPreventExtensions, 1, false);
+ SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
+ false);
+ }
Handle<JSObject> global(native_context()->global_object());
- // Install global Function object
- Handle<JSFunction> function_function =
- InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal);
- function_function->initial_map()->set_is_callable();
+ { // --- F u n c t i o n ---
+ Handle<JSFunction> prototype = empty_function;
+ Handle<JSFunction> function_fun =
+ InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
+ prototype, Builtins::kFunctionConstructor);
+ function_fun->set_prototype_or_initial_map(
+ *sloppy_function_map_writable_prototype_);
+ function_fun->shared()->DontAdaptArguments();
+ function_fun->shared()->set_construct_stub(
+ *isolate->builtins()->FunctionConstructor());
+ function_fun->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate, function_fun,
+ Context::FUNCTION_FUNCTION_INDEX);
+
+    // Set up the methods on the %FunctionPrototype%.
+ SimpleInstallFunction(prototype, factory->apply_string(),
+ Builtins::kFunctionPrototypeApply, 2, false);
+ SimpleInstallFunction(prototype, factory->bind_string(),
+ Builtins::kFunctionPrototypeBind, 1, false);
+ SimpleInstallFunction(prototype, factory->call_string(),
+ Builtins::kFunctionPrototypeCall, 1, false);
+ SimpleInstallFunction(prototype, factory->toString_string(),
+ Builtins::kFunctionPrototypeToString, 0, false);
+
+ // Install the "constructor" property on the %FunctionPrototype%.
+ JSObject::AddProperty(prototype, factory->constructor_string(),
+ function_fun, DONT_ENUM);
+
+ sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
+ strict_function_map_writable_prototype_->SetConstructor(*function_fun);
+ native_context()->strong_function_map()->SetConstructor(*function_fun);
+ }
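  // Consequence sketch (illustrative only): every function map wired up
  // above now reports the installed %Function% as its constructor, e.g.
  //
  //   DCHECK_EQ(*function_fun,
  //             strict_function_map_writable_prototype_->GetConstructor());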
{ // --- A r r a y ---
Handle<JSFunction> array_function =
@@ -1093,11 +1180,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
initial_map->AppendDescriptor(&d);
}
- // array_function is used internally. JS code creating array object should
- // search for the 'Array' property on the global object and use that one
- // as the constructor. 'Array' property on a global object can be
- // overwritten by JS code.
- native_context()->set_array_function(*array_function);
+ InstallWithIntrinsicDefaultProto(isolate, array_function,
+ Context::ARRAY_FUNCTION_INDEX);
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
@@ -1109,14 +1193,23 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Map::Copy(initial_map, "SetInstancePrototype");
initial_strong_map->set_is_strong();
CacheInitialJSArrayMaps(native_context(), initial_strong_map);
+
+ Handle<JSFunction> is_arraylike = SimpleInstallFunction(
+ array_function, isolate->factory()->InternalizeUtf8String("isArray"),
+ Builtins::kArrayIsArray, 1, true);
+ native_context()->set_is_arraylike(*is_arraylike);
}
{ // --- N u m b e r ---
- Handle<JSFunction> number_fun =
- InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
- native_context()->set_number_function(*number_fun);
+ Handle<JSFunction> number_fun = InstallFunction(
+ global, "Number", JS_VALUE_TYPE, JSValue::kSize,
+ isolate->initial_object_prototype(), Builtins::kNumberConstructor);
+ number_fun->shared()->DontAdaptArguments();
+ number_fun->shared()->set_construct_stub(
+ *isolate->builtins()->NumberConstructor_ConstructStub());
+ number_fun->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate, number_fun,
+ Context::NUMBER_FUNCTION_INDEX);
}
{ // --- B o o l e a n ---
@@ -1124,18 +1217,20 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal);
- native_context()->set_boolean_function(*boolean_fun);
+ InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
+ Context::BOOLEAN_FUNCTION_INDEX);
}
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
- string_fun->shared()->set_construct_stub(isolate->builtins()->builtin(
- Builtins::kStringConstructor_ConstructStub));
+ string_fun->shared()->set_construct_stub(
+ *isolate->builtins()->StringConstructor_ConstructStub());
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
- native_context()->set_string_function(*string_fun);
+ InstallWithIntrinsicDefaultProto(isolate, string_fun,
+ Context::STRING_FUNCTION_INDEX);
Handle<Map> string_map =
Handle<Map>(native_context()->string_function()->initial_map());
@@ -1158,19 +1253,143 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSFunction> symbol_fun = InstallFunction(
global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kSymbolConstructor);
- symbol_fun->shared()->set_construct_stub(isolate->builtins()->builtin(
- Builtins::kSymbolConstructor_ConstructStub));
- symbol_fun->shared()->set_internal_formal_parameter_count(1);
+ symbol_fun->shared()->set_construct_stub(
+ *isolate->builtins()->SymbolConstructor_ConstructStub());
symbol_fun->shared()->set_length(1);
+ symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
}
{ // --- D a t e ---
// Builtin functions for Date.prototype.
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- }
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Handle<JSFunction> date_fun =
+ InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, prototype,
+ Builtins::kDateConstructor);
+ InstallWithIntrinsicDefaultProto(isolate, date_fun,
+ Context::DATE_FUNCTION_INDEX);
+ date_fun->shared()->set_construct_stub(
+ *isolate->builtins()->DateConstructor_ConstructStub());
+ date_fun->shared()->set_length(7);
+ date_fun->shared()->DontAdaptArguments();
+
+ // Install the Date.now, Date.parse and Date.UTC functions.
+ SimpleInstallFunction(date_fun, "now", Builtins::kDateNow, 0, false);
+ SimpleInstallFunction(date_fun, "parse", Builtins::kDateParse, 1, false);
+ SimpleInstallFunction(date_fun, "UTC", Builtins::kDateUTC, 7, false);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), date_fun,
+ DONT_ENUM);
+
+ // Install the Date.prototype methods.
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kDatePrototypeToString, 0, false);
+ SimpleInstallFunction(prototype, "toDateString",
+ Builtins::kDatePrototypeToDateString, 0, false);
+ SimpleInstallFunction(prototype, "toTimeString",
+ Builtins::kDatePrototypeToTimeString, 0, false);
+ SimpleInstallFunction(prototype, "toGMTString",
+ Builtins::kDatePrototypeToUTCString, 0, false);
+ SimpleInstallFunction(prototype, "toISOString",
+ Builtins::kDatePrototypeToISOString, 0, false);
+ SimpleInstallFunction(prototype, "toUTCString",
+ Builtins::kDatePrototypeToUTCString, 0, false);
+ SimpleInstallFunction(prototype, "getDate", Builtins::kDatePrototypeGetDate,
+ 0, true);
+ SimpleInstallFunction(prototype, "setDate", Builtins::kDatePrototypeSetDate,
+ 1, false);
+ SimpleInstallFunction(prototype, "getDay", Builtins::kDatePrototypeGetDay,
+ 0, true);
+ SimpleInstallFunction(prototype, "getFullYear",
+ Builtins::kDatePrototypeGetFullYear, 0, true);
+ SimpleInstallFunction(prototype, "setFullYear",
+ Builtins::kDatePrototypeSetFullYear, 3, false);
+ SimpleInstallFunction(prototype, "getHours",
+ Builtins::kDatePrototypeGetHours, 0, true);
+ SimpleInstallFunction(prototype, "setHours",
+ Builtins::kDatePrototypeSetHours, 4, false);
+ SimpleInstallFunction(prototype, "getMilliseconds",
+ Builtins::kDatePrototypeGetMilliseconds, 0, true);
+ SimpleInstallFunction(prototype, "setMilliseconds",
+ Builtins::kDatePrototypeSetMilliseconds, 1, false);
+ SimpleInstallFunction(prototype, "getMinutes",
+ Builtins::kDatePrototypeGetMinutes, 0, true);
+ SimpleInstallFunction(prototype, "setMinutes",
+ Builtins::kDatePrototypeSetMinutes, 3, false);
+ SimpleInstallFunction(prototype, "getMonth",
+ Builtins::kDatePrototypeGetMonth, 0, true);
+ SimpleInstallFunction(prototype, "setMonth",
+ Builtins::kDatePrototypeSetMonth, 2, false);
+ SimpleInstallFunction(prototype, "getSeconds",
+ Builtins::kDatePrototypeGetSeconds, 0, true);
+ SimpleInstallFunction(prototype, "setSeconds",
+ Builtins::kDatePrototypeSetSeconds, 2, false);
+ SimpleInstallFunction(prototype, "getTime", Builtins::kDatePrototypeGetTime,
+ 0, true);
+ SimpleInstallFunction(prototype, "setTime", Builtins::kDatePrototypeSetTime,
+ 1, false);
+ SimpleInstallFunction(prototype, "getTimezoneOffset",
+ Builtins::kDatePrototypeGetTimezoneOffset, 0, true);
+ SimpleInstallFunction(prototype, "getUTCDate",
+ Builtins::kDatePrototypeGetUTCDate, 0, true);
+ SimpleInstallFunction(prototype, "setUTCDate",
+ Builtins::kDatePrototypeSetUTCDate, 1, false);
+ SimpleInstallFunction(prototype, "getUTCDay",
+ Builtins::kDatePrototypeGetUTCDay, 0, true);
+ SimpleInstallFunction(prototype, "getUTCFullYear",
+ Builtins::kDatePrototypeGetUTCFullYear, 0, true);
+ SimpleInstallFunction(prototype, "setUTCFullYear",
+ Builtins::kDatePrototypeSetUTCFullYear, 3, false);
+ SimpleInstallFunction(prototype, "getUTCHours",
+ Builtins::kDatePrototypeGetUTCHours, 0, true);
+ SimpleInstallFunction(prototype, "setUTCHours",
+ Builtins::kDatePrototypeSetUTCHours, 4, false);
+ SimpleInstallFunction(prototype, "getUTCMilliseconds",
+ Builtins::kDatePrototypeGetUTCMilliseconds, 0, true);
+ SimpleInstallFunction(prototype, "setUTCMilliseconds",
+ Builtins::kDatePrototypeSetUTCMilliseconds, 1, false);
+ SimpleInstallFunction(prototype, "getUTCMinutes",
+ Builtins::kDatePrototypeGetUTCMinutes, 0, true);
+ SimpleInstallFunction(prototype, "setUTCMinutes",
+ Builtins::kDatePrototypeSetUTCMinutes, 3, false);
+ SimpleInstallFunction(prototype, "getUTCMonth",
+ Builtins::kDatePrototypeGetUTCMonth, 0, true);
+ SimpleInstallFunction(prototype, "setUTCMonth",
+ Builtins::kDatePrototypeSetUTCMonth, 2, false);
+ SimpleInstallFunction(prototype, "getUTCSeconds",
+ Builtins::kDatePrototypeGetUTCSeconds, 0, true);
+ SimpleInstallFunction(prototype, "setUTCSeconds",
+ Builtins::kDatePrototypeSetUTCSeconds, 2, false);
+ SimpleInstallFunction(prototype, "valueOf", Builtins::kDatePrototypeValueOf,
+ 0, false);
+ SimpleInstallFunction(prototype, "getYear", Builtins::kDatePrototypeGetYear,
+ 0, true);
+ SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
+ 1, false);
+
+ // Install i18n fallback functions.
+ SimpleInstallFunction(prototype, "toLocaleString",
+ Builtins::kDatePrototypeToString, 0, false);
+ SimpleInstallFunction(prototype, "toLocaleDateString",
+ Builtins::kDatePrototypeToDateString, 0, false);
+ SimpleInstallFunction(prototype, "toLocaleTimeString",
+ Builtins::kDatePrototypeToTimeString, 0, false);
+
+ // Install the @@toPrimitive function.
+ Handle<JSFunction> to_primitive = InstallFunction(
+ prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kDatePrototypeToPrimitive,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Set the expected parameters for @@toPrimitive to 1; required by builtin.
+ to_primitive->shared()->set_internal_formal_parameter_count(1);
+ // Set the length for the function to satisfy ECMA-262.
+ to_primitive->shared()->set_length(1);
+ }
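  // Behavior sketch (illustrative only): the builtin receives the hint as
  // its single argument; with hint "number" it behaves like valueOf(), and
  // with "string" or "default" like toString(), per ES6 20.3.4.45
  // (Date.prototype[@@toPrimitive]).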
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
@@ -1178,81 +1397,87 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal);
- native_context()->set_regexp_function(*regexp_fun);
+ InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
+ Context::REGEXP_FUNCTION_INDEX);
+ regexp_fun->shared()->set_construct_stub(
+ *isolate->builtins()->JSBuiltinsConstructStub());
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
DCHECK_EQ(0, initial_map->GetInObjectProperties());
- PropertyAttributes final =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Map::EnsureDescriptorSlack(initial_map, 5);
+ Map::EnsureDescriptorSlack(initial_map, 1);
- {
- // ECMA-262, section 15.10.7.1.
- DataDescriptor field(factory->source_string(),
- JSRegExp::kSourceFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.2.
- DataDescriptor field(factory->global_string(),
- JSRegExp::kGlobalFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.3.
- DataDescriptor field(factory->ignore_case_string(),
- JSRegExp::kIgnoreCaseFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.4.
- DataDescriptor field(factory->multiline_string(),
- JSRegExp::kMultilineFieldIndex, final,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
- {
- // ECMA-262, section 15.10.7.5.
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- DataDescriptor field(factory->last_index_string(),
- JSRegExp::kLastIndexFieldIndex, writable,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
- }
+ // ECMA-262, section 15.10.7.5.
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ DataDescriptor field(factory->last_index_string(),
+ JSRegExp::kLastIndexFieldIndex, writable,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(&field);
static const int num_fields = JSRegExp::kInObjectFieldCount;
initial_map->SetInObjectProperties(num_fields);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(initial_map->instance_size() +
num_fields * kPointerSize);
+ }
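  // Note on the reduced layout (illustrative only): lastIndex is the only
  // per-instance field that ES6 21.2 keeps as writable data, so it stays
  // in-object; source/global/ignoreCase/multiline are instead exposed as
  // RegExp.prototype getters installed from the natives.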
- // RegExp prototype object is itself a RegExp.
- Handle<Map> proto_map = Map::Copy(initial_map, "RegExpPrototype");
- DCHECK(proto_map->prototype() == *isolate->initial_object_prototype());
- Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
- proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->query_colon_string());
- proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
- heap->false_value());
- proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
- proto_map->set_is_prototype_map(true);
- Map::SetPrototype(initial_map, proto);
- factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
- JSRegExp::IRREGEXP, factory->empty_string(),
- JSRegExp::Flags(0), 0);
+ { // -- E r r o r
+ Handle<JSFunction> error_fun = InstallFunction(
+ global, "Error", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, error_fun,
+ Context::ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- E v a l E r r o r
+ Handle<JSFunction> eval_error_fun = InstallFunction(
+ global, "EvalError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, eval_error_fun,
+ Context::EVAL_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- R a n g e E r r o r
+ Handle<JSFunction> range_error_fun = InstallFunction(
+ global, "RangeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, range_error_fun,
+ Context::RANGE_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- R e f e r e n c e E r r o r
+ Handle<JSFunction> reference_error_fun = InstallFunction(
+ global, "ReferenceError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, reference_error_fun,
+ Context::REFERENCE_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- S y n t a x E r r o r
+ Handle<JSFunction> syntax_error_fun = InstallFunction(
+ global, "SyntaxError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, syntax_error_fun,
+ Context::SYNTAX_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- T y p e E r r o r
+ Handle<JSFunction> type_error_fun = InstallFunction(
+ global, "TypeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, type_error_fun,
+ Context::TYPE_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- U R I E r r o r
+ Handle<JSFunction> uri_error_fun = InstallFunction(
+ global, "URIError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, uri_error_fun,
+ Context::URI_ERROR_FUNCTION_INDEX);
}
// Initialize the embedder data slot.
@@ -1266,7 +1491,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->SetInstanceClassName(*name);
+ cons->shared()->set_instance_class_name(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
@@ -1278,7 +1503,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
JSFunction::SetInstancePrototype(
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->SetInstanceClassName(*name);
+ cons->shared()->set_instance_class_name(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
@@ -1286,20 +1511,18 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{ // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
- InstallFunction(
- global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithInternalFields,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
- native_context()->set_array_buffer_fun(*array_buffer_fun);
+ InstallArrayBuffer(global, "ArrayBuffer");
+ InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
+ Context::ARRAY_BUFFER_FUN_INDEX);
}
{ // -- T y p e d A r r a y s
-#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- Handle<JSFunction> fun; \
- InstallTypedArray(#Type "Array", TYPE##_ELEMENTS, &fun); \
- native_context()->set_##type##_array_fun(*fun); \
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ Handle<JSFunction> fun; \
+ InstallTypedArray(#Type "Array", TYPE##_ELEMENTS, &fun); \
+ InstallWithIntrinsicDefaultProto(isolate, fun, \
+ Context::TYPE##_ARRAY_FUN_INDEX); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
@@ -1310,21 +1533,26 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
JSDataView::kSizeWithInternalFields,
isolate->initial_object_prototype(),
Builtins::kIllegal);
- native_context()->set_data_view_fun(*data_view_fun);
+ InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
+ Context::DATA_VIEW_FUN_INDEX);
+ data_view_fun->shared()->set_construct_stub(
+ *isolate->builtins()->JSBuiltinsConstructStub());
}
{ // -- M a p
Handle<JSFunction> js_map_fun = InstallFunction(
global, "Map", JS_MAP_TYPE, JSMap::kSize,
isolate->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_js_map_fun(*js_map_fun);
+ InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
+ Context::JS_MAP_FUN_INDEX);
}
{ // -- S e t
Handle<JSFunction> js_set_fun = InstallFunction(
global, "Set", JS_SET_TYPE, JSSet::kSize,
isolate->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_js_set_fun(*js_set_fun);
+ InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
+ Context::JS_SET_FUN_INDEX);
}
{ // -- I t e r a t o r R e s u l t
@@ -1349,25 +1577,63 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
native_context()->set_iterator_result_map(*map);
}
- // -- W e a k M a p
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- // -- W e a k S e t
- InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ { // -- W e a k M a p
+ Handle<JSFunction> js_weak_map_fun = InstallFunction(
+ global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, js_weak_map_fun,
+ Context::JS_WEAK_MAP_FUN_INDEX);
+ }
+
+ { // -- W e a k S e t
+ Handle<JSFunction> js_weak_set_fun = InstallFunction(
+ global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, js_weak_set_fun,
+ Context::JS_WEAK_SET_FUN_INDEX);
+ }
+
+ { // --- B o u n d F u n c t i o n
+ Handle<Map> map =
+ factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize);
+ map->set_is_callable();
+ Map::SetPrototype(map, empty_function);
+
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ Map::EnsureDescriptorSlack(map, 2);
+
+ { // length
+ DataDescriptor d(factory->length_string(), JSBoundFunction::kLengthIndex,
+ roc_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // name
+ DataDescriptor d(factory->name_string(), JSBoundFunction::kNameIndex,
+ roc_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ map->SetInObjectProperties(2);
+ native_context()->set_bound_function_without_constructor_map(*map);
+
+ map = Map::Copy(map, "IsConstructor");
+ map->set_is_constructor();
+ native_context()->set_bound_function_with_constructor_map(*map);
+ }
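  // Layout sketch (illustrative only): bound functions now carry their
  // spec-mandated properties as in-object data fields instead of using a
  // dedicated JSFunction map:
  //
  //   descriptor 0: "length" at JSBoundFunction::kLengthIndex
  //   descriptor 1: "name"   at JSBoundFunction::kNameIndex
  //   (both read-only and DONT_ENUM, per |roc_attribs| above)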
{ // --- sloppy arguments map
// Make sure we can recognize argument objects at runtime.
    // This is done by introducing an anonymous function whose
    // class_name equals 'Arguments'.
Handle<String> arguments_string = factory->Arguments_string();
- Handle<Code> code(isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<Code> code = isolate->builtins()->Illegal();
Handle<JSFunction> function = factory->NewFunctionWithoutPrototype(
arguments_string, code);
function->shared()->set_instance_class_name(*arguments_string);
- Handle<Map> map =
- factory->NewMap(JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize);
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -1426,8 +1692,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
caller->set_setter(*poison);
// Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kStrictArgumentsObjectSize);
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE, Heap::kStrictArgumentsObjectSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 3);
@@ -1466,8 +1732,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{ // --- context extension
// Create a function for the context extension objects.
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<Code> code = isolate->builtins()->Illegal();
Handle<JSFunction> context_extension_fun = factory->NewFunction(
factory->empty_string(), code, JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize);
@@ -1481,9 +1746,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{
// Set up the call-as-function delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsFunction));
+ Handle<Code> code = isolate->builtins()->HandleApiCallAsFunction();
Handle<JSFunction> delegate = factory->NewFunction(
factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
native_context()->set_call_as_function_delegate(*delegate);
@@ -1492,15 +1755,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{
// Set up the call-as-constructor delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsConstructor));
+ Handle<Code> code = isolate->builtins()->HandleApiCallAsConstructor();
Handle<JSFunction> delegate = factory->NewFunction(
factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
-}
+} // NOLINT(readability/fn_size)
void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
@@ -1526,6 +1787,7 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
+ FEATURE_INITIALIZE_GLOBAL(promise_extra, "")
#undef FEATURE_INITIALIZE_GLOBAL
}
@@ -1542,9 +1804,8 @@ bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, utils, extras_utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1556,9 +1817,8 @@ bool Bootstrapper::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<Object> global = isolate->global_object();
Handle<Object> utils = isolate->natives_utils_object();
Handle<Object> args[] = {global, utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1571,9 +1831,8 @@ bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
Handle<Object> binding = isolate->extras_binding_object();
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, binding, extras_utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
@@ -1587,28 +1846,12 @@ bool Bootstrapper::CompileExperimentalExtraBuiltin(Isolate* isolate,
Handle<Object> binding = isolate->extras_binding_object();
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, binding, extras_utils};
- return Bootstrapper::CompileNative(
- isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
- source_code, arraysize(args), args);
-}
-
-
-bool Bootstrapper::CompileCodeStubBuiltin(Isolate* isolate, int index) {
- HandleScope scope(isolate);
- Vector<const char> name = CodeStubNatives::GetScriptName(index);
- Handle<String> source_code =
- isolate->bootstrapper()->SourceLookup<CodeStubNatives>(index);
- Handle<JSObject> global(isolate->global_object());
- Handle<JSObject> exports(isolate->heap()->code_stub_exports_object());
- Handle<Object> args[] = {global, exports};
- bool result =
- CompileNative(isolate, name, global, source_code, arraysize(args), args);
- return result;
+ return Bootstrapper::CompileNative(isolate, name, source_code,
+ arraysize(args), args);
}
bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
- Handle<JSObject> receiver,
Handle<String> source, int argc,
Handle<Object> argv[]) {
SuppressDebug compiling_natives(isolate->debug());
@@ -1633,10 +1876,10 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
DCHECK(context->IsNativeContext());
- Handle<Context> runtime_context(context->runtime_context());
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(function_info,
- runtime_context);
+ context);
+ Handle<Object> receiver = isolate->factory()->undefined_value();
// For non-extension scripts, run script to get the function wrapper.
Handle<Object> wrapper;
@@ -1707,7 +1950,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
const char* holder_expr) {
Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
- Handle<GlobalObject> global(native_context->global_object());
+ Handle<JSGlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
@@ -1735,106 +1978,290 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
}
-template <typename Data>
-Data* SetBuiltinTypedArray(Isolate* isolate, Handle<JSBuiltinsObject> builtins,
- ExternalArrayType type, Data* data,
- size_t num_elements, const char* name,
- const SharedFlag shared = SharedFlag::kNotShared,
- const PretenureFlag pretenure = TENURED) {
- size_t byte_length = num_elements * sizeof(*data);
- Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(shared, pretenure);
- bool is_external = data != nullptr;
- if (!is_external) {
- data = reinterpret_cast<Data*>(
- isolate->array_buffer_allocator()->Allocate(byte_length));
- }
- JSArrayBuffer::Setup(buffer, isolate, is_external, data, byte_length, shared);
-
- Handle<JSTypedArray> typed_array = isolate->factory()->NewJSTypedArray(
- type, buffer, 0, num_elements, pretenure);
- Handle<String> name_string = isolate->factory()->InternalizeUtf8String(name);
- // Reset property cell type before (re)initializing.
- JSBuiltinsObject::InvalidatePropertyCell(builtins, name_string);
- JSObject::SetOwnPropertyIgnoreAttributes(builtins, name_string, typed_array,
- FROZEN)
- .Assert();
- return data;
-}
-
-
-void Genesis::InitializeBuiltinTypedArrays() {
- Handle<JSBuiltinsObject> builtins(native_context()->builtins());
- { // Initially seed the per-context random number generator using the
- // per-isolate random number generator.
- const size_t num_elements = 2;
- const size_t num_bytes = num_elements * sizeof(uint32_t);
- uint32_t* state = SetBuiltinTypedArray<uint32_t>(isolate(), builtins,
- kExternalUint32Array, NULL,
- num_elements, "rngstate");
- do {
- isolate()->random_number_generator()->NextBytes(state, num_bytes);
- } while (state[0] == 0 || state[1] == 0);
- }
-
- { // Initialize trigonometric lookup tables and constants.
- const size_t num_elements = arraysize(fdlibm::MathConstants::constants);
- double* data = const_cast<double*>(fdlibm::MathConstants::constants);
- SetBuiltinTypedArray<double>(isolate(), builtins, kExternalFloat64Array,
- data, num_elements, "kMath");
+void Genesis::ConfigureUtilsObject(ContextType context_type) {
+ switch (context_type) {
+ // We still need the utils object to find debug functions.
+ case DEBUG_CONTEXT:
+ return;
+    // Expose the natives on the global object if a valid name for them is
+    // specified.
+ case FULL_CONTEXT: {
+ // We still need the utils object after deserialization.
+ if (isolate()->serializer_enabled()) return;
+ if (FLAG_expose_natives_as == NULL) break;
+ if (strlen(FLAG_expose_natives_as) == 0) break;
+ HandleScope scope(isolate());
+ Handle<String> natives_key =
+ factory()->InternalizeUtf8String(FLAG_expose_natives_as);
+ uint32_t dummy_index;
+ if (natives_key->AsArrayIndex(&dummy_index)) break;
+ Handle<Object> utils = isolate()->natives_utils_object();
+ Handle<JSObject> global = isolate()->global_object();
+ JSObject::AddProperty(global, natives_key, utils, DONT_ENUM);
+ break;
+ }
+ case THIN_CONTEXT:
+ break;
}
- { // Initialize a result array for rempio2 calculation
- const size_t num_elements = 2;
- double* data =
- SetBuiltinTypedArray<double>(isolate(), builtins, kExternalFloat64Array,
- NULL, num_elements, "rempio2result");
- for (size_t i = 0; i < num_elements; i++) data[i] = 0;
- }
+ // The utils object can be removed for cases that reach this point.
+ native_context()->set_natives_utils_object(heap()->undefined_value());
}
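// Usage sketch (illustrative only): DEBUG_CONTEXT and serializer-enabled
// FULL_CONTEXT isolates return early above and keep the utils object;
// everything else clears the native-context slot, although a shell run
// such as
//
//   d8 --expose_natives_as=natives
//
// keeps the container reachable as the global "natives" property.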
void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> container) {
+ Factory* factory = isolate->factory();
HandleScope scope(isolate);
-#define EXPORT_PRIVATE_SYMBOL(NAME) \
- Handle<String> NAME##_name = \
- isolate->factory()->NewStringFromAsciiChecked(#NAME); \
- JSObject::AddProperty(container, NAME##_name, isolate->factory()->NAME(), \
- NONE);
+ Handle<Context> native_context = isolate->native_context();
+#define EXPORT_PRIVATE_SYMBOL(NAME) \
+ Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
+ JSObject::AddProperty(container, NAME##_name, factory->NAME(), NONE);
PRIVATE_SYMBOL_LIST(EXPORT_PRIVATE_SYMBOL)
#undef EXPORT_PRIVATE_SYMBOL
-#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
- Handle<String> NAME##_name = \
- isolate->factory()->NewStringFromAsciiChecked(#NAME); \
- JSObject::AddProperty(container, NAME##_name, isolate->factory()->NAME(), \
- NONE);
+#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
+ Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
+ JSObject::AddProperty(container, NAME##_name, factory->NAME(), NONE);
PUBLIC_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
+ WELL_KNOWN_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
#undef EXPORT_PUBLIC_SYMBOL
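// Expansion sketch (illustrative only) for a hypothetical list entry
// NAME == some_symbol:
//
//   Handle<String> some_symbol_name =
//       factory->NewStringFromAsciiChecked("some_symbol");
//   JSObject::AddProperty(container, some_symbol_name,
//                         factory->some_symbol(), NONE);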
{
Handle<JSFunction> apply = InstallFunction(
container, "reflect_apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kReflectApply);
- apply->shared()->set_internal_formal_parameter_count(3);
+ apply->shared()->DontAdaptArguments();
apply->shared()->set_length(3);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
- apply->shared()->set_feedback_vector(*feedback_vector);
- isolate->native_context()->set_reflect_apply(*apply);
+ native_context->set_reflect_apply(*apply);
}
{
Handle<JSFunction> construct = InstallFunction(
container, "reflect_construct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
- construct->shared()->set_internal_formal_parameter_count(3);
+ construct->shared()->DontAdaptArguments();
construct->shared()->set_length(2);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
- construct->shared()->set_feedback_vector(*feedback_vector);
- isolate->native_context()->set_reflect_construct(*construct);
+ native_context->set_reflect_construct(*construct);
+ }
+
+ {
+ Handle<JSFunction> to_string = InstallFunction(
+ container, "object_to_string", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kObjectProtoToString);
+ to_string->shared()->DontAdaptArguments();
+ to_string->shared()->set_length(0);
+ native_context->set_object_to_string(*to_string);
+ }
+
+ Handle<JSObject> iterator_prototype;
+
+ {
+ PrototypeIterator iter(native_context->generator_object_prototype_map());
+ iter.Advance(); // Advance to the prototype of generator_object_prototype.
+ iterator_prototype = Handle<JSObject>(iter.GetCurrent<JSObject>());
+
+ JSObject::AddProperty(container,
+ factory->InternalizeUtf8String("IteratorPrototype"),
+ iterator_prototype, NONE);
+ }
+
+ {
+ PrototypeIterator iter(native_context->sloppy_generator_function_map());
+ Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>());
+
+ JSObject::AddProperty(
+ container, factory->InternalizeUtf8String("GeneratorFunctionPrototype"),
+ generator_function_prototype, NONE);
+
+ static const bool kUseStrictFunctionMap = true;
+ Handle<JSFunction> generator_function_function = InstallFunction(
+ container, "GeneratorFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
+ generator_function_prototype, Builtins::kGeneratorFunctionConstructor,
+ kUseStrictFunctionMap);
+ generator_function_function->set_prototype_or_initial_map(
+ native_context->sloppy_generator_function_map());
+ generator_function_function->shared()->DontAdaptArguments();
+ generator_function_function->shared()->set_construct_stub(
+ *isolate->builtins()->GeneratorFunctionConstructor());
+ generator_function_function->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(
+ isolate, generator_function_function,
+ Context::GENERATOR_FUNCTION_FUNCTION_INDEX);
+
+ native_context->sloppy_generator_function_map()->SetConstructor(
+ *generator_function_function);
+ native_context->strict_generator_function_map()->SetConstructor(
+ *generator_function_function);
+ native_context->strong_generator_function_map()->SetConstructor(
+ *generator_function_function);
+ }
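
The two PrototypeIterator walks recover %IteratorPrototype% and the generator-function prototype from maps set up earlier in CreateIteratorMaps, and the constructor wired here is what user code reaches through a generator's prototype chain. A plain-JS sketch of the resulting hierarchy:

  function* g() {}
  const GeneratorFunction = Object.getPrototypeOf(g).constructor;
  GeneratorFunction.length;   // 1, matching set_length(1) above
  // Two hops from a generator object: g.prototype, then %GeneratorPrototype%.
  const genProto = Object.getPrototypeOf(Object.getPrototypeOf(g()));
  const iterProto = Object.getPrototypeOf(genProto);   // %IteratorPrototype%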
+
+ { // -- S e t I t e r a t o r
+ Handle<JSObject> set_iterator_prototype =
+ isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
+ SetObjectPrototype(set_iterator_prototype, iterator_prototype);
+ Handle<JSFunction> set_iterator_function = InstallFunction(
+ container, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
+ set_iterator_prototype, Builtins::kIllegal);
+ native_context->set_set_iterator_map(set_iterator_function->initial_map());
+ }
+
+ { // -- M a p I t e r a t o r
+ Handle<JSObject> map_iterator_prototype =
+ isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
+ SetObjectPrototype(map_iterator_prototype, iterator_prototype);
+ Handle<JSFunction> map_iterator_function = InstallFunction(
+ container, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
+ map_iterator_prototype, Builtins::kIllegal);
+ native_context->set_map_iterator_map(map_iterator_function->initial_map());
+ }
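
Both collection iterator prototypes are chained onto the iterator_prototype recovered above, so every built-in iterator shares one %IteratorPrototype%. Expected to hold in this revision given the SetObjectPrototype calls (plain-JS sketch):

  const setIterProto = Object.getPrototypeOf(new Set().values());
  const mapIterProto = Object.getPrototypeOf(new Map().entries());
  Object.getPrototypeOf(setIterProto) ===
      Object.getPrototypeOf(mapIterProto);   // true: both reach %IteratorPrototype%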
+
+ { // -- S c r i p t
+ // Builtin functions for Script.
+ Handle<JSFunction> script_fun = InstallFunction(
+ container, "Script", JS_VALUE_TYPE, JSValue::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
+ native_context->set_script_function(*script_fun);
+
+ Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
+ Map::EnsureDescriptorSlack(script_map, 15);
+
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ Handle<AccessorInfo> script_column =
+ Accessors::ScriptColumnOffsetInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_column->name())), script_column,
+ attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_id = Accessors::ScriptIdInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
+ script_id, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+
+ Handle<AccessorInfo> script_name =
+ Accessors::ScriptNameInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_line =
+ Accessors::ScriptLineOffsetInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_source =
+ Accessors::ScriptSourceInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source->name())), script_source,
+ attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_type =
+ Accessors::ScriptTypeInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_compilation_type =
+ Accessors::ScriptCompilationTypeInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_compilation_type->name())),
+ script_compilation_type, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_line_ends =
+ Accessors::ScriptLineEndsInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
+ attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_context_data =
+ Accessors::ScriptContextDataInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_context_data->name())),
+ script_context_data, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_eval_from_script =
+ Accessors::ScriptEvalFromScriptInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_eval_from_script->name())),
+ script_eval_from_script, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_eval_from_script_position =
+ Accessors::ScriptEvalFromScriptPositionInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_eval_from_script_position->name())),
+ script_eval_from_script_position, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_eval_from_function_name =
+ Accessors::ScriptEvalFromFunctionNameInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_eval_from_function_name->name())),
+ script_eval_from_function_name, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_source_url =
+ Accessors::ScriptSourceUrlInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source_url->name())),
+ script_source_url, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_source_mapping_url =
+ Accessors::ScriptSourceMappingUrlInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_source_mapping_url->name())),
+ script_source_mapping_url, attribs);
+ script_map->AppendDescriptor(&d);
+ }
+
+ Handle<AccessorInfo> script_is_embedder_debug_script =
+ Accessors::ScriptIsEmbedderDebugScriptInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(
+ Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
+ script_is_embedder_debug_script, attribs);
+ script_map->AppendDescriptor(&d);
+ }
}
}
@@ -1851,9 +2278,9 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
isolate->factory()->ToBoolean(FLAG), NONE); \
}
- INITIALIZE_FLAG(FLAG_harmony_regexps)
- INITIALIZE_FLAG(FLAG_harmony_unicode_regexps)
INITIALIZE_FLAG(FLAG_harmony_tostring)
+ INITIALIZE_FLAG(FLAG_harmony_tolength)
+ INITIALIZE_FLAG(FLAG_harmony_species)
#undef INITIALIZE_FLAG
}
@@ -1863,58 +2290,119 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_calls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_bind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_arrays)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_new_target)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_concat_spreadable)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_completion)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tolength)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
-void Genesis::InitializeGlobal_harmony_tolength() {
- Handle<JSObject> builtins(native_context()->builtins());
- Handle<Object> flag(factory()->ToBoolean(FLAG_harmony_tolength));
- Runtime::SetObjectProperty(isolate(), builtins,
- factory()->harmony_tolength_string(), flag,
- STRICT).Assert();
+void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
+ const char* name, Handle<Symbol> value) {
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context->global_object()));
+ Handle<String> symbol_string = factory->InternalizeUtf8String("Symbol");
+ Handle<JSObject> symbol = Handle<JSObject>::cast(
+ JSObject::GetProperty(global, symbol_string).ToHandleChecked());
+ Handle<String> name_string = factory->InternalizeUtf8String(name);
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ JSObject::AddProperty(symbol, name_string, value, attributes);
+}
+
+
+void Genesis::InitializeGlobal_harmony_tostring() {
+ if (!FLAG_harmony_tostring) return;
+ InstallPublicSymbol(factory(), native_context(), "toStringTag",
+ factory()->to_string_tag_symbol());
+}
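
InstallPublicSymbol pins a well-known symbol onto the global Symbol constructor as read-only, non-enumerable and non-configurable, and with --harmony_tostring the tag also feeds Object.prototype.toString. Plain-JS sketch:

  const d = Object.getOwnPropertyDescriptor(Symbol, "toStringTag");
  d.writable || d.enumerable || d.configurable;   // false: READ_ONLY|DONT_ENUM|DONT_DELETE
  const obj = { [Symbol.toStringTag]: "Blob" };
  Object.prototype.toString.call(obj);            // "[object Blob]"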
+
+
+void Genesis::InitializeGlobal_harmony_concat_spreadable() {
+ if (!FLAG_harmony_concat_spreadable) return;
+ InstallPublicSymbol(factory(), native_context(), "isConcatSpreadable",
+ factory()->is_concat_spreadable_symbol());
+}
+
+
+void Genesis::InitializeGlobal_harmony_regexp_subclass() {
+ if (!FLAG_harmony_regexp_subclass) return;
+ InstallPublicSymbol(factory(), native_context(), "match",
+ factory()->match_symbol());
+ InstallPublicSymbol(factory(), native_context(), "replace",
+ factory()->replace_symbol());
+ InstallPublicSymbol(factory(), native_context(), "search",
+ factory()->search_symbol());
+ InstallPublicSymbol(factory(), native_context(), "split",
+ factory()->split_symbol());
}
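
At this revision the flag only installs the four regexp well-known symbols; the String and RegExp methods do not dispatch through them yet. Plain-JS sketch of what --harmony_regexp_subclass makes visible:

  [Symbol.match, Symbol.replace, Symbol.search, Symbol.split]
      .every(function(s) { return typeof s === "symbol"; });   // true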
void Genesis::InitializeGlobal_harmony_reflect() {
+ Factory* factory = isolate()->factory();
+
+ // We currently use some of the Reflect functions internally, even when
+ // the --harmony-reflect flag is not given.
+
+ Handle<JSFunction> define_property =
+ SimpleCreateFunction(isolate(), factory->defineProperty_string(),
+ Builtins::kReflectDefineProperty, 3, true);
+ native_context()->set_reflect_define_property(*define_property);
+
+ Handle<JSFunction> delete_property =
+ SimpleCreateFunction(isolate(), factory->deleteProperty_string(),
+ Builtins::kReflectDeleteProperty, 2, true);
+ native_context()->set_reflect_delete_property(*delete_property);
+
if (!FLAG_harmony_reflect) return;
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context()->global_object()));
- Handle<String> reflect_string =
- factory()->NewStringFromStaticChars("Reflect");
- Handle<Object> reflect =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<String> reflect_string = factory->NewStringFromStaticChars("Reflect");
+ Handle<JSObject> reflect =
+ factory->NewJSObject(isolate()->object_function(), TENURED);
JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
-}
+ InstallFunction(reflect, define_property, factory->defineProperty_string());
+ InstallFunction(reflect, delete_property, factory->deleteProperty_string());
+
+ SimpleInstallFunction(reflect, factory->get_string(),
+ Builtins::kReflectGet, 2, false);
+ SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
+ Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
+ SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
+ Builtins::kReflectGetPrototypeOf, 1, true);
+ SimpleInstallFunction(reflect, factory->has_string(),
+ Builtins::kReflectHas, 2, true);
+ SimpleInstallFunction(reflect, factory->isExtensible_string(),
+ Builtins::kReflectIsExtensible, 1, true);
+ SimpleInstallFunction(reflect, factory->ownKeys_string(),
+ Builtins::kReflectOwnKeys, 1, true);
+ SimpleInstallFunction(reflect, factory->preventExtensions_string(),
+ Builtins::kReflectPreventExtensions, 1, true);
+ SimpleInstallFunction(reflect, factory->set_string(),
+ Builtins::kReflectSet, 3, false);
+ SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
+ Builtins::kReflectSetPrototypeOf, 2, true);
+}
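
With the flag on, the whole ES2015 Reflect namespace is assembled from C++ builtins, reusing the two always-created internal functions for defineProperty and deleteProperty. Plain-JS sketch of the installed surface (assumes --harmony_reflect):

  const o = {};
  Reflect.defineProperty(o, "x", { value: 1 });    // true: reports instead of throwing
  Reflect.has(o, "x");                             // true
  Reflect.ownKeys(o);                              // ["x"]
  Reflect.deleteProperty(o, "x");                  // true
  Reflect.getPrototypeOf(o) === Object.prototype;  // true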
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context()->global_object()));
-
- Handle<JSFunction> shared_array_buffer_fun = InstallFunction(
- global, "SharedArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithInternalFields,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSGlobalObject> global(native_context()->global_object());
+ Handle<JSFunction> shared_array_buffer_fun =
+ InstallArrayBuffer(global, "SharedArrayBuffer");
native_context()->set_shared_array_buffer_fun(*shared_array_buffer_fun);
}
@@ -1932,24 +2420,118 @@ void Genesis::InitializeGlobal_harmony_simd() {
JSFunction::SetInstancePrototype(
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->SetInstanceClassName(*name);
+ cons->shared()->set_instance_class_name(*name);
Handle<JSObject> simd_object = factory->NewJSObject(cons, TENURED);
DCHECK(simd_object->IsJSObject());
JSObject::AddProperty(global, name, simd_object, DONT_ENUM);
// Install SIMD type functions. Set the instance class names since
-// InstallFunction only does this when we install on the GlobalObject.
+// InstallFunction only does this when we install on the JSGlobalObject.
#define SIMD128_INSTALL_FUNCTION(TYPE, Type, type, lane_count, lane_type) \
Handle<JSFunction> type##_function = InstallFunction( \
simd_object, #Type, JS_VALUE_TYPE, JSValue::kSize, \
isolate->initial_object_prototype(), Builtins::kIllegal); \
native_context()->set_##type##_function(*type##_function); \
- type##_function->SetInstanceClassName(*factory->Type##_string());
+ type##_function->shared()->set_instance_class_name(*factory->Type##_string());
SIMD128_TYPES(SIMD128_INSTALL_FUNCTION)
#undef SIMD128_INSTALL_FUNCTION
}
+void Genesis::InstallJSProxyMaps() {
+ // Allocate the different maps for all Proxy types.
+  // In addition to the default proxy, we need maps indicating callable and
+  // constructable proxies.
+
+ Handle<Map> proxy_function_map =
+ Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
+ proxy_function_map->set_is_constructor();
+ native_context()->set_proxy_function_map(*proxy_function_map);
+
+ Handle<Map> proxy_map =
+ factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
+ proxy_map->set_dictionary_map(true);
+ native_context()->set_proxy_map(*proxy_map);
+
+ Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
+ proxy_callable_map->set_is_callable();
+ native_context()->set_proxy_callable_map(*proxy_callable_map);
+ proxy_callable_map->SetConstructor(native_context()->function_function());
+
+ Handle<Map> proxy_constructor_map =
+ Map::Copy(proxy_callable_map, "constructor Proxy");
+ proxy_constructor_map->set_is_constructor();
+ native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+}
+
+
+void Genesis::InitializeGlobal_harmony_proxies() {
+ if (!FLAG_harmony_proxies) return;
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context()->global_object()));
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ InstallJSProxyMaps();
+
+ // Create the Proxy object.
+ Handle<String> name = factory->Proxy_string();
+ Handle<Code> code(isolate->builtins()->ProxyConstructor());
+
+ Handle<JSFunction> proxy_function = factory->NewFunction(
+ isolate->proxy_function_map(), factory->Proxy_string(), code);
+
+ JSFunction::SetInitialMap(proxy_function,
+ Handle<Map>(native_context()->proxy_map(), isolate),
+ factory->null_value());
+
+ proxy_function->shared()->set_construct_stub(
+ *isolate->builtins()->ProxyConstructor_ConstructStub());
+ proxy_function->shared()->set_internal_formal_parameter_count(2);
+ proxy_function->shared()->set_length(2);
+
+ native_context()->set_proxy_function(*proxy_function);
+ InstallFunction(global, name, proxy_function, factory->Object_string());
+}
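
The map trio above lets proxy creation pick callability and constructability from the target. Observable behavior under --harmony_proxies (plain-JS sketch):

  const pf = new Proxy(function f() {}, {});
  typeof pf;      // "function": gets the callable proxy map
  const po = new Proxy({}, {});
  typeof po;      // "object": gets the plain proxy map
  Proxy.length;   // 2, matching set_length(2) above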
+
+
+Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
+ const char* name) {
+  // Set up the {prototype} with the given {name} for @@toStringTag.
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::AddProperty(prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked(name),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Allocate the constructor with the given {prototype}.
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(target, name, JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields, prototype,
+ Builtins::kArrayBufferConstructor);
+ array_buffer_fun->shared()->set_construct_stub(
+ *isolate()->builtins()->ArrayBufferConstructor_ConstructStub());
+ array_buffer_fun->shared()->DontAdaptArguments();
+ array_buffer_fun->shared()->set_length(1);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory()->constructor_string(),
+ array_buffer_fun, DONT_ENUM);
+
+ SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
+ Builtins::kArrayBufferIsView, 1, true);
+
+ return array_buffer_fun;
+}
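
InstallArrayBuffer stamps @@toStringTag with the constructor's own name, restores the prototype's constructor back-reference, and installs isView, so SharedArrayBuffer above picks all of this up for free. Plain-JS sketch (assumes --harmony_sharedarraybuffer, plus --harmony_tostring for the tag to be honored):

  const sab = new SharedArrayBuffer(8);
  Object.prototype.toString.call(sab);           // "[object SharedArrayBuffer]"
  sab.constructor === SharedArrayBuffer;         // true, via the "constructor" property
  SharedArrayBuffer.isView(new Uint8Array(4));   // true: same test as ArrayBuffer.isView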
+
+
+void Genesis::InitializeGlobal_harmony_species() {
+ if (!FLAG_harmony_species) return;
+ InstallPublicSymbol(factory(), native_context(), "species",
+ factory()->species_symbol());
+}
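
Only the symbol itself is installed here; the default @@species getters come from the harmony-species.js native registered in InstallExperimentalNatives further down. Plain-JS sketch under --harmony_species:

  typeof Symbol.species;             // "symbol"
  Array[Symbol.species] === Array;   // true once harmony-species.js has run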
+
+
Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind) {
@@ -1996,51 +2578,6 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
bool Genesis::InstallNatives(ContextType context_type) {
HandleScope scope(isolate());
- // Create a function for the builtins object. Allocate space for the
- // JavaScript builtins, a reference to the builtins object
- // (itself) and a reference to the native_context directly in the object.
- Handle<Code> code = Handle<Code>(
- isolate()->builtins()->builtin(Builtins::kIllegal));
- Handle<JSFunction> builtins_fun = factory()->NewFunction(
- factory()->empty_string(), code, JS_BUILTINS_OBJECT_TYPE,
- JSBuiltinsObject::kSize);
-
- Handle<String> name =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
- builtins_fun->shared()->set_instance_class_name(*name);
- builtins_fun->initial_map()->set_dictionary_map(true);
- builtins_fun->initial_map()->set_prototype(heap()->null_value());
-
- // Allocate the builtins object.
- Handle<JSBuiltinsObject> builtins =
- Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
- builtins->set_builtins(*builtins);
- builtins->set_native_context(*native_context());
- builtins->set_global_proxy(native_context()->global_proxy());
-
-
- // Set up the 'builtin' property, which refers to the js builtins object.
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> builtins_string =
- factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
- JSObject::AddProperty(builtins, builtins_string, builtins, attributes);
-
- // Set up the reference from the global object to the builtins object.
- JSGlobalObject::cast(native_context()->global_object())->
- set_builtins(*builtins);
-
- // Create a bridge function that has context in the native context.
- Handle<JSFunction> bridge = factory()->NewFunction(factory()->empty_string());
- DCHECK(bridge->context() == *isolate()->native_context());
-
- // Allocate the builtins context.
- Handle<Context> context =
- factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- context->set_global_object(*builtins); // override builtins global object
-
- native_context()->set_runtime_context(*context);
-
// Set up the utils object as a shared container between native scripts.
Handle<JSObject> utils = factory()->NewJSObject(isolate()->object_function());
JSObject::NormalizeProperties(utils, CLEAR_INOBJECT_PROPERTIES, 16,
@@ -2065,166 +2602,13 @@ bool Genesis::InstallNatives(ContextType context_type) {
// A thin context is ready at this point.
if (context_type == THIN_CONTEXT) return true;
- if (FLAG_expose_natives_as != NULL) {
- Handle<String> utils_key = factory()->NewStringFromAsciiChecked("utils");
- JSObject::AddProperty(builtins, utils_key, utils, NONE);
- }
-
- { // -- S c r i p t
- // Builtin functions for Script.
- Handle<JSFunction> script_fun = InstallFunction(
- builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
- native_context()->set_script_function(*script_fun);
-
- Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
- Map::EnsureDescriptorSlack(script_map, 15);
-
- PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-
- Handle<AccessorInfo> script_column =
- Accessors::ScriptColumnOffsetInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_column->name())), script_column,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_id =
- Accessors::ScriptIdInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
- script_id, attribs);
- script_map->AppendDescriptor(&d);
- }
-
-
- Handle<AccessorInfo> script_name =
- Accessors::ScriptNameInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_line =
- Accessors::ScriptLineOffsetInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_source =
- Accessors::ScriptSourceInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_source->name())), script_source,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_type =
- Accessors::ScriptTypeInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_compilation_type =
- Accessors::ScriptCompilationTypeInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_compilation_type->name())),
- script_compilation_type, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_line_ends =
- Accessors::ScriptLineEndsInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_context_data =
- Accessors::ScriptContextDataInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_context_data->name())),
- script_context_data, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_eval_from_script =
- Accessors::ScriptEvalFromScriptInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_eval_from_script->name())),
- script_eval_from_script, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_eval_from_script_position =
- Accessors::ScriptEvalFromScriptPositionInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_eval_from_script_position->name())),
- script_eval_from_script_position, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_eval_from_function_name =
- Accessors::ScriptEvalFromFunctionNameInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_eval_from_function_name->name())),
- script_eval_from_function_name, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_source_url =
- Accessors::ScriptSourceUrlInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_source_url->name())),
- script_source_url, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_source_mapping_url =
- Accessors::ScriptSourceMappingUrlInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_source_mapping_url->name())),
- script_source_mapping_url, attribs);
- script_map->AppendDescriptor(&d);
- }
-
- Handle<AccessorInfo> script_is_embedder_debug_script =
- Accessors::ScriptIsEmbedderDebugScriptInfo(isolate(), attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
- script_is_embedder_debug_script, attribs);
- script_map->AppendDescriptor(&d);
- }
- }
{
// Builtin function for OpaqueReference -- a JSValue-based object
// that keeps its field isolated from JavaScript code. It may store
// objects that JavaScript code may not access.
- Handle<JSFunction> opaque_reference_fun = InstallFunction(
- builtins, "OpaqueReference", JS_VALUE_TYPE, JSValue::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSFunction> opaque_reference_fun = factory()->NewFunction(
+ factory()->empty_string(), isolate()->builtins()->Illegal(),
+ isolate()->initial_object_prototype(), JS_VALUE_TYPE, JSValue::kSize);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Accessors::FunctionSetPrototype(opaque_reference_fun, prototype).Assert();
@@ -2246,96 +2630,6 @@ bool Genesis::InstallNatives(ContextType context_type) {
InstallInternalArray(utils, "InternalPackedArray", FAST_ELEMENTS);
}
- { // -- S e t I t e r a t o r
- Handle<JSFunction> set_iterator_function = InstallFunction(
- builtins, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_set_iterator_map(
- set_iterator_function->initial_map());
- }
-
- { // -- M a p I t e r a t o r
- Handle<JSFunction> map_iterator_function = InstallFunction(
- builtins, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_map_iterator_map(
- map_iterator_function->initial_map());
- }
-
- {
- // Create generator meta-objects and install them on the builtins object.
- Handle<JSObject> builtins(native_context()->builtins());
- Handle<JSObject> iterator_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSObject> generator_object_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSObject> generator_function_prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetObjectPrototype(generator_object_prototype, iterator_prototype);
- JSObject::AddProperty(
- builtins, factory()->InternalizeUtf8String("$iteratorPrototype"),
- iterator_prototype,
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
- JSObject::AddProperty(
- builtins,
- factory()->InternalizeUtf8String("GeneratorFunctionPrototype"),
- generator_function_prototype,
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
-
- JSObject::AddProperty(
- generator_function_prototype,
- factory()->InternalizeUtf8String("prototype"),
- generator_object_prototype,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- static const bool kUseStrictFunctionMap = true;
- Handle<JSFunction> generator_function_function =
- InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE,
- JSFunction::kSize, generator_function_prototype,
- Builtins::kIllegal, kUseStrictFunctionMap);
- generator_function_function->initial_map()->set_is_callable();
-
- // Create maps for generator functions and their prototypes. Store those
- // maps in the native context. The "prototype" property descriptor is
- // writable, non-enumerable, and non-configurable (as per ES6 draft
- // 04-14-15, section 25.2.4.3).
- Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
- // Generator functions do not have "caller" or "arguments" accessors.
- Handle<Map> sloppy_generator_function_map =
- Map::Copy(strict_function_map, "SloppyGeneratorFunction");
- Map::SetPrototype(sloppy_generator_function_map,
- generator_function_prototype);
- native_context()->set_sloppy_generator_function_map(
- *sloppy_generator_function_map);
-
- Handle<Map> strict_generator_function_map =
- Map::Copy(strict_function_map, "StrictGeneratorFunction");
- Map::SetPrototype(strict_generator_function_map,
- generator_function_prototype);
- native_context()->set_strict_generator_function_map(
- *strict_generator_function_map);
-
- Handle<Map> strong_function_map(native_context()->strong_function_map());
- Handle<Map> strong_generator_function_map =
- Map::Copy(strong_function_map, "StrongGeneratorFunction");
- Map::SetPrototype(strong_generator_function_map,
- generator_function_prototype);
- native_context()->set_strong_generator_function_map(
- *strong_generator_function_map);
-
- Handle<JSFunction> object_function(native_context()->object_function());
- Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
- Map::SetPrototype(generator_object_prototype_map,
- generator_object_prototype);
- native_context()->set_generator_object_prototype_map(
- *generator_object_prototype_map);
- }
-
- if (FLAG_disable_native_files) {
- PrintF("Warning: Running without installed natives!\n");
- return true;
- }
-
// Run the rest of the native scripts.
while (builtin_index < Natives::GetBuiltinsCount()) {
if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
@@ -2364,27 +2658,12 @@ bool Genesis::InstallNatives(ContextType context_type) {
native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
- // Install Date.prototype[@@toPrimitive].
+ // Install Global.eval.
{
- Handle<String> key = factory()->Date_string();
- Handle<JSFunction> date = Handle<JSFunction>::cast(
- Object::GetProperty(handle(native_context()->global_object()), key)
- .ToHandleChecked());
- Handle<JSObject> proto =
- Handle<JSObject>(JSObject::cast(date->instance_prototype()));
-
- // Install the @@toPrimitive function.
- Handle<JSFunction> to_primitive =
- InstallFunction(proto, factory()->to_primitive_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, MaybeHandle<JSObject>(),
- Builtins::kDateToPrimitive,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- // Set the expected parameters for @@toPrimitive to 1; required by builtin.
- to_primitive->shared()->set_internal_formal_parameter_count(1);
-
- // Set the length for the function to satisfy ECMA-262.
- to_primitive->shared()->set_length(1);
+ Handle<JSFunction> eval = SimpleInstallFunction(
+ handle(native_context()->global_object()), factory()->eval_string(),
+ Builtins::kGlobalEval, 1, false);
+ native_context()->set_global_eval_fun(*eval);
}
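
The function stored in global_eval_fun is what indirect eval reaches; unlike direct eval it runs in global scope. Plain-JS sketch:

  function f() {
    var local = 1;
    return [eval("typeof local"), (0, eval)("typeof local")];
  }
  f();   // ["number", "undefined"]: only direct eval sees f's locals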
// Install Array.prototype.concat
@@ -2421,38 +2700,19 @@ bool Genesis::InstallNatives(ContextType context_type) {
// Set the lengths for the functions to satisfy ECMA-262.
concat->shared()->set_length(1);
}
- // Install Function.prototype.call and apply.
- {
- Handle<String> key = factory()->Function_string();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(Object::GetProperty(
- handle(native_context()->global_object()), key).ToHandleChecked());
- Handle<JSObject> proto =
- Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-
- // Install the call and the apply functions.
- Handle<JSFunction> call =
- InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kFunctionCall);
- Handle<JSFunction> apply =
- InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kFunctionApply);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
- apply->shared()->set_feedback_vector(*feedback_vector);
-
- // Make sure that Function.prototype.call appears to be compiled.
- // The code will never be called, but inline caching for call will
- // only work if it appears to be compiled.
- call->shared()->DontAdaptArguments();
- DCHECK(call->is_compiled());
-
- // Set the expected parameters for apply to 2; required by builtin.
- apply->shared()->set_internal_formal_parameter_count(2);
- // Set the lengths for the functions to satisfy ECMA-262.
- call->shared()->set_length(1);
- apply->shared()->set_length(2);
+ // Set up the Promise constructor.
+ {
+ Handle<String> key = factory()->Promise_string();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ Object::GetProperty(handle(native_context()->global_object()), key)
+ .ToHandleChecked());
+ JSFunction::EnsureHasInitialMap(function);
+ function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
+ function->shared()->set_construct_stub(
+ *isolate()->builtins()->JSBuiltinsConstructStub());
+ InstallWithIntrinsicDefaultProto(isolate(), function,
+ Context::PROMISE_FUNCTION_INDEX);
}
InstallBuiltinFunctionIds();
@@ -2547,48 +2807,43 @@ bool Genesis::InstallNatives(ContextType context_type) {
}
}
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- builtins->ObjectVerify();
- }
-#endif
-
return true;
}
bool Genesis::InstallExperimentalNatives() {
- static const char* harmony_array_includes_natives[] = {
- "native harmony-array-includes.js", nullptr};
static const char* harmony_proxies_natives[] = {"native proxy.js", nullptr};
static const char* harmony_modules_natives[] = {nullptr};
static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
nullptr};
- static const char* harmony_arrow_functions_natives[] = {nullptr};
- static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
- nullptr};
+ static const char* harmony_tostring_natives[] = {nullptr};
static const char* harmony_sloppy_natives[] = {nullptr};
static const char* harmony_sloppy_function_natives[] = {nullptr};
static const char* harmony_sloppy_let_natives[] = {nullptr};
- static const char* harmony_unicode_regexps_natives[] = {nullptr};
- static const char* harmony_rest_parameters_natives[] = {nullptr};
+ static const char* harmony_species_natives[] = {"native harmony-species.js",
+ nullptr};
+ static const char* harmony_unicode_regexps_natives[] = {
+ "native harmony-unicode-regexps.js", nullptr};
static const char* harmony_default_parameters_natives[] = {nullptr};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
nullptr};
- static const char* harmony_spread_calls_natives[] = {
- "native harmony-spread.js", nullptr};
- static const char* harmony_destructuring_natives[] = {nullptr};
+ static const char* harmony_destructuring_bind_natives[] = {nullptr};
+ static const char* harmony_destructuring_assignment_natives[] = {nullptr};
static const char* harmony_object_observe_natives[] = {
"native harmony-object-observe.js", nullptr};
- static const char* harmony_spread_arrays_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
- static const char* harmony_new_target_natives[] = {nullptr};
- static const char* harmony_concat_spreadable_natives[] = {
- "native harmony-concat-spreadable.js", nullptr};
+ static const char* harmony_concat_spreadable_natives[] = {nullptr};
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_tolength_natives[] = {nullptr};
+ static const char* harmony_completion_natives[] = {nullptr};
+ static const char* harmony_do_expressions_natives[] = {nullptr};
+ static const char* harmony_regexp_subclass_natives[] = {nullptr};
+ static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
+ static const char* harmony_function_name_natives[] = {nullptr};
+ static const char* promise_extra_natives[] = {"native promise-extra.js",
+ nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2607,6 +2862,7 @@ bool Genesis::InstallExperimentalNatives() {
HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
+ INSTALL_EXPERIMENTAL_NATIVES(promise_extra, "");
#undef INSTALL_EXPERIMENTAL_NATIVES
}
@@ -2652,16 +2908,6 @@ bool Genesis::InstallDebuggerNatives() {
}
-bool Bootstrapper::InstallCodeStubNatives(Isolate* isolate) {
- for (int i = CodeStubNatives::GetDebuggerCount();
- i < CodeStubNatives::GetBuiltinsCount(); i++) {
- if (!CompileCodeStubBuiltin(isolate, i)) return false;
- }
-
- return true;
-}
-
-
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
@@ -2746,23 +2992,12 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context->global_object()));
- Handle<JSObject> Error = Handle<JSObject>::cast(
- Object::GetProperty(isolate, global, "Error").ToHandleChecked());
+ Handle<JSObject> Error = isolate->error_function();
Handle<String> name =
factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("stackTraceLimit"));
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
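
Error now comes straight off the native context instead of a global property lookup; the installed limit behaves as before. Plain-JS sketch:

  Error.stackTraceLimit;        // 10, the FLAG_stack_trace_limit default
  Error.stackTraceLimit = 2;    // new Error().stack now captures two frames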
- // Expose the natives in global if a name for it is specified.
- if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives_key =
- factory->InternalizeUtf8String(FLAG_expose_natives_as);
- uint32_t dummy_index;
- if (natives_key->AsArrayIndex(&dummy_index)) return true;
- Handle<JSBuiltinsObject> natives(global->builtins());
- JSObject::AddProperty(global, natives_key, natives, DONT_ENUM);
- }
-
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
@@ -2782,9 +3017,9 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
}
-#if defined(V8_WASM)
- WasmJs::Install(isolate, global);
-#endif
+ if (FLAG_expose_wasm) {
+ WasmJs::Install(isolate, global);
+ }
return true;
}
@@ -3023,7 +3258,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
}
- } else if (from->IsGlobalObject()) {
+ } else if (from->IsJSGlobalObject()) {
Handle<GlobalDictionary> properties =
Handle<GlobalDictionary>(from->global_dictionary());
int capacity = properties->Capacity();
@@ -3169,10 +3404,8 @@ Genesis::Genesis(Isolate* isolate,
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
// Also create a context from scratch to expose natives, if required by flag.
- Handle<FixedArray> outdated_contexts;
if (!isolate->initialized_from_snapshot() ||
- !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
- &outdated_contexts)
+ !Snapshot::NewContextFromSnapshot(isolate, global_proxy)
.ToHandle(&native_context_)) {
native_context_ = Handle<Context>();
}
@@ -3190,14 +3423,11 @@ Genesis::Genesis(Isolate* isolate,
Map::TraceAllTransitions(object_fun->initial_map());
}
#endif
- Handle<GlobalObject> global_object =
+ Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
- HookUpGlobalObject(global_object, outdated_contexts);
- native_context()->builtins()->set_global_proxy(
- native_context()->global_proxy());
- HookUpGlobalThisBinding(outdated_contexts);
+ HookUpGlobalObject(global_object);
if (!ConfigureGlobalObjects(global_proxy_template)) return;
} else {
@@ -3206,7 +3436,8 @@ Genesis::Genesis(Isolate* isolate,
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
CreateStrongModeFunctionMaps(empty_function);
- Handle<GlobalObject> global_object =
+ CreateIteratorMaps();
+ Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
InitializeGlobal(global_object, empty_function, context_type);
@@ -3221,6 +3452,9 @@ Genesis::Genesis(Isolate* isolate,
if (!ConfigureGlobalObjects(global_proxy_template)) return;
}
isolate->counters()->contexts_created_from_scratch()->Increment();
+ // Re-initialize the counter because it got incremented during snapshot
+ // creation.
+ isolate->native_context()->set_errors_thrown(Smi::FromInt(0));
}
// Install experimental natives. Do not include them into the
@@ -3234,20 +3468,17 @@ Genesis::Genesis(Isolate* isolate,
if (FLAG_experimental_extras) {
if (!InstallExperimentalExtraNatives()) return;
}
-
- // By now the utils object is useless and can be removed.
- native_context()->set_natives_utils_object(
- isolate->heap()->undefined_value());
}
// The serializer cannot serialize typed arrays. Reset those typed arrays
// for each new context.
- InitializeBuiltinTypedArrays();
} else if (context_type == DEBUG_CONTEXT) {
DCHECK(!isolate->serializer_enabled());
InitializeExperimentalGlobal();
if (!InstallDebuggerNatives()) return;
}
+ ConfigureUtilsObject(context_type);
+
// Check that the script context table is empty except for the 'this' binding.
// We do not need script contexts for native scripts.
if (!FLAG_global_var_shortcuts) {
diff --git a/chromium/v8/src/bootstrapper.h b/chromium/v8/src/bootstrapper.h
index 659d74aad25..44f0f1b2a55 100644
--- a/chromium/v8/src/bootstrapper.h
+++ b/chromium/v8/src/bootstrapper.h
@@ -82,8 +82,6 @@ class Bootstrapper final {
v8::ExtensionConfiguration* extensions,
ContextType context_type = FULL_CONTEXT);
- bool CreateCodeStubContext(Isolate* isolate);
-
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
@@ -110,14 +108,12 @@ class Bootstrapper final {
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
static bool CompileNative(Isolate* isolate, Vector<const char> name,
- Handle<JSObject> receiver, Handle<String> source,
- int argc, Handle<Object> argv[]);
+ Handle<String> source, int argc,
+ Handle<Object> argv[]);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
- static bool CompileCodeStubBuiltin(Isolate* isolate, int index);
- static bool InstallCodeStubNatives(Isolate* isolate);
static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
static void ExportExperimentalFromRuntime(Isolate* isolate,
@@ -176,6 +172,7 @@ class NativesExternalStringResource final
size_t length_;
};
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BOOTSTRAPPER_H_
diff --git a/chromium/v8/src/builtins.cc b/chromium/v8/src/builtins.cc
index 13225d2065b..77df498a07f 100644
--- a/chromium/v8/src/builtins.cc
+++ b/chromium/v8/src/builtins.cc
@@ -9,6 +9,7 @@
#include "src/arguments.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
+#include "src/dateparser-inl.h"
#include "src/elements.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
@@ -17,7 +18,9 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/property-descriptor.h"
#include "src/prototype.h"
+#include "src/string-builder.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -30,7 +33,10 @@ template <BuiltinExtraArguments extra_args>
class BuiltinArguments : public Arguments {
public:
BuiltinArguments(int length, Object** arguments)
- : Arguments(length, arguments) { }
+ : Arguments(length, arguments) {
+ // Check we have at least the receiver.
+ DCHECK_LE(1, this->length());
+ }
Object*& operator[] (int index) {
DCHECK(index < length());
@@ -42,51 +48,75 @@ class BuiltinArguments : public Arguments {
return Arguments::at<S>(index);
}
+ Handle<Object> atOrUndefined(Isolate* isolate, int index) {
+ if (index >= length()) {
+ return isolate->factory()->undefined_value();
+ }
+ return at<Object>(index);
+ }
+
Handle<Object> receiver() {
return Arguments::at<Object>(0);
}
- Handle<JSFunction> called_function() {
- STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
- return Arguments::at<JSFunction>(Arguments::length() - 1);
- }
+ Handle<JSFunction> target();
+ Handle<HeapObject> new_target();
// Gets the total number of arguments including the receiver (but
// excluding extra arguments).
- int length() const {
- STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- return Arguments::length();
- }
-
-#ifdef DEBUG
- void Verify() {
- // Check we have at least the receiver.
- DCHECK(Arguments::length() >= 1);
- }
-#endif
+ int length() const;
};
-// Specialize BuiltinArguments for the called function extra argument.
+// Specialize BuiltinArguments for the extra arguments.
template <>
-int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
+int BuiltinArguments<BuiltinExtraArguments::kNone>::length() const {
+ return Arguments::length();
+}
+
+template <>
+int BuiltinArguments<BuiltinExtraArguments::kTarget>::length() const {
return Arguments::length() - 1;
}
-#ifdef DEBUG
template <>
-void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
- // Check we have at least the receiver and the called function.
- DCHECK(Arguments::length() >= 2);
- // Make sure cast to JSFunction succeeds.
- called_function();
+Handle<JSFunction> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
+ return Arguments::at<JSFunction>(Arguments::length() - 1);
+}
+
+template <>
+int BuiltinArguments<BuiltinExtraArguments::kNewTarget>::length() const {
+ return Arguments::length() - 1;
}
-#endif
+template <>
+Handle<HeapObject>
+BuiltinArguments<BuiltinExtraArguments::kNewTarget>::new_target() {
+ return Arguments::at<HeapObject>(Arguments::length() - 1);
+}
+
+template <>
+int BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::length()
+ const {
+ return Arguments::length() - 2;
+}
+
+template <>
+Handle<JSFunction>
+BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::target() {
+ return Arguments::at<JSFunction>(Arguments::length() - 2);
+}
+
+template <>
+Handle<HeapObject>
+BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::new_target() {
+ return Arguments::at<HeapObject>(Arguments::length() - 1);
+}
-#define DEF_ARG_TYPE(name, spec) \
- typedef BuiltinArguments<spec> name##ArgumentsType;
+
+#define DEF_ARG_TYPE(name, spec) \
+ typedef BuiltinArguments<BuiltinExtraArguments::spec> name##ArgumentsType;
BUILTIN_LIST_C(DEF_ARG_TYPE)
#undef DEF_ARG_TYPE
@@ -104,65 +134,30 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
// In the body of the builtin function the arguments can be accessed
// through the BuiltinArguments object args.
-#ifdef DEBUG
-
#define BUILTIN(name) \
MUST_USE_RESULT static Object* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate); \
MUST_USE_RESULT static Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
- args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
MUST_USE_RESULT static Object* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
-#else // For release mode.
-#define BUILTIN(name) \
- static Object* Builtin_impl##name( \
- name##ArgumentsType args, Isolate* isolate); \
- static Object* Builtin_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- name##ArgumentsType args(args_length, args_object); \
- return Builtin_impl##name(args, isolate); \
- } \
- static Object* Builtin_impl##name( \
- name##ArgumentsType args, Isolate* isolate)
-#endif
-
-
-#ifdef DEBUG
-inline bool CalledAsConstructor(Isolate* isolate) {
- // Calculate the result using a full stack frame iterator and check
- // that the state of the stack is as we assume it to be in the
- // code below.
- StackFrameIterator it(isolate);
- DCHECK(it.frame()->is_exit());
- it.Advance();
- StackFrame* frame = it.frame();
- bool reference_result = frame->is_construct();
- Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
- // Because we know fp points to an exit frame we can use the relevant
- // part of ExitFrame::ComputeCallerState directly.
- const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
- Address caller_fp = Memory::Address_at(fp + kCallerOffset);
- // This inlines the part of StackFrame::ComputeType that grabs the
- // type of the current frame. Note that StackFrame::ComputeType
- // has been specialized for each architecture so if any one of them
- // changes this code has to be changed as well.
- const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
- const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
- Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
- bool result = (marker == kConstructMarker);
- DCHECK_EQ(result, reference_result);
- return result;
-}
-#endif
+// ----------------------------------------------------------------------------
-// ----------------------------------------------------------------------------
+#define CHECK_RECEIVER(Type, name, method) \
+ if (!args.receiver()->Is##Type()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \
+ isolate->factory()->NewStringFromAsciiChecked(method), \
+ args.receiver())); \
+ } \
+ Handle<Type> name = Handle<Type>::cast(args.receiver())
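
CHECK_RECEIVER gives C++ builtins one uniform incompatible-receiver TypeError that names the method. From JS the failure mode looks like this regardless of which builtin expands the macro (plain-JS sketch; the concrete method is illustrative):

  const getUint8 = DataView.prototype.getUint8;
  try {
    getUint8.call({}, 0);       // receiver is not a DataView
  } catch (e) {
    e instanceof TypeError;     // true: incompatible method receiver
  }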
inline bool ClampedToInteger(Object* object, int* out) {
@@ -196,16 +191,12 @@ inline bool ClampedToInteger(Object* object, int* out) {
inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
int* out) {
- Map* arguments_map =
- isolate->context()->native_context()->sloppy_arguments_map();
- if (object->map() != arguments_map || !object->HasFastElements()) {
- return false;
- }
+ Map* arguments_map = isolate->native_context()->sloppy_arguments_map();
+ if (object->map() != arguments_map) return false;
+ DCHECK(object->HasFastElements());
Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
- if (!len_obj->IsSmi()) {
- return false;
- }
- *out = Smi::cast(len_obj)->value();
+ if (!len_obj->IsSmi()) return false;
+ *out = Max(0, Smi::cast(len_obj)->value());
return *out <= object->elements()->length();
}
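
The new Max(0, ...) clamp matters because length on a sloppy arguments object is an ordinary writable Smi property; a negative value must not flow into the fast path as an element count. Plain-JS sketch of the case it guards:

  function f() {
    arguments.length = -1;   // plain property write, map stays sloppy-arguments
    return Array.prototype.slice.call(arguments);
  }
  f(1, 2, 3);   // []: ToLength(-1) is 0, and the fast path now agrees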
@@ -309,7 +300,7 @@ inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
MUST_USE_RESULT static Object* CallJsIntrinsic(
Isolate* isolate, Handle<JSFunction> function,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
+ BuiltinArguments<BuiltinExtraArguments::kNone> args) {
HandleScope handleScope(isolate);
int argc = args.length() - 1;
ScopedVector<Handle<Object> > argv(argc);
@@ -467,6 +458,14 @@ BUILTIN(ArraySlice) {
int relative_end = 0;
bool is_sloppy_arguments = false;
+ // TODO(littledan): Look up @@species only once, not once here and
+ // again in the JS builtin. Pass the species out?
+ Handle<Object> species;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+ if (*species != isolate->context()->native_context()->array_function()) {
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
if (receiver->IsJSArray()) {
DisallowHeapAllocation no_gc;
JSArray* array = JSArray::cast(*receiver);
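
ArraySlice here, and ArraySplice in the next hunk, now bail out to the JS builtin whenever the receiver's @@species resolves to anything but the intrinsic Array constructor, since only the JS path constructs the result through species. Plain-JS sketch (assumes --harmony_species):

  const a = [1, 2, 3];
  function C(n) { return new Array(n); }
  C[Symbol.species] = C;
  a.constructor = C;   // @@species lookup no longer yields %Array%
  a.slice(1);          // [2, 3], produced by the JS builtin's species path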
@@ -552,6 +551,14 @@ BUILTIN(ArraySplice) {
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
+ // TODO(littledan): Look up @@species only once, not once here and
+ // again in the JS builtin. Pass the species out?
+ Handle<Object> species;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+ if (*species != isolate->context()->native_context()->array_function()) {
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+ }
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
@@ -958,7 +965,7 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
-bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
+bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
uint32_t length, ArrayConcatVisitor* visitor) {
for (uint32_t i = 0; i < length; ++i) {
HandleScope loop_scope(isolate);
@@ -978,7 +985,7 @@ bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
/**
- * A helper function that visits elements of a JSObject in numerical
+ * A helper function that visits "array" elements of a JSReceiver in numerical
* order.
*
* The visitor argument is called for each existing element in the array
* with the element index and the element's value.
* Afterwards it increments the base-index of the visitor by the array
* length.
* Returns false if any access threw an exception, otherwise true.
*/
-bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
+bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
ArrayConcatVisitor* visitor) {
uint32_t length = 0;
if (receiver->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(receiver));
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
length = static_cast<uint32_t>(array->length()->Number());
} else {
Handle<Object> val;
- Handle<Object> key(isolate->heap()->length_string(), isolate);
+ Handle<Object> key = isolate->factory()->length_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, val, Runtime::GetObjectProperty(isolate, receiver, key),
false);
@@ -1013,15 +1020,16 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
// use the slow case.
return IterateElementsSlow(isolate, receiver, length, visitor);
}
+ Handle<JSObject> array = Handle<JSObject>::cast(receiver);
- switch (receiver->GetElementsKind()) {
+ switch (array->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
@@ -1030,14 +1038,14 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
} else {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+ Maybe<bool> maybe = JSReceiver::HasElement(array, j);
if (!maybe.IsJust()) return false;
if (maybe.FromJust()) {
- // Call GetElement on receiver, not its prototype, or getters won't
+ // Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Object::GetElement(isolate, receiver, j), false);
+ isolate, element_value, Object::GetElement(isolate, array, j),
+ false);
visitor->visit(j, element_value);
}
}
@@ -1050,12 +1058,12 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
if (length == 0) break;
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
- if (receiver->elements()->IsFixedArray()) {
- DCHECK(receiver->elements()->length() == 0);
+ if (array->elements()->IsFixedArray()) {
+ DCHECK(array->elements()->length() == 0);
break;
}
Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(receiver->elements()));
+ FixedDoubleArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
@@ -1066,15 +1074,15 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
isolate->factory()->NewNumber(double_value);
visitor->visit(j, element_value);
} else {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+ Maybe<bool> maybe = JSReceiver::HasElement(array, j);
if (!maybe.IsJust()) return false;
if (maybe.FromJust()) {
- // Call GetElement on receiver, not its prototype, or getters won't
+ // Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Object::GetElement(isolate, receiver, j), false);
+ isolate, element_value, Object::GetElement(isolate, array, j),
+ false);
visitor->visit(j, element_value);
}
}
@@ -1082,11 +1090,19 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
+ // CollectElementIndices() can't be called when there's a JSProxy
+ // on the prototype chain.
+ for (PrototypeIterator iter(isolate, array); !iter.IsAtEnd();
+ iter.Advance()) {
+ if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+ return IterateElementsSlow(isolate, array, length, visitor);
+ }
+ }
+ Handle<SeededNumberDictionary> dict(array->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
// Collect all indices in the object and the prototypes less
// than length. This might introduce duplicates in the indices list.
- CollectElementIndices(receiver, length, &indices);
+ CollectElementIndices(array, length, &indices);
indices.Sort(&compareUInt32);
int j = 0;
int n = indices.length();
@@ -1095,8 +1111,7 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
uint32_t index = indices[j];
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, receiver, index),
- false);
+ isolate, element, Object::GetElement(isolate, array, index), false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
do {
@@ -1107,7 +1122,7 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
}
case UINT8_CLAMPED_ELEMENTS: {
Handle<FixedUint8ClampedArray> pixels(
- FixedUint8ClampedArray::cast(receiver->elements()));
+ FixedUint8ClampedArray::cast(array->elements()));
for (uint32_t j = 0; j < length; j++) {
Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
visitor->visit(j, e);
@@ -1115,43 +1130,43 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
break;
}
case INT8_ELEMENTS: {
- IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, receiver, true,
+ IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, array, true,
true, visitor);
break;
}
case UINT8_ELEMENTS: {
- IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, receiver,
- true, true, visitor);
+ IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, array, true,
+ true, visitor);
break;
}
case INT16_ELEMENTS: {
- IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, receiver,
- true, true, visitor);
+ IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, array, true,
+ true, visitor);
break;
}
case UINT16_ELEMENTS: {
IterateTypedArrayElements<FixedUint16Array, uint16_t>(
- isolate, receiver, true, true, visitor);
+ isolate, array, true, true, visitor);
break;
}
case INT32_ELEMENTS: {
- IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, receiver,
- true, false, visitor);
+ IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, array, true,
+ false, visitor);
break;
}
case UINT32_ELEMENTS: {
IterateTypedArrayElements<FixedUint32Array, uint32_t>(
- isolate, receiver, true, false, visitor);
+ isolate, array, true, false, visitor);
break;
}
case FLOAT32_ELEMENTS: {
- IterateTypedArrayElements<FixedFloat32Array, float>(
- isolate, receiver, false, false, visitor);
+ IterateTypedArrayElements<FixedFloat32Array, float>(isolate, array, false,
+ false, visitor);
break;
}
case FLOAT64_ELEMENTS: {
IterateTypedArrayElements<FixedFloat64Array, double>(
- isolate, receiver, false, false, visitor);
+ isolate, array, false, false, visitor);
break;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -1160,8 +1175,7 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
HandleScope loop_scope(isolate);
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, receiver, index),
- false);
+ isolate, element, Object::GetElement(isolate, array, index), false);
visitor->visit(index, element);
}
break;
@@ -1173,37 +1187,29 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
+ DCHECK(isolate->IsFastArrayConstructorPrototypeChainIntact());
if (!FLAG_harmony_concat_spreadable) return false;
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
- Maybe<bool> maybe =
- JSReceiver::HasProperty(Handle<JSReceiver>::cast(obj), key);
- if (!maybe.IsJust()) return false;
- return maybe.FromJust();
+ Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
+ return maybe.FromMaybe(false);
}
-bool IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
+static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
HandleScope handle_scope(isolate);
- if (!obj->IsSpecObject()) return false;
+ if (!obj->IsJSReceiver()) return Just(false);
if (FLAG_harmony_concat_spreadable) {
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
Handle<Object> value;
MaybeHandle<Object> maybeValue =
i::Runtime::GetObjectProperty(isolate, obj, key);
- if (maybeValue.ToHandle(&value) && !value->IsUndefined()) {
- return value->BooleanValue();
- }
+ if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+ if (!value->IsUndefined()) return Just(value->BooleanValue());
}
- return obj->IsJSArray();
+ return Object::IsArray(obj);
}
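+// Illustrative behavior sketch (hedged; this reflects standard ES6 semantics,
+// not anything asserted by this patch). With --harmony_concat_spreadable:
+//   var o = {length: 2, 0: "a", 1: "b"};
+//   o[Symbol.isConcatSpreadable] = true;
+//   [].concat(o)                  // => ["a", "b"], spread despite not being an array
+//   [].concat(new Proxy([], {}))  // spread too: Object::IsArray follows the proxy target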
-/**
- * Array::concat implementation.
- * See ECMAScript 262, 15.4.4.4.
- * TODO(581): Fix non-compliance for very large concatenations and update to
- * following the ECMAScript 5 specification.
- */
Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
int argument_count = args->length();
@@ -1359,10 +1365,10 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj((*args)[i], isolate);
- bool spreadable = IsConcatSpreadable(isolate, obj);
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- if (spreadable) {
- Handle<JSObject> object = Handle<JSObject>::cast(obj);
+ Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
+ MAYBE_RETURN(spreadable, isolate->heap()->exception());
+ if (spreadable.FromJust()) {
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(obj);
if (!IterateElements(isolate, object, &visitor)) {
return isolate->heap()->exception();
}
@@ -1422,6 +1428,7 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
} // namespace
+// ES6 section 22.1.3.1 Array.prototype.concat
BUILTIN(ArrayConcat) {
HandleScope scope(isolate);
@@ -1444,18 +1451,1406 @@ BUILTIN(ArrayConcat) {
}
-// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
-BUILTIN(DateToPrimitive) {
+// ES6 section 22.1.2.2 Array.isArray
+BUILTIN(ArrayIsArray) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> object = args.at<Object>(1);
+ Maybe<bool> result = Object::IsArray(object);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
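+// Hedged usage sketch (plain ES6 semantics): Object::IsArray implements the
+// spec's IsArray(), which pierces proxies, so
+//   Array.isArray(new Proxy([], {}))   // => true
+// and a revoked proxy makes IsArray throw, which is why the result is a
+// Maybe<bool> funneled through MAYBE_RETURN above.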
+
+
+// ES6 section 19.1.2.1 Object.assign
+BUILTIN(ObjectAssign) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+
+ // 1. Let to be ? ToObject(target).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
+ Execution::ToObject(isolate, target));
+ Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
+ // 2. If only one argument was passed, return to.
+ if (args.length() == 2) return *to;
+ // 3. Let sources be the List of argument values starting with the
+ // second argument.
+ // 4. For each element nextSource of sources, in ascending index order,
+ for (int i = 2; i < args.length(); ++i) {
+ Handle<Object> next_source = args.at<Object>(i);
+ // 4a. If nextSource is undefined or null, let keys be an empty List.
+ if (next_source->IsUndefined() || next_source->IsNull()) continue;
+ // 4b. Else,
+ // 4b i. Let from be ToObject(nextSource).
+ Handle<JSReceiver> from =
+ Object::ToObject(isolate, next_source).ToHandleChecked();
+ // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(from, JSReceiver::OWN_ONLY,
+ ALL_PROPERTIES, KEEP_NUMBERS));
+ // 4c. Repeat for each element nextKey of keys in List order,
+ for (int j = 0; j < keys->length(); ++j) {
+ Handle<Object> next_key(keys->get(j), isolate);
+ // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+ if (found.IsNothing()) return isolate->heap()->exception();
+ // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+ if (found.FromJust() && desc.enumerable()) {
+ // 4c ii 1. Let propValue be ? Get(from, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prop_value,
+ Runtime::GetObjectProperty(isolate, from, next_key, STRICT));
+ // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+ Handle<Object> status;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
+ prop_value, STRICT));
+ }
+ }
+ }
+ // 5. Return to.
+ return *to;
+}
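+// Hedged usage sketch (plain ES6 semantics): only own enumerable keys are
+// copied, undefined/null sources are skipped (step 4a), and later sources win
+// because every key is written through an ordinary Set():
+//   Object.assign({a: 1}, null, {a: 2, b: 3})   // => {a: 2, b: 3}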
+
+
+// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+BUILTIN(ObjectCreate) {
+ HandleScope scope(isolate);
+ Handle<Object> prototype = args.atOrUndefined(isolate, 1);
+ if (!prototype->IsNull() && !prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+ }
+
+ // Generate the map with the specified {prototype} based on the Object
+ // function's initial map from the current native context.
+ // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+ // slack tracking for Object.create.
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype, FAST_PROTOTYPE);
+ }
+
+ // Actually allocate the object.
+ Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+
+ // Define the properties if a properties argument was specified and is not undefined.
+ Handle<Object> properties = args.atOrUndefined(isolate, 2);
+ if (!properties->IsUndefined()) {
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSReceiver::DefineProperties(isolate, object, properties));
+ }
+
+ return *object;
+}
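+// Hedged usage sketch (plain ES6 semantics): the prototype argument becomes
+// the [[Prototype]] of the fresh map, so
+//   Object.create(null)                 // object with no prototype chain at all
+//   Object.create(p, {x: {value: 1}})   // also defines x via DefineProperties above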
+
+
+// ES6 section 19.1.2.5 Object.freeze ( O )
+BUILTIN(ObjectFreeze) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+ FROZEN, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+
+// ES6 section 19.1.2.11 Object.isExtensible ( O )
+BUILTIN(ObjectIsExtensible) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+ : Just(false);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 19.1.2.12 Object.isFrozen ( O )
+BUILTIN(ObjectIsFrozen) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result = object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ Handle<JSReceiver>::cast(object), FROZEN)
+ : Just(true);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 19.1.2.13 Object.isSealed ( O )
+BUILTIN(ObjectIsSealed) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result = object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ Handle<JSReceiver>::cast(object), SEALED)
+ : Just(true);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
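+// Hedged note (ES6 vs. ES5): for non-objects, the two tests above short-circuit
+// to Just(true), matching the ES6 change whereby, e.g.,
+//   Object.isFrozen(1)   // => true   (ES5 threw a TypeError here)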
+
+
+// ES6 section 19.1.2.14 Object.keys ( O )
+BUILTIN(ObjectKeys) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Execution::ToObject(isolate, object));
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(receiver, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS,
+ CONVERT_TO_STRING));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+
+// ES6 section 19.1.2.15 Object.preventExtensions ( O )
+BUILTIN(ObjectPreventExtensions) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+
+// ES6 section 19.1.2.17 Object.seal ( O )
+BUILTIN(ObjectSeal) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+ SEALED, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+
+namespace {
+
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context) {
+ DCHECK(context->allow_code_gen_from_strings()->IsFalse());
+ // Check with callback if set.
+ AllowCodeGenerationFromStringsCallback callback =
+ isolate->allow_code_gen_callback();
+ if (callback == NULL) {
+ // No callback set and code generation disallowed.
+ return false;
+ } else {
+ // Callback set. Let it decide if code generation is allowed.
+ VMState<EXTERNAL> state(isolate);
+ return callback(v8::Utils::ToLocal(context));
+ }
+}
+
+
+MaybeHandle<JSFunction> CompileString(Handle<Context> context,
+ Handle<String> source,
+ ParseRestriction restriction) {
+ Isolate* const isolate = context->GetIsolate();
+ Handle<Context> native_context(context->native_context(), isolate);
+
+ // Check if native context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ Handle<Object> error_message =
+ native_context->ErrorMessageForCodeGenerationFromStrings();
+ THROW_NEW_ERROR(isolate, NewEvalError(MessageTemplate::kCodeGenFromStrings,
+ error_message),
+ JSFunction);
+ }
+
+ // Compile source string in the native context.
+ Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared(),
+ isolate);
+ return Compiler::GetFunctionFromEval(source, outer_info, native_context,
+ SLOPPY, restriction,
+ RelocInfo::kNoPosition);
+}
+
+} // namespace
+
+
+// ES6 section 18.2.1 eval (x)
+BUILTIN(GlobalEval) {
+ HandleScope scope(isolate);
+ Handle<Object> x = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target();
+ Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+ if (!x->IsString()) return *x;
+ Handle<JSFunction> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function,
+ CompileString(handle(target->native_context(), isolate),
+ Handle<String>::cast(x), NO_PARSE_RESTRICTION));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
+ return *result;
+}
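+// Hedged usage sketch: this builtin is typically reached via *indirect* eval
+// (direct eval in compiled code takes a separate path), so the source is
+// compiled sloppy in the target's native context and non-strings pass through:
+//   (0, eval)("var x = 1")   // binds x on the global object
+//   eval(42)                 // => 42, returned unchanged without compiling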
+
+
+// ES6 section 26.1.3 Reflect.defineProperty
+BUILTIN(ReflectDefineProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> attributes = args.at<Object>(3);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.defineProperty")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ PropertyDescriptor desc;
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+ return isolate->heap()->exception();
+ }
+
+ Maybe<bool> result =
+ JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
+ name, &desc, Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.4 Reflect.deleteProperty
+BUILTIN(ReflectDeleteProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.deleteProperty")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
+ Handle<JSReceiver>::cast(target), name, SLOPPY);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.6 Reflect.get
+BUILTIN(ReflectGet) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> key = args.atOrUndefined(isolate, 2);
+ Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.get")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::GetPropertyOrElement(
+ Handle<JSReceiver>::cast(target), name, receiver));
+
+ return *result;
+}
+
+
+// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
+BUILTIN(ReflectGetOwnPropertyDescriptor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.getOwnPropertyDescriptor")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, Handle<JSReceiver>::cast(target), name, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
+}
+
+
+// ES6 section 26.1.8 Reflect.getPrototypeOf
+BUILTIN(ReflectGetPrototypeOf) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- if (!args.receiver()->IsJSReceiver()) {
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.getPrototypeOf")));
+ }
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
+ Object::GetPrototype(isolate, target));
+ return *prototype;
+}
+
+
+// ES6 section 26.1.9 Reflect.has
+BUILTIN(ReflectHas) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.has")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Maybe<bool> result =
+ JSReceiver::HasProperty(Handle<JSReceiver>::cast(target), name);
+ return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
+ : isolate->heap()->exception();
+}
+
+
+// ES6 section 26.1.10 Reflect.isExtensible
+BUILTIN(ReflectIsExtensible) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.isExtensible")));
+ }
+
+ Maybe<bool> result =
+ JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.11 Reflect.ownKeys
+BUILTIN(ReflectOwnKeys) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.ownKeys")));
+ }
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(Handle<JSReceiver>::cast(target),
+ JSReceiver::OWN_ONLY, ALL_PROPERTIES,
+ CONVERT_TO_STRING));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+
+// ES6 section 26.1.12 Reflect.preventExtensions
+BUILTIN(ReflectPreventExtensions) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.preventExtensions")));
+ }
+
+ Maybe<bool> result = JSReceiver::PreventExtensions(
+ Handle<JSReceiver>::cast(target), Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.13 Reflect.set
+BUILTIN(ReflectSet) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> key = args.atOrUndefined(isolate, 2);
+ Handle<Object> value = args.atOrUndefined(isolate, 3);
+ Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.set")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, name, Handle<JSReceiver>::cast(target));
+ Maybe<bool> result = Object::SetSuperProperty(
+ &it, value, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.14 Reflect.setPrototypeOf
+BUILTIN(ReflectSetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> proto = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
isolate->factory()->NewStringFromAsciiChecked(
- "Date.prototype [ @@toPrimitive ]"),
- args.receiver()));
+ "Reflect.setPrototypeOf")));
+ }
+
+ if (!proto->IsJSReceiver() && !proto->IsNull()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
+ }
+
+ Maybe<bool> result = JSReceiver::SetPrototype(
+ Handle<JSReceiver>::cast(target), proto, true, Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.3 Date Objects
+
+
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+
+// 20.3.1.2 Day Number and Time within Day
+const double kMsPerDay = 86400000.0;
+
+
+// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time) {
+ if (std::isfinite(day) && std::isfinite(time)) {
+ return time + day * kMsPerDay;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date) {
+ if ((kMinYear <= year && year <= kMaxYear) &&
+ (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+ int y = FastD2I(year);
+ int m = FastD2I(month);
+ y += m / 12;
+ m %= 12;
+ if (m < 0) {
+ m += 12;
+ y -= 1;
+ }
+ DCHECK_LE(0, m);
+ DCHECK_LT(m, 12);
+
+ // kYearDelta is an arbitrary number such that:
+ // a) kYearDelta = -1 (mod 400)
+ // b) year + kYearDelta > 0 for years in the range defined by
+ //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be an overflow for 32-bit integers in the following
+ // operations.
+ static const int kYearDelta = 399999;
+ static const int kBaseDay =
+ 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+ (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
+ int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+ (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+ kBaseDay;
+ if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+ static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ day_from_year += kDayFromMonth[m];
+ } else {
+ static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+ day_from_year += kDayFromMonth[m];
+ }
+ return static_cast<double>(day_from_year - 1) + date;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
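+// Worked examples (hedged, derived from the arithmetic above):
+//   MakeDay(1970, 0, 1)   // => 0, the day number of the epoch (Jan 1 1970)
+//   MakeDay(1970, 13, 1)  // => day of Feb 1 1971 (month overflow normalizes)
+//   MakeDay(1969, -1, 1)  // => day of Dec 1 1968 (negative months borrow a year)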
+
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms) {
+ if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+ std::isfinite(ms)) {
+ double const h = DoubleToInteger(hour);
+ double const m = DoubleToInteger(min);
+ double const s = DoubleToInteger(sec);
+ double const milli = DoubleToInteger(ms);
+ return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+
+// ES6 section 20.3.1.15 TimeClip (time)
+double TimeClip(double time) {
+ if (-DateCache::kMaxTimeInMs <= time && time <= DateCache::kMaxTimeInMs) {
+ return DoubleToInteger(time) + 0.0;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
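+// Hedged notes on the clamp above: DateCache::kMaxTimeInMs is the spec's
+// 8,640,000,000,000,000 ms (100,000,000 days on each side of the epoch), so
+//   TimeClip(8.64e15)      // => 8.64e15
+//   TimeClip(8.64e15 + 1)  // => NaN
+// and the "+ 0.0" normalizes a -0 result to +0, per the spec's final step.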
+
+
+const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
+ "Thu", "Fri", "Sat"};
+const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+
+
+// ES6 section 20.3.1.16 Date Time String Format
+double ParseDateTimeString(Handle<String> str) {
+ Isolate* const isolate = str->GetIsolate();
+ str = String::Flatten(str);
+ // TODO(bmeurer): Change DateParser to not use the FixedArray.
+ Handle<FixedArray> tmp =
+ isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent str_content = str->GetFlatContent();
+ bool result;
+ if (str_content.IsOneByte()) {
+ result = DateParser::Parse(str_content.ToOneByteVector(), *tmp,
+ isolate->unicode_cache());
+ } else {
+ result = DateParser::Parse(str_content.ToUC16Vector(), *tmp,
+ isolate->unicode_cache());
+ }
+ if (!result) return std::numeric_limits<double>::quiet_NaN();
+ double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
+ tmp->get(2)->Number());
+ double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
+ tmp->get(5)->Number(), tmp->get(6)->Number());
+ double date = MakeDate(day, time);
+ if (tmp->get(7)->IsNull()) {
+ if (!std::isnan(date)) {
+ date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
+ }
+ } else {
+ date -= tmp->get(7)->Number() * 1000.0;
+ }
+ return date;
+}
+
+
+enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
+
+
+// ES6 section 20.3.4.41.1 ToDateString(tv)
+void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
+ ToDateStringMode mode = kDateAndTime) {
+ if (std::isnan(time_val)) {
+ SNPrintF(str, "Invalid Date");
+ return;
+ }
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = date_cache->ToLocal(time_ms);
+ int year, month, day, weekday, hour, min, sec, ms;
+ date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
+ &min, &sec, &ms);
+ int timezone_offset = -date_cache->TimezoneOffset(time_ms);
+ int timezone_hour = std::abs(timezone_offset) / 60;
+ int timezone_min = std::abs(timezone_offset) % 60;
+ const char* local_timezone = date_cache->LocalTimezone(time_ms);
+ switch (mode) {
+ case kDateOnly:
+ SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
+ kShortMonths[month], day, year);
+ return;
+ case kTimeOnly:
+ SNPrintF(str, "%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
+ (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
+ local_timezone);
+ return;
+ case kDateAndTime:
+ SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
+ min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
+ timezone_min, local_timezone);
+ return;
+ }
+ UNREACHABLE();
+}
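+// Hedged examples of the formats produced above (the timezone name and offset
+// depend on the host's DateCache/ICU data):
+//   kDateOnly:    "Tue Dec 29 2015"
+//   kTimeOnly:    "11:16:13 GMT+0100 (CET)"
+//   kDateAndTime: "Tue Dec 29 2015 11:16:13 GMT+0100 (CET)"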
+
+
+Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ Isolate* const isolate = date->GetIsolate();
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+} // namespace
+
+
+// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
+BUILTIN(DateConstructor) {
+ HandleScope scope(isolate);
+ double const time_val = JSDate::CurrentTimeValue(isolate);
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(time_val, str, isolate->date_cache());
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
+BUILTIN(DateConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ int const argc = args.length() - 1;
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ double time_val;
+ if (argc == 0) {
+ time_val = JSDate::CurrentTimeValue(isolate);
+ } else if (argc == 1) {
+ Handle<Object> value = args.at<Object>(1);
+ if (value->IsJSDate()) {
+ time_val = Handle<JSDate>::cast(value)->value()->Number();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToPrimitive(value));
+ if (value->IsString()) {
+ time_val = ParseDateTimeString(Handle<String>::cast(value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(value));
+ time_val = value->Number();
+ }
+ }
+ } else {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at<Object>(1)));
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at<Object>(2)));
+ double year = year_object->Number();
+ double month = month_object->Number();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at<Object>(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ms = ms_object->Number();
+ }
+ }
+ }
+ }
+ }
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ time_val = MakeDate(day, time);
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
+ }
+ Handle<JSDate> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSDate::New(target, new_target, time_val));
+ return *result;
+}
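+// Hedged usage sketch of the argument handling above:
+//   new Date(d)              // copies another JSDate's time value directly
+//   new Date("2015-12-29")   // one non-date arg: ToPrimitive, then parse or ToNumber
+//   new Date(95, 11, 25)     // => Dec 25 1995 local time (0..99 maps to 1900+y)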
+
+
+// ES6 section 20.3.3.1 Date.now ( )
+BUILTIN(DateNow) {
+ HandleScope scope(isolate);
+ return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
+}
+
+
+// ES6 section 20.3.3.2 Date.parse ( string )
+BUILTIN(DateParse) {
+ HandleScope scope(isolate);
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, string,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+ return *isolate->factory()->NewNumber(ParseDateTimeString(string));
+}
+
+
+// ES6 section 20.3.3.4 Date.UTC (year,month,date,hours,minutes,seconds,ms)
+BUILTIN(DateUTC) {
+ HandleScope scope(isolate);
+ int const argc = args.length() - 1;
+ double year = std::numeric_limits<double>::quiet_NaN();
+ double month = std::numeric_limits<double>::quiet_NaN();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 1) {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at<Object>(1)));
+ year = year_object->Number();
+ if (argc >= 2) {
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at<Object>(2)));
+ month = month_object->Number();
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object,
+ Object::ToNumber(args.at<Object>(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ms = ms_object->Number();
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ return *isolate->factory()->NewNumber(TimeClip(MakeDate(day, time)));
+}
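+// Hedged examples (spec arithmetic, evaluated in UTC rather than local time):
+//   Date.UTC(1970, 0, 1)           // => 0
+//   Date.UTC(1970, 0, 2, 0, 0, 1)  // => 86401000, one day plus one second in ms
+//   Date.UTC(95, 0)                // two-digit years map to 1900+y here as well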
+
+
+// ES6 section 20.3.4.20 Date.prototype.setDate ( date )
+BUILTIN(DatePrototypeSetDate) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setDate");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ time_val = MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.21 Date.prototype.setFullYear (year, month, date)
+BUILTIN(DatePrototypeSetFullYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setFullYear");
+ int const argc = args.length() - 1;
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double y = year->Number(), m = 0.0, dt = 1.0;
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ if (argc >= 2) {
+ Handle<Object> month = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ m = month->Number();
+ if (argc >= 3) {
+ Handle<Object> date = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ }
+ double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.22 Date.prototype.setHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetHours) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setHours");
+ int const argc = args.length() - 1;
+ Handle<Object> hour = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ double h = hour->Number();
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> min = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ m = min->Number();
+ if (argc >= 3) {
+ Handle<Object> sec = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 4) {
+ Handle<Object> ms = args.at<Object>(4);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.23 Date.prototype.setMilliseconds(ms)
+BUILTIN(DatePrototypeSetMilliseconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMilliseconds");
+ Handle<Object> ms = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ int m = (time_within_day / (60 * 1000)) % 60;
+ int s = (time_within_day / 1000) % 60;
+ time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.24 Date.prototype.setMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetMinutes) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMinutes");
+ int const argc = args.length() - 1;
+ Handle<Object> min = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = min->Number();
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> sec = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 3) {
+ Handle<Object> ms = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
+BUILTIN(DatePrototypeSetMonth) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
+ int const argc = args.length() - 1;
+ Handle<Object> month = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, unused, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+ double m = month->Number();
+ double dt = day;
+ if (argc >= 2) {
+ Handle<Object> date = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetSeconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setSeconds");
+ int const argc = args.length() - 1;
+ Handle<Object> sec = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = sec->Number();
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> ms = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.27 Date.prototype.setTime ( time )
+BUILTIN(DatePrototypeSetTime) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setTime");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ return *JSDate::SetValue(date, TimeClip(value->Number()));
+}
+
+
+// ES6 section 20.3.4.28 Date.prototype.setUTCDate ( date )
+BUILTIN(DatePrototypeSetUTCDate) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCDate");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ if (std::isnan(date->value()->Number())) return date->value();
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int const days = isolate->date_cache()->DaysFromTime(time_ms);
+ int const time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ double const time_val =
+ MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.29 Date.prototype.setUTCFullYear (year, month, date)
+BUILTIN(DatePrototypeSetUTCFullYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCFullYear");
+ int const argc = args.length() - 1;
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double y = year->Number(), m = 0.0, dt = 1.0;
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int const days = isolate->date_cache()->DaysFromTime(time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ if (argc >= 2) {
+ Handle<Object> month = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ m = month->Number();
+ if (argc >= 3) {
+ Handle<Object> date = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ }
+ double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.30 Date.prototype.setUTCHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetUTCHours) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCHours");
+ int const argc = args.length() - 1;
+ Handle<Object> hour = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ double h = hour->Number();
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> min = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ m = min->Number();
+ if (argc >= 3) {
+ Handle<Object> sec = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 4) {
+ Handle<Object> ms = args.at<Object>(4);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.31 Date.prototype.setUTCMilliseconds(ms)
+BUILTIN(DatePrototypeSetUTCMilliseconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMilliseconds");
+ Handle<Object> ms = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ int m = (time_within_day / (60 * 1000)) % 60;
+ int s = (time_within_day / 1000) % 60;
+ time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.32 Date.prototype.setUTCMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetUTCMinutes) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMinutes");
+ int const argc = args.length() - 1;
+ Handle<Object> min = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = min->Number();
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> sec = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 3) {
+ Handle<Object> ms = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.33 Date.prototype.setUTCMonth ( month, date )
+BUILTIN(DatePrototypeSetUTCMonth) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
+ int const argc = args.length() - 1;
+ Handle<Object> month = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int days = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, unused, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+ double m = month->Number();
+ double dt = day;
+ if (argc >= 2) {
+ Handle<Object> date = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetUTCSeconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCSeconds");
+ int const argc = args.length() - 1;
+ Handle<Object> sec = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = sec->Number();
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> ms = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.35 Date.prototype.toDateString ( )
+BUILTIN(DatePrototypeToDateString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(date->value()->Number(), str, isolate->date_cache(), kDateOnly);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.36 Date.prototype.toISOString ( )
+BUILTIN(DatePrototypeToISOString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toISOString");
+ double const time_val = date->value()->Number();
+ if (std::isnan(time_val)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
+ }
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int year, month, day, weekday, hour, min, sec, ms;
+ isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+ &hour, &min, &sec, &ms);
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ if (year >= 0 && year <= 9999) {
+ SNPrintF(str, "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
+ hour, min, sec, ms);
+ } else if (year < 0) {
+ SNPrintF(str, "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year, month + 1, day,
+ hour, min, sec, ms);
+ } else {
+ SNPrintF(str, "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
+ hour, min, sec, ms);
}
- Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.41 Date.prototype.toString ( )
+BUILTIN(DatePrototypeToString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(date->value()->Number(), str, isolate->date_cache());
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.42 Date.prototype.toTimeString ( )
+BUILTIN(DatePrototypeToTimeString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(date->value()->Number(), str, isolate->date_cache(), kTimeOnly);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.43 Date.prototype.toUTCString ( )
+BUILTIN(DatePrototypeToUTCString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
+ double const time_val = date->value()->Number();
+ if (std::isnan(time_val)) {
+ return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
+ }
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int year, month, day, weekday, hour, min, sec, ms;
+ isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+ &hour, &min, &sec, &ms);
+ SNPrintF(str, "%s, %02d %s %4d %02d:%02d:%02d GMT", kShortWeekDays[weekday],
+ day, kShortMonths[month], year, hour, min, sec);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
+BUILTIN(DatePrototypeValueOf) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
+ return date->value();
+}
+
+
+// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
+BUILTIN(DatePrototypeToPrimitive) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
Handle<Object> hint = args.at<Object>(1);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
@@ -1464,12 +2859,375 @@ BUILTIN(DateToPrimitive) {
}
+// ES6 section B.2.4.1 Date.prototype.getYear ( )
+BUILTIN(DatePrototypeGetYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.getYear");
+ double time_val = date->value()->Number();
+ if (std::isnan(time_val)) return date->value();
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ return Smi::FromInt(year - 1900);
+}
+
+
+// ES6 section B.2.4.2 Date.prototype.setYear ( year )
+BUILTIN(DatePrototypeSetYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setYear");
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double m = 0.0, dt = 1.0, y = year->Number();
+ if (0.0 <= y && y <= 99.0) {
+ y = 1900.0 + DoubleToInteger(y);
+ }
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return SetLocalDateValue(date, time_val);
+}
+
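getYear and setYear are Annex B compatibility functions: setYear maps a year in 0..99 to 1900..1999 and leaves everything else untouched. A minimal sketch of that adjustment (plain C++, not part of this patch; std::floor stands in for DoubleToInteger, with which it coincides on 0..99):

#include <cmath>
#include <cstdio>

double AdjustAnnexBYear(double y) {
  // Two-digit years are interpreted as 1900-based; all others pass through.
  if (0.0 <= y && y <= 99.0) return 1900.0 + std::floor(y);
  return y;
}

int main() {
  printf("%g %g %g\n", AdjustAnnexBYear(95.0),   // 1995
         AdjustAnnexBYear(1995.0),               // 1995
         AdjustAnnexBYear(-1.0));                // -1
  return 0;
}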
+
+// static
+void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDay);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kYear);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kHour);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMinute);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMonth);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kSecond);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
+}
+
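All eighteen getters above share one generator, Generate_DatePrototype_GetField, parameterized only by the JSDate field index. A standalone sketch of the table-driven shape of this pattern (plain C++, not part of this patch; FakeDate and its field layout are made up for illustration):

#include <cstdio>

enum DateField { kYear, kMonth, kDay, kHour, kMinute, kSecond };

struct FakeDate {
  int fields[6];
};

// Stands in for Generate_DatePrototype_GetField: one body serves every
// getter; each builtin just supplies a different index.
int GetField(const FakeDate& date, DateField index) {
  return date.fields[index];
}

int main() {
  FakeDate date = {{2016, 0, 29, 12, 30, 0}};
  printf("year=%d day=%d\n", GetField(date, kYear), GetField(date, kDay));
  return 0;
}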
+
+namespace {
+
+// ES6 section 19.2.1.1.1 CreateDynamicFunction
+MaybeHandle<JSFunction> CreateDynamicFunction(
+ Isolate* isolate,
+ BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget> args,
+ const char* token) {
+ // Compute number of arguments, ignoring the receiver.
+ DCHECK_LE(1, args.length());
+ int const argc = args.length() - 1;
+
+ // Build the source string.
+ Handle<String> source;
+ {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCharacter('(');
+ builder.AppendCString(token);
+ builder.AppendCharacter('(');
+ bool parenthesis_in_arg_string = false;
+ if (argc > 1) {
+ for (int i = 1; i < argc; ++i) {
+ if (i > 1) builder.AppendCharacter(',');
+ Handle<String> param;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, param, Object::ToString(isolate, args.at<Object>(i)),
+ JSFunction);
+ param = String::Flatten(param);
+ builder.AppendString(param);
+ // If the formal parameters string includes ')' (an illegal
+ // character), it could make the combined function expression
+ // compile when it should not. We avoid this by checking for it early on.
+ DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
+ String::FlatContent param_content = param->GetFlatContent();
+ for (int i = 0, length = param->length(); i < length; ++i) {
+ if (param_content.Get(i) == ')') {
+ parenthesis_in_arg_string = true;
+ break;
+ }
+ }
+ }
+ // If the formal parameters include an unbalanced block comment, the
+ // function must be rejected. Since JavaScript does not allow nested
+ // comments, we can append a trailing block comment to catch this.
+ builder.AppendCString("\n/**/");
+ }
+ builder.AppendCString(") {\n");
+ if (argc > 0) {
+ Handle<String> body;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
+ JSFunction);
+ builder.AppendString(body);
+ }
+ builder.AppendCString("\n})");
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), JSFunction);
+
+ // The SyntaxError must be thrown after all the (observable) ToString
+ // conversions are done.
+ if (parenthesis_in_arg_string) {
+ THROW_NEW_ERROR(isolate,
+ NewSyntaxError(MessageTemplate::kParenthesisInArgString),
+ JSFunction);
+ }
+ }
+
+ // Compile the string in the constructor and not a helper so that errors
+ // appear to come from here.
+ Handle<JSFunction> target = args.target();
+ Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+ Handle<JSFunction> function;
+ {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, function,
+ CompileString(handle(target->native_context(), isolate), source,
+ ONLY_SINGLE_FUNCTION_LITERAL),
+ JSFunction);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, target_global_proxy, 0, nullptr),
+ JSFunction);
+ function = Handle<JSFunction>::cast(result);
+ function->shared()->set_name_should_print_as_anonymous(true);
+ }
+
+ // If new.target equals target, then the function created is already
+ // correctly set up and nothing else needs to be done here. But if
+ // new.target does not equal target, then we have a Function builtin
+ // subclassing case and the function therefore has the wrong initial map.
+ // To fix that we create a new function object with the correct initial
+ // map.
+ Handle<Object> unchecked_new_target = args.new_target();
+ if (!unchecked_new_target->IsUndefined() &&
+ !unchecked_new_target.is_identical_to(target)) {
+ Handle<JSReceiver> new_target =
+ Handle<JSReceiver>::cast(unchecked_new_target);
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, target, new_target), JSFunction);
+
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+ Handle<Map> map = Map::AsLanguageMode(
+ initial_map, shared_info->language_mode(), shared_info->kind());
+
+ Handle<Context> context(function->context(), isolate);
+ function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ map, shared_info, context, NOT_TENURED);
+ }
+ return function;
+}
+
+} // namespace
+
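To see what CreateDynamicFunction actually compiles: for new Function("a", "b", "return a + b") the builder yields "(function(a,b\n/**/) {\nreturn a + b\n})". A standalone sketch of the string assembly (plain C++ with std::string in place of IncrementalStringBuilder, not part of this patch; the ')' scan and the ToString conversions are omitted):

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

std::string BuildSource(const char* token,
                        const std::vector<std::string>& params,
                        const std::string& body) {
  std::string source = "(";
  source += token;
  source += "(";
  for (std::size_t i = 0; i < params.size(); ++i) {
    if (i > 0) source += ",";
    source += params[i];
  }
  // The trailing block comment: an unbalanced "/*" smuggled in through a
  // parameter now swallows it, and the source fails to parse, as intended.
  if (!params.empty()) source += "\n/**/";
  source += ") {\n";
  source += body;
  source += "\n})";
  return source;
}

int main() {
  std::vector<std::string> params;
  params.push_back("a");
  params.push_back("b");
  printf("%s\n", BuildSource("function", params, "return a + b").c_str());
  return 0;
}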
+
+// ES6 section 19.2.1.1 Function ( p1, p2, ... , pn, body )
+BUILTIN(FunctionConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, CreateDynamicFunction(isolate, args, "function"));
+ return *result;
+}
+
+
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+BUILTIN(FunctionPrototypeBind) {
+ HandleScope scope(isolate);
+ DCHECK_LE(1, args.length());
+ if (!args.receiver()->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kFunctionBind));
+ }
+
+ // Allocate the bound function with the given {this_arg} and {args}.
+ Handle<JSReceiver> target = args.at<JSReceiver>(0);
+ Handle<Object> this_arg = isolate->factory()->undefined_value();
+ ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
+ if (args.length() > 1) {
+ this_arg = args.at<Object>(1);
+ for (int i = 2; i < args.length(); ++i) {
+ argv[i - 2] = args.at<Object>(i);
+ }
+ }
+ Handle<JSBoundFunction> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function,
+ isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
+
+ // TODO(bmeurer): Optimize the rest for the common cases where {target} is
+ // a function with some initial map or even a bound function.
+ // Set up the "length" property based on the "length" of the {target}.
+ Handle<Object> length(Smi::FromInt(0), isolate);
+ Maybe<bool> target_has_length =
+ JSReceiver::HasOwnProperty(target, isolate->factory()->length_string());
+ if (!target_has_length.IsJust()) {
+ return isolate->heap()->exception();
+ } else if (target_has_length.FromJust()) {
+ Handle<Object> target_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, target_length,
+ JSReceiver::GetProperty(target, isolate->factory()->length_string()));
+ if (target_length->IsNumber()) {
+ length = isolate->factory()->NewNumber(std::max(
+ 0.0, DoubleToInteger(target_length->Number()) - argv.length()));
+ }
+ }
+ function->set_length(*length);
+
+ // Set up the "name" property based on the "name" of the {target}.
+ Handle<Object> target_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, target_name,
+ JSReceiver::GetProperty(target, isolate->factory()->name_string()));
+ Handle<String> name;
+ if (!target_name->IsString()) {
+ name = isolate->factory()->bound__string();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name, Name::ToFunctionName(Handle<String>::cast(target_name)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name, isolate->factory()->NewConsString(
+ isolate->factory()->bound__string(), name));
+ }
+ function->set_name(*name);
+ return *function;
+}
+
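The "length" of the bound function is max(0, target.length - number of bound arguments), and "name" becomes "bound " plus the target's name, so f(a, b, c).bind(null, 1) has length 2 and name "bound f". A minimal sketch of the length rule (plain C++, not part of this patch):

#include <algorithm>
#include <cstdio>

double BoundFunctionLength(double target_length, int bound_arg_count) {
  // ES6 19.2.3.2: subtract the bound arguments, clamping at zero.
  return std::max(0.0, target_length - bound_arg_count);
}

int main() {
  printf("%g\n", BoundFunctionLength(3.0, 1));  // 2
  printf("%g\n", BoundFunctionLength(1.0, 5));  // 0
  return 0;
}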
+
+// ES6 section 19.2.3.5 Function.prototype.toString ( )
+BUILTIN(FunctionPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (receiver->IsJSBoundFunction()) {
+ return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
+ } else if (receiver->IsJSFunction()) {
+ return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
+ }
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Function.prototype.toString")));
+}
+
+
+// ES6 section 25.2.1.1 GeneratorFunction (p1, p2, ... , pn, body)
+BUILTIN(GeneratorFunctionConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, CreateDynamicFunction(isolate, args, "function*"));
+ return *result;
+}
+
+
// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
BUILTIN(SymbolConstructor) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
Handle<Symbol> result = isolate->factory()->NewSymbol();
- Handle<Object> description = args.at<Object>(1);
+ Handle<Object> description = args.atOrUndefined(isolate, 1);
if (!description->IsUndefined()) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
Object::ToString(isolate, description));
@@ -1488,6 +3246,100 @@ BUILTIN(SymbolConstructor_ConstructStub) {
}
+// ES6 19.1.3.6 Object.prototype.toString
+BUILTIN(ObjectProtoToString) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::ObjectProtoToString(isolate, object));
+ return *result;
+}
+
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
+BUILTIN(ArrayBufferConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target();
+ DCHECK(*target == target->native_context()->array_buffer_fun() ||
+ *target == target->native_context()->shared_array_buffer_fun());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->name(), isolate)));
+}
+
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
+BUILTIN(ArrayBufferConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> length = args.atOrUndefined(isolate, 1);
+ DCHECK(*target == target->native_context()->array_buffer_fun() ||
+ *target == target->native_context()->shared_array_buffer_fun());
+ Handle<Object> number_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+ Object::ToInteger(isolate, length));
+ if (number_length->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, target, new_target));
+ size_t byte_length;
+ if (!TryNumberToSize(isolate, *number_length, &byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ Handle<JSArrayBuffer> result = Handle<JSArrayBuffer>::cast(
+ isolate->factory()->NewJSObjectFromMap(initial_map));
+ SharedFlag shared_flag =
+ (*target == target->native_context()->array_buffer_fun())
+ ? SharedFlag::kNotShared
+ : SharedFlag::kShared;
+ if (!JSArrayBuffer::SetupAllocatingData(result, isolate, byte_length, true,
+ shared_flag)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
+ }
+ return *result;
+}
+
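The length check above happens in two stages: ToInteger plus a sign test rejects negative lengths, then TryNumberToSize rejects values that do not fit a size_t. A rough standalone sketch of the combined validation (plain C++, not part of this patch; V8's TryNumberToSize differs in detail):

#include <cstddef>
#include <cstdio>
#include <limits>

bool ValidateByteLength(double number_length, std::size_t* out) {
  if (number_length < 0.0) return false;  // kInvalidArrayBufferLength.
  // Values too large for size_t are equally invalid.
  if (number_length >
      static_cast<double>(std::numeric_limits<std::size_t>::max())) {
    return false;
  }
  *out = static_cast<std::size_t>(number_length);
  return true;
}

int main() {
  std::size_t n = 0;
  printf("%d\n", ValidateByteLength(16.0, &n));   // 1
  printf("%d\n", ValidateByteLength(-1.0, &n));   // 0
  printf("%d\n", ValidateByteLength(1e30, &n));   // 0
  return 0;
}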
+
+// ES6 section 24.1.3.1 ArrayBuffer.isView ( arg )
+BUILTIN(ArrayBufferIsView) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ Object* arg = args[1];
+ return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
+}
+
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
+BUILTIN(ProxyConstructor) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked("Proxy")));
+}
+
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
+BUILTIN(ProxyConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ DCHECK(isolate->proxy_function()->IsConstructor());
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> handler = args.atOrUndefined(isolate, 2);
+ Handle<JSProxy> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSProxy::New(isolate, target, handler));
+ return *result;
+}
+
+
// -----------------------------------------------------------------------------
// Throwers for restricted function properties and strict arguments object
// properties
@@ -1511,11 +3363,14 @@ BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
//
+namespace {
+
template <bool is_construct>
-MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
- Isolate* isolate, BuiltinArguments<NEEDS_CALLED_FUNCTION>& args) {
+MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
+ Isolate* isolate, BuiltinArguments<BuiltinExtraArguments::kTarget> args) {
HandleScope scope(isolate);
- Handle<JSFunction> function = args.called_function();
+ Handle<JSFunction> function = args.target();
+ DCHECK(args.receiver()->IsJSReceiver());
// TODO(ishell): turn this back to a DCHECK.
CHECK(function->shared()->IsApiFunction());
@@ -1529,14 +3384,11 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
Object);
}
- DCHECK(!args[0]->IsNull());
- if (args[0]->IsUndefined()) args[0] = function->global_proxy();
-
if (!is_construct && !fun_data->accept_any_receiver()) {
- Handle<Object> receiver(&args[0]);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
- if (!isolate->MayAccess(js_receiver)) {
+ if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
isolate->ReportFailedAccessCheck(js_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
@@ -1590,10 +3442,11 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
return scope.CloseAndEscape(args.receiver());
}
+} // namespace
+
BUILTIN(HandleApiCall) {
HandleScope scope(isolate);
- DCHECK(!CalledAsConstructor(isolate));
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
HandleApiCallHelper<false>(isolate, args));
@@ -1603,7 +3456,6 @@ BUILTIN(HandleApiCall) {
BUILTIN(HandleApiCallConstruct) {
HandleScope scope(isolate);
- DCHECK(CalledAsConstructor(isolate));
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
HandleApiCallHelper<true>(isolate, args));
@@ -1611,13 +3463,42 @@ BUILTIN(HandleApiCallConstruct) {
}
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return CallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return CallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return CallFunction_ReceiverIsAny();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Call_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Call_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return Call_ReceiverIsAny();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
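Each ConvertReceiverMode maps to a distinct pre-generated stub, so the decision of whether the receiver needs conversion is baked into the code object instead of being re-tested on every call. A standalone sketch of the dispatch shape (plain C++, not part of this patch; the stub names are just labels here):

#include <cstdio>

enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

const char* CallStubFor(ConvertReceiverMode mode) {
  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined:
      return "Call_ReceiverIsNullOrUndefined";
    case ConvertReceiverMode::kNotNullOrUndefined:
      return "Call_ReceiverIsNotNullOrUndefined";
    case ConvertReceiverMode::kAny:
      return "Call_ReceiverIsAny";
  }
  return nullptr;  // Unreachable; mirrors UNREACHABLE() above.
}

int main() {
  printf("%s\n", CallStubFor(ConvertReceiverMode::kAny));
  return 0;
}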
+
namespace {
-class RelocatableArguments : public BuiltinArguments<NEEDS_CALLED_FUNCTION>,
- public Relocatable {
+class RelocatableArguments
+ : public BuiltinArguments<BuiltinExtraArguments::kTarget>,
+ public Relocatable {
public:
RelocatableArguments(Isolate* isolate, int length, Object** arguments)
- : BuiltinArguments<NEEDS_CALLED_FUNCTION>(length, arguments),
+ : BuiltinArguments<BuiltinExtraArguments::kTarget>(length, arguments),
Relocatable(isolate) {}
virtual inline void IterateInstance(ObjectVisitor* v) {
@@ -1667,12 +3548,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<JSFunction> function,
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
- Isolate* isolate,
- bool is_construct_call,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- // Non-functions are never called as constructors. Even if this is an object
- // called as a constructor the delegate call is not a construct call.
- DCHECK(!CalledAsConstructor(isolate));
+ Isolate* isolate, bool is_construct_call,
+ BuiltinArguments<BuiltinExtraArguments::kNone> args) {
Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
@@ -1864,11 +3741,6 @@ static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
}
-static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
- DebugCodegen::GeneratePlainReturnLiveEdit(masm);
-}
-
-
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
DebugCodegen::GenerateFrameDropperLiveEdit(masm);
}
@@ -1929,36 +3801,34 @@ void Builtins::InitBuiltinFunctionTable() {
functions[builtin_count].s_name = NULL;
functions[builtin_count].name = builtin_count;
functions[builtin_count].flags = static_cast<Code::Flags>(0);
- functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
-
-#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
- functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
- functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
- functions->s_name = #aname; \
- functions->name = c_##aname; \
- functions->flags = Code::ComputeFlags(Code::BUILTIN); \
- functions->extra_args = aextra_args; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeFlags(Code::kind, \
- state, \
- extra); \
- functions->extra_args = NO_EXTRA_ARGUMENTS; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_H(aname, kind) \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeHandlerFlags(Code::kind); \
- functions->extra_args = NO_EXTRA_ARGUMENTS; \
- ++functions;
+ functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
+
+#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
+ functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
+ functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
+ functions->s_name = #aname; \
+ functions->name = c_##aname; \
+ functions->flags = Code::ComputeFlags(Code::BUILTIN); \
+ functions->extra_args = BuiltinExtraArguments::aextra_args; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
+ functions->extra_args = BuiltinExtraArguments::kNone; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_H(aname, kind) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeHandlerFlags(Code::kind); \
+ functions->extra_args = BuiltinExtraArguments::kNone; \
+ ++functions;
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
@@ -1996,7 +3866,8 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
- MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
+ MacroAssembler masm(isolate, u.buffer, sizeof u.buffer,
+ CodeObjectRequired::kYes);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
@@ -2062,12 +3933,12 @@ const char* Builtins::Lookup(byte* pc) {
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ masm->TailCallRuntime(Runtime::kInterrupt);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+ masm->TailCallRuntime(Runtime::kStackGuard);
}
diff --git a/chromium/v8/src/builtins.h b/chromium/v8/src/builtins.h
index d9129608dcc..a707a947523 100644
--- a/chromium/v8/src/builtins.h
+++ b/chromium/v8/src/builtins.h
@@ -5,17 +5,24 @@
#ifndef V8_BUILTINS_H_
#define V8_BUILTINS_H_
+#include "src/base/flags.h"
#include "src/handles.h"
namespace v8 {
namespace internal {
// Specifies extra arguments required by a C++ builtin.
-enum BuiltinExtraArguments {
- NO_EXTRA_ARGUMENTS = 0,
- NEEDS_CALLED_FUNCTION = 1
+enum class BuiltinExtraArguments : uint8_t {
+ kNone = 0u,
+ kTarget = 1u << 0,
+ kNewTarget = 1u << 1,
+ kTargetAndNewTarget = kTarget | kNewTarget
};
+inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
+ return static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs);
+}
+
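BuiltinExtraArguments is now a scoped flags enum; operator& makes bit tests readable even though enum class forbids implicit integer conversion. A usage sketch (standalone copy of the declarations above for illustration, not the V8 header):

#include <cstdint>
#include <cstdio>

enum class BuiltinExtraArguments : uint8_t {
  kNone = 0u,
  kTarget = 1u << 0,
  kNewTarget = 1u << 1,
  kTargetAndNewTarget = kTarget | kNewTarget
};

inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
  return static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs);
}

int main() {
  BuiltinExtraArguments extra = BuiltinExtraArguments::kTargetAndNewTarget;
  // Both bits are set, so both tests print 1.
  printf("%d %d\n", extra & BuiltinExtraArguments::kTarget,
         extra & BuiltinExtraArguments::kNewTarget);
  return 0;
}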
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
@@ -44,106 +51,215 @@ enum BuiltinExtraArguments {
// Define list of builtins implemented in C++.
-#define BUILTIN_LIST_C(V) \
- V(Illegal, NO_EXTRA_ARGUMENTS) \
- \
- V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
- \
- V(ArrayPush, NO_EXTRA_ARGUMENTS) \
- V(ArrayPop, NO_EXTRA_ARGUMENTS) \
- V(ArrayShift, NO_EXTRA_ARGUMENTS) \
- V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
- V(ArraySlice, NO_EXTRA_ARGUMENTS) \
- V(ArraySplice, NO_EXTRA_ARGUMENTS) \
- V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
- \
- V(DateToPrimitive, NO_EXTRA_ARGUMENTS) \
- \
- V(SymbolConstructor, NO_EXTRA_ARGUMENTS) \
- V(SymbolConstructor_ConstructStub, NO_EXTRA_ARGUMENTS) \
- \
- V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
- V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
- \
- V(RestrictedFunctionPropertiesThrower, NO_EXTRA_ARGUMENTS) \
- V(RestrictedStrictArgumentsPropertiesThrower, NO_EXTRA_ARGUMENTS)
+#define BUILTIN_LIST_C(V) \
+ V(Illegal, kNone) \
+ \
+ V(EmptyFunction, kNone) \
+ \
+ V(ArrayConcat, kNone) \
+ V(ArrayIsArray, kNone) \
+ V(ArrayPop, kNone) \
+ V(ArrayPush, kNone) \
+ V(ArrayShift, kNone) \
+ V(ArraySlice, kNone) \
+ V(ArraySplice, kNone) \
+ V(ArrayUnshift, kNone) \
+ \
+ V(ArrayBufferConstructor, kTarget) \
+ V(ArrayBufferConstructor_ConstructStub, kTargetAndNewTarget) \
+ V(ArrayBufferIsView, kNone) \
+ \
+ V(DateConstructor, kNone) \
+ V(DateConstructor_ConstructStub, kTargetAndNewTarget) \
+ V(DateNow, kNone) \
+ V(DateParse, kNone) \
+ V(DateUTC, kNone) \
+ V(DatePrototypeSetDate, kNone) \
+ V(DatePrototypeSetFullYear, kNone) \
+ V(DatePrototypeSetHours, kNone) \
+ V(DatePrototypeSetMilliseconds, kNone) \
+ V(DatePrototypeSetMinutes, kNone) \
+ V(DatePrototypeSetMonth, kNone) \
+ V(DatePrototypeSetSeconds, kNone) \
+ V(DatePrototypeSetTime, kNone) \
+ V(DatePrototypeSetUTCDate, kNone) \
+ V(DatePrototypeSetUTCFullYear, kNone) \
+ V(DatePrototypeSetUTCHours, kNone) \
+ V(DatePrototypeSetUTCMilliseconds, kNone) \
+ V(DatePrototypeSetUTCMinutes, kNone) \
+ V(DatePrototypeSetUTCMonth, kNone) \
+ V(DatePrototypeSetUTCSeconds, kNone) \
+ V(DatePrototypeToDateString, kNone) \
+ V(DatePrototypeToISOString, kNone) \
+ V(DatePrototypeToPrimitive, kNone) \
+ V(DatePrototypeToUTCString, kNone) \
+ V(DatePrototypeToString, kNone) \
+ V(DatePrototypeToTimeString, kNone) \
+ V(DatePrototypeValueOf, kNone) \
+ V(DatePrototypeGetYear, kNone) \
+ V(DatePrototypeSetYear, kNone) \
+ \
+ V(FunctionConstructor, kTargetAndNewTarget) \
+ V(FunctionPrototypeBind, kNone) \
+ V(FunctionPrototypeToString, kNone) \
+ \
+ V(GeneratorFunctionConstructor, kTargetAndNewTarget) \
+ \
+ V(GlobalEval, kTarget) \
+ \
+ V(ObjectAssign, kNone) \
+ V(ObjectCreate, kNone) \
+ V(ObjectFreeze, kNone) \
+ V(ObjectIsExtensible, kNone) \
+ V(ObjectIsFrozen, kNone) \
+ V(ObjectIsSealed, kNone) \
+ V(ObjectKeys, kNone) \
+ V(ObjectPreventExtensions, kNone) \
+ V(ObjectSeal, kNone) \
+ V(ObjectProtoToString, kNone) \
+ \
+ V(ProxyConstructor, kNone) \
+ V(ProxyConstructor_ConstructStub, kTarget) \
+ \
+ V(ReflectDefineProperty, kNone) \
+ V(ReflectDeleteProperty, kNone) \
+ V(ReflectGet, kNone) \
+ V(ReflectGetOwnPropertyDescriptor, kNone) \
+ V(ReflectGetPrototypeOf, kNone) \
+ V(ReflectHas, kNone) \
+ V(ReflectIsExtensible, kNone) \
+ V(ReflectOwnKeys, kNone) \
+ V(ReflectPreventExtensions, kNone) \
+ V(ReflectSet, kNone) \
+ V(ReflectSetPrototypeOf, kNone) \
+ \
+ V(SymbolConstructor, kNone) \
+ V(SymbolConstructor_ConstructStub, kTarget) \
+ \
+ V(HandleApiCall, kTarget) \
+ V(HandleApiCallConstruct, kTarget) \
+ V(HandleApiCallAsFunction, kNone) \
+ V(HandleApiCallAsConstructor, kNone) \
+ \
+ V(RestrictedFunctionPropertiesThrower, kNone) \
+ V(RestrictedStrictArgumentsPropertiesThrower, kNone)
// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(CallFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Call, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(PushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
- V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
- LoadICState::kStrongModeState) \
- \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+#define BUILTIN_LIST_A(V) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ConstructBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(Apply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(HandleFastApiCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSBuiltinsConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterNotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterNotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterNotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
+ V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
+ LoadICState::kStrongModeState) \
+ \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kNoExtraICState) \
+ V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(DatePrototypeGetDate, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetDay, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetHours, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetTime, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetTimezoneOffset, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCDate, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCDay, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCHours, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
@@ -162,7 +278,6 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState)
@@ -216,6 +331,10 @@ class Builtins {
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
+ // Convenience wrappers.
+ Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
@@ -257,17 +376,16 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
+ static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_InOptimizationQueue(MacroAssembler* masm);
static void Generate_CompileOptimized(MacroAssembler* masm);
static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
- static void Generate_JSConstructStubForDerived(MacroAssembler* masm);
+ static void Generate_JSBuiltinsConstructStub(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
@@ -275,28 +393,100 @@ class Builtins {
static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
+ static void Generate_Apply(MacroAssembler* masm);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- static void Generate_CallFunction(MacroAssembler* masm);
+ static void Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode);
+ static void Generate_CallFunction_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
+ }
+ static void Generate_CallFunction_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
+ }
+ static void Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny);
+ }
+ // ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList)
+ static void Generate_CallBoundFunction(MacroAssembler* masm);
// ES6 section 7.3.12 Call(F, V, [argumentsList])
- static void Generate_Call(MacroAssembler* masm);
+ static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
+ static void Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
+ }
+ static void Generate_Call_ReceiverIsNotNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
+ }
+ static void Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny);
+ }
// ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
static void Generate_ConstructFunction(MacroAssembler* masm);
+ // ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget)
+ static void Generate_ConstructBoundFunction(MacroAssembler* masm);
// ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget)
static void Generate_ConstructProxy(MacroAssembler* masm);
// ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget])
static void Generate_Construct(MacroAssembler* masm);
- static void Generate_PushArgsAndCall(MacroAssembler* masm);
+ static void Generate_HandleFastApiCall(MacroAssembler* masm);
+
+ static void Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index);
+ // ES6 section 20.3.4.2 Date.prototype.getDate ( )
+ static void Generate_DatePrototypeGetDate(MacroAssembler* masm);
+ // ES6 section 20.3.4.3 Date.prototype.getDay ( )
+ static void Generate_DatePrototypeGetDay(MacroAssembler* masm);
+ // ES6 section 20.3.4.4 Date.prototype.getFullYear ( )
+ static void Generate_DatePrototypeGetFullYear(MacroAssembler* masm);
+ // ES6 section 20.3.4.5 Date.prototype.getHours ( )
+ static void Generate_DatePrototypeGetHours(MacroAssembler* masm);
+ // ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( )
+ static void Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm);
+ // ES6 section 20.3.4.7 Date.prototype.getMinutes ( )
+ static void Generate_DatePrototypeGetMinutes(MacroAssembler* masm);
+ // ES6 section 20.3.4.8 Date.prototype.getMonth ( )
+ static void Generate_DatePrototypeGetMonth(MacroAssembler* masm);
+ // ES6 section 20.3.4.9 Date.prototype.getSeconds ( )
+ static void Generate_DatePrototypeGetSeconds(MacroAssembler* masm);
+ // ES6 section 20.3.4.10 Date.prototype.getTime ( )
+ static void Generate_DatePrototypeGetTime(MacroAssembler* masm);
+ // ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( )
+ static void Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm);
+ // ES6 section 20.3.4.12 Date.prototype.getUTCDate ( )
+ static void Generate_DatePrototypeGetUTCDate(MacroAssembler* masm);
+ // ES6 section 20.3.4.13 Date.prototype.getUTCDay ( )
+ static void Generate_DatePrototypeGetUTCDay(MacroAssembler* masm);
+ // ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( )
+ static void Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm);
+ // ES6 section 20.3.4.15 Date.prototype.getUTCHours ( )
+ static void Generate_DatePrototypeGetUTCHours(MacroAssembler* masm);
+ // ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( )
+ static void Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm);
+ // ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( )
+ static void Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm);
+ // ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( )
+ static void Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm);
+ // ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
+ static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
+
+ static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
+ static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
- static void Generate_FunctionCall(MacroAssembler* masm);
- static void Generate_FunctionApply(MacroAssembler* masm);
static void Generate_ReflectApply(MacroAssembler* masm);
static void Generate_ReflectConstruct(MacroAssembler* masm);
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
+ // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
+ static void Generate_NumberConstructor(MacroAssembler* masm);
+ // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
+ static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
+
static void Generate_StringConstructor(MacroAssembler* masm);
static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
@@ -304,6 +494,14 @@ class Builtins {
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);
+ static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm);
+ static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
+ static void Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm);
+ static void Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm);
+ static void Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm);
+
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
@@ -326,6 +524,7 @@ class Builtins {
DISALLOW_COPY_AND_ASSIGN(Builtins);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_BUILTINS_H_
diff --git a/chromium/v8/src/cached-powers.h b/chromium/v8/src/cached-powers.h
index bfe36351ba0..fade5c9fcab 100644
--- a/chromium/v8/src/cached-powers.h
+++ b/chromium/v8/src/cached-powers.h
@@ -37,6 +37,7 @@ class PowersOfTenCache {
int* found_exponent);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CACHED_POWERS_H_
diff --git a/chromium/v8/src/cancelable-task.cc b/chromium/v8/src/cancelable-task.cc
index 5927c22cdeb..d231bb799d5 100644
--- a/chromium/v8/src/cancelable-task.cc
+++ b/chromium/v8/src/cancelable-task.cc
@@ -11,18 +11,113 @@ namespace v8 {
namespace internal {
-Cancelable::Cancelable(Isolate* isolate)
- : isolate_(isolate), is_cancelled_(false) {
- isolate->RegisterCancelableTask(this);
+Cancelable::Cancelable(CancelableTaskManager* parent)
+ : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
+ id_ = parent->Register(this);
+ CHECK(id_ != 0);
}
Cancelable::~Cancelable() {
- if (!is_cancelled_) {
- isolate_->RemoveCancelableTask(this);
+ // The following check is needed to avoid calling an already terminated
+ // manager object. This happens when the manager cancels all pending tasks
+ // in {CancelAndWait} just before destroying the manager object itself.
+ if (TryRun() || IsRunning()) {
+ parent_->RemoveFinishedTask(id_);
}
}
+static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
+
+
+CancelableTaskManager::CancelableTaskManager()
+ : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
+
+
+uint32_t CancelableTaskManager::Register(Cancelable* task) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ uint32_t id = ++task_id_counter_;
+ // The loop below is only needed once task_id_counter_ overflows.
+ while ((id == 0) || (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id),
+ id) != nullptr)) {
+ ++id;
+ }
+ HashMap::Entry* entry =
+ cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
+ entry->value = task;
+ return id;
+}
+
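The id loop above only matters after task_id_counter_ wraps around: ids must skip 0 (reserved as invalid) and any id still registered. A standalone sketch of that assignment (plain C++ with std::set standing in for the HashMap, not part of this patch):

#include <cstdint>
#include <cstdio>
#include <set>

uint32_t NextTaskId(uint32_t* counter, const std::set<uint32_t>& live) {
  uint32_t id = ++*counter;
  // Only relevant once the 32-bit counter has overflowed.
  while (id == 0 || live.count(id) != 0) ++id;
  return id;
}

int main() {
  std::set<uint32_t> live;
  live.insert(1);
  uint32_t counter = 0xFFFFFFFFu;  // About to wrap to 0.
  printf("%u\n", NextTaskId(&counter, live));  // 2: skips 0 and live id 1.
  return 0;
}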
+
+void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ USE(removed);
+ DCHECK(removed != nullptr);
+ cancelable_tasks_barrier_.NotifyOne();
+}
+
+
+bool CancelableTaskManager::TryAbort(uint32_t id) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ HashMap::Entry* entry =
+ cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id);
+ if (entry != nullptr) {
+ Cancelable* value = reinterpret_cast<Cancelable*>(entry->value);
+ if (value->Cancel()) {
+ // Cannot call RemoveFinishedTask here because of recursive locking.
+ void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ USE(removed);
+ DCHECK(removed != nullptr);
+ cancelable_tasks_barrier_.NotifyOne();
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void CancelableTaskManager::CancelAndWait() {
+ // Clean up all cancelable fore- and background tasks. Tasks are canceled
+ // along the way where possible, i.e., if they have not started yet. After
+ // each round of canceling we wait for the background tasks that have
+ // already been started.
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ // HashMap does not support removing while iterating, hence keep a set of
+ // entries that are to be removed.
+ std::set<uint32_t> to_remove;
+
+ // Cancelable tasks could potentially register new tasks, requiring a loop
+ // here.
+ while (cancelable_tasks_.occupancy() > 0) {
+ for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
+ p = cancelable_tasks_.Next(p)) {
+ if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
+ to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
+ }
+ }
+ // Remove tasks that were successfully canceled.
+ for (auto id : to_remove) {
+ cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ }
+ to_remove.clear();
+
+ // Finally, wait for already running background tasks.
+ if (cancelable_tasks_.occupancy() > 0) {
+ cancelable_tasks_barrier_.Wait(&mutex_);
+ }
+ }
+}
+
+
+CancelableTask::CancelableTask(Isolate* isolate)
+ : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+
+
+CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
+ : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/cancelable-task.h b/chromium/v8/src/cancelable-task.h
index bae5b580cd2..a8387fcd950 100644
--- a/chromium/v8/src/cancelable-task.h
+++ b/chromium/v8/src/cancelable-task.h
@@ -6,26 +6,114 @@
#define V8_CANCELABLE_TASK_H_
#include "include/v8-platform.h"
+#include "src/atomic-utils.h"
#include "src/base/macros.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/hashmap.h"
namespace v8 {
namespace internal {
+class Cancelable;
class Isolate;
+// Keeps track of cancelable tasks. It is possible to register and remove tasks
+// from any fore- and background task/thread.
+class CancelableTaskManager {
+ public:
+ CancelableTaskManager();
+
+ // Registers a new cancelable {task}. Returns the unique {id} of the task,
+ // which can be used to try to abort it by calling {TryAbort}.
+ uint32_t Register(Cancelable* task);
+
+ // Try to abort running a task identified by {id}. The possible outcomes are:
+ // (1) The task is already finished running and thus has been removed from
+ // the manager.
+ // (2) The task is currently running and cannot be canceled anymore.
+ // (3) The task is not yet running (or finished) so it is canceled and
+ // removed.
+ //
+ // Returns {false} for (1) and (2), and {true} for (3).
+ bool TryAbort(uint32_t id);
+
+ // Cancels all remaining registered tasks and waits for tasks that are
+ // already running.
+ void CancelAndWait();
+
+ private:
+ // Only called by the {Cancelable} destructor. The task is done executing
+ // but still needs to be removed from the manager.
+ void RemoveFinishedTask(uint32_t id);
+
+ // To mitigate the ABA problem, the API refers to tasks through an id.
+ uint32_t task_id_counter_;
+
+ // A set of cancelable tasks that are currently registered.
+ HashMap cancelable_tasks_;
+
+ // Mutex and condition variable enabling concurrent register and removing, as
+ // well as waiting for background tasks on {CancelAndWait}.
+ base::ConditionVariable cancelable_tasks_barrier_;
+ base::Mutex mutex_;
+
+ friend class Cancelable;
+
+ DISALLOW_COPY_AND_ASSIGN(CancelableTaskManager);
+};
+
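A usage sketch of the TryAbort contract described above, with a trivial mock standing in for the manager (plain C++, not part of this patch; MockTask and MockManager are invented for illustration):

#include <cstdint>
#include <cstdio>
#include <map>

struct MockTask {
  bool started = false;
};

struct MockManager {
  std::map<uint32_t, MockTask*> tasks;
  uint32_t next_id = 0;

  uint32_t Register(MockTask* task) {
    tasks[++next_id] = task;
    return next_id;
  }

  // Outcome (3) above: abort succeeds only for tasks that have not started;
  // outcomes (1) and (2) report false.
  bool TryAbort(uint32_t id) {
    std::map<uint32_t, MockTask*>::iterator it = tasks.find(id);
    if (it == tasks.end() || it->second->started) return false;
    tasks.erase(it);
    return true;
  }
};

int main() {
  MockManager manager;
  MockTask waiting, running;
  running.started = true;
  uint32_t a = manager.Register(&waiting);
  uint32_t b = manager.Register(&running);
  printf("%d %d\n", manager.TryAbort(a), manager.TryAbort(b));  // 1 0
  return 0;
}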
+
class Cancelable {
public:
- explicit Cancelable(Isolate* isolate);
+ explicit Cancelable(CancelableTaskManager* parent);
virtual ~Cancelable();
- virtual void Cancel() { is_cancelled_ = true; }
+ // Never invoke after handing over the task to the platform! The reason is
+ // that {Cancelable} is used in combination with {v8::Task} and handed to
+ // a platform. This step transfers ownership to the platform, which destroys
+ // the task after running it. Since the exact time of destruction is not
+ // known, we cannot access the object after handing it to the platform.
+ uint32_t id() { return id_; }
protected:
- Isolate* isolate_;
- bool is_cancelled_;
+ bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
+ bool IsRunning() { return status_.Value() == kRunning; }
+ intptr_t CancelAttempts() { return cancel_counter_.Value(); }
private:
+ // Identifies the state a cancelable task is in:
+ // |kWaiting|: The task is scheduled and waiting to be executed. {TryRun} will
+ // succeed.
+ // |kCanceled|: The task has been canceled. {TryRun} will fail.
+ // |kRunning|: The task is currently running and cannot be canceled anymore.
+ enum Status {
+ kWaiting,
+ kCanceled,
+ kRunning,
+ };
+
+ // Use {CancelableTaskManager} to abort a task that has not yet been
+ // executed.
+ bool Cancel() {
+ if (status_.TrySetValue(kWaiting, kCanceled)) {
+ return true;
+ }
+ cancel_counter_.Increment(1);
+ return false;
+ }
+
+ CancelableTaskManager* parent_;
+ AtomicValue<Status> status_;
+ uint32_t id_;
+
+ // The counter is incremented for failed attempts to cancel a task. The
+ // task itself can use it as an indication of how often external entities
+ // tried to abort it.
+ AtomicNumber<intptr_t> cancel_counter_;
+
+ friend class CancelableTaskManager;
+
DISALLOW_COPY_AND_ASSIGN(Cancelable);
};
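The kWaiting/kCanceled/kRunning transitions above are two competing compare-and-swaps out of kWaiting: TryRun claims the right to execute, Cancel claims the right to drop the task, and whichever fires first wins. A standalone sketch with std::atomic in place of V8's AtomicValue (plain C++, not part of this patch):

#include <atomic>
#include <cstdio>

enum Status { kWaiting, kCanceled, kRunning };

struct MiniCancelable {
  std::atomic<int> status;

  MiniCancelable() : status(kWaiting) {}

  bool TryRun() {
    int expected = kWaiting;
    // Only a waiting task may start running; a canceled one never will.
    return status.compare_exchange_strong(expected, kRunning);
  }

  bool Cancel() {
    int expected = kWaiting;
    // Cancellation only wins while the task has not started.
    return status.compare_exchange_strong(expected, kCanceled);
  }
};

int main() {
  MiniCancelable task;
  printf("%d\n", task.Cancel());  // 1: canceled before it ran.
  printf("%d\n", task.TryRun());  // 0: a canceled task never runs.
  return 0;
}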
@@ -33,18 +121,21 @@ class Cancelable {
// Multiple inheritance can be used because Task is a pure interface.
class CancelableTask : public Cancelable, public Task {
public:
- explicit CancelableTask(Isolate* isolate) : Cancelable(isolate) {}
+ explicit CancelableTask(Isolate* isolate);
// Task overrides.
void Run() final {
- if (!is_cancelled_) {
+ if (TryRun()) {
RunInternal();
}
}
virtual void RunInternal() = 0;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(CancelableTask);
};
@@ -52,18 +143,21 @@ class CancelableTask : public Cancelable, public Task {
// Multiple inheritance can be used because IdleTask is a pure interface.
class CancelableIdleTask : public Cancelable, public IdleTask {
public:
- explicit CancelableIdleTask(Isolate* isolate) : Cancelable(isolate) {}
+ explicit CancelableIdleTask(Isolate* isolate);
// IdleTask overrides.
void Run(double deadline_in_seconds) final {
- if (!is_cancelled_) {
+ if (TryRun()) {
RunInternal(deadline_in_seconds);
}
}
virtual void RunInternal(double deadline_in_seconds) = 0;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(CancelableIdleTask);
};
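Taken together, a typical lifecycle under the new manager-based API looks like
the sketch below. SweepTask and Example are hypothetical glue; id(), TryAbort
and CancelAndWait come from the declarations above, and the background-thread
call is the usual v8::Platform API:

  class SweepTask : public CancelableTask {
   public:
    explicit SweepTask(Isolate* isolate) : CancelableTask(isolate) {}
    void RunInternal() override {
      // Long-running work; may consult CancelAttempts() to bail out early.
    }
  };

  void Example(Isolate* isolate, v8::Platform* platform,
               CancelableTaskManager* manager) {
    SweepTask* task = new SweepTask(isolate);
    uint32_t id = task->id();  // Grab the id before ownership transfers.
    platform->CallOnBackgroundThread(task, v8::Platform::kShortRunningTask);

    // From any thread: true only if the task had not started running yet.
    if (!manager->TryAbort(id)) {
      // The task already finished or is running; it will run to completion.
    }

    // On teardown: cancel everything still waiting, wait for running tasks.
    manager->CancelAndWait();
  }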
diff --git a/chromium/v8/src/char-predicates-inl.h b/chromium/v8/src/char-predicates-inl.h
index d90f919341f..ab5caa75573 100644
--- a/chromium/v8/src/char-predicates-inl.h
+++ b/chromium/v8/src/char-predicates-inl.h
@@ -85,6 +85,7 @@ inline bool IsRegExpNewline(uc16 c) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CHAR_PREDICATES_INL_H_
diff --git a/chromium/v8/src/char-predicates.h b/chromium/v8/src/char-predicates.h
index 89f417196ef..3161ae4ae93 100644
--- a/chromium/v8/src/char-predicates.h
+++ b/chromium/v8/src/char-predicates.h
@@ -79,6 +79,7 @@ struct WhiteSpaceOrLineTerminator {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CHAR_PREDICATES_H_
diff --git a/chromium/v8/src/checks.h b/chromium/v8/src/checks.h
index bd798663ad2..80404e8d897 100644
--- a/chromium/v8/src/checks.h
+++ b/chromium/v8/src/checks.h
@@ -23,7 +23,8 @@ extern bool FLAG_enable_slow_asserts;
const bool FLAG_enable_slow_asserts = false;
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#define DCHECK_TAG_ALIGNED(address) \
DCHECK((reinterpret_cast<intptr_t>(address) & \
diff --git a/chromium/v8/src/code-factory.cc b/chromium/v8/src/code-factory.cc
index 9e776b40fc5..6d31a5f530d 100644
--- a/chromium/v8/src/code-factory.cc
+++ b/chromium/v8/src/code-factory.cc
@@ -59,17 +59,17 @@ Callable CodeFactory::KeyedLoadICInOptimizedCode(
// static
Callable CodeFactory::CallIC(Isolate* isolate, int argc,
- CallICState::CallType call_type) {
- return Callable(CallIC::initialize_stub(isolate, argc, call_type),
+ ConvertReceiverMode mode) {
+ return Callable(CallIC::initialize_stub(isolate, argc, mode),
CallFunctionWithFeedbackDescriptor(isolate));
}
// static
Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
- CallICState::CallType call_type) {
+ ConvertReceiverMode mode) {
return Callable(
- CallIC::initialize_stub_in_optimized_code(isolate, argc, call_type),
+ CallIC::initialize_stub_in_optimized_code(isolate, argc, mode),
CallFunctionWithFeedbackAndVectorDescriptor(isolate));
}
@@ -78,8 +78,7 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
return Callable(
StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- FLAG_vector_stores ? VectorStoreICTrampolineDescriptor(isolate)
- : StoreDescriptor(isolate));
+ VectorStoreICTrampolineDescriptor(isolate));
}
@@ -87,10 +86,9 @@ Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
Callable CodeFactory::StoreICInOptimizedCode(
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state) {
- CallInterfaceDescriptor descriptor =
- FLAG_vector_stores && initialization_state != MEGAMORPHIC
- ? VectorStoreICDescriptor(isolate)
- : StoreDescriptor(isolate);
+ CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
+ ? VectorStoreICDescriptor(isolate)
+ : StoreDescriptor(isolate);
return Callable(StoreIC::initialize_stub_in_optimized_code(
isolate, language_mode, initialization_state),
descriptor);
@@ -102,8 +100,7 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
return Callable(
KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- FLAG_vector_stores ? VectorStoreICTrampolineDescriptor(isolate)
- : StoreDescriptor(isolate));
+ VectorStoreICTrampolineDescriptor(isolate));
}
@@ -111,10 +108,9 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
Callable CodeFactory::KeyedStoreICInOptimizedCode(
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state) {
- CallInterfaceDescriptor descriptor =
- FLAG_vector_stores && initialization_state != MEGAMORPHIC
- ? VectorStoreICDescriptor(isolate)
- : StoreDescriptor(isolate);
+ CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
+ ? VectorStoreICDescriptor(isolate)
+ : StoreDescriptor(isolate);
return Callable(KeyedStoreIC::initialize_stub_in_optimized_code(
isolate, language_mode, initialization_state),
descriptor);
@@ -130,6 +126,13 @@ Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op,
// static
+Callable CodeFactory::CompareNilIC(Isolate* isolate, NilValue nil_value) {
+ Handle<Code> code = CompareNilICStub::GetUninitialized(isolate, nil_value);
+ return Callable(code, CompareNilDescriptor(isolate));
+}
+
+
+// static
Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
Strength strength) {
BinaryOpICStub stub(isolate, op, strength);
@@ -138,53 +141,64 @@ Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
// static
-Callable CodeFactory::LoadGlobalViaContext(Isolate* isolate, int depth) {
- LoadGlobalViaContextStub stub(isolate, depth);
+Callable CodeFactory::InstanceOf(Isolate* isolate) {
+ InstanceOfStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::StoreGlobalViaContext(Isolate* isolate, int depth,
- LanguageMode language_mode) {
- StoreGlobalViaContextStub stub(isolate, depth, language_mode);
+Callable CodeFactory::ToBoolean(Isolate* isolate) {
+ Handle<Code> code = ToBooleanStub::GetUninitialized(isolate);
+ return Callable(code, ToBooleanDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::ToNumber(Isolate* isolate) {
+ ToNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::InstanceOf(Isolate* isolate) {
- InstanceOfStub stub(isolate);
+Callable CodeFactory::ToString(Isolate* isolate) {
+ ToStringStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::ToBoolean(Isolate* isolate,
- ToBooleanStub::ResultMode mode,
- ToBooleanStub::Types types) {
- ToBooleanStub stub(isolate, mode, types);
+Callable CodeFactory::ToLength(Isolate* isolate) {
+ ToLengthStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::ToNumber(Isolate* isolate) {
- ToNumberStub stub(isolate);
+Callable CodeFactory::ToObject(Isolate* isolate) {
+ ToObjectStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::ToString(Isolate* isolate) {
- ToStringStub stub(isolate);
+Callable CodeFactory::NumberToString(Isolate* isolate) {
+ NumberToStringStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::ToObject(Isolate* isolate) {
- ToObjectStub stub(isolate);
+Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
+ RegExpConstructResultStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::RegExpExec(Isolate* isolate) {
+ RegExpExecStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@@ -205,6 +219,13 @@ Callable CodeFactory::StringCompare(Isolate* isolate) {
// static
+Callable CodeFactory::SubString(Isolate* isolate) {
+ SubStringStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::Typeof(Isolate* isolate) {
TypeofStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -212,6 +233,13 @@ Callable CodeFactory::Typeof(Isolate* isolate) {
// static
+Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
+ FastCloneRegExpStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::FastCloneShallowArray(Isolate* isolate) {
// TODO(mstarzinger): Thread through AllocationSiteMode at some point.
FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
@@ -227,6 +255,13 @@ Callable CodeFactory::FastCloneShallowObject(Isolate* isolate, int length) {
// static
+Callable CodeFactory::FastNewContext(Isolate* isolate, int slot_count) {
+ FastNewContextStub stub(isolate, slot_count);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::FastNewClosure(Isolate* isolate,
LanguageMode language_mode,
FunctionKind kind) {
@@ -247,6 +282,13 @@ Callable CodeFactory::ArgumentsAccess(Isolate* isolate,
// static
+Callable CodeFactory::RestArgumentsAccess(Isolate* isolate) {
+ RestParamAccessStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
AllocateHeapNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -254,17 +296,74 @@ Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
// static
-Callable CodeFactory::CallFunction(Isolate* isolate, int argc,
- CallFunctionFlags flags) {
- CallFunctionStub stub(isolate, argc, flags);
+Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
+ AllocateMutableHeapNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::PushArgsAndCall(Isolate* isolate) {
- return Callable(isolate->builtins()->PushArgsAndCall(),
- PushArgsAndCallDescriptor(isolate));
+Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
+ AllocateInNewSpaceStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
+ return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
+ ArgumentAdaptorDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
+ return Callable(isolate->builtins()->Call(mode),
+ CallTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
+ return Callable(isolate->builtins()->CallFunction(mode),
+ CallTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::Construct(Isolate* isolate) {
+ return Callable(isolate->builtins()->Construct(),
+ ConstructTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::ConstructFunction(Isolate* isolate) {
+ return Callable(isolate->builtins()->ConstructFunction(),
+ ConstructTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndCall(),
+ InterpreterPushArgsAndCallDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::InterpreterPushArgsAndConstruct(Isolate* isolate) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(),
+ InterpreterPushArgsAndConstructDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
+ // Note: If we ever use fpregs in the interpreter then we will need to
+ // save them here as well.
+ CEntryStub stub(isolate, result_size, kDontSaveFPRegs, kArgvInRegister);
+ return Callable(stub.GetCode(), InterpreterCEntryDescriptor(isolate));
}
} // namespace internal
diff --git a/chromium/v8/src/code-factory.h b/chromium/v8/src/code-factory.h
index 5a67b27fd54..2126790359c 100644
--- a/chromium/v8/src/code-factory.h
+++ b/chromium/v8/src/code-factory.h
@@ -43,9 +43,10 @@ class CodeFactory final {
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state);
static Callable CallIC(Isolate* isolate, int argc,
- CallICState::CallType call_type);
- static Callable CallICInOptimizedCode(Isolate* isolate, int argc,
- CallICState::CallType call_type);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable CallICInOptimizedCode(
+ Isolate* isolate, int argc,
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
@@ -56,47 +57,60 @@ class CodeFactory final {
static Callable CompareIC(Isolate* isolate, Token::Value op,
Strength strength);
+ static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
Strength strength);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
- static Callable LoadGlobalViaContext(Isolate* isolate, int depth);
- static Callable StoreGlobalViaContext(Isolate* isolate, int depth,
- LanguageMode language_mode);
-
static Callable InstanceOf(Isolate* isolate);
- static Callable ToBoolean(
- Isolate* isolate, ToBooleanStub::ResultMode mode,
- ToBooleanStub::Types types = ToBooleanStub::Types());
+ static Callable ToBoolean(Isolate* isolate);
static Callable ToNumber(Isolate* isolate);
static Callable ToString(Isolate* isolate);
+ static Callable ToLength(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
+ static Callable NumberToString(Isolate* isolate);
+
+ static Callable RegExpConstructResult(Isolate* isolate);
+ static Callable RegExpExec(Isolate* isolate);
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
static Callable StringCompare(Isolate* isolate);
+ static Callable SubString(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
+ static Callable FastCloneRegExp(Isolate* isolate);
static Callable FastCloneShallowArray(Isolate* isolate);
static Callable FastCloneShallowObject(Isolate* isolate, int length);
+ static Callable FastNewContext(Isolate* isolate, int slot_count);
static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
FunctionKind kind);
static Callable ArgumentsAccess(Isolate* isolate, bool is_unmapped_arguments,
bool has_duplicate_parameters);
+ static Callable RestArgumentsAccess(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
-
- static Callable CallFunction(Isolate* isolate, int argc,
- CallFunctionFlags flags);
-
- static Callable PushArgsAndCall(Isolate* isolate);
+ static Callable AllocateMutableHeapNumber(Isolate* isolate);
+ static Callable AllocateInNewSpace(Isolate* isolate);
+
+ static Callable ArgumentAdaptor(Isolate* isolate);
+ static Callable Call(Isolate* isolate,
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable CallFunction(
+ Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable Construct(Isolate* isolate);
+ static Callable ConstructFunction(Isolate* isolate);
+
+ static Callable InterpreterPushArgsAndCall(Isolate* isolate);
+ static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
+ static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
};
} // namespace internal
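Each factory method above returns a Callable, which pairs a code object with
the descriptor naming its calling convention. A backend consumes it roughly as
in this sketch (the code()/descriptor() accessors are assumed from Callable's
definition, which lies outside this hunk):

  void UseCall(Isolate* isolate) {
    Callable callable =
        CodeFactory::Call(isolate, ConvertReceiverMode::kNotNullOrUndefined);
    Handle<Code> code = callable.code();  // The code object to invoke.
    // The descriptor tells the backend which registers carry the arguments.
    CallInterfaceDescriptor descriptor = callable.descriptor();
  }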
diff --git a/chromium/v8/src/code-stubs-hydrogen.cc b/chromium/v8/src/code-stubs-hydrogen.cc
index 801079bcd94..2fab578b9b3 100644
--- a/chromium/v8/src/code-stubs-hydrogen.cc
+++ b/chromium/v8/src/code-stubs-hydrogen.cc
@@ -5,10 +5,10 @@
#include "src/code-stubs.h"
#include "src/bailout-reason.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
#include "src/field-index.h"
-#include "src/hydrogen.h"
#include "src/ic/ic.h"
-#include "src/lithium.h"
namespace v8 {
namespace internal {
@@ -250,7 +250,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate(), NULL, 256);
+ MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
{
// Update the static counter each time a new code stub is generated.
@@ -432,17 +432,78 @@ Handle<Code> TypeofStub::GenerateCode() { return DoGenerateCode(this); }
template <>
+HValue* CodeStubGraphBuilder<FastCloneRegExpStub>::BuildCodeStub() {
+ HValue* closure = GetParameter(0);
+ HValue* literal_index = GetParameter(1);
+
+ // This stub is very performance sensitive; the generated code must be tuned
+ // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
+
+ HValue* literals_array = Add<HLoadNamedField>(
+ closure, nullptr, HObjectAccess::ForLiteralsPointer());
+ HInstruction* boilerplate = Add<HLoadKeyed>(
+ literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
+ NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
+
+ IfBuilder if_notundefined(this);
+ if_notundefined.IfNot<HCompareObjectEqAndBranch>(
+ boilerplate, graph()->GetConstantUndefined());
+ if_notundefined.Then();
+ {
+ int result_size =
+ JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ HValue* result =
+ Add<HAllocate>(Add<HConstant>(result_size), HType::JSObject(),
+ NOT_TENURED, JS_REGEXP_TYPE);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForMap(),
+ Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap()));
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForPropertiesPointer(),
+ Add<HLoadNamedField>(boilerplate, nullptr,
+ HObjectAccess::ForPropertiesPointer()));
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForElementsPointer(),
+ Add<HLoadNamedField>(boilerplate, nullptr,
+ HObjectAccess::ForElementsPointer()));
+ for (int offset = JSObject::kHeaderSize; offset < result_size;
+ offset += kPointerSize) {
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(offset);
+ Add<HStoreNamedField>(result, access,
+ Add<HLoadNamedField>(boilerplate, nullptr, access));
+ }
+ Push(result);
+ }
+ if_notundefined.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
+ if_notundefined.End();
+
+ return Pop();
+}
+
+
+Handle<Code> FastCloneRegExpStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
+ HValue* closure = GetParameter(0);
+ HValue* literal_index = GetParameter(1);
// This stub is very performance sensitive; the generated code must be tuned
// so that it doesn't build an eager frame.
info()->MarkMustNotHaveEagerFrame();
+ HValue* literals_array = Add<HLoadNamedField>(
+ closure, nullptr, HObjectAccess::ForLiteralsPointer());
+
HInstruction* allocation_site = Add<HLoadKeyed>(
- GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS,
+ literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
@@ -503,9 +564,14 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* undefined = graph()->GetConstantUndefined();
+ HValue* closure = GetParameter(0);
+ HValue* literal_index = GetParameter(1);
+
+ HValue* literals_array = Add<HLoadNamedField>(
+ closure, nullptr, HObjectAccess::ForLiteralsPointer());
HInstruction* allocation_site = Add<HLoadKeyed>(
- GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS,
+ literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
@@ -635,7 +701,7 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
HInstruction* feedback_vector = GetParameter(0);
HInstruction* slot = GetParameter(1);
- Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ Add<HStoreKeyed>(feedback_vector, slot, object, nullptr, FAST_ELEMENTS,
INITIALIZING_STORE);
return feedback_vector;
}
@@ -667,7 +733,7 @@ HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
HInstruction* feedback_vector =
GetParameter(CreateWeakCellDescriptor::kVectorIndex);
HInstruction* slot = GetParameter(CreateWeakCellDescriptor::kSlotIndex);
- Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS,
+ Add<HStoreKeyed>(feedback_vector, slot, object, nullptr, FAST_ELEMENTS,
INITIALIZING_STORE);
return graph()->GetConstant0();
}
@@ -823,8 +889,8 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
HValue* value) {
HValue* result = NULL;
HInstruction* backing_store =
- Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, FAST_ELEMENTS,
- ALLOW_RETURN_HOLE);
+ Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
HValue* backing_store_length = Add<HLoadNamedField>(
backing_store, nullptr, HObjectAccess::ForFixedArrayLength());
@@ -834,10 +900,10 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
in_unmapped_range.Then();
{
if (value == NULL) {
- result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
- NEVER_RETURN_HOLE);
+ result = Add<HLoadKeyed>(backing_store, key, nullptr, nullptr,
+ FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
} else {
- Add<HStoreKeyed>(backing_store, key, value, FAST_HOLEY_ELEMENTS);
+ Add<HStoreKeyed>(backing_store, key, value, nullptr, FAST_HOLEY_ELEMENTS);
}
}
in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
@@ -894,8 +960,9 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
in_range.Then();
{
HValue* index = AddUncasted<HAdd>(key, constant_two);
- HInstruction* mapped_index = Add<HLoadKeyed>(
- elements, index, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+ HInstruction* mapped_index =
+ Add<HLoadKeyed>(elements, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS,
+ ALLOW_RETURN_HOLE);
IfBuilder is_valid(this);
is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
@@ -906,15 +973,17 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
// mapped_index is not the hole that it is indeed, a smi. An unnecessary
// smi check is being emitted.
HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
- nullptr, FAST_ELEMENTS);
+ nullptr, nullptr, FAST_ELEMENTS);
STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
if (is_load) {
- HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ HValue* result =
+ Add<HLoadKeyed>(the_context, mapped_index, nullptr, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
environment()->Push(result);
} else {
DCHECK(value != NULL);
- Add<HStoreKeyed>(the_context, mapped_index, value, FAST_ELEMENTS);
+ Add<HStoreKeyed>(the_context, mapped_index, value, nullptr,
+ FAST_ELEMENTS);
environment()->Push(value);
}
}
@@ -1074,20 +1143,6 @@ Handle<Code> StoreTransitionStub::GenerateCode() {
template <>
-HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() {
- HValue* string = BuildLoadNamedField(GetParameter(0),
- FieldIndex::ForInObjectOffset(JSValue::kValueOffset));
- return BuildLoadNamedField(string,
- FieldIndex::ForInObjectOffset(String::kLengthOffset));
-}
-
-
-Handle<Code> StringLengthStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(StoreDescriptor::kReceiverIndex),
@@ -1139,6 +1194,34 @@ Handle<Code> AllocateHeapNumberStub::GenerateCode() {
}
+template <>
+HValue* CodeStubGraphBuilder<AllocateMutableHeapNumberStub>::BuildCodeStub() {
+ HValue* result =
+ Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapObject(),
+ NOT_TENURED, MUTABLE_HEAP_NUMBER_TYPE);
+ AddStoreMapConstant(result, isolate()->factory()->mutable_heap_number_map());
+ return result;
+}
+
+
+Handle<Code> AllocateMutableHeapNumberStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
+ HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
+ JS_OBJECT_TYPE);
+ return result;
+}
+
+
+Handle<Code> AllocateInNewSpaceStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
AllocationSiteOverrideMode override_mode,
@@ -1214,7 +1297,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
// trigger it.
HValue* length = GetArgumentsLength();
HConstant* max_alloc_length =
- Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ Add<HConstant>(JSArray::kInitialMaxFastElementArray);
HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
// We need to fill with the hole if it's a smi array in the multi-argument
@@ -1245,7 +1328,7 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
HInstruction* argument = Add<HAccessArgumentsAt>(
argument_elements, checked_length, key);
- Add<HStoreKeyed>(elements, key, argument, kind);
+ Add<HStoreKeyed>(elements, key, argument, nullptr, kind);
builder.EndBody();
return new_object;
}
@@ -1342,12 +1425,11 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
if_nil.Then();
if (continuation.IsFalseReachable()) {
if_nil.Else();
- if_nil.Return(graph()->GetConstant0());
+ if_nil.Return(graph()->GetConstantFalse());
}
if_nil.End();
- return continuation.IsTrueReachable()
- ? graph()->GetConstant1()
- : graph()->GetConstantUndefined();
+ return continuation.IsTrueReachable() ? graph()->GetConstantTrue()
+ : graph()->GetConstantUndefined();
}
@@ -1363,9 +1445,9 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
HValue* left = GetParameter(BinaryOpICStub::kLeft);
HValue* right = GetParameter(BinaryOpICStub::kRight);
- Type* left_type = state.GetLeftType(zone());
- Type* right_type = state.GetRightType(zone());
- Type* result_type = state.GetResultType(zone());
+ Type* left_type = state.GetLeftType();
+ Type* right_type = state.GetRightType();
+ Type* result_type = state.GetResultType();
DCHECK(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
(state.HasSideEffects() || !result_type->Is(Type::None())));
@@ -1444,9 +1526,9 @@ HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
HValue* left = GetParameter(BinaryOpWithAllocationSiteStub::kLeft);
HValue* right = GetParameter(BinaryOpWithAllocationSiteStub::kRight);
- Type* left_type = state.GetLeftType(zone());
- Type* right_type = state.GetRightType(zone());
- Type* result_type = state.GetResultType(zone());
+ Type* left_type = state.GetLeftType();
+ Type* right_type = state.GetRightType();
+ Type* result_type = state.GetResultType();
HAllocationMode allocation_mode(allocation_site);
return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
@@ -1625,31 +1707,13 @@ Handle<Code> StringAddStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
- HValue* true_value = NULL;
- HValue* false_value = NULL;
-
- switch (stub->mode()) {
- case ToBooleanStub::RESULT_AS_SMI:
- true_value = graph()->GetConstant1();
- false_value = graph()->GetConstant0();
- break;
- case ToBooleanStub::RESULT_AS_ODDBALL:
- true_value = graph()->GetConstantTrue();
- false_value = graph()->GetConstantFalse();
- break;
- case ToBooleanStub::RESULT_AS_INVERSE_ODDBALL:
- true_value = graph()->GetConstantFalse();
- false_value = graph()->GetConstantTrue();
- break;
- }
-
IfBuilder if_true(this);
if_true.If<HBranch>(GetParameter(0), stub->types());
if_true.Then();
- if_true.Return(true_value);
+ if_true.Return(graph()->GetConstantTrue());
if_true.Else();
if_true.End();
- return false_value;
+ return graph()->GetConstantFalse();
}
@@ -1805,19 +1869,29 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
HValue* context_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kContextOffset);
+ context_slot = Add<HLoadNamedField>(context_slot, nullptr,
+ HObjectAccess::ForWeakCellValue());
HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
HValue* code_object = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
+ code_object = Add<HLoadNamedField>(code_object, nullptr,
+ HObjectAccess::ForWeakCellValue());
builder->If<HCompareObjectEqAndBranch>(native_context,
context_slot);
builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
builder->And();
builder->IfNot<HCompareObjectEqAndBranch>(code_object,
- graph()->GetConstantUndefined());
+ graph()->GetConstant0());
builder->Then();
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
+ literals = Add<HLoadNamedField>(literals, nullptr,
+ HObjectAccess::ForWeakCellValue());
+ IfBuilder maybe_deopt(this);
+ maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
+ maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
+ maybe_deopt.End();
BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
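// Note the recurring pattern in this and the following hunks: entries in the
// optimized code map are now held in WeakCells, so every load first unwraps
// the cell via HObjectAccess::ForWeakCellValue(), and a cleared cell reads as
// Smi zero. That is why the comparisons test GetConstant0() where undefined
// used to be the sentinel, and why disposed literals deopt with
// Deoptimizer::kLiteralsWereDisposed.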
@@ -1877,8 +1951,8 @@ HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
HValue* field_offset_value = Add<HConstant>(field_offset);
field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
}
- HInstruction* field_entry =
- Add<HLoadKeyed>(optimized_map, field_slot, nullptr, FAST_ELEMENTS);
+ HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
+ nullptr, nullptr, FAST_ELEMENTS);
return field_entry;
}
@@ -1941,8 +2015,10 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* shared_code =
Add<HLoadNamedField>(optimized_map, nullptr,
HObjectAccess::ForOptimizedCodeMapSharedCode());
+ shared_code = Add<HLoadNamedField>(shared_code, nullptr,
+ HObjectAccess::ForWeakCellValue());
shared_code_check.IfNot<HCompareObjectEqAndBranch>(
- shared_code, graph()->GetConstantUndefined());
+ shared_code, graph()->GetConstant0());
shared_code_check.Then();
{
// Store the context-independent optimized code.
@@ -2041,16 +2117,15 @@ HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
context());
Add<HStoreNamedField>(function_context,
HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX),
- graph()->GetConstant0());
+ graph()->GetConstantHole());
- // Copy the global object from the previous context.
- HValue* global_object = Add<HLoadNamedField>(
+ // Copy the native context from the previous context.
+ HValue* native_context = Add<HLoadNamedField>(
context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForContextSlot(
- Context::GLOBAL_OBJECT_INDEX),
- global_object);
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
+ Add<HStoreNamedField>(function_context, HObjectAccess::ForContextSlot(
+ Context::NATIVE_CONTEXT_INDEX),
+ native_context);
// Initialize the rest of the slots to undefined.
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; ++i) {
@@ -2298,13 +2373,13 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
key_index->ClearFlag(HValue::kCanOverflow);
HValue* map_to_check =
- Add<HLoadKeyed>(cache_keys, map_index, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, 0);
+ Add<HLoadKeyed>(cache_keys, map_index, nullptr, nullptr,
+ FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
lookup_if->And();
HValue* key_to_check =
- Add<HLoadKeyed>(cache_keys, key_index, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, 0);
+ Add<HLoadKeyed>(cache_keys, key_index, nullptr, nullptr,
+ FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
lookup_if->Then();
{
@@ -2315,7 +2390,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
index->ClearFlag(HValue::kCanOverflow);
HValue* property_index =
- Add<HLoadKeyed>(cache_field_offsets, index, nullptr,
+ Add<HLoadKeyed>(cache_field_offsets, index, nullptr, cache_keys,
INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
Push(property_index);
}
diff --git a/chromium/v8/src/code-stubs.cc b/chromium/v8/src/code-stubs.cc
index 5c8c763a3a9..1754288b6ed 100644
--- a/chromium/v8/src/code-stubs.cc
+++ b/chromium/v8/src/code-stubs.cc
@@ -7,12 +7,13 @@
#include <sstream>
#include "src/bootstrapper.h"
+#include "src/compiler/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -108,7 +109,7 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate(), NULL, 256);
+ MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
{
// Update the static counter each time a new code stub is generated.
@@ -341,11 +342,6 @@ void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
}
-void StringAddTFStub::PrintBaseName(std::ostream& os) const { // NOLINT
- os << "StringAddTFStub_" << flags() << "_" << pretenure_flag();
-}
-
-
InlineCacheState CompareICStub::GetICState() const {
CompareICState::State state = Max(left(), right());
switch (state) {
@@ -357,8 +353,8 @@ InlineCacheState CompareICStub::GetICState() const {
case CompareICState::INTERNALIZED_STRING:
case CompareICState::STRING:
case CompareICState::UNIQUE_NAME:
- case CompareICState::OBJECT:
- case CompareICState::KNOWN_OBJECT:
+ case CompareICState::RECEIVER:
+ case CompareICState::KNOWN_RECEIVER:
return MONOMORPHIC;
case CompareICState::GENERIC:
return ::v8::internal::GENERIC;
@@ -435,12 +431,12 @@ void CompareICStub::Generate(MacroAssembler* masm) {
case CompareICState::UNIQUE_NAME:
GenerateUniqueNames(masm);
break;
- case CompareICState::OBJECT:
- GenerateObjects(masm);
+ case CompareICState::RECEIVER:
+ GenerateReceivers(masm);
break;
- case CompareICState::KNOWN_OBJECT:
+ case CompareICState::KNOWN_RECEIVER:
DCHECK(*known_map_ != NULL);
- GenerateKnownObjects(masm);
+ GenerateKnownReceivers(masm);
break;
case CompareICState::GENERIC:
GenerateGeneric(masm);
@@ -473,38 +469,25 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) {
}
-namespace {
-
-Handle<JSFunction> GetFunction(Isolate* isolate, const char* name) {
- v8::ExtensionConfiguration no_extensions;
- MaybeHandle<Object> fun = Object::GetProperty(
- isolate, isolate->factory()->code_stub_exports_object(), name);
- Handle<JSFunction> function = Handle<JSFunction>::cast(fun.ToHandleChecked());
- DCHECK(!function->IsUndefined() &&
- "JavaScript implementation of stub not found");
- return function;
-}
-} // namespace
-
-
Handle<Code> TurboFanCodeStub::GenerateCode() {
- // Get the outer ("stub generator") function.
const char* name = CodeStub::MajorName(MajorKey());
- Handle<JSFunction> outer = GetFunction(isolate(), name);
- DCHECK_EQ(2, outer->shared()->length());
+ Zone zone;
+ CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
+ compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
+ GetCodeKind(), name);
+ GenerateAssembly(&assembler);
+ return assembler.GenerateCode();
+}
- // Invoke the outer function to get the stub itself.
- Factory* factory = isolate()->factory();
- Handle<Object> call_conv = factory->InternalizeUtf8String(name);
- Handle<Object> minor_key = factory->NewNumber(MinorKey());
- Handle<Object> args[] = {call_conv, minor_key};
- MaybeHandle<Object> result =
- Execution::Call(isolate(), outer, factory->undefined_value(), 2, args);
- Handle<JSFunction> inner = Handle<JSFunction>::cast(result.ToHandleChecked());
- // Just to make sure nobody calls this...
- inner->set_code(isolate()->builtins()->builtin(Builtins::kIllegal));
- return Compiler::GetStubCode(inner, this).ToHandleChecked();
+void StringLengthStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ compiler::Node* value = assembler->Parameter(0);
+ compiler::Node* string =
+ assembler->LoadObjectField(value, JSValue::kValueOffset);
+ compiler::Node* result =
+ assembler->LoadObjectField(string, String::kLengthOffset);
+ assembler->Return(result);
}
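The pattern above generalizes to any TurboFan stub: subclass TurboFanCodeStub
and emit the graph directly in GenerateAssembly. A hypothetical minimal stub,
using only assembler calls that appear in this hunk:

  void IdentityStub::GenerateAssembly(
      compiler::CodeStubAssembler* assembler) const {
    // Returns its first parameter unchanged.
    compiler::Node* value = assembler->Parameter(0);
    assembler->Return(value);
  }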
@@ -640,8 +623,7 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
return LoadWithVectorDescriptor(isolate());
} else {
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return FLAG_vector_stores ? VectorStoreICDescriptor(isolate())
- : StoreDescriptor(isolate());
+ return VectorStoreICDescriptor(isolate());
}
}
@@ -667,19 +649,13 @@ void ToObjectStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor()
const {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor(isolate());
- }
- return StoreTransitionDescriptor(isolate());
+ return VectorStoreTransitionDescriptor(isolate());
}
CallInterfaceDescriptor
ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor(isolate());
- }
- return StoreTransitionDescriptor(isolate());
+ return VectorStoreTransitionDescriptor(isolate());
}
@@ -701,6 +677,13 @@ void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
}
+void FastCloneRegExpStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ FastCloneRegExpDescriptor call_descriptor(isolate());
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kCreateRegExpLiteral)->entry);
+}
+
+
void FastCloneShallowArrayStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
FastCloneShallowArrayDescriptor call_descriptor(isolate());
@@ -744,6 +727,18 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
+void AllocateMutableHeapNumberStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize();
+}
+
+
+void AllocateInNewSpaceStub::InitializeDescriptor(
+ CodeStubDescriptor* descriptor) {
+ descriptor->Initialize();
+}
+
+
void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_CompareNilIC_Miss));
descriptor->SetMissHandler(ExternalReference(
@@ -843,6 +838,9 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
+void RestParamAccessStub::Generate(MacroAssembler* masm) { GenerateNew(masm); }
+
+
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type()) {
case READ_ELEMENT:
@@ -881,14 +879,8 @@ void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
}
-void CallFunctionStub::PrintName(std::ostream& os) const { // NOLINT
- os << "CallFunctionStub_Args" << argc();
-}
-
-
-void CallConstructStub::PrintName(std::ostream& os) const { // NOLINT
- os << "CallConstructStub";
- if (RecordCallTarget()) os << "_Recording";
+void RestParamAccessStub::PrintName(std::ostream& os) const { // NOLINT
+ os << "RestParamAccessStub_";
}
@@ -968,7 +960,7 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
} else if (object->IsSmi()) {
Add(SMI);
return Smi::cast(*object)->value() != 0;
- } else if (object->IsSpecObject()) {
+ } else if (object->IsJSReceiver()) {
Add(SPEC_OBJECT);
return !object->IsUndetectableObject();
} else if (object->IsString()) {
@@ -1049,7 +1041,7 @@ InternalArrayConstructorStub::InternalArrayConstructorStub(
Representation RepresentationFromType(Type* type) {
- if (type->Is(Type::UntaggedSigned()) || type->Is(Type::UntaggedUnsigned())) {
+ if (type->Is(Type::UntaggedIntegral())) {
return Representation::Integer32();
}
@@ -1064,5 +1056,6 @@ Representation RepresentationFromType(Type* type) {
DCHECK(!type->Is(Type::Untagged()));
return Representation::Tagged();
}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/code-stubs.h b/chromium/v8/src/code-stubs.h
index 163fdd88086..21e21356bb6 100644
--- a/chromium/v8/src/code-stubs.h
+++ b/chromium/v8/src/code-stubs.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/codegen.h"
+#include "src/compiler/code-stub-assembler.h"
#include "src/globals.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
@@ -27,7 +28,6 @@ namespace internal {
V(CallApiAccessor) \
V(CallApiGetter) \
V(CallConstruct) \
- V(CallFunction) \
V(CallIC) \
V(CEntry) \
V(CompareIC) \
@@ -44,14 +44,15 @@ namespace internal {
V(MathPow) \
V(ProfileEntryHook) \
V(RecordWrite) \
+ V(RestParamAccess) \
V(RegExpExec) \
- V(StoreArrayLiteralElement) \
V(StoreBufferOverflow) \
V(StoreElement) \
V(StringCompare) \
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
+ V(ToLength) \
V(ToString) \
V(ToObject) \
V(VectorStoreICTrampoline) \
@@ -60,6 +61,8 @@ namespace internal {
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(AllocateHeapNumber) \
+ V(AllocateMutableHeapNumber) \
+ V(AllocateInNewSpace) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
@@ -69,6 +72,7 @@ namespace internal {
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(ElementsTransitionAndStore) \
+ V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(FastNewClosure) \
@@ -94,10 +98,7 @@ namespace internal {
V(KeyedLoadIC) \
V(LoadIC) \
/* TurboFanCodeStubs */ \
- V(StringLengthTF) \
- V(StringAddTF) \
- /* TurboFanICs */ \
- V(MathFloor) \
+ V(StringLength) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
V(LoadConstant) \
@@ -107,8 +108,7 @@ namespace internal {
V(KeyedStoreSloppyArguments) \
V(StoreField) \
V(StoreGlobal) \
- V(StoreTransition) \
- V(StringLength)
+ V(StoreTransition)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -347,19 +347,6 @@ class CodeStub BASE_EMBEDDED {
}; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_IC(NAME, SUPER, DESC) \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- if (GetCallMode() == CALL_FROM_OPTIMIZED_CODE) { \
- return DESC##CallFromOptimizedCodeDescriptor(isolate()); \
- } else { \
- return DESC##CallFromUnoptimizedCodeDescriptor(isolate()); \
- } \
- }; \
- \
- protected: \
- DEFINE_CODE_STUB(NAME, SUPER)
-
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
public: \
Handle<Code> GenerateCode() override; \
@@ -539,7 +526,7 @@ class TurboFanCodeStub : public CodeStub {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GenerateCode() override;
- virtual int GetStackParameterCount() const override {
+ int GetStackParameterCount() const override {
return GetCallInterfaceDescriptor().GetStackParameterCount();
}
@@ -548,35 +535,11 @@ class TurboFanCodeStub : public CodeStub {
protected:
explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
- private:
- DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
-};
-
-
-class TurboFanIC : public TurboFanCodeStub {
- public:
- enum CallMode { CALL_FROM_UNOPTIMIZED_CODE, CALL_FROM_OPTIMIZED_CODE };
-
- protected:
- explicit TurboFanIC(Isolate* isolate, CallMode mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CallModeBits::encode(mode);
- }
-
- CallMode GetCallMode() const { return CallModeBits::decode(minor_key_); }
-
- void set_sub_minor_key(uint32_t key) {
- minor_key_ = SubMinorKeyBits::update(minor_key_, key);
- }
-
- uint32_t sub_minor_key() const { return SubMinorKeyBits::decode(minor_key_); }
-
- static const int kSubMinorKeyBits = kStubMinorKeyBits - 1;
+ virtual void GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const = 0;
private:
- class CallModeBits : public BitField<CallMode, 0, 1> {};
- class SubMinorKeyBits : public BitField<int, 1, kSubMinorKeyBits> {};
- DEFINE_CODE_STUB_BASE(TurboFanIC, TurboFanCodeStub);
+ DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
};
@@ -597,7 +560,8 @@ class RuntimeCallHelper {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#if V8_TARGET_ARCH_IA32
#include "src/ia32/code-stubs-ia32.h"
@@ -629,9 +593,9 @@ class StubRuntimeCallHelper : public RuntimeCallHelper {
public:
StubRuntimeCallHelper() {}
- virtual void BeforeCall(MacroAssembler* masm) const;
+ void BeforeCall(MacroAssembler* masm) const override;
- virtual void AfterCall(MacroAssembler* masm) const;
+ void AfterCall(MacroAssembler* masm) const override;
};
@@ -640,31 +604,24 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
public:
NopRuntimeCallHelper() {}
- virtual void BeforeCall(MacroAssembler* masm) const {}
-
- virtual void AfterCall(MacroAssembler* masm) const {}
-};
-
+ void BeforeCall(MacroAssembler* masm) const override {}
-class MathFloorStub : public TurboFanIC {
- public:
- explicit MathFloorStub(Isolate* isolate, TurboFanIC::CallMode mode)
- : TurboFanIC(isolate, mode) {}
- Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- DEFINE_TURBOFAN_IC(MathFloor, TurboFanIC, MathRoundVariant);
+ void AfterCall(MacroAssembler* masm) const override {}
};
-class StringLengthTFStub : public TurboFanCodeStub {
+class StringLengthStub : public TurboFanCodeStub {
public:
- explicit StringLengthTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+ explicit StringLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
InlineCacheState GetICState() const override { return MONOMORPHIC; }
ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+ void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_CODE_STUB(StringLengthTF, TurboFanCodeStub);
+ DEFINE_CODE_STUB(StringLength, TurboFanCodeStub);
};
@@ -687,34 +644,6 @@ enum StringAddFlags {
std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
-class StringAddTFStub : public TurboFanCodeStub {
- public:
- StringAddTFStub(Isolate* isolate, StringAddFlags flags,
- PretenureFlag pretenure_flag)
- : TurboFanCodeStub(isolate) {
- minor_key_ = StringAddFlagsBits::encode(flags) |
- PretenureFlagBits::encode(pretenure_flag);
- }
-
- StringAddFlags flags() const {
- return StringAddFlagsBits::decode(MinorKey());
- }
-
- PretenureFlag pretenure_flag() const {
- return PretenureFlagBits::decode(MinorKey());
- }
-
- private:
- class StringAddFlagsBits : public BitField<StringAddFlags, 0, 3> {};
- class PretenureFlagBits : public BitField<PretenureFlag, 3, 1> {};
-
- void PrintBaseName(std::ostream& os) const override; // NOLINT
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
- DEFINE_CODE_STUB(StringAddTF, TurboFanCodeStub);
-};
-
-
class NumberToStringStub final : public HydrogenCodeStub {
public:
explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -791,6 +720,16 @@ class FastNewContextStub final : public HydrogenCodeStub {
};
+class FastCloneRegExpStub final : public HydrogenCodeStub {
+ public:
+ explicit FastCloneRegExpStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneRegExp);
+ DEFINE_HYDROGEN_CODE_STUB(FastCloneRegExp, HydrogenCodeStub);
+};
+
+
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
FastCloneShallowArrayStub(Isolate* isolate,
@@ -972,18 +911,15 @@ class CallICStub: public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- InlineCacheState GetICState() const override { return DEFAULT; }
+ InlineCacheState GetICState() const override { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
protected:
- bool CallAsMethod() const {
- return state().call_type() == CallICState::METHOD;
- }
-
- int arg_count() const { return state().arg_count(); }
+ int arg_count() const { return state().argc(); }
+ ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
CallICState state() const {
return CallICState(static_cast<ExtraICState>(minor_key_));
@@ -1170,18 +1106,6 @@ class LoadConstantStub : public HandlerStub {
};
-class StringLengthStub: public HandlerStub {
- public:
- explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {}
-
- protected:
- Code::Kind kind() const override { return Code::LOAD_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
-
- DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
-};
-
-
class StoreFieldStub : public HandlerStub {
public:
StoreFieldStub(Isolate* isolate, FieldIndex index,
@@ -1233,18 +1157,15 @@ class StoreTransitionHelper {
}
static Register SlotRegister() {
- DCHECK(FLAG_vector_stores);
return VectorStoreTransitionDescriptor::SlotRegister();
}
static Register VectorRegister() {
- DCHECK(FLAG_vector_stores);
return VectorStoreTransitionDescriptor::VectorRegister();
}
static Register MapRegister() {
- return FLAG_vector_stores ? VectorStoreTransitionDescriptor::MapRegister()
- : StoreTransitionDescriptor::MapRegister();
+ return VectorStoreTransitionDescriptor::MapRegister();
}
static int ReceiverIndex() {
@@ -1255,26 +1176,23 @@ class StoreTransitionHelper {
static int ValueIndex() { return StoreTransitionDescriptor::kValueIndex; }
- static int SlotIndex() {
- DCHECK(FLAG_vector_stores);
- return VectorStoreTransitionDescriptor::kSlotIndex;
+ static int MapIndex() {
+ DCHECK(static_cast<int>(VectorStoreTransitionDescriptor::kMapIndex) ==
+ static_cast<int>(StoreTransitionDescriptor::kMapIndex));
+ return StoreTransitionDescriptor::kMapIndex;
}
static int VectorIndex() {
- DCHECK(FLAG_vector_stores);
+ if (HasVirtualSlotArg()) {
+ return VectorStoreTransitionDescriptor::kVirtualSlotVectorIndex;
+ }
return VectorStoreTransitionDescriptor::kVectorIndex;
}
- static int MapIndex() {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor::kMapIndex;
- }
- return StoreTransitionDescriptor::kMapIndex;
+ // Some platforms don't have a slot arg.
+ static bool HasVirtualSlotArg() {
+ return SlotRegister().is(no_reg);
}
-
- // Some platforms push Slot, Vector, Map on the stack instead of in
- // registers.
- static bool UsesStackArgs() { return MapRegister().is(no_reg); }
};
@@ -1353,7 +1271,7 @@ class StoreGlobalStub : public HandlerStub {
return isolate->factory()->termination_exception();
}
- Handle<Code> GetCodeCopyFromTemplate(Handle<GlobalObject> global,
+ Handle<Code> GetCodeCopyFromTemplate(Handle<JSGlobalObject> global,
Handle<PropertyCell> cell) {
Code::FindAndReplacePattern pattern;
if (check_global()) {
@@ -1703,9 +1621,9 @@ class CompareICStub : public PlatformCodeStub {
void GenerateInternalizedStrings(MacroAssembler* masm);
void GenerateStrings(MacroAssembler* masm);
void GenerateUniqueNames(MacroAssembler* masm);
- void GenerateObjects(MacroAssembler* masm);
+ void GenerateReceivers(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
- void GenerateKnownObjects(MacroAssembler* masm);
+ void GenerateKnownReceivers(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op() == Token::EQ_STRICT; }
@@ -1714,7 +1632,7 @@ class CompareICStub : public PlatformCodeStub {
void AddToSpecialCache(Handle<Code> new_object) override;
bool FindCodeInSpecialCache(Code** code_out) override;
bool UseSpecialCache() override {
- return state() == CompareICState::KNOWN_OBJECT;
+ return state() == CompareICState::KNOWN_RECEIVER;
}
class OpBits : public BitField<int, 0, 3> {};
@@ -1823,9 +1741,11 @@ std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s);
class CEntryStub : public PlatformCodeStub {
public:
CEntryStub(Isolate* isolate, int result_size,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ ArgvMode argv_mode = kArgvOnStack)
: PlatformCodeStub(isolate) {
- minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
+ minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
+ ArgvMode::encode(argv_mode == kArgvInRegister);
DCHECK(result_size == 1 || result_size == 2);
#if _WIN64 || V8_TARGET_ARCH_PPC
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
@@ -1840,6 +1760,7 @@ class CEntryStub : public PlatformCodeStub {
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
+ bool argv_in_register() const { return ArgvMode::decode(minor_key_); }
#if _WIN64 || V8_TARGET_ARCH_PPC
int result_size() const { return ResultSizeBits::decode(minor_key_); }
#endif // _WIN64
@@ -1847,7 +1768,8 @@ class CEntryStub : public PlatformCodeStub {
bool NeedsImmovableCode() override;
class SaveDoublesBits : public BitField<bool, 0, 1> {};
- class ResultSizeBits : public BitField<int, 1, 3> {};
+ class ArgvMode : public BitField<bool, 1, 1> {};
+ class ResultSizeBits : public BitField<int, 2, 3> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(CEntry, PlatformCodeStub);
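With the new ArgvMode bit, the minor key packs as follows (read off the
BitField declarations above; ResultSizeBits only matters on Win64 and PPC):

  // bit 0     SaveDoublesBits  save_doubles == kSaveFPRegs
  // bit 1     ArgvMode         argv_mode == kArgvInRegister
  // bits 2-4  ResultSizeBits   result size (Win64 / PPC only)

For example, CEntryStub(isolate, 1, kDontSaveFPRegs, kArgvInRegister), as
constructed by CodeFactory::InterpreterCEntry above, encodes minor_key_ ==
0b10 on targets that do not store the result size.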
@@ -1930,6 +1852,20 @@ class ArgumentsAccessStub: public PlatformCodeStub {
};
+class RestParamAccessStub : public PlatformCodeStub {
+ public:
+ explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ private:
+ void GenerateNew(MacroAssembler* masm);
+
+ void PrintName(std::ostream& os) const override; // NOLINT
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(RestParamAccess);
+ DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
+};
+
+
class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
@@ -1954,63 +1890,10 @@ class RegExpConstructResultStub final : public HydrogenCodeStub {
};
-// TODO(bmeurer): Deprecate the CallFunctionStub in favor of the more general
-// Invoke family of builtins.
-class CallFunctionStub: public PlatformCodeStub {
- public:
- CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags)
- : PlatformCodeStub(isolate) {
- DCHECK(argc >= 0 && argc <= Code::kMaxArguments);
- minor_key_ = ArgcBits::encode(argc) | FlagBits::encode(flags);
- }
-
- private:
- int argc() const { return ArgcBits::decode(minor_key_); }
- int flags() const { return FlagBits::decode(minor_key_); }
-
- bool CallAsMethod() const {
- return flags() == CALL_AS_METHOD || flags() == WRAP_AND_CALL;
- }
-
- bool NeedsChecks() const { return flags() != WRAP_AND_CALL; }
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
- class FlagBits : public BitField<CallFunctionFlags, 0, 2> {};
- class ArgcBits : public BitField<unsigned, 2, Code::kArgumentsBits> {};
- STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction);
- DEFINE_PLATFORM_CODE_STUB(CallFunction, PlatformCodeStub);
-};
-
-
-class CallConstructStub: public PlatformCodeStub {
+// TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
+class CallConstructStub final : public PlatformCodeStub {
public:
- CallConstructStub(Isolate* isolate, CallConstructorFlags flags)
- : PlatformCodeStub(isolate) {
- minor_key_ = FlagBits::encode(flags);
- }
-
- void FinishCode(Handle<Code> code) override {
- code->set_has_function_cache(RecordCallTarget());
- }
-
- private:
- CallConstructorFlags flags() const { return FlagBits::decode(minor_key_); }
-
- bool RecordCallTarget() const {
- return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
- }
-
- bool IsSuperConstructorCall() const {
- return (flags() & SUPER_CONSTRUCTOR_CALL) != 0;
- }
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- class FlagBits : public BitField<CallConstructorFlags, 0, 2> {};
+ explicit CallConstructStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
@@ -2250,7 +2133,7 @@ class LoadICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
@@ -2286,7 +2169,7 @@ class VectorStoreICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
@@ -2324,7 +2207,7 @@ class CallICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
@@ -2350,7 +2233,7 @@ class LoadICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2373,7 +2256,7 @@ class KeyedLoadICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2396,7 +2279,7 @@ class VectorStoreICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
+ InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2419,8 +2302,8 @@ class VectorKeyedStoreICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
- InlineCacheState GetICState() const final { return DEFAULT; }
- virtual ExtraICState GetExtraICState() const final {
+ InlineCacheState GetICState() const final { return GENERIC; }
+ ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2485,6 +2368,7 @@ class ScriptContextFieldStub : public HandlerStub {
const ScriptContextTable::LookupResult* lookup_result)
: HandlerStub(isolate) {
DCHECK(Accepted(lookup_result));
+ STATIC_ASSERT(kContextIndexBits + kSlotIndexBits <= kSubMinorKeyBits);
set_sub_minor_key(ContextIndexBits::encode(lookup_result->context_index) |
SlotIndexBits::encode(lookup_result->slot_index));
}
@@ -2501,7 +2385,7 @@ class ScriptContextFieldStub : public HandlerStub {
}
private:
- static const int kContextIndexBits = 13;
+ static const int kContextIndexBits = 9;
static const int kSlotIndexBits = 13;
class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
class SlotIndexBits
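With the context index narrowed from 13 to 9 bits, the two fields consume 9 + 13 = 22 bits, which the STATIC_ASSERT added to the constructor checks against the sub-minor-key budget. A compile-time sketch of that arithmetic; kSubMinorKeyBits is a placeholder value here, since only the lower bound matters (the authoritative value is in code-stubs.h):

// Placeholder budget: the real kSubMinorKeyBits is defined in
// src/code-stubs.h; the assert only requires it to be >= 22.
constexpr int kSubMinorKeyBits = 24;

constexpr int kContextIndexBits = 9;  // narrowed from 13 by this patch
constexpr int kSlotIndexBits = 13;

// Mirrors the STATIC_ASSERT added to ScriptContextFieldStub's constructor:
// both bit fields must fit side by side in the sub-minor key.
static_assert(kContextIndexBits + kSlotIndexBits <= kSubMinorKeyBits,
              "context and slot indices must fit in the sub-minor key");

// The cost of the narrowing: at most 2^9 = 512 script contexts per stub.
static_assert((1 << kContextIndexBits) == 512, "9 bits address 512 contexts");

int main() { return 0; }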
@@ -2594,10 +2478,7 @@ class StoreFastElementStub : public HydrogenCodeStub {
}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (FLAG_vector_stores) {
- return VectorStoreICDescriptor(isolate());
- }
- return StoreDescriptor(isolate());
+ return VectorStoreICDescriptor(isolate());
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
@@ -2650,6 +2531,28 @@ class AllocateHeapNumberStub final : public HydrogenCodeStub {
};
+class AllocateMutableHeapNumberStub final : public HydrogenCodeStub {
+ public:
+ explicit AllocateMutableHeapNumberStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
+ DEFINE_HYDROGEN_CODE_STUB(AllocateMutableHeapNumber, HydrogenCodeStub);
+};
+
+
+class AllocateInNewSpaceStub final : public HydrogenCodeStub {
+ public:
+ explicit AllocateInNewSpaceStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateInNewSpace);
+ DEFINE_HYDROGEN_CODE_STUB(AllocateInNewSpace, HydrogenCodeStub);
+};
+
+
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(Isolate* isolate,
@@ -2826,10 +2729,7 @@ class StoreElementStub : public PlatformCodeStub {
}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (FLAG_vector_stores) {
- return VectorStoreICDescriptor(isolate());
- }
- return StoreDescriptor(isolate());
+ return VectorStoreICDescriptor(isolate());
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
@@ -2860,12 +2760,6 @@ class ToBooleanStub: public HydrogenCodeStub {
NUMBER_OF_TYPES
};
- enum ResultMode {
- RESULT_AS_SMI, // For Smi(1) on truthy value, Smi(0) otherwise.
- RESULT_AS_ODDBALL, // For {true} on truthy value, {false} otherwise.
- RESULT_AS_INVERSE_ODDBALL // For {false} on truthy value, {true} otherwise.
- };
-
// At most 16 different types can be distinguished, because the Code object
// only has room for two bytes to hold a set of these types. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 16);
@@ -2885,21 +2779,13 @@ class ToBooleanStub: public HydrogenCodeStub {
static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
};
- ToBooleanStub(Isolate* isolate, ResultMode mode, Types types = Types())
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(types.ToIntegral()) |
- ResultModeBits::encode(mode));
- }
-
ToBooleanStub(Isolate* isolate, ExtraICState state)
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)) |
- ResultModeBits::encode(RESULT_AS_SMI));
+ set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
}
bool UpdateStatus(Handle<Object> object);
Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
- ResultMode mode() const { return ResultModeBits::decode(sub_minor_key()); }
Code::Kind GetCodeKind() const override { return Code::TO_BOOLEAN_IC; }
void PrintState(std::ostream& os) const override; // NOLINT
@@ -2923,11 +2809,9 @@ class ToBooleanStub: public HydrogenCodeStub {
private:
ToBooleanStub(Isolate* isolate, InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {
- set_sub_minor_key(ResultModeBits::encode(RESULT_AS_SMI));
}
class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
- class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
@@ -2967,16 +2851,6 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
};
-class StoreArrayLiteralElementStub : public PlatformCodeStub {
- public:
- explicit StoreArrayLiteralElementStub(Isolate* isolate)
- : PlatformCodeStub(isolate) { }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreArrayLiteralElement);
- DEFINE_PLATFORM_CODE_STUB(StoreArrayLiteralElement, PlatformCodeStub);
-};
-
-
class StubFailureTrampolineStub : public PlatformCodeStub {
public:
StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode)
@@ -3058,6 +2932,15 @@ class ToNumberStub final : public PlatformCodeStub {
};
+class ToLengthStub final : public PlatformCodeStub {
+ public:
+ explicit ToLengthStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToLength);
+ DEFINE_PLATFORM_CODE_STUB(ToLength, PlatformCodeStub);
+};
+
+
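The new ToLengthStub lowers the ES2015 ToLength conversion. For reference, the abstract operation it implements clamps an integer-converted value into [0, 2^53 - 1]; a plain C++ sketch of the numeric behavior (not the stub's actual machine-code path, which also routes non-number inputs through ToNumber):

#include <algorithm>
#include <cmath>

// ES2015 7.1.15 ToLength over doubles: integer-convert, then clamp.
double ToLength(double value) {
  if (std::isnan(value)) return 0.0;                  // NaN -> +0
  double len = std::trunc(value);                     // ToInteger step
  if (len <= 0.0) return 0.0;                         // negatives clamp to 0
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  return std::min(len, kMaxSafeInteger);              // clamp the top end
}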
class ToStringStub final : public PlatformCodeStub {
public:
explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
diff --git a/chromium/v8/src/code-stubs.js b/chromium/v8/src/code-stubs.js
deleted file mode 100644
index ab06f6c63b1..00000000000
--- a/chromium/v8/src/code-stubs.js
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, code_stubs) {
-
-"use strict";
-
-code_stubs.StringLengthTFStub = function StringLengthTFStub(call_conv, minor_key) {
- var stub = function(receiver, name, i, v) {
- // i and v are dummy parameters mandated by the InterfaceDescriptor,
- // (LoadWithVectorDescriptor).
- return %_StringGetLength(%_JSValueGetValue(receiver));
- }
- return stub;
-}
-
-code_stubs.StringAddTFStub = function StringAddTFStub(call_conv, minor_key) {
- var stub = function(left, right) {
- return %StringAdd(left, right);
- }
- return stub;
-}
-
-const kTurboFanICCallModeMask = 1;
-const kTurboFanICCallForUnptimizedCode = 0;
-const kTurboFanICCallForOptimizedCode = 1;
-
-code_stubs.MathFloorStub = function MathFloorStub(call_conv, minor_key) {
- var call_from_optimized_ic = function(f, i, tv, receiver, v) {
- "use strict";
- // |f| is this function's JSFunction
- // |i| is TypeFeedbackVector slot # of callee's CallIC for Math.floor call
- // |receiver| is receiver, should not be used
- // |tv| is the calling function's type vector
- // |v| is the value to floor
- if (f !== %_FixedArrayGet(tv, i|0)) {
- return %_CallFunction(receiver, v, f);
- }
- var r = %_MathFloor(+v);
- if (%_IsMinusZero(r)) {
- // Collect type feedback when the result of the floor is -0. This is
- // accomplished by storing a sentinel in the second, "extra"
- // TypeFeedbackVector slot corresponding to the Math.floor CallIC call in
- // the caller's TypeVector.
- %_FixedArraySet(tv, ((i|0)+1)|0, 1);
- return -0;
- }
- // Return integers in smi range as smis.
- var trunc = r|0;
- if (trunc === r) {
- return trunc;
- }
- return r;
- }
- var call_mode = (minor_key & kTurboFanICCallModeMask);
- if (call_mode == kTurboFanICCallForOptimizedCode) {
- return call_from_optimized_ic;
- } else {
- %SetForceInlineFlag(call_from_optimized_ic);
- var call_from_unoptimized_ic = function(f, i, receiver, v) {
- var tv = %_GetTypeFeedbackVector(%_GetCallerJSFunction());
- return call_from_optimized_ic(f, i, tv, receiver, v);
- }
- return call_from_unoptimized_ic;
- }
-}
-
-})
diff --git a/chromium/v8/src/codegen.cc b/chromium/v8/src/codegen.cc
index fd039d0f8a4..a57cbb3a5e6 100644
--- a/chromium/v8/src/codegen.cc
+++ b/chromium/v8/src/codegen.cc
@@ -7,37 +7,19 @@
#if defined(V8_OS_AIX)
#include <fenv.h> // NOLINT(build/c++11)
#endif
+#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/parser.h"
-#include "src/prettyprinter.h"
+#include "src/parsing/parser.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/rewriter.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-#if defined(_WIN64)
-typedef double (*ModuloFunction)(double, double);
-static ModuloFunction modulo_function = NULL;
-// Defined in codegen-x64.cc.
-ModuloFunction CreateModuloFunction();
-
-void init_modulo_function() {
- modulo_function = CreateModuloFunction();
-}
-
-
-double modulo(double x, double y) {
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- return (*modulo_function)(x, y);
-}
-#elif defined(_WIN32)
-
+#if defined(V8_OS_WIN)
double modulo(double x, double y) {
// Workaround MS fmod bugs. ECMA-262 says:
// dividend is finite and divisor is an infinity => result equals dividend
@@ -61,31 +43,29 @@ double modulo(double x, double y) {
return std::fmod(x, y);
#endif
}
-#endif // defined(_WIN64)
-
-
-#define UNARY_MATH_FUNCTION(name, generator) \
-static UnaryMathFunction fast_##name##_function = NULL; \
-void init_fast_##name##_function() { \
- fast_##name##_function = generator; \
-} \
-double fast_##name(double x) { \
- return (*fast_##name##_function)(x); \
-}
+#endif // defined(V8_OS_WIN)
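The consolidated V8_OS_WIN branch keeps the long-standing workaround for the MS CRT's fmod, whose edge-case behavior historically diverged from ECMA-262. A small sketch of the cases the comment refers to; this only states the results ECMA-262 requires, since exactly which of them a given CRT version got wrong varies:

#include <cassert>
#include <cmath>
#include <limits>

// ECMA-262 %-operator edge cases a correct modulo must satisfy.
void CheckEcmaModuloCases(double (*modulo)(double, double)) {
  const double inf = std::numeric_limits<double>::infinity();
  assert(modulo(3.0, inf) == 3.0);        // finite % infinity == dividend
  assert(modulo(-3.0, -inf) == -3.0);     // sign of divisor is irrelevant
  assert(modulo(0.0, 2.0) == 0.0);        // zero dividend passes through
  assert(std::isnan(modulo(inf, 2.0)));   // infinite dividend -> NaN
  assert(std::isnan(modulo(3.0, 0.0)));   // zero divisor -> NaN
}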
+
+
+#define UNARY_MATH_FUNCTION(name, generator) \
+ static UnaryMathFunctionWithIsolate fast_##name##_function = nullptr; \
+ double std_##name(double x, Isolate* isolate) { return std::name(x); } \
+ void init_fast_##name##_function(Isolate* isolate) { \
+ if (FLAG_fast_math) fast_##name##_function = generator(isolate); \
+ if (!fast_##name##_function) fast_##name##_function = std_##name; \
+ } \
+ void lazily_initialize_fast_##name(Isolate* isolate) { \
+ if (!fast_##name##_function) init_fast_##name##_function(isolate); \
+ } \
+ double fast_##name(double x, Isolate* isolate) { \
+ return (*fast_##name##_function)(x, isolate); \
+ }
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
+UNARY_MATH_FUNCTION(exp, CreateExpFunction)
#undef UNARY_MATH_FUNCTION
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
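The rewritten macro is clearer once expanded. What UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction) now produces, written out by hand with whitespace normalized (names resolve inside codegen.cc):

static UnaryMathFunctionWithIsolate fast_sqrt_function = nullptr;
double std_sqrt(double x, Isolate* isolate) { return std::sqrt(x); }
void init_fast_sqrt_function(Isolate* isolate) {
  if (FLAG_fast_math) fast_sqrt_function = CreateSqrtFunction(isolate);
  if (!fast_sqrt_function) fast_sqrt_function = std_sqrt;  // portable fallback
}
void lazily_initialize_fast_sqrt(Isolate* isolate) {
  if (!fast_sqrt_function) init_fast_sqrt_function(isolate);
}
double fast_sqrt(double x, Isolate* isolate) {
  return (*fast_sqrt_function)(x, isolate);
}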
#define __ ACCESS_MASM(masm_)
#ifdef DEBUG
@@ -122,19 +102,19 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
if (FLAG_trace_codegen || print_source || print_ast) {
base::SmartArrayPointer<char> name = info->GetDebugName();
- PrintF("[generating %s code for %s function: %s]", kind, ftype, name.get());
+ PrintF("[generating %s code for %s function: %s]\n", kind, ftype,
+ name.get());
}
#ifdef DEBUG
if (info->parse_info() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(info->isolate(), info->zone())
- .PrintProgram(info->literal()));
+ PrettyPrinter(info->isolate()).PrintProgram(info->literal()));
}
if (info->parse_info() && print_ast) {
- PrintF("--- AST ---\n%s\n", AstPrinter(info->isolate(), info->zone())
- .PrintProgram(info->literal()));
+ PrintF("--- AST ---\n%s\n",
+ AstPrinter(info->isolate()).PrintProgram(info->literal()));
}
#endif // DEBUG
}
diff --git a/chromium/v8/src/codegen.h b/chromium/v8/src/codegen.h
index 04f130999ee..512cbfc40a9 100644
--- a/chromium/v8/src/codegen.h
+++ b/chromium/v8/src/codegen.h
@@ -89,22 +89,19 @@ class CodeGenerator {
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
-typedef double (*UnaryMathFunction)(double x);
+typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
-UnaryMathFunction CreateExpFunction();
-UnaryMathFunction CreateSqrtFunction();
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate);
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
double modulo(double x, double y);
// Custom implementation of math functions.
-double fast_exp(double input);
-double fast_sqrt(double input);
-#ifdef _WIN64
-void init_modulo_function();
-#endif
-void lazily_initialize_fast_exp();
-void init_fast_sqrt_function();
+double fast_exp(double input, Isolate* isolate);
+double fast_sqrt(double input, Isolate* isolate);
+void lazily_initialize_fast_exp(Isolate* isolate);
+void lazily_initialize_fast_sqrt(Isolate* isolate);
class ElementsTransitionGenerator : public AllStatic {
@@ -145,7 +142,7 @@ static const int kNumberDictionaryProbes = 4;
class CodeAgingHelper {
public:
- CodeAgingHelper();
+ explicit CodeAgingHelper(Isolate* isolate);
uint32_t young_sequence_length() const { return young_sequence_.length(); }
bool IsYoung(byte* candidate) const {
@@ -170,6 +167,7 @@ class CodeAgingHelper {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CODEGEN_H_
diff --git a/chromium/v8/src/compilation-cache.h b/chromium/v8/src/compilation-cache.h
index 2d7609ec187..2295f4c6855 100644
--- a/chromium/v8/src/compilation-cache.h
+++ b/chromium/v8/src/compilation-cache.h
@@ -236,6 +236,7 @@ class CompilationCache {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILATION_CACHE_H_
diff --git a/chromium/v8/src/compilation-dependencies.cc b/chromium/v8/src/compilation-dependencies.cc
index 643b88ab0ec..96b3859e9ab 100644
--- a/chromium/v8/src/compilation-dependencies.cc
+++ b/chromium/v8/src/compilation-dependencies.cc
@@ -106,6 +106,38 @@ void CompilationDependencies::Rollback() {
}
+void CompilationDependencies::AssumeMapNotDeprecated(Handle<Map> map) {
+ DCHECK(!map->is_deprecated());
+ // Do nothing if the map cannot be deprecated.
+ if (map->CanBeDeprecated()) {
+ Insert(DependentCode::kTransitionGroup, map);
+ }
+}
+
+
+void CompilationDependencies::AssumeMapStable(Handle<Map> map) {
+ DCHECK(map->is_stable());
+ // Do nothing if the map cannot transition.
+ if (map->CanTransition()) {
+ Insert(DependentCode::kPrototypeCheckGroup, map);
+ }
+}
+
+
+void CompilationDependencies::AssumePrototypeMapsStable(
+ Handle<Map> map, MaybeHandle<JSReceiver> prototype) {
+ for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
+ Handle<JSReceiver> const current =
+ PrototypeIterator::GetCurrent<JSReceiver>(i);
+ AssumeMapStable(handle(current->map()));
+ Handle<JSReceiver> last;
+ if (prototype.ToHandle(&last) && last.is_identical_to(current)) {
+ break;
+ }
+ }
+}
+
+
void CompilationDependencies::AssumeTransitionStable(
Handle<AllocationSite> site) {
// Do nothing if the object doesn't have any useful element transitions left.
@@ -117,5 +149,6 @@ void CompilationDependencies::AssumeTransitionStable(
Insert(DependentCode::kAllocationSiteTransitionChangedGroup, site);
}
}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compilation-dependencies.h b/chromium/v8/src/compilation-dependencies.h
index c14220880f1..a40eb748011 100644
--- a/chromium/v8/src/compilation-dependencies.h
+++ b/chromium/v8/src/compilation-dependencies.h
@@ -31,6 +31,11 @@ class CompilationDependencies {
void AssumeFieldType(Handle<Map> map) {
Insert(DependentCode::kFieldTypeGroup, map);
}
+ void AssumeMapStable(Handle<Map> map);
+ void AssumePrototypeMapsStable(
+ Handle<Map> map,
+ MaybeHandle<JSReceiver> prototype = MaybeHandle<JSReceiver>());
+ void AssumeMapNotDeprecated(Handle<Map> map);
void AssumePropertyCell(Handle<PropertyCell> cell) {
Insert(DependentCode::kPropertyCellChangedGroup, cell);
}
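The new Assume* entry points follow the existing pattern: record a dependency during compilation, to be installed when compilation succeeds (the Rollback() path above handles failure). A hypothetical caller, sketched; the function name and map handle are placeholders, not code from this patch:

// Hypothetical optimizing-compiler phase. Each Assume* call ties the
// generated code to an assumption so that later invalidation of that
// assumption triggers deoptimization.
void SpecializeMonomorphicLoad(CompilationDependencies* deps,
                               Handle<Map> receiver_map) {
  deps->AssumeMapNotDeprecated(receiver_map);     // no silent map migration
  deps->AssumeMapStable(receiver_map);            // no transitions away
  deps->AssumePrototypeMapsStable(receiver_map);  // pin the prototype chain
}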
@@ -61,7 +66,7 @@ class CompilationDependencies {
DependentCode* Get(Handle<Object> object);
void Set(Handle<Object> object, Handle<DependentCode> dep);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DEPENDENCIES_H_
diff --git a/chromium/v8/src/compiler.cc b/chromium/v8/src/compiler.cc
index 20aa558c3d1..307b3b0e428 100644
--- a/chromium/v8/src/compiler.cc
+++ b/chromium/v8/src/compiler.cc
@@ -6,32 +6,32 @@
#include <algorithm>
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/prettyprinter.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler/pipeline.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/typing.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/gdb-jit.h"
-#include "src/hydrogen.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/lithium.h"
#include "src/log-inl.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/prettyprinter.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/rewriter.h"
#include "src/runtime-profiler.h"
-#include "src/scanner-character-streams.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
#include "src/snapshot/serialize.h"
-#include "src/typing.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -172,16 +172,12 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
dependencies_(isolate, zone),
bailout_reason_(kNoReason),
prologue_offset_(Code::kPrologueOffsetNotSet),
- no_frame_ranges_(isolate->cpu_profiler()->is_profiling()
- ? new List<OffsetRange>(2)
- : nullptr),
track_positions_(FLAG_hydrogen_track_positions ||
isolate->cpu_profiler()->is_profiling()),
opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
parameter_count_(0),
optimization_id_(-1),
osr_expr_stack_height_(0),
- function_type_(nullptr),
debug_name_(debug_name) {
// Parameter count is number of stack parameters.
if (code_stub_ != NULL) {
@@ -200,7 +196,6 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
CompilationInfo::~CompilationInfo() {
DisableFutureOptimization();
delete deferred_handles_;
- delete no_frame_ranges_;
#ifdef DEBUG
// Check that no dependent maps have been added or added dependent maps have
// been rolled back or committed.
@@ -209,14 +204,6 @@ CompilationInfo::~CompilationInfo() {
}
-void CompilationInfo::SetStub(CodeStub* code_stub) {
- SetMode(STUB);
- code_stub_ = code_stub;
- debug_name_ = CodeStub::MajorName(code_stub->MajorKey());
- set_output_code_kind(code_stub->GetCodeKind());
-}
-
-
int CompilationInfo::num_parameters() const {
return has_scope() ? scope()->num_parameters() : parameter_count_;
}
@@ -249,13 +236,15 @@ bool CompilationInfo::ShouldSelfOptimize() {
void CompilationInfo::EnsureFeedbackVector() {
if (feedback_vector_.is_null()) {
- feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(
- literal()->feedback_vector_spec());
+ Handle<TypeFeedbackMetadata> feedback_metadata =
+ TypeFeedbackMetadata::New(isolate(), literal()->feedback_vector_spec());
+ feedback_vector_ = TypeFeedbackVector::New(isolate(), feedback_metadata);
}
// It's very important that recompiles do not alter the structure of the
// type feedback vector.
- CHECK(!feedback_vector_->SpecDiffersFrom(literal()->feedback_vector_spec()));
+ CHECK(!feedback_vector_->metadata()->SpecDiffersFrom(
+ literal()->feedback_vector_spec()));
}
@@ -330,9 +319,8 @@ base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
}
-bool CompilationInfo::MustReplaceUndefinedReceiverWithGlobalProxy() {
- return is_sloppy(language_mode()) && !is_native() &&
- scope()->has_this_declaration() && scope()->receiver()->is_used();
+bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
+ return is_sloppy(language_mode()) && !is_native();
}
@@ -423,12 +411,29 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
DCHECK(info()->shared_info()->has_deoptimization_support());
DCHECK(!info()->is_first_compile());
- // Check the enabling conditions for TurboFan.
+ bool optimization_disabled = info()->shared_info()->optimization_disabled();
bool dont_crankshaft = info()->shared_info()->dont_crankshaft();
- if (((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
- (dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0) ||
- info()->closure()->PassesFilter(FLAG_turbo_filter)) &&
- (FLAG_turbo_osr || !info()->is_osr())) {
+
+ // Check the enabling conditions for TurboFan.
+ // 1. "use asm" code.
+ bool is_turbofanable_asm = FLAG_turbo_asm &&
+ info()->shared_info()->asm_function() &&
+ !optimization_disabled;
+
+ // 2. Fallback for features unsupported by Crankshaft.
+ bool is_unsupported_by_crankshaft_but_turbofanable =
+ dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+ !optimization_disabled;
+
+ // 3. Explicitly enabled by the command-line filter.
+ bool passes_turbo_filter = info()->closure()->PassesFilter(FLAG_turbo_filter);
+
+ // If this is an OSR request, OSR must be enabled by TurboFan.
+ bool passes_osr_test = FLAG_turbo_osr || !info()->is_osr();
+
+ if ((is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
+ passes_turbo_filter) &&
+ passes_osr_test) {
// Use TurboFan for the compilation.
if (FLAG_trace_opt) {
OFStream os(stdout);
@@ -441,9 +446,10 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (info()->shared_info()->asm_function()) {
if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
info()->MarkAsFunctionContextSpecializing();
- } else if (FLAG_turbo_type_feedback) {
- info()->MarkAsTypeFeedbackEnabled();
- info()->EnsureFeedbackVector();
+ } else if (info()->has_global_object() &&
+ FLAG_native_context_specialization) {
+ info()->MarkAsNativeContextSpecializing();
+ info()->MarkAsTypingEnabled();
}
if (!info()->shared_info()->asm_function() ||
FLAG_turbo_asm_deoptimization) {
@@ -555,6 +561,61 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
}
+namespace {
+
+void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
+ Handle<Code> code) {
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ Heap* heap = isolate->heap();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep = DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+}
+
+
+void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+ // TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
+ Isolate* const isolate = code->GetIsolate();
+ DCHECK(code->is_optimized_code());
+ std::vector<Handle<Map>> maps;
+ std::vector<Handle<HeapObject>> objects;
+ {
+ DisallowHeapAllocation no_gc;
+ int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
+ objects.push_back(handle(it.rinfo()->target_cell(), isolate));
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ code->IsWeakObjectInOptimizedCode(
+ it.rinfo()->target_object())) {
+ Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
+ isolate);
+ if (object->IsMap()) {
+ maps.push_back(Handle<Map>::cast(object));
+ } else {
+ objects.push_back(object);
+ }
+ }
+ }
+ }
+ for (Handle<Map> map : maps) {
+ if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
+ isolate->heap()->AddRetainedMap(map);
+ }
+ Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
+ }
+ for (Handle<HeapObject> object : objects) {
+ AddWeakObjectToCodeDependency(isolate, object, code);
+ }
+ code->set_can_have_weak_objects(true);
+}
+
+} // namespace
+
+
OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
DCHECK(last_status() == SUCCEEDED);
// TODO(turbofan): Currently everything is done in the first phase.
@@ -563,6 +624,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
if (info()->is_deoptimization_enabled()) {
info()->parse_info()->context()->native_context()->AddOptimizedCode(
*info()->code());
+ RegisterWeakObjectsInOptimizedCode(info()->code());
}
RecordOptimizationStats();
return last_status();
@@ -587,6 +649,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
}
return SetLastStatus(BAILED_OUT);
}
+ RegisterWeakObjectsInOptimizedCode(optimized_code);
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
@@ -705,10 +768,55 @@ static bool CompileUnoptimizedCode(CompilationInfo* info) {
}
-static bool GenerateBytecode(CompilationInfo* info) {
+// TODO(rmcilroy): Remove this temporary workaround when Ignition supports
+// catch and eval.
+static bool IgnitionShouldFallbackToFullCodeGen(Scope* scope) {
+ if (scope->is_eval_scope() || scope->is_catch_scope() ||
+ scope->calls_eval()) {
+ return true;
+ }
+ for (auto inner_scope : *scope->inner_scopes()) {
+ if (IgnitionShouldFallbackToFullCodeGen(inner_scope)) return true;
+ }
+ return false;
+}
+
+
+static bool UseIgnition(CompilationInfo* info) {
+ // Cannot use Ignition when the {function_data} is already used.
+ if (info->has_shared_info() && info->shared_info()->HasBuiltinFunctionId()) {
+ return false;
+ }
+
+ // Checks whether the scope chain is supported.
+ if (FLAG_ignition_fallback_on_eval_and_catch &&
+ IgnitionShouldFallbackToFullCodeGen(info->scope())) {
+ return false;
+ }
+
+ // Checks whether top-level functions should pass the filter.
+ if (info->closure().is_null()) {
+ Vector<const char> filter = CStrVector(FLAG_ignition_filter);
+ return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
+ }
+
+ // Finally respect the filter.
+ return info->closure()->PassesFilter(FLAG_ignition_filter);
+}
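The top-level branch of UseIgnition() is the subtle part: with a null closure there is no function to match against, so only an empty --ignition_filter or a bare "*" enables Ignition. A restatement of just that check, as a self-contained sketch:

#include <cstring>

// Mirrors the closure-is-null branch of UseIgnition() above.
bool TopLevelPassesIgnitionFilter(const char* filter) {
  const size_t length = std::strlen(filter);
  return length == 0 || (length == 1 && filter[0] == '*');
}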
+
+
+static bool GenerateBaselineCode(CompilationInfo* info) {
+ if (FLAG_ignition && UseIgnition(info)) {
+ return interpreter::Interpreter::MakeBytecode(info);
+ } else {
+ return FullCodeGenerator::MakeCode(info);
+ }
+}
+
+
+static bool CompileBaselineCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info->parse_info()) ||
- !interpreter::Interpreter::MakeBytecode(info)) {
+ if (!Compiler::Analyze(info->parse_info()) || !GenerateBaselineCode(info)) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@@ -726,18 +834,13 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
FunctionLiteral* lit = info->literal();
- shared->set_language_mode(lit->language_mode());
+ DCHECK_EQ(shared->language_mode(), lit->language_mode());
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
MaybeDisableOptimization(shared, lit->dont_optimize_reason());
- if (FLAG_ignition && info->closure()->PassesFilter(FLAG_ignition_filter)) {
- // Compile bytecode for the interpreter.
- if (!GenerateBytecode(info)) return MaybeHandle<Code>();
- } else {
- // Compile unoptimized code.
- if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
-
- CHECK_EQ(Code::FUNCTION, info->code()->kind());
+ // Compile either unoptimized code or bytecode for the interpreter.
+ if (!CompileBaselineCode(info)) return MaybeHandle<Code>();
+ if (info->code()->kind() == Code::FUNCTION) { // Only for full code.
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}
@@ -750,6 +853,10 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
// Update the code and feedback vector for the shared function info.
shared->ReplaceCode(*info->code());
shared->set_feedback_vector(*info->feedback_vector());
+ if (info->has_bytecode_array()) {
+ DCHECK(shared->function_data()->IsUndefined());
+ shared->set_function_data(*info->bytecode_array());
+ }
return info->code();
}
@@ -776,29 +883,26 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
- // Context specialization folds-in the context, so no sharing can occur.
+ // Function context specialization folds in the function context,
+ // so no sharing can occur.
if (info->is_function_context_specializing()) return;
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
- // Do not cache bound functions.
- Handle<JSFunction> function = info->closure();
- if (function->shared()->bound()) return;
-
// Cache optimized context-specific code.
- if (FLAG_cache_optimized_code) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<LiteralsArray> literals(function->literals());
- Handle<Context> native_context(function->context()->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
- literals, info->osr_ast_id());
- }
+ Handle<JSFunction> function = info->closure();
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<LiteralsArray> literals(function->literals());
+ Handle<Context> native_context(function->context()->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+ literals, info->osr_ast_id());
- // Do not cache context-independent code compiled for OSR.
+ // Do not cache (native) context-independent code compiled for OSR.
if (code->is_turbofanned() && info->is_osr()) return;
- // Cache optimized context-independent code.
- if (FLAG_turbo_cache_shared_code && code->is_turbofanned()) {
+ // Cache optimized (native) context-independent code.
+ if (FLAG_turbo_cache_shared_code && code->is_turbofanned() &&
+ !info->is_native_context_specializing()) {
DCHECK(!info->is_function_context_specializing());
DCHECK(info->osr_ast_id().IsNone());
Handle<SharedFunctionInfo> shared(function->shared());
@@ -841,9 +945,12 @@ bool Compiler::ParseAndAnalyze(ParseInfo* info) {
static bool GetOptimizedCodeNow(CompilationInfo* info) {
+ Isolate* isolate = info->isolate();
+ CanonicalHandleScope canonical(isolate);
+
if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
- TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+ TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
OptimizedCompileJob job(info);
if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
@@ -858,7 +965,7 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
}
// Success!
- DCHECK(!info->isolate()->has_pending_exception());
+ DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeMap(info);
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
info->shared_info());
@@ -868,6 +975,8 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
static bool GetOptimizedCodeLater(CompilationInfo* info) {
Isolate* isolate = info->isolate();
+ CanonicalHandleScope canonical(isolate);
+
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@@ -968,23 +1077,6 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
}
-MaybeHandle<Code> Compiler::GetStubCode(Handle<JSFunction> function,
- CodeStub* stub) {
- // Build a "hybrid" CompilationInfo for a JSFunction/CodeStub pair.
- Zone zone;
- ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
- info.SetFunctionType(stub->GetCallInterfaceDescriptor().GetFunctionType());
- info.MarkAsFunctionContextSpecializing();
- info.MarkAsDeoptimizationEnabled();
- info.SetStub(stub);
-
- // Run a "mini pipeline", extracted from compiler.cc.
- if (!ParseAndAnalyze(&parse_info)) return MaybeHandle<Code>();
- return compiler::Pipeline(&info).GenerateCode();
-}
-
-
bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
@@ -1132,6 +1224,7 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
// Get rid of old list of shared function infos.
info.MarkAsFirstCompile();
+ info.MarkAsDebug();
info.parse_info()->set_global();
if (!Parser::ParseStatic(info.parse_info())) return;
@@ -1209,7 +1302,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
HistogramTimerScope timer(rate);
// Compile the code.
- if (!CompileUnoptimizedCode(info)) {
+ if (!CompileBaselineCode(info)) {
return Handle<SharedFunctionInfo>::null();
}
@@ -1220,6 +1313,10 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
info->code(),
ScopeInfo::Create(info->isolate(), info->zone(), info->scope()),
info->feedback_vector());
+ if (info->has_bytecode_array()) {
+ DCHECK(result->function_data()->IsUndefined());
+ result->set_function_data(*info->bytecode_array());
+ }
DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
SharedFunctionInfo::InitFromFunctionLiteral(result, lit);
@@ -1230,9 +1327,10 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
result->set_allows_lazy_compilation_without_context(false);
}
- Handle<String> script_name = script->name()->IsString()
- ? Handle<String>(String::cast(script->name()))
- : isolate->factory()->empty_string();
+ Handle<String> script_name =
+ script->name()->IsString()
+ ? Handle<String>(String::cast(script->name()))
+ : isolate->factory()->empty_string();
Logger::LogEventsAndTags log_tag = info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
@@ -1534,13 +1632,6 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
!LiveEditFunctionTracker::IsActive(isolate) &&
(!info.is_debug() || allow_lazy_without_ctx);
- if (outer_info->parse_info()->is_toplevel() && outer_info->will_serialize()) {
- // Make sure that if the toplevel code (possibly to be serialized),
- // the inner function must be allowed to be compiled lazily.
- // This is necessary to serialize toplevel code without inner functions.
- DCHECK(allow_lazy);
- }
-
bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
// Generate code
@@ -1557,9 +1648,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// called.
info.EnsureFeedbackVector();
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
- } else if (Renumber(info.parse_info()) &&
- FullCodeGenerator::MakeCode(&info)) {
- // MakeCode will ensure that the feedback vector is present and
+ } else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
+ // Code generation will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
scope_info = ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
@@ -1577,6 +1667,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
isolate->factory()->NewSharedFunctionInfo(
literal->name(), literal->materialized_literal_count(),
literal->kind(), info.code(), scope_info, info.feedback_vector());
+ if (info.has_bytecode_array()) {
+ DCHECK(result->function_data()->IsUndefined());
+ result->set_function_data(*info.bytecode_array());
+ }
SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
SharedFunctionInfo::SetScript(result, script);
@@ -1763,7 +1857,7 @@ bool CompilationPhase::ShouldProduceTraceOutput() const {
#if DEBUG
void CompilationInfo::PrintAstForTesting() {
PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(isolate(), zone()).PrintProgram(literal()));
+ PrettyPrinter(isolate()).PrintProgram(literal()));
}
#endif
} // namespace internal
diff --git a/chromium/v8/src/compiler.h b/chromium/v8/src/compiler.h
index 45cf7b5183d..9b439397c3c 100644
--- a/chromium/v8/src/compiler.h
+++ b/chromium/v8/src/compiler.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
#include "src/signature.h"
@@ -21,12 +21,6 @@ class JavaScriptFrame;
class ParseInfo;
class ScriptData;
-struct OffsetRange {
- OffsetRange(int from, int to) : from(from), to(to) {}
- int from;
- int to;
-};
-
// This class encapsulates encoding and decoding of sources positions from
// which hydrogen values originated.
@@ -123,14 +117,14 @@ class CompilationInfo {
kSerializing = 1 << 7,
kFunctionContextSpecializing = 1 << 8,
kFrameSpecializing = 1 << 9,
- kInliningEnabled = 1 << 10,
- kTypingEnabled = 1 << 11,
- kDisableFutureOptimization = 1 << 12,
- kSplittingEnabled = 1 << 13,
- kTypeFeedbackEnabled = 1 << 14,
- kDeoptimizationEnabled = 1 << 15,
- kSourcePositionsEnabled = 1 << 16,
- kFirstCompile = 1 << 17,
+ kNativeContextSpecializing = 1 << 10,
+ kInliningEnabled = 1 << 11,
+ kTypingEnabled = 1 << 12,
+ kDisableFutureOptimization = 1 << 13,
+ kSplittingEnabled = 1 << 14,
+ kDeoptimizationEnabled = 1 << 16,
+ kSourcePositionsEnabled = 1 << 17,
+ kFirstCompile = 1 << 18,
};
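These renumbered enumerators are plain bit positions in CompilationInfo's flags word; the SetFlag/GetFlag accessors used throughout this header reduce to mask operations. A minimal model under that assumption (the real accessors live in CompilationInfo's private section):

#include <cassert>

// Each enumerator above is a single bit in an unsigned flags word.
class FlagsModel {
 public:
  void SetFlag(unsigned flag) { flags_ |= flag; }
  bool GetFlag(unsigned flag) const { return (flags_ & flag) != 0; }

 private:
  unsigned flags_ = 0;
};

int main() {
  const unsigned kNativeContextSpecializing = 1 << 10;  // as renumbered above
  FlagsModel info;
  assert(!info.GetFlag(kNativeContextSpecializing));
  info.SetFlag(kNativeContextSpecializing);
  assert(info.GetFlag(kNativeContextSpecializing));
  return 0;
}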
explicit CompilationInfo(ParseInfo* parse_info);
@@ -179,6 +173,9 @@ class CompilationInfo {
parameter_count_ = parameter_count;
}
+ bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
@@ -233,10 +230,12 @@ class CompilationInfo {
bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
- void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+ void MarkAsNativeContextSpecializing() {
+ SetFlag(kNativeContextSpecializing);
+ }
- bool is_type_feedback_enabled() const {
- return GetFlag(kTypeFeedbackEnabled);
+ bool is_native_context_specializing() const {
+ return GetFlag(kNativeContextSpecializing);
}
void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
@@ -269,9 +268,12 @@ class CompilationInfo {
bool is_first_compile() const { return GetFlag(kFirstCompile); }
- bool IsCodePreAgingActive() const {
+ bool GeneratePreagedPrologue() const {
+ // Generate a pre-aged prologue if we are optimizing for size, which
+ // will make code flushing more aggressive. Only apply to Code::FUNCTION,
+ // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
- !is_debug();
+ !is_debug() && output_code_kind_ == Code::FUNCTION;
}
void EnsureFeedbackVector();
@@ -280,18 +282,27 @@ class CompilationInfo {
}
void SetCode(Handle<Code> code) { code_ = code; }
+ void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
+ bytecode_array_ = bytecode_array;
+ }
+
bool ShouldTrapOnDeopt() const {
return (FLAG_trap_on_deopt && IsOptimizing()) ||
(FLAG_trap_on_stub_deopt && IsStub());
}
- bool has_global_object() const {
- return !closure().is_null() &&
- (closure()->context()->global_object() != NULL);
+ bool has_native_context() const {
+ return !closure().is_null() && (closure()->native_context() != nullptr);
}
- GlobalObject* global_object() const {
- return has_global_object() ? closure()->context()->global_object() : NULL;
+ Context* native_context() const {
+ return has_native_context() ? closure()->native_context() : nullptr;
+ }
+
+ bool has_global_object() const { return has_native_context(); }
+
+ JSGlobalObject* global_object() const {
+ return has_global_object() ? native_context()->global_object() : nullptr;
}
// Accessors for the different compilation modes.
@@ -306,13 +317,6 @@ class CompilationInfo {
set_output_code_kind(Code::OPTIMIZED_FUNCTION);
}
- void SetFunctionType(Type::FunctionType* function_type) {
- function_type_ = function_type;
- }
- Type::FunctionType* function_type() const { return function_type_; }
-
- void SetStub(CodeStub* code_stub);
-
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return GetFlag(kDeoptimizationSupport);
@@ -323,7 +327,7 @@ class CompilationInfo {
}
bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
- bool MustReplaceUndefinedReceiverWithGlobalProxy();
+ bool ExpectsJSReceiverAsReceiver();
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
@@ -361,19 +365,6 @@ class CompilationInfo {
prologue_offset_ = prologue_offset;
}
- // Adds offset range [from, to) where fp register does not point
- // to the current frame base. Used in CPU profiler to detect stack
- // samples where top frame is not set up.
- inline void AddNoFrameRange(int from, int to) {
- if (no_frame_ranges_) no_frame_ranges_->Add(OffsetRange(from, to));
- }
-
- List<OffsetRange>* ReleaseNoFrameRanges() {
- List<OffsetRange>* result = no_frame_ranges_;
- no_frame_ranges_ = NULL;
- return result;
- }
-
int start_position_for(uint32_t inlining_id) {
return inlined_function_infos_.at(inlining_id).start_position;
}
@@ -407,12 +398,27 @@ class CompilationInfo {
bool has_simple_parameters();
- typedef std::vector<Handle<SharedFunctionInfo>> InlinedFunctionList;
+ struct InlinedFunctionHolder {
+ Handle<SharedFunctionInfo> shared_info;
+
+ // Root that holds the unoptimized code of the inlined function alive
+ // (and out of reach of code flushing) until we finish compilation.
+ // Do not remove.
+ Handle<Code> inlined_code_object_root;
+
+ explicit InlinedFunctionHolder(
+ Handle<SharedFunctionInfo> inlined_shared_info)
+ : shared_info(inlined_shared_info),
+ inlined_code_object_root(inlined_shared_info->code()) {}
+ };
+
+ typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
InlinedFunctionList const& inlined_functions() const {
return inlined_functions_;
}
+
void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function) {
- inlined_functions_.push_back(inlined_function);
+ inlined_functions_.push_back(InlinedFunctionHolder(inlined_function));
}
base::SmartArrayPointer<char> GetDebugName() const;
@@ -478,6 +484,11 @@ class CompilationInfo {
// data. Keep track which code we patched.
Handle<Code> unoptimized_code_;
+ // Holds the bytecode array generated by the interpreter.
+ // TODO(rmcilroy/mstarzinger): Temporary workaround until compiler.cc is
+ // refactored to avoid needing to carry the BytecodeArray around.
+ Handle<BytecodeArray> bytecode_array_;
+
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
@@ -491,7 +502,6 @@ class CompilationInfo {
int prologue_offset_;
- List<OffsetRange>* no_frame_ranges_;
std::vector<InlinedFunctionInfo> inlined_function_infos_;
bool track_positions_;
@@ -511,8 +521,6 @@ class CompilationInfo {
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
- Type::FunctionType* function_type_;
-
const char* debug_name_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -637,8 +645,6 @@ class Compiler : public AllStatic {
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
Handle<JSFunction> function);
- MUST_USE_RESULT static MaybeHandle<Code> GetStubCode(
- Handle<JSFunction> function, CodeStub* stub);
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileDebugCode(Handle<JSFunction> function);
@@ -716,6 +722,7 @@ class CompilationPhase BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_H_
diff --git a/chromium/v8/src/compiler/OWNERS b/chromium/v8/src/compiler/OWNERS
index 7f7a39bb9ec..1257e232f73 100644
--- a/chromium/v8/src/compiler/OWNERS
+++ b/chromium/v8/src/compiler/OWNERS
@@ -3,4 +3,5 @@ set noparent
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
+mtrofin@chromium.org
titzer@chromium.org
diff --git a/chromium/v8/src/compiler/access-builder.cc b/chromium/v8/src/compiler/access-builder.cc
index 8a03ff77f38..ebd2789151f 100644
--- a/chromium/v8/src/compiler/access-builder.cc
+++ b/chromium/v8/src/compiler/access-builder.cc
@@ -3,6 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/access-builder.h"
+
+#include "src/contexts.h"
+#include "src/frames.h"
+#include "src/heap/heap.h"
+#include "src/type-cache.h"
#include "src/types-inl.h"
namespace v8 {
@@ -12,7 +17,17 @@ namespace compiler {
// static
FieldAccess AccessBuilder::ForMap() {
FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
- MaybeHandle<Name>(), Type::Any(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForHeapNumberValue() {
+ FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
+ MaybeHandle<Name>(), TypeCache::Get().kFloat64,
+ MachineType::Float64()};
return access;
}
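Every helper in this file fills a FieldAccess aggregate in the same field order as the brace-initializers above. The shape those initializers imply, sketched for orientation; the authoritative definition is in src/compiler/simplified-operator.h, and the member names here are descriptive guesses:

struct FieldAccess {
  BaseTaggedness base_is_tagged;  // kTaggedBase or kUntaggedBase
  int offset;                     // byte offset of the field, untagged
  MaybeHandle<Name> name;         // optional name, kept for debug output
  Type* type;                     // static type of the stored value
  MachineType machine_type;       // machine representation of the field
};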
@@ -20,7 +35,8 @@ FieldAccess AccessBuilder::ForMap() {
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
- MaybeHandle<Name>(), Type::Any(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
return access;
}
@@ -28,7 +44,18 @@ FieldAccess AccessBuilder::ForJSObjectProperties() {
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
+ int index) {
+ int const offset = map->GetInObjectPropertyOffset(index);
+ FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged()};
return access;
}
@@ -36,7 +63,8 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
return access;
}
@@ -44,7 +72,22 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
- Handle<Name>(), Type::Any(), kMachAnyTagged};
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase, JSArray::kLengthOffset, Handle<Name>(),
+ type_cache.kJSArrayLengthType,
+ MachineType::AnyTagged()};
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ access.type = type_cache.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(elements_kind)) {
+ access.type = type_cache.kFixedArrayLengthType;
+ }
return access;
}
@@ -52,28 +95,80 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
- MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
+ MaybeHandle<Name>(), Type::UntaggedPointer(),
+ MachineType::Pointer()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
+ MaybeHandle<Name>(), TypeCache::Get().kInt8,
+ MachineType::Int8()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
+ FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(), Type::TaggedPointer(),
+ MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- FieldAccess access = {kTaggedBase,
- JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(), Type::Number(), kMachAnyTagged};
+ FieldAccess access = {
+ kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), Type::Number(), MachineType::AnyTagged()};
return access;
}
// static
-FieldAccess AccessBuilder::ForFixedArrayLength(Zone* zone) {
- STATIC_ASSERT(FixedArray::kMaxLength <= 1 << 30);
+FieldAccess AccessBuilder::ForJSIteratorResultDone() {
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSIteratorResultValue() {
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSRegExpFlags() {
+ FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
+ MaybeHandle<Name>(), Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSRegExpSource() {
+ FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
+ MaybeHandle<Name>(), Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArrayLength() {
FieldAccess access = {
kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- Type::Intersect(Type::Range(0, FixedArray::kMaxLength, zone),
- Type::TaggedSigned(), zone),
- kMachAnyTagged};
+ TypeCache::Get().kFixedArrayLengthType, MachineType::AnyTagged()};
return access;
}
@@ -81,16 +176,25 @@ FieldAccess AccessBuilder::ForFixedArrayLength(Zone* zone) {
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
- Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ Handle<Name>(), Type::TaggedPointer(),
+ MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- FieldAccess access = {kTaggedBase,
- DescriptorArray::kEnumCacheBridgeCacheOffset,
- Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapBitField() {
+ FieldAccess access = {kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
+ TypeCache::Get().kUint8, MachineType::Uint8()};
return access;
}
@@ -98,7 +202,7 @@ FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
// static
FieldAccess AccessBuilder::ForMapBitField3() {
FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- Type::UntaggedUnsigned32(), kMachUint32};
+ TypeCache::Get().kInt32, MachineType::Int32()};
return access;
}
@@ -106,7 +210,7 @@ FieldAccess AccessBuilder::ForMapBitField3() {
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
- Type::TaggedPointer(), kMachAnyTagged};
+ Type::TaggedPointer(), MachineType::AnyTagged()};
return access;
}
@@ -114,18 +218,42 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- Type::UntaggedUnsigned8(), kMachUint8};
+ TypeCache::Get().kUint8, MachineType::Uint8()};
return access;
}
// static
-FieldAccess AccessBuilder::ForStringLength(Zone* zone) {
- FieldAccess access = {
- kTaggedBase, String::kLengthOffset, Handle<Name>(),
- Type::Intersect(Type::Range(0, String::kMaxLength, zone),
- Type::TaggedSigned(), zone),
- kMachAnyTagged};
+FieldAccess AccessBuilder::ForMapPrototype() {
+ FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForStringLength() {
+ FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ TypeCache::Get().kStringLengthType,
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(), Type::Receiver(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
return access;
}
@@ -133,7 +261,36 @@ FieldAccess AccessBuilder::ForStringLength(Zone* zone) {
// static
FieldAccess AccessBuilder::ForValue() {
FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
- Type::Any(), kMachAnyTagged};
+ Type::Any(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsLength() {
+ int offset =
+ JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsCallee() {
+ int offset =
+ JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
+ int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
+ FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
return access;
}
@@ -144,15 +301,21 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForPropertyCellValue() {
+ return ForPropertyCellValue(Type::Tagged());
+}
+
+
+// static
+FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
- Type::Any(), kMachAnyTagged};
+ type, MachineType::AnyTagged()};
return access;
}
@@ -160,15 +323,23 @@ FieldAccess AccessBuilder::ForPropertyCellValue() {
// static
FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
- Handle<Name>(), Type::Any(), kMachAnyTagged};
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
return access;
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- kMachAnyTagged};
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
+ ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
+ TypeCache::Get().kFloat64, MachineType::Float64()};
return access;
}
@@ -181,92 +352,56 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
switch (type) {
case kExternalInt8Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- kMachInt8};
+ MachineType::Int8()};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- kMachUint8};
+ MachineType::Uint8()};
return access;
}
case kExternalInt16Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- kMachInt16};
+ MachineType::Int16()};
return access;
}
case kExternalUint16Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- kMachUint16};
+ MachineType::Uint16()};
return access;
}
case kExternalInt32Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- kMachInt32};
+ MachineType::Int32()};
return access;
}
case kExternalUint32Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- kMachUint32};
+ MachineType::Uint32()};
return access;
}
case kExternalFloat32Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- kMachFloat32};
+ MachineType::Float32()};
return access;
}
case kExternalFloat64Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- kMachFloat64};
+ MachineType::Float64()};
return access;
}
}
UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), kMachNone};
- return access;
-}
-
-
-// static
-ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
- switch (encoding) {
- case String::ONE_BYTE_ENCODING: {
- ElementAccess access = {kTaggedBase, SeqString::kHeaderSize,
- Type::Unsigned32(), kMachUint8};
- return access;
- }
- case String::TWO_BYTE_ENCODING: {
- ElementAccess access = {kTaggedBase, SeqString::kHeaderSize,
- Type::Unsigned32(), kMachUint16};
- return access;
- }
- }
- UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), kMachNone};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None()};
return access;
}
// static
FieldAccess AccessBuilder::ForStatsCounter() {
- FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(),
- kMachInt32};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForFrameCallerFramePtr() {
- FieldAccess access = {kUntaggedBase, StandardFrameConstants::kCallerFPOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachPtr};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForFrameMarker() {
- FieldAccess access = {kUntaggedBase, StandardFrameConstants::kMarkerOffset,
- MaybeHandle<Name>(), Type::Tagged(), kMachAnyTagged};
+ FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
+ TypeCache::Get().kInt32, MachineType::Int32()};
return access;
}
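
The access-builder.cc hunks above migrate every descriptor from the flat kMach* constants to MachineType values and pull shared types from TypeCache. At its core a FieldAccess is just a plain record bundling everything a later lowering phase needs to emit the load or store. A minimal self-contained sketch of that shape, with simplified stand-in types rather than the real V8 declarations:

#include <iostream>

// Simplified stand-ins; the real FieldAccess also carries a Handle<Name>
// and a Type* from the compiler's type system.
enum BaseTaggedness { kUntaggedBase, kTaggedBase };
enum class MachineType { AnyTagged, Uint8, Int32, Float64 };

struct FieldAccess {
  BaseTaggedness base_is_tagged;  // is the base pointer a tagged HeapObject?
  int offset;                     // byte offset of the field from the base
  MachineType machine_type;       // representation used for the load/store
};

// AccessBuilder-style helper: every phase asking for the same field gets an
// identical descriptor, so lowering stays consistent across the pipeline.
FieldAccess ForMapInstanceType() {
  const int kInstanceTypeOffset = 11;  // illustrative value, not V8's layout
  return {kTaggedBase, kInstanceTypeOffset, MachineType::Uint8};
}

int main() {
  FieldAccess access = ForMapInstanceType();
  std::cout << "offset=" << access.offset << "\n";  // prints offset=11
}
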
diff --git a/chromium/v8/src/compiler/access-builder.h b/chromium/v8/src/compiler/access-builder.h
index 762ab64d523..8375d37600a 100644
--- a/chromium/v8/src/compiler/access-builder.h
+++ b/chromium/v8/src/compiler/access-builder.h
@@ -22,26 +22,53 @@ class AccessBuilder final : public AllStatic {
// Provides access to HeapObject::map() field.
static FieldAccess ForMap();
+ // Provides access to HeapNumber::value() field.
+ static FieldAccess ForHeapNumberValue();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectProperties();
// Provides access to JSObject::elements() field.
static FieldAccess ForJSObjectElements();
+ // Provides access to JSObject inobject property fields.
+ static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+
// Provides access to JSFunction::context() field.
static FieldAccess ForJSFunctionContext();
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
+ // Provides access to JSArray::length() field.
+ static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
+
// Provides access to JSArrayBuffer::backing_store() field.
static FieldAccess ForJSArrayBufferBackingStore();
+ // Provides access to JSArrayBuffer::bit_field() field.
+ static FieldAccess ForJSArrayBufferBitField();
+
+ // Provides access to JSArrayBufferView::buffer() field.
+ static FieldAccess ForJSArrayBufferViewBuffer();
+
// Provides access to JSDate fields.
static FieldAccess ForJSDateField(JSDate::FieldIndex index);
+ // Provides access to JSIteratorResult::done() field.
+ static FieldAccess ForJSIteratorResultDone();
+
+ // Provides access to JSIteratorResult::value() field.
+ static FieldAccess ForJSIteratorResultValue();
+
+ // Provides access to JSRegExp::flags() field.
+ static FieldAccess ForJSRegExpFlags();
+
+ // Provides access to JSRegExp::source() field.
+ static FieldAccess ForJSRegExpSource();
+
// Provides access to FixedArray::length() field.
- static FieldAccess ForFixedArrayLength(Zone* zone);
+ static FieldAccess ForFixedArrayLength();
// Provides access to DescriptorArray::enum_cache() field.
static FieldAccess ForDescriptorArrayEnumCache();
@@ -49,6 +76,9 @@ class AccessBuilder final : public AllStatic {
// Provides access to DescriptorArray::enum_cache_bridge_cache() field.
static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
+ // Provides access to Map::bit_field() byte.
+ static FieldAccess ForMapBitField();
+
// Provides access to Map::bit_field3() field.
static FieldAccess ForMapBitField3();
@@ -58,17 +88,34 @@ class AccessBuilder final : public AllStatic {
// Provides access to Map::instance_type() field.
static FieldAccess ForMapInstanceType();
+ // Provides access to Map::prototype() field.
+ static FieldAccess ForMapPrototype();
+
// Provides access to String::length() field.
- static FieldAccess ForStringLength(Zone* zone);
+ static FieldAccess ForStringLength();
+
+ // Provides access to JSGlobalObject::global_proxy() field.
+ static FieldAccess ForJSGlobalObjectGlobalProxy();
+
+ // Provides access to JSGlobalObject::native_context() field.
+ static FieldAccess ForJSGlobalObjectNativeContext();
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
- // Provides access Context slots.
+ // Provides access to arguments object fields.
+ static FieldAccess ForArgumentsLength();
+ static FieldAccess ForArgumentsCallee();
+
+ // Provides access to FixedArray slots.
+ static FieldAccess ForFixedArraySlot(size_t index);
+
+ // Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
// Provides access to PropertyCell::value() field.
static FieldAccess ForPropertyCellValue();
+ static FieldAccess ForPropertyCellValue(Type* type);
// Provides access to SharedFunctionInfo::feedback_vector() field.
static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
@@ -76,28 +123,19 @@ class AccessBuilder final : public AllStatic {
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
+ // Provides access to FixedDoubleArray elements.
+ static ElementAccess ForFixedDoubleArrayElement();
+
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
- // Provides access to the characters of sequential strings.
- static ElementAccess ForSeqStringChar(String::Encoding encoding);
-
// ===========================================================================
// Access to global per-isolate variables (based on external reference).
// Provides access to the backing store of a StatsCounter.
static FieldAccess ForStatsCounter();
- // ===========================================================================
- // Access to activation records on the stack (based on frame pointer).
-
- // Provides access to the next frame pointer in a stack frame.
- static FieldAccess ForFrameCallerFramePtr();
-
- // Provides access to the marker in a stack frame.
- static FieldAccess ForFrameMarker();
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
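
A recurring change in these files is the replacement of single enum values like kMachUint8 with MachineType::Uint8(). The idea, sketched below with illustrative enums rather than V8's actual definitions, is that a machine type pairs a bit-level representation with a signedness/semantics hint, so later code can switch on representation() and query IsUnsigned() independently:

#include <cassert>

enum class MachineRepresentation { kWord8, kWord16, kWord32, kTagged };
enum class MachineSemantic { kInt32, kUint32, kAny };

class MachineType {
 public:
  constexpr MachineType(MachineRepresentation rep, MachineSemantic sem)
      : rep_(rep), sem_(sem) {}
  static constexpr MachineType Uint8() {
    return {MachineRepresentation::kWord8, MachineSemantic::kUint32};
  }
  static constexpr MachineType AnyTagged() {
    return {MachineRepresentation::kTagged, MachineSemantic::kAny};
  }
  MachineRepresentation representation() const { return rep_; }
  bool IsUnsigned() const { return sem_ == MachineSemantic::kUint32; }

 private:
  MachineRepresentation rep_;
  MachineSemantic sem_;
};

int main() {
  // A selector can now branch on representation() and consult IsUnsigned()
  // separately, as the VisitLoad rewrite later in this diff does.
  assert(MachineType::Uint8().IsUnsigned());
  assert(MachineType::AnyTagged().representation() ==
         MachineRepresentation::kTagged);
}
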
diff --git a/chromium/v8/src/compiler/access-info.cc b/chromium/v8/src/compiler/access-info.cc
new file mode 100644
index 00000000000..612170e5b1b
--- /dev/null
+++ b/chromium/v8/src/compiler/access-info.cc
@@ -0,0 +1,488 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <ostream>
+
+#include "src/accessors.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-info.h"
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+bool CanInlineElementAccess(Handle<Map> map) {
+ if (!map->IsJSObjectMap()) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->has_indexed_interceptor()) return false;
+ ElementsKind const elements_kind = map->elements_kind();
+ if (IsFastElementsKind(elements_kind)) return true;
+ // TODO(bmeurer): Add support for other elements kinds.
+ return false;
+}
+
+
+bool CanInlinePropertyAccess(Handle<Map> map) {
+ // We can inline property access to prototypes of all primitives, except
+ // the special Oddball ones that have no wrapper counterparts (i.e. Null,
+ // Undefined and TheHole).
+ STATIC_ASSERT(ODDBALL_TYPE == LAST_PRIMITIVE_TYPE);
+ if (map->IsBooleanMap()) return true;
+ if (map->instance_type() < LAST_PRIMITIVE_TYPE) return true;
+ return map->IsJSObjectMap() && !map->is_dictionary_map() &&
+ !map->has_named_interceptor() &&
+ // TODO(verwaest): Whitelist contexts to which we have access.
+ !map->is_access_check_needed();
+}
+
+} // namespace
+
+
+std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
+ switch (access_mode) {
+ case AccessMode::kLoad:
+ return os << "Load";
+ case AccessMode::kStore:
+ return os << "Store";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::NotFound(Type* receiver_type,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(holder, receiver_type);
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataConstant(
+ Type* receiver_type, Handle<Object> constant,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(holder, constant, receiver_type);
+}
+
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataField(
+ Type* receiver_type, FieldIndex field_index, Type* field_type,
+ FieldCheck field_check, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index, field_check,
+ field_type, receiver_type);
+}
+
+
+ElementAccessInfo::ElementAccessInfo() : receiver_type_(Type::None()) {}
+
+
+ElementAccessInfo::ElementAccessInfo(Type* receiver_type,
+ ElementsKind elements_kind,
+ MaybeHandle<JSObject> holder)
+ : elements_kind_(elements_kind),
+ holder_(holder),
+ receiver_type_(receiver_type) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo()
+ : kind_(kInvalid), receiver_type_(Type::None()), field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ Type* receiver_type)
+ : kind_(kNotFound),
+ receiver_type_(receiver_type),
+ holder_(holder),
+ field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ Handle<Object> constant,
+ Type* receiver_type)
+ : kind_(kDataConstant),
+ receiver_type_(receiver_type),
+ constant_(constant),
+ holder_(holder),
+ field_type_(Type::Any()) {}
+
+
+PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map,
+ FieldIndex field_index,
+ FieldCheck field_check, Type* field_type,
+ Type* receiver_type)
+ : kind_(kDataField),
+ receiver_type_(receiver_type),
+ transition_map_(transition_map),
+ holder_(holder),
+ field_index_(field_index),
+ field_check_(field_check),
+ field_type_(field_type) {}
+
+
+AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
+ Handle<Context> native_context, Zone* zone)
+ : dependencies_(dependencies),
+ native_context_(native_context),
+ isolate_(native_context->GetIsolate()),
+ type_cache_(TypeCache::Get()),
+ zone_(zone) {
+ DCHECK(native_context->IsNativeContext());
+}
+
+
+bool AccessInfoFactory::ComputeElementAccessInfo(
+ Handle<Map> map, AccessMode access_mode, ElementAccessInfo* access_info) {
+ // Check if it is safe to inline element access for the {map}.
+ if (!CanInlineElementAccess(map)) return false;
+
+ ElementsKind const elements_kind = map->elements_kind();
+
+ // Certain (monomorphic) stores need a prototype chain check because shape
+ // changes could allow callbacks on elements in the chain that are not
+ // compatible with monomorphic keyed stores.
+ MaybeHandle<JSObject> holder;
+ if (access_mode == AccessMode::kStore && map->prototype()->IsJSObject()) {
+ for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
+ Handle<JSReceiver> prototype =
+ PrototypeIterator::GetCurrent<JSReceiver>(i);
+ if (!prototype->IsJSObject()) return false;
+ // TODO(bmeurer): We do not currently support unstable prototypes.
+ // We might want to revisit the way we handle certain keyed stores
+ // because this whole prototype chain check is essentially a hack,
+ // and I'm not sure that it is correct at all with dictionaries in
+ // the prototype chain.
+ if (!prototype->map()->is_stable()) return false;
+ holder = Handle<JSObject>::cast(prototype);
+ }
+ }
+
+ *access_info =
+ ElementAccessInfo(Type::Class(map, zone()), elements_kind, holder);
+ return true;
+}
+
+
+bool AccessInfoFactory::ComputeElementAccessInfos(
+ MapHandleList const& maps, AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos) {
+ // Collect possible transition targets.
+ MapHandleList possible_transition_targets(maps.length());
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ if (CanInlineElementAccess(map) &&
+ IsFastElementsKind(map->elements_kind()) &&
+ GetInitialFastElementsKind() != map->elements_kind()) {
+ possible_transition_targets.Add(map);
+ }
+ }
+ }
+
+ // Separate the actual receiver maps and the possible transition sources.
+ MapHandleList receiver_maps(maps.length());
+ MapTransitionList transitions(maps.length());
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ Handle<Map> transition_target =
+ Map::FindTransitionedMap(map, &possible_transition_targets);
+ if (transition_target.is_null()) {
+ receiver_maps.Add(map);
+ } else {
+ transitions.push_back(std::make_pair(map, transition_target));
+ }
+ }
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ // Compute the element access information.
+ ElementAccessInfo access_info;
+ if (!ComputeElementAccessInfo(receiver_map, access_mode, &access_info)) {
+ return false;
+ }
+
+ // Collect the possible transitions for the {receiver_map}.
+ for (auto transition : transitions) {
+ if (transition.second.is_identical_to(receiver_map)) {
+ access_info.transitions().push_back(transition);
+ }
+ }
+
+ // Schedule the access information.
+ access_infos->push_back(access_info);
+ }
+ return true;
+}
+
+
+bool AccessInfoFactory::ComputePropertyAccessInfo(
+ Handle<Map> map, Handle<Name> name, AccessMode access_mode,
+ PropertyAccessInfo* access_info) {
+ // Check if it is safe to inline property access for the {map}.
+ if (!CanInlinePropertyAccess(map)) return false;
+
+ // Compute the receiver type.
+ Handle<Map> receiver_map = map;
+
+ // We support fast inline cases for certain JSObject getters.
+ if (access_mode == AccessMode::kLoad &&
+ LookupSpecialFieldAccessor(map, name, access_info)) {
+ return true;
+ }
+
+ MaybeHandle<JSObject> holder;
+ do {
+ // Lookup the named property on the {map}.
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
+ int const number = descriptors->SearchWithCache(*name, *map);
+ if (number != DescriptorArray::kNotFound) {
+ PropertyDetails const details = descriptors->GetDetails(number);
+ if (access_mode == AccessMode::kStore) {
+ // Don't bother optimizing stores to read-only properties.
+ if (details.IsReadOnly()) {
+ return false;
+ }
+ // Check for store to data property on a prototype.
+ if (details.kind() == kData && !holder.is_null()) {
+ // The property was not found on the receiver but on a prototype, so we
+ // need to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ return LookupTransition(receiver_map, name, holder, access_info);
+ }
+ }
+ if (details.type() == DATA_CONSTANT) {
+ *access_info = PropertyAccessInfo::DataConstant(
+ Type::Class(receiver_map, zone()),
+ handle(descriptors->GetValue(number), isolate()), holder);
+ return true;
+ } else if (details.type() == DATA) {
+ int index = descriptors->GetFieldIndex(number);
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ Type::Convert<HeapType>(
+ handle(descriptors->GetFieldType(number), isolate()), zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ if (access_mode == AccessMode::kStore) return false;
+
+ // The field type was cleared by the GC, so we don't know anything
+ // about the contents now.
+ // TODO(bmeurer): It would be awesome to make this saner in the
+ // runtime/GC interaction.
+ field_type = Type::TaggedPointer();
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(map->FindFieldOwner(number), isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(receiver_map, zone()), field_index, field_type,
+ FieldCheck::kNone, holder);
+ return true;
+ } else {
+ // TODO(bmeurer): Add support for accessors.
+ return false;
+ }
+ }
+
+ // Don't search on the prototype chain for special indices in case of
+ // integer indexed exotic objects (see ES6 section 9.4.5).
+ if (map->IsJSTypedArrayMap() && name->IsString() &&
+ IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name))) {
+ return false;
+ }
+
+ // Don't lookup private symbols on the prototype chain.
+ if (name->IsPrivate()) return false;
+
+ // Walk up the prototype chain.
+ if (!map->prototype()->IsJSObject()) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context())
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate());
+ DCHECK(map->prototype()->IsJSObject());
+ } else if (map->prototype()->IsNull()) {
+ // The property was not found on the receiver or any prototype, so we need
+ // to transition to a new data property.
+ // Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
+ if (access_mode == AccessMode::kStore) {
+ return LookupTransition(receiver_map, name, holder, access_info);
+ }
+ // The property was not found, return undefined or throw depending
+ // on the language mode of the load operation.
+ // Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
+ *access_info = PropertyAccessInfo::NotFound(
+ Type::Class(receiver_map, zone()), holder);
+ return true;
+ } else {
+ return false;
+ }
+ }
+ Handle<JSObject> map_prototype(JSObject::cast(map->prototype()), isolate());
+ if (map_prototype->map()->is_deprecated()) {
+ // Try to migrate the prototype object so we don't embed the deprecated
+ // map into the optimized code.
+ JSObject::TryMigrateInstance(map_prototype);
+ }
+ map = handle(map_prototype->map(), isolate());
+ holder = map_prototype;
+ } while (CanInlinePropertyAccess(map));
+ return false;
+}
+
+
+bool AccessInfoFactory::ComputePropertyAccessInfos(
+ MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos) {
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ PropertyAccessInfo access_info;
+ if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
+ return false;
+ }
+ access_infos->push_back(access_info);
+ }
+ }
+ return true;
+}
+
+
+bool AccessInfoFactory::LookupSpecialFieldAccessor(
+ Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
+ // Check for special JSObject field accessors.
+ int offset;
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &offset)) {
+ FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ Type* field_type = Type::Tagged();
+ if (map->IsStringMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The String::length property is always a smi in the range
+ // [0, String::kMaxLength].
+ field_type = type_cache_.kStringLengthType;
+ } else if (map->IsJSArrayMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The JSArray::length property is a smi in the range
+ // [0, FixedDoubleArray::kMaxLength] in case of fast double
+ // elements, a smi in the range [0, FixedArray::kMaxLength]
+ // in case of other fast elements, and [0, kMaxUInt32] in
+ // case of other arrays.
+ if (IsFastDoubleElementsKind(map->elements_kind())) {
+ field_type = type_cache_.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(map->elements_kind())) {
+ field_type = type_cache_.kFixedArrayLengthType;
+ } else {
+ field_type = type_cache_.kJSArrayLengthType;
+ }
+ }
+ *access_info = PropertyAccessInfo::DataField(Type::Class(map, zone()),
+ field_index, field_type);
+ return true;
+ }
+ // Check for special JSArrayBufferView field accessors.
+ if (Accessors::IsJSArrayBufferViewFieldAccessor(map, name, &offset)) {
+ FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ Type* field_type = Type::Tagged();
+ if (Name::Equals(factory()->byte_length_string(), name) ||
+ Name::Equals(factory()->byte_offset_string(), name)) {
+ // The JSArrayBufferView::byte_length and JSArrayBufferView::byte_offset
+ // properties are always numbers in the range [0, kMaxSafeInteger].
+ field_type = type_cache_.kPositiveSafeInteger;
+ } else if (map->IsJSTypedArrayMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The JSTypedArray::length property is always a number in the range
+ // [0, kMaxSafeInteger].
+ field_type = type_cache_.kPositiveSafeInteger;
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(map, zone()), field_index, field_type,
+ FieldCheck::kJSArrayBufferViewBufferNotNeutered);
+ return true;
+ }
+ return false;
+}
+
+
+bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
+ MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info) {
+ // Check if the {map} has a data transition with the given {name}.
+ if (map->unused_property_fields() == 0) return false;
+ Handle<Map> transition_map;
+ if (TransitionArray::SearchTransition(map, kData, name, NONE)
+ .ToHandle(&transition_map)) {
+ int const number = transition_map->LastAdded();
+ PropertyDetails const details =
+ transition_map->instance_descriptors()->GetDetails(number);
+ // Don't bother optimizing stores to read-only properties.
+ if (details.IsReadOnly()) return false;
+ // TODO(bmeurer): Handle transition to data constant?
+ if (details.type() != DATA) return false;
+ int const index = details.field_index();
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *transition_map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ Type::Convert<HeapType>(
+ handle(
+ transition_map->instance_descriptors()->GetFieldType(number),
+ isolate()),
+ zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ return false;
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
+ isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
+ }
+ dependencies()->AssumeMapNotDeprecated(transition_map);
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(map, zone()), field_index, field_type, FieldCheck::kNone,
+ holder, transition_map);
+ return true;
+ }
+ return false;
+}
+
+
+Factory* AccessInfoFactory::factory() const { return isolate()->factory(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
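
ComputePropertyAccessInfo above is essentially a guarded prototype-chain walk: inline only while every link passes CanInlinePropertyAccess, stop at the first holder of the property, and fall back to the not-found case at the end of the chain. A toy version of that control flow, using a plain struct instead of V8's Map/JSObject machinery:

#include <map>
#include <string>

// Toy stand-in for an object on the prototype chain.
struct Object {
  std::map<std::string, int> own_properties;
  bool inlineable;          // stands in for CanInlinePropertyAccess()
  const Object* prototype;  // nullptr terminates the chain
};

// Returns true (with holder and value) if the property was found on a chain
// whose every link is safe to inline.
bool LookupOnChain(const Object* receiver, const std::string& name,
                   const Object** holder, int* value) {
  for (const Object* o = receiver; o != nullptr; o = o->prototype) {
    if (!o->inlineable) return false;  // bail out, like the do/while guard
    auto it = o->own_properties.find(name);
    if (it != o->own_properties.end()) {
      *holder = o;
      *value = it->second;
      return true;  // first holder wins, shadowing deeper prototypes
    }
  }
  return false;  // fell off the chain: the "not found" access info case
}

int main() {
  Object proto{{{"length", 3}}, true, nullptr};
  Object receiver{{}, true, &proto};  // no own "length"; inherits from proto
  const Object* holder = nullptr;
  int value = 0;
  bool found = LookupOnChain(&receiver, "length", &holder, &value);
  return (found && holder == &proto && value == 3) ? 0 : 1;
}
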
diff --git a/chromium/v8/src/compiler/access-info.h b/chromium/v8/src/compiler/access-info.h
new file mode 100644
index 00000000000..cae119140a5
--- /dev/null
+++ b/chromium/v8/src/compiler/access-info.h
@@ -0,0 +1,164 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ACCESS_INFO_H_
+#define V8_COMPILER_ACCESS_INFO_H_
+
+#include <iosfwd>
+
+#include "src/field-index.h"
+#include "src/objects.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class TypeCache;
+
+
+namespace compiler {
+
+// Whether we are loading a property or storing to a property.
+enum class AccessMode { kLoad, kStore };
+
+std::ostream& operator<<(std::ostream&, AccessMode);
+
+
+// Mapping of transition source to transition target.
+typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
+
+
+// This class encapsulates all information required to access a certain element.
+class ElementAccessInfo final {
+ public:
+ ElementAccessInfo();
+ ElementAccessInfo(Type* receiver_type, ElementsKind elements_kind,
+ MaybeHandle<JSObject> holder);
+
+ MaybeHandle<JSObject> holder() const { return holder_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ Type* receiver_type() const { return receiver_type_; }
+ MapTransitionList& transitions() { return transitions_; }
+ MapTransitionList const& transitions() const { return transitions_; }
+
+ private:
+ ElementsKind elements_kind_;
+ MaybeHandle<JSObject> holder_;
+ Type* receiver_type_;
+ MapTransitionList transitions_;
+};
+
+
+// Additional checks that need to be performed for data field accesses.
+enum class FieldCheck : uint8_t {
+ // No additional checking needed.
+ kNone,
+ // Check that the [[ViewedArrayBuffer]] of {JSArrayBufferView}s
+ // was not neutered.
+ kJSArrayBufferViewBufferNotNeutered,
+};
+
+
+// This class encapsulates all information required to access a certain
+// object property, either on the object itself or on the prototype chain.
+class PropertyAccessInfo final {
+ public:
+ enum Kind { kInvalid, kNotFound, kDataConstant, kDataField };
+
+ static PropertyAccessInfo NotFound(Type* receiver_type,
+ MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo DataConstant(Type* receiver_type,
+ Handle<Object> constant,
+ MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo DataField(
+ Type* receiver_type, FieldIndex field_index, Type* field_type,
+ FieldCheck field_check = FieldCheck::kNone,
+ MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
+ MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+
+ PropertyAccessInfo();
+
+ bool IsNotFound() const { return kind() == kNotFound; }
+ bool IsDataConstant() const { return kind() == kDataConstant; }
+ bool IsDataField() const { return kind() == kDataField; }
+
+ bool HasTransitionMap() const { return !transition_map().is_null(); }
+
+ Kind kind() const { return kind_; }
+ MaybeHandle<JSObject> holder() const { return holder_; }
+ MaybeHandle<Map> transition_map() const { return transition_map_; }
+ Handle<Object> constant() const { return constant_; }
+ FieldCheck field_check() const { return field_check_; }
+ FieldIndex field_index() const { return field_index_; }
+ Type* field_type() const { return field_type_; }
+ Type* receiver_type() const { return receiver_type_; }
+
+ private:
+ PropertyAccessInfo(MaybeHandle<JSObject> holder, Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder, Handle<Object> constant,
+ Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map, FieldIndex field_index,
+ FieldCheck field_check, Type* field_type,
+ Type* receiver_type);
+
+ Kind kind_;
+ Type* receiver_type_;
+ Handle<Object> constant_;
+ MaybeHandle<Map> transition_map_;
+ MaybeHandle<JSObject> holder_;
+ FieldIndex field_index_;
+ FieldCheck field_check_;
+ Type* field_type_;
+};
+
+
+// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
+class AccessInfoFactory final {
+ public:
+ AccessInfoFactory(CompilationDependencies* dependencies,
+ Handle<Context> native_context, Zone* zone);
+
+ bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
+ ElementAccessInfo* access_info);
+ bool ComputeElementAccessInfos(MapHandleList const& maps,
+ AccessMode access_mode,
+ ZoneVector<ElementAccessInfo>* access_infos);
+ bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
+ AccessMode access_mode,
+ PropertyAccessInfo* access_info);
+ bool ComputePropertyAccessInfos(MapHandleList const& maps, Handle<Name> name,
+ AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* access_infos);
+
+ private:
+ bool LookupSpecialFieldAccessor(Handle<Map> map, Handle<Name> name,
+ PropertyAccessInfo* access_info);
+ bool LookupTransition(Handle<Map> map, Handle<Name> name,
+ MaybeHandle<JSObject> holder,
+ PropertyAccessInfo* access_info);
+
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Factory* factory() const;
+ Isolate* isolate() const { return isolate_; }
+ Handle<Context> native_context() const { return native_context_; }
+ Zone* zone() const { return zone_; }
+
+ CompilationDependencies* const dependencies_;
+ Handle<Context> const native_context_;
+ Isolate* const isolate_;
+ TypeCache const& type_cache_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccessInfoFactory);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ACCESS_INFO_H_
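
PropertyAccessInfo is a small tagged value type: one Kind enum, private constructors, and static factories, so call sites branch on IsDataField()/IsNotFound() rather than on separate classes. A trimmed-down sketch of the same shape (most fields omitted):

#include <cassert>

class AccessInfo {
 public:
  enum Kind { kInvalid, kNotFound, kDataConstant, kDataField };

  // Factories mirror PropertyAccessInfo::NotFound() / DataField() above.
  static AccessInfo NotFound() { return AccessInfo(kNotFound, 0); }
  static AccessInfo DataField(int field_index) {
    return AccessInfo(kDataField, field_index);
  }

  AccessInfo() : kind_(kInvalid), field_index_(0) {}

  bool IsNotFound() const { return kind_ == kNotFound; }
  bool IsDataField() const { return kind_ == kDataField; }
  int field_index() const { return field_index_; }

 private:
  AccessInfo(Kind kind, int field_index)
      : kind_(kind), field_index_(field_index) {}

  Kind kind_;
  int field_index_;
};

int main() {
  AccessInfo info = AccessInfo::DataField(2);
  assert(info.IsDataField() && info.field_index() == 2);
  assert(!AccessInfo().IsDataField());  // default-constructed == kInvalid
}
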
diff --git a/chromium/v8/src/compiler/arm/code-generator-arm.cc b/chromium/v8/src/compiler/arm/code-generator-arm.cc
index 796d132a346..9b074b05ccd 100644
--- a/chromium/v8/src/compiler/arm/code-generator-arm.cc
+++ b/chromium/v8/src/compiler/arm/code-generator-arm.cc
@@ -5,11 +5,11 @@
#include "src/compiler/code-generator.h"
#include "src/arm/macro-assembler-arm.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -147,10 +147,10 @@ class ArmOperandConverter final : public InstructionOperandConverter {
}
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -198,6 +198,48 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ add(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -312,12 +354,29 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(sp, sp, Operand(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
+ if (frame()->needs_frame()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ }
+ __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -340,10 +399,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -353,6 +414,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(ip);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -368,6 +430,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(ip);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -378,17 +441,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -398,6 +473,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -413,13 +490,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -438,6 +518,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ str(value, MemOperand(object, index));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -697,18 +794,33 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmVnegF64:
__ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintmF32:
+ __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVrintmF64:
__ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintpF32:
+ __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVrintpF64:
__ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintzF32:
+ __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVrintzF64:
__ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVrintaF64:
__ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintnF32:
+ __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVrintnF64:
+ __ vrintn(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVcvtF32F64: {
__ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -832,8 +944,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmPush:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -843,19 +957,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ add(index, object, index);
- __ str(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
- __ RecordWrite(object, index, value, lr_status, mode);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
@@ -968,7 +1069,7 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
@@ -978,13 +1079,13 @@ void CodeGenerator::AssemblePrologue() {
__ mov(fp, sp);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1055,9 +1156,9 @@ void CodeGenerator::AssembleReturn() {
DwVfpRegister::from_code(last));
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ LeaveFrame(StackFrame::MANUAL);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -1073,7 +1174,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- ArmOperandConverter g(this, NULL);
+ ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1181,7 +1282,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- ArmOperandConverter g(this, NULL);
+ ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
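
OutOfLineRecordWrite and the kArchStoreWithWriteBarrier case above implement the usual generational write-barrier fast path: store unconditionally, then reach the slow path only when the value could actually be a heap pointer the GC cares about. The filtering logic, modeled with plain booleans in place of tag bits and MemoryChunk page headers:

#include <iostream>

enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

struct Value {
  bool is_smi;               // small integers never need a barrier
  bool on_interesting_page;  // kPointersToHereAreInterestingMask
};

void RecordWriteSlowPath() { std::cout << "remembered-set update\n"; }

void StoreWithWriteBarrier(RecordWriteMode mode, const Value& value) {
  // ... the str/store itself happens here ...
  if (mode > RecordWriteMode::kValueIsPointer && value.is_smi) {
    return;  // mirrors __ JumpIfSmi(value_, exit())
  }
  if (mode > RecordWriteMode::kValueIsMap && !value.on_interesting_page) {
    return;  // mirrors __ CheckPageFlag(value_, ..., exit())
  }
  RecordWriteSlowPath();  // the out-of-line RecordWriteStub call
}

int main() {
  StoreWithWriteBarrier(RecordWriteMode::kValueIsAny, {true, false});  // skipped
  StoreWithWriteBarrier(RecordWriteMode::kValueIsAny, {false, true});  // slow path
}
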
diff --git a/chromium/v8/src/compiler/arm/instruction-codes-arm.h b/chromium/v8/src/compiler/arm/instruction-codes-arm.h
index c210c171e43..401100be753 100644
--- a/chromium/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/chromium/v8/src/compiler/arm/instruction-codes-arm.h
@@ -65,10 +65,15 @@ namespace compiler {
V(ArmVabsF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
+ V(ArmVrintmF32) \
V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
V(ArmVrintzF64) \
V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
@@ -93,8 +98,7 @@ namespace compiler {
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
- V(ArmPoke) \
- V(ArmStoreWriteBarrier)
+ V(ArmPoke)
// Addressing modes represent the "shape" of inputs to an instruction.
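
The TARGET_ARCH_OPCODE_LIST edits rely on V8's X-macro convention: the list macro applies a caller-supplied macro V to every opcode name, so adding kArmVrintnF32 in one place updates the enum, any name tables, and every switch that expands the list. A minimal self-contained version of the pattern:

#include <iostream>

#define DEMO_OPCODE_LIST(V) \
  V(ArmVrintmF32)           \
  V(ArmVrintnF32)           \
  V(ArmPoke)

// Expand once into an enum...
#define DECLARE_ENUM(Name) k##Name,
enum ArchOpcode { DEMO_OPCODE_LIST(DECLARE_ENUM) kLastOpcode };
#undef DECLARE_ENUM

// ...and once into a parallel name table, guaranteed to stay in sync.
#define DECLARE_NAME(Name) #Name,
const char* const kOpcodeNames[] = {DEMO_OPCODE_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  std::cout << kOpcodeNames[kArmVrintnF32] << "\n";  // prints ArmVrintnF32
}
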
diff --git a/chromium/v8/src/compiler/arm/instruction-scheduler-arm.cc b/chromium/v8/src/compiler/arm/instruction-scheduler-arm.cc
new file mode 100644
index 00000000000..f36802ceb3b
--- /dev/null
+++ b/chromium/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -0,0 +1,129 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArmAdd:
+ case kArmAnd:
+ case kArmBic:
+ case kArmClz:
+ case kArmCmp:
+ case kArmCmn:
+ case kArmTst:
+ case kArmTeq:
+ case kArmOrr:
+ case kArmEor:
+ case kArmSub:
+ case kArmRsb:
+ case kArmMul:
+ case kArmMla:
+ case kArmMls:
+ case kArmSmmul:
+ case kArmSmmla:
+ case kArmUmull:
+ case kArmSdiv:
+ case kArmUdiv:
+ case kArmMov:
+ case kArmMvn:
+ case kArmBfc:
+ case kArmUbfx:
+ case kArmSxtb:
+ case kArmSxth:
+ case kArmSxtab:
+ case kArmSxtah:
+ case kArmUxtb:
+ case kArmUxth:
+ case kArmUxtab:
+ case kArmUxtah:
+ case kArmVcmpF32:
+ case kArmVaddF32:
+ case kArmVsubF32:
+ case kArmVmulF32:
+ case kArmVmlaF32:
+ case kArmVmlsF32:
+ case kArmVdivF32:
+ case kArmVabsF32:
+ case kArmVnegF32:
+ case kArmVsqrtF32:
+ case kArmVcmpF64:
+ case kArmVaddF64:
+ case kArmVsubF64:
+ case kArmVmulF64:
+ case kArmVmlaF64:
+ case kArmVmlsF64:
+ case kArmVdivF64:
+ case kArmVmodF64:
+ case kArmVabsF64:
+ case kArmVnegF64:
+ case kArmVsqrtF64:
+ case kArmVrintmF32:
+ case kArmVrintmF64:
+ case kArmVrintpF32:
+ case kArmVrintpF64:
+ case kArmVrintzF32:
+ case kArmVrintzF64:
+ case kArmVrintaF64:
+ case kArmVrintnF32:
+ case kArmVrintnF64:
+ case kArmVcvtF32F64:
+ case kArmVcvtF64F32:
+ case kArmVcvtF64S32:
+ case kArmVcvtF64U32:
+ case kArmVcvtS32F64:
+ case kArmVcvtU32F64:
+ case kArmVmovLowU32F64:
+ case kArmVmovLowF64U32:
+ case kArmVmovHighU32F64:
+ case kArmVmovHighF64U32:
+ case kArmVmovF64U32U32:
+ return kNoOpcodeFlags;
+
+ case kArmVldrF32:
+ case kArmVldrF64:
+ case kArmLdrb:
+ case kArmLdrsb:
+ case kArmLdrh:
+ case kArmLdrsh:
+ case kArmLdr:
+ return kIsLoadOperation;
+
+ case kArmVstrF32:
+ case kArmVstrF64:
+ case kArmStrb:
+ case kArmStrh:
+ case kArmStr:
+ case kArmPush:
+ case kArmPoke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
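
The new instruction-scheduler-arm.cc classifies every ARM opcode as pure, a load, or a side effect. One plausible way such flags feed a scheduler's dependency rules is sketched below; the flag values and the MustKeepOrder helper are illustrative, not V8's actual scheduler code:

constexpr int kNoOpcodeFlags = 0;
constexpr int kIsLoadOperation = 1 << 0;
constexpr int kHasSideEffect = 1 << 1;

// Must `later` stay ordered after `earlier` when building the dependency graph?
bool MustKeepOrder(int earlier_flags, int later_flags) {
  // Two side effects (e.g. two stores) must not swap.
  if ((earlier_flags & kHasSideEffect) && (later_flags & kHasSideEffect))
    return true;
  // A load must not move across a store it might observe, in either direction.
  if ((earlier_flags & kHasSideEffect) && (later_flags & kIsLoadOperation))
    return true;
  if ((earlier_flags & kIsLoadOperation) && (later_flags & kHasSideEffect))
    return true;
  return false;  // two pure ALU ops, or two loads: free to reorder
}

int main() {
  // kArmLdr after kArmStr must stay put; two pure kArmAdds may swap.
  return MustKeepOrder(kHasSideEffect, kIsLoadOperation) &&
                 !MustKeepOrder(kNoOpcodeFlags, kNoOpcodeFlags)
             ? 0
             : 1;
}
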
diff --git a/chromium/v8/src/compiler/arm/instruction-selector-arm.cc b/chromium/v8/src/compiler/arm/instruction-selector-arm.cc
index f58a29de8a6..f3deae7d758 100644
--- a/chromium/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/chromium/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -61,7 +61,6 @@ class ArmOperandGenerator : public OperandGenerator {
case kArmStrb:
case kArmLdr:
case kArmStr:
- case kArmStoreWriteBarrier:
return value >= -4095 && value <= 4095;
case kArmLdrh:
@@ -304,32 +303,32 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kArmVldrF32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArmVldrF64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kArmLdr;
break;
- default:
+ case MachineRepresentation::kNone: // Fall through.
+ case MachineRepresentation::kWord64:
UNREACHABLE();
return;
}
@@ -350,79 +349,103 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
- Emit(kArmStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r4),
- g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kArmVstrF32;
- break;
- case kRepFloat64:
- opcode = kArmVstrF64;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kArmStrb;
- break;
- case kRepWord16:
- opcode = kArmStrh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kArmStr;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kArmVstrF32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kArmVstrF64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kArmStrb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kArmStrh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kArmStr;
+ break;
+ case MachineRepresentation::kNone: // Fall through.
+ case MachineRepresentation::kWord64:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -437,30 +460,33 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -697,6 +723,12 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1089,11 +1121,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kArmVrintmF32, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kArmVrintmF64, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kArmVrintpF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kArmVrintpF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kArmVrintzF32, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kArmVrintzF64, node);
}
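For reference, the kArmVrint* opcodes selected by these visitors map onto the ARMv8 rounding modes as follows:

    // vrintm -> round toward -infinity       (Float{32,64}RoundDown)
    // vrintp -> round toward +infinity       (Float{32,64}RoundUp)
    // vrintz -> round toward zero            (Float{32,64}RoundTruncate)
    // vrintn -> round to nearest, ties even  (Float{32,64}RoundTiesEven)
    // vrinta -> round to nearest, ties away  (Float64RoundTiesAway)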
@@ -1104,23 +1156,20 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- ArmOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kArmVrintnF32, node);
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kArmVrintnF64, node);
+}
+
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ ArmOperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -1129,140 +1178,26 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
- g.UseRegister(input));
+ g.UseRegister(input.node()));
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
+ if (input.node() == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
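EmitPrepareArguments above uses two strategies: C calls write arguments into slots that were reserved up front (kArmPoke), while other calls grow the stack one argument at a time (kArmPush). A rough model with illustrative types, not taken from the patch:

    #include <cstddef>
    #include <vector>

    // C calls: slots already exist, so store each argument at its slot index.
    void PrepareCArguments(std::vector<int>* stack, const std::vector<int>& args) {
      for (size_t n = 0; n < args.size(); ++n) (*stack)[n] = args[n];  // "poke"
    }

    // Other calls: push in reverse so that the first argument ends up on top
    // of the stack.
    void PrepareJSArguments(std::vector<int>* stack, const std::vector<int>& args) {
      for (auto it = args.rbegin(); it != args.rend(); ++it)
        stack->push_back(*it);  // "push"
    }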
-void InstructionSelector::VisitTailCall(Node* node) {
- ArmOperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
namespace {
@@ -1423,7 +1358,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
-      // <Operation>) is either NULL, which means there's no use of the
+      // <Operation>) is either nullptr, which means there's no use of the
       // actual value, or was already defined (which means it is scheduled
       // *AFTER* this branch).
Node* const node = value->InputAt(0);
@@ -1657,9 +1592,15 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
if (CpuFeatures::IsSupported(ARMv8)) {
- flags |= MachineOperatorBuilder::kFloat64RoundDown |
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTiesAway;
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
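These capability bits tell the architecture-independent parts of the compiler which rounding operators may be generated; on pre-ARMv8 hardware the Float{32,64}Round* bits stay clear and those operators are not offered. A client-side test is just a mask check, sketched with an illustrative bit value:

    // Sketch: capability flags form a bitmask, tested before emitting the
    // corresponding machine operator. The bit value here is made up.
    bool SupportsFloat32RoundDown(unsigned flags) {
      const unsigned kFloat32RoundDownBit = 1u << 0;  // illustrative value
      return (flags & kFloat32RoundDownBit) != 0;
    }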
diff --git a/chromium/v8/src/compiler/arm64/code-generator-arm64.cc b/chromium/v8/src/compiler/arm64/code-generator-arm64.cc
index 257dd6c1341..d356195ecfb 100644
--- a/chromium/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/chromium/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -6,11 +6,11 @@
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -33,6 +33,8 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputDoubleRegister(index);
}
+ size_t OutputCount() { return instr_->OutputCount(); }
+
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
@@ -41,8 +43,26 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return ToRegister(instr_->InputAt(index)).W();
}
+ Register InputOrZeroRegister32(size_t index) {
+ DCHECK(instr_->InputAt(index)->IsRegister() ||
+ (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
+ if (instr_->InputAt(index)->IsImmediate()) {
+ return wzr;
+ }
+ return InputRegister32(index);
+ }
+
Register InputRegister64(size_t index) { return InputRegister(index); }
+ Register InputOrZeroRegister64(size_t index) {
+ DCHECK(instr_->InputAt(index)->IsRegister() ||
+ (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
+ if (instr_->InputAt(index)->IsImmediate()) {
+ return xzr;
+ }
+ return InputRegister64(index);
+ }
+
Operand InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
@@ -185,10 +205,21 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
}
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ if (offset.from_frame_pointer()) {
+ int from_sp =
+ offset.offset() +
+ ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
+ kPointerSize);
+ // Convert FP-offsets to SP-offsets if it results in better code.
+ if (Assembler::IsImmLSUnscaled(from_sp) ||
+ Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
+ offset = FrameOffset::FromStackPointer(from_sp);
+ }
+ }
return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
offset.offset());
}
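The rewrite above converts a frame-pointer-relative slot into a stack-pointer-relative one when the resulting offset is encodable (IsImmLSUnscaled / IsImmLSScaled); the distance between the two pointers is the fixed SP-to-FP slot count plus whatever has been claimed since. The arithmetic, extracted into a standalone helper:

    // Offset of the same stack slot measured from SP instead of FP.
    int FromSpOffset(int fp_offset, int sp_to_fp_slots, int sp_delta,
                     int pointer_size) {
      return fp_offset + (sp_to_fp_slots + sp_delta) * pointer_size;
    }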
@@ -237,6 +268,48 @@ class OutOfLineLoadZero final : public OutOfLineCode {
};
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlagClear(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Add(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
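This out-of-line class is the slow path of the new kArchStoreWithWriteBarrier instruction handled later in this file; the fast path is a plain store plus one page-flag test. The emitted shape, as pseudo-assembly:

    //   str   value, [object, index]        ; the store itself
    //   test  object page flags             ; pointers-from-here interesting?
    //   b.set ool                           ; only then take the slow path
    // ool:
    //   (mode > kValueIsPointer) skip if value is a Smi
    //   (mode > kValueIsMap)     skip if value's page is not interesting
    //   add   scratch1, object, index
    //   call  RecordWriteStub               ; update the remembered set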
Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
case kEqual:
@@ -388,13 +461,26 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ Mov(jssp, fp);
- __ Pop(fp, lr);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ Drop(sp_slot_delta);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Claim(-sp_slot_delta);
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
+ frame_access_state()->SetFrameAccessToSP();
}
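AssembleDeconstructActivationRecord and AssemblePrepareTailCall split the stack adjustment between them: a positive slot delta is dropped before the jump, a negative one is claimed, and the frame-access mode is switched so later spill-slot accesses go through SP. The sign convention, as a sketch (assuming TailCallFrameStackSlotDelta compares the caller's and callee's stack-parameter areas):

    // Positive delta: the callee needs fewer slots, shrink the stack (Drop).
    // Negative delta: the callee needs more slots, grow the stack (Claim).
    void AdjustStackForTailCall(int sp_slot_delta, int* sp_slots) {
      if (sp_slot_delta > 0) *sp_slots -= sp_slot_delta;        // __ Drop(...)
      else if (sp_slot_delta < 0) *sp_slots += -sp_slot_delta;  // __ Claim(...)
    }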
@@ -413,11 +499,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
+ frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -426,6 +514,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Jump(target);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -441,6 +530,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
+ frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
}
@@ -454,9 +544,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, temp);
__ Assert(eq, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(x10);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction:
@@ -465,6 +562,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// guarantee correct alignment of stack pointer.
UNREACHABLE();
break;
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -474,6 +574,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters, 0);
}
+    // CallCFunction only supports register arguments, so we never need to
+    // call frame()->ClearOutgoingParameterSlots() here.
+ DCHECK(frame_access_state()->sp_delta() == 0);
break;
}
case kArchJmp:
@@ -486,12 +589,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchLookupSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -506,41 +612,83 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Str(value, MemOperand(object, index));
+ __ CheckPageFlagSet(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ ool->entry());
+ __ Bind(ool->exit());
+ break;
+ }
+ case kArm64Float32RoundDown:
+ __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float64RoundDown:
__ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32RoundUp:
+ __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundUp:
+ __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64RoundTiesAway:
__ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32RoundTruncate:
+ __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float64RoundTruncate:
__ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64RoundUp:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArm64Float32RoundTiesEven:
+ __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundTiesEven:
+ __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Add:
- __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
+ __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ }
break;
case kArm64Add32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
- __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+ __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
} else {
- __ Add(i.OutputRegister32(), i.InputRegister32(0),
+ __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
}
break;
case kArm64And:
- __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64And32:
- __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Bic:
- __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Bic32:
- __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -624,45 +772,53 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Not32:
__ Mvn(i.OutputRegister32(), i.InputOperand32(0));
break;
- case kArm64Neg:
- __ Neg(i.OutputRegister(), i.InputOperand(0));
- break;
- case kArm64Neg32:
- __ Neg(i.OutputRegister32(), i.InputOperand32(0));
- break;
case kArm64Or:
- __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Or32:
- __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Orn:
- __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Orn32:
- __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Eor:
- __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Eor32:
- __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Eon:
- __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
break;
case kArm64Eon32:
- __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
break;
case kArm64Sub:
- __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
+ __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ }
break;
case kArm64Sub32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
- __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+ __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
} else {
- __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+ __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
i.InputOperand2_32(1));
}
break;
@@ -729,34 +885,48 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64Claim: {
+ case kArm64ClaimForCallArguments: {
__ Claim(i.InputInt32(0));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
break;
}
case kArm64Poke: {
Operand operand(i.InputInt32(1) * kPointerSize);
- __ Poke(i.InputRegister(0), operand);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Poke(i.InputFloat64Register(0), operand);
+ } else {
+ __ Poke(i.InputRegister(0), operand);
+ }
break;
}
case kArm64PokePair: {
int slot = i.InputInt32(2) - 1;
- __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
+ slot * kPointerSize);
+ } else {
+ __ PokePair(i.InputRegister(1), i.InputRegister(0),
+ slot * kPointerSize);
+ }
break;
}
+ case kArm64Clz:
+ __ Clz(i.OutputRegister64(), i.InputRegister64(0));
+ break;
case kArm64Clz32:
__ Clz(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Cmp:
- __ Cmp(i.InputRegister(0), i.InputOperand(1));
+ __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmp32:
- __ Cmp(i.InputRegister32(0), i.InputOperand2_32(1));
+ __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cmn:
- __ Cmn(i.InputRegister(0), i.InputOperand(1));
+ __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Cmn32:
- __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
+ __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Tst:
__ Tst(i.InputRegister(0), i.InputOperand(1));
@@ -791,12 +961,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputFloat32Register(1));
break;
case kArm64Float32Max:
- __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ // (b < a) ? a : b
+ __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
+ __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1), lo);
break;
case kArm64Float32Min:
- __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ // (a < b) ? a : b
+ __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
+ __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1), lo);
break;
case kArm64Float32Abs:
__ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
@@ -842,12 +1016,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArm64Float64Max:
- __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ // (b < a) ? a : b
+ __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), lo);
break;
case kArm64Float64Min:
- __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ // (a < b) ? a : b
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), lo);
break;
case kArm64Float64Abs:
__ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -870,12 +1048,69 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32ToInt64:
+ __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
+ if (i.OutputCount() > 1) {
+ __ Mov(i.OutputRegister(1), 1);
+ Label done;
+ __ Cmp(i.OutputRegister(0), 1);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
+ __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
+ vc);
+ __ B(vc, &done);
+ __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
+ __ Cset(i.OutputRegister(1), eq);
+ __ Bind(&done);
+ }
+ break;
+ case kArm64Float64ToInt64:
+ __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
+ if (i.OutputCount() > 1) {
+ __ Mov(i.OutputRegister(1), 1);
+ Label done;
+ __ Cmp(i.OutputRegister(0), 1);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
+ __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
+ __ B(vc, &done);
+ __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
+ __ Cset(i.OutputRegister(1), eq);
+ __ Bind(&done);
+ }
+ break;
+ case kArm64Float32ToUint64:
+ __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
+ if (i.OutputCount() > 1) {
+ __ Fcmp(i.InputFloat32Register(0), -1.0);
+ __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
+ __ Cset(i.OutputRegister(1), ne);
+ }
+ break;
+ case kArm64Float64ToUint64:
+ __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
+ if (i.OutputCount() > 1) {
+ __ Fcmp(i.InputDoubleRegister(0), -1.0);
+ __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
+ __ Cset(i.OutputRegister(1), ne);
+ }
+ break;
case kArm64Int32ToFloat64:
__ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Int64ToFloat32:
+ __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
+ break;
+ case kArm64Int64ToFloat64:
+ __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
+ break;
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Uint64ToFloat32:
+ __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
+ break;
+ case kArm64Uint64ToFloat64:
+ __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
+ break;
case kArm64Float64ExtractLowWord32:
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
@@ -950,29 +1185,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64StrD:
__ Str(i.InputDoubleRegister(2), i.MemoryOperand());
break;
- case kArm64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ Add(index, object, index);
- __ Str(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(dcarney): we shouldn't test write barriers from c calls.
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
- UseScratchRegisterScope scope(masm());
- Register temp = no_reg;
- if (csp.is(masm()->StackPointer())) {
- temp = scope.AcquireX();
- lr_status = kLRHasBeenSaved;
- __ Push(lr, temp); // Need to push a pair
- }
- __ RecordWrite(object, index, value, lr_status, mode);
- if (csp.is(masm()->StackPointer())) {
- __ Pop(temp, lr);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
@@ -1127,29 +1339,31 @@ void CodeGenerator::AssembleDeoptimizerCall(
}
-// TODO(dcarney): increase stack slots in frame once before first use.
-static int AlignedStackSlots(int stack_slots) {
- if (stack_slots & 1) stack_slots++;
- return stack_slots;
-}
-
-
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ SetStackPointer(csp);
__ Push(lr, fp);
__ Mov(fp, csp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ SetStackPointer(jssp);
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
__ SetStackPointer(jssp);
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
+ if (descriptor->UseNativeStack()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
+ }
__ StubPrologue();
} else {
+ if (descriptor->UseNativeStack()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
+ }
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1167,13 +1381,15 @@ void CodeGenerator::AssemblePrologue() {
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_shrink_slots > 0) {
- Register sp = __ StackPointer();
- if (!sp.Is(csp)) {
- __ Sub(sp, sp, stack_shrink_slots * kPointerSize);
- }
- __ Sub(csp, csp, AlignedStackSlots(stack_shrink_slots) * kPointerSize);
+ // If frame()->needs_frame() is false, then
+ // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
+ if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
+ // The system stack pointer requires 16-byte alignment at function call
+ // boundaries.
+
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
}
+ __ Claim(stack_shrink_slots);
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
@@ -1217,19 +1433,25 @@ void CodeGenerator::AssembleReturn() {
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Mov(csp, fp);
__ Pop(fp, lr);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ B(&return_label_);
return;
} else {
__ Bind(&return_label_);
- __ Mov(jssp, fp);
+ if (descriptor->UseNativeStack()) {
+ __ Mov(csp, fp);
+ } else {
+ __ Mov(jssp, fp);
+ }
__ Pop(fp, lr);
}
+ } else if (descriptor->UseNativeStack()) {
+ pop_count += (pop_count & 1);
}
__ Drop(pop_count);
__ Ret();
@@ -1238,7 +1460,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- Arm64OperandConverter g(this, NULL);
+ Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1335,7 +1557,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- Arm64OperandConverter g(this, NULL);
+ Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/chromium/v8/src/compiler/arm64/instruction-codes-arm64.h b/chromium/v8/src/compiler/arm64/instruction-codes-arm64.h
index c2a52af7cbb..ef333480e38 100644
--- a/chromium/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/chromium/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -18,6 +18,7 @@ namespace compiler {
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
+ V(Arm64Clz) \
V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
@@ -55,8 +56,6 @@ namespace compiler {
V(Arm64Umod32) \
V(Arm64Not) \
V(Arm64Not32) \
- V(Arm64Neg) \
- V(Arm64Neg32) \
V(Arm64Lsl) \
V(Arm64Lsl32) \
V(Arm64Lsr) \
@@ -77,7 +76,7 @@ namespace compiler {
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
- V(Arm64Claim) \
+ V(Arm64ClaimForCallArguments) \
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Float32Cmp) \
@@ -89,6 +88,7 @@ namespace compiler {
V(Arm64Float32Min) \
V(Arm64Float32Abs) \
V(Arm64Float32Sqrt) \
+ V(Arm64Float32RoundDown) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
@@ -101,15 +101,27 @@ namespace compiler {
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
- V(Arm64Float64RoundUp) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
@@ -129,8 +141,7 @@ namespace compiler {
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
- V(Arm64Str) \
- V(Arm64StoreWriteBarrier)
+ V(Arm64Str)
// Addressing modes represent the "shape" of inputs to an instruction.
@@ -158,8 +169,8 @@ namespace compiler {
V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
-} // namespace internal
} // namespace compiler
+} // namespace internal
} // namespace v8
#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/chromium/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/chromium/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
new file mode 100644
index 00000000000..eb358dd8c48
--- /dev/null
+++ b/chromium/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -0,0 +1,224 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArm64Add:
+ case kArm64Add32:
+ case kArm64And:
+ case kArm64And32:
+ case kArm64Bic:
+ case kArm64Bic32:
+ case kArm64Clz:
+ case kArm64Clz32:
+ case kArm64Cmp:
+ case kArm64Cmp32:
+ case kArm64Cmn:
+ case kArm64Cmn32:
+ case kArm64Tst:
+ case kArm64Tst32:
+ case kArm64Or:
+ case kArm64Or32:
+ case kArm64Orn:
+ case kArm64Orn32:
+ case kArm64Eor:
+ case kArm64Eor32:
+ case kArm64Eon:
+ case kArm64Eon32:
+ case kArm64Sub:
+ case kArm64Sub32:
+ case kArm64Mul:
+ case kArm64Mul32:
+ case kArm64Smull:
+ case kArm64Umull:
+ case kArm64Madd:
+ case kArm64Madd32:
+ case kArm64Msub:
+ case kArm64Msub32:
+ case kArm64Mneg:
+ case kArm64Mneg32:
+ case kArm64Idiv:
+ case kArm64Idiv32:
+ case kArm64Udiv:
+ case kArm64Udiv32:
+ case kArm64Imod:
+ case kArm64Imod32:
+ case kArm64Umod:
+ case kArm64Umod32:
+ case kArm64Not:
+ case kArm64Not32:
+ case kArm64Lsl:
+ case kArm64Lsl32:
+ case kArm64Lsr:
+ case kArm64Lsr32:
+ case kArm64Asr:
+ case kArm64Asr32:
+ case kArm64Ror:
+ case kArm64Ror32:
+ case kArm64Mov32:
+ case kArm64Sxtb32:
+ case kArm64Sxth32:
+ case kArm64Sxtw:
+ case kArm64Sbfx32:
+ case kArm64Ubfx:
+ case kArm64Ubfx32:
+ case kArm64Ubfiz32:
+ case kArm64Bfi:
+ case kArm64Float32Cmp:
+ case kArm64Float32Add:
+ case kArm64Float32Sub:
+ case kArm64Float32Mul:
+ case kArm64Float32Div:
+ case kArm64Float32Max:
+ case kArm64Float32Min:
+ case kArm64Float32Abs:
+ case kArm64Float32Sqrt:
+ case kArm64Float32RoundDown:
+ case kArm64Float64Cmp:
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ case kArm64Float64Mul:
+ case kArm64Float64Div:
+ case kArm64Float64Mod:
+ case kArm64Float64Max:
+ case kArm64Float64Min:
+ case kArm64Float64Abs:
+ case kArm64Float64Neg:
+ case kArm64Float64Sqrt:
+ case kArm64Float64RoundDown:
+ case kArm64Float64RoundTiesAway:
+ case kArm64Float64RoundTruncate:
+ case kArm64Float64RoundTiesEven:
+ case kArm64Float64RoundUp:
+ case kArm64Float32RoundTiesEven:
+ case kArm64Float32RoundTruncate:
+ case kArm64Float32RoundUp:
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Float32ToInt64:
+ case kArm64Float64ToInt64:
+ case kArm64Float32ToUint64:
+ case kArm64Float64ToUint64:
+ case kArm64Int32ToFloat64:
+ case kArm64Int64ToFloat32:
+ case kArm64Int64ToFloat64:
+ case kArm64Uint32ToFloat64:
+ case kArm64Uint64ToFloat32:
+ case kArm64Uint64ToFloat64:
+ case kArm64Float64ExtractLowWord32:
+ case kArm64Float64ExtractHighWord32:
+ case kArm64Float64InsertLowWord32:
+ case kArm64Float64InsertHighWord32:
+ case kArm64Float64MoveU64:
+ case kArm64U64MoveFloat64:
+ return kNoOpcodeFlags;
+
+ case kArm64TestAndBranch32:
+ case kArm64TestAndBranch:
+ case kArm64CompareAndBranch32:
+ return kIsBlockTerminator;
+
+ case kArm64LdrS:
+ case kArm64LdrD:
+ case kArm64Ldrb:
+ case kArm64Ldrsb:
+ case kArm64Ldrh:
+ case kArm64Ldrsh:
+ case kArm64LdrW:
+ case kArm64Ldr:
+ return kIsLoadOperation;
+
+ case kArm64ClaimForCallArguments:
+ case kArm64Poke:
+ case kArm64PokePair:
+ case kArm64StrS:
+ case kArm64StrD:
+ case kArm64Strb:
+ case kArm64Strh:
+ case kArm64StrW:
+ case kArm64Str:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+  // Basic latency modeling for arm64 instructions. The latencies below have
+  // been determined empirically.
+ switch (instr->arch_opcode()) {
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Int32ToFloat64:
+ case kArm64Uint32ToFloat64:
+ return 3;
+
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ return 2;
+
+ case kArm64Float64Mul:
+ return 3;
+
+ case kArm64Float64Div:
+ return 6;
+
+ case kArm64Lsl:
+ case kArm64Lsl32:
+ case kArm64Lsr:
+ case kArm64Lsr32:
+ case kArm64Asr:
+ case kArm64Asr32:
+ case kArm64Ror:
+ case kArm64Ror32:
+ return 3;
+
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kArm64LdrS:
+ case kArm64LdrD:
+ case kArm64Ldrb:
+ case kArm64Ldrsb:
+ case kArm64Ldrh:
+ case kArm64Ldrsh:
+ case kArm64LdrW:
+ case kArm64Ldr:
+ return 5;
+
+ default:
+ return 1;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
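The new scheduler backend only answers two queries: per-opcode flags (load / side effect / block terminator) and an empirical latency. How the architecture-independent scheduler combines them is not part of this file; one plausible use, sketched with assumed types, is to prioritize long-latency instructions so their consumers can be separated from them:

    struct SchedNode { int latency; };

    // Assumed heuristic, not taken from the patch: issue longer-latency
    // instructions first; flag-based constraints (kHasSideEffect,
    // kIsLoadOperation) would be enforced separately as dependencies.
    bool ScheduleBefore(const SchedNode& a, const SchedNode& b) {
      return a.latency > b.latency;
    }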
diff --git a/chromium/v8/src/compiler/arm64/instruction-selector-arm64.cc b/chromium/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 7a5b84275ac..1ec5ab4c41a 100644
--- a/chromium/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/chromium/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -37,6 +37,15 @@ class Arm64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+  // Use the zero register if the node has the immediate value zero; otherwise
+  // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
// Use the provided node if it has the required value, or create a
// TempImmediate otherwise.
InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
@@ -214,14 +223,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- bool is_cmp = opcode == kArm64Cmp32;
+ bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
// We can commute cmp by switching the inputs and commuting the flags
// continuation.
bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
- // The cmp instruction is encoded as sub with zero output register, and
- // therefore supports the same operand modes.
+  // The cmp and cmn instructions are encoded as sub or add with the zero
+  // register as the output register, and therefore support the same operand
+  // modes.
bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
m.IsInt64Sub() || is_cmp;
@@ -247,18 +256,18 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} else if (TryMatchAnyShift(selector, node, right_node, &opcode,
!is_add_sub)) {
Matcher m_shift(right_node);
- inputs[input_count++] = g.UseRegister(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
if (is_cmp) cont->Commute();
Matcher m_shift(left_node);
- inputs[input_count++] = g.UseRegister(right_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else {
- inputs[input_count++] = g.UseRegister(left_node);
+ inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
}
@@ -329,41 +338,40 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
- switch (rep) {
- case kRepFloat32:
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kArm64LdrS;
immediate_mode = kLoadStoreImm32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArm64LdrD;
immediate_mode = kLoadStoreImm64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
immediate_mode = kLoadStoreImm8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
immediate_mode = kLoadStoreImm16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -383,91 +391,114 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
- Emit(kArm64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, x10),
- g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- ImmediateMode immediate_mode = kNoImmediate;
- switch (rep) {
- case kRepFloat32:
- opcode = kArm64StrS;
- immediate_mode = kLoadStoreImm32;
- break;
- case kRepFloat64:
- opcode = kArm64StrD;
- immediate_mode = kLoadStoreImm64;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kArm64Strb;
- immediate_mode = kLoadStoreImm8;
- break;
- case kRepWord16:
- opcode = kArm64Strh;
- immediate_mode = kLoadStoreImm16;
- break;
- case kRepWord32:
- opcode = kArm64StrW;
- immediate_mode = kLoadStoreImm32;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kArm64Str;
- immediate_mode = kLoadStoreImm64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+  // TODO(arm64): The write-barrier handling below could probably be
+  // structured more cleanly.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode immediate_mode = kNoImmediate;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kArm64StrS;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kArm64StrD;
+ immediate_mode = kLoadStoreImm64;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kArm64Strb;
+ immediate_mode = kLoadStoreImm8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kArm64Strh;
+ immediate_mode = kLoadStoreImm16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kArm64StrW;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kArm64Str;
+ immediate_mode = kLoadStoreImm64;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, immediate_mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -477,33 +508,35 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -912,12 +945,30 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitWord32Clz(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -997,12 +1048,7 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
}
}
- if (m.left().Is(0)) {
- Emit(kArm64Neg32, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- } else {
- VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
- }
+ VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
@@ -1023,11 +1069,7 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
}
- if (m.left().Is(0)) {
- Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
- } else {
- VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
- }
+ VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
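The dedicated kArm64Neg/kArm64Neg32 opcodes (and the m.left().Is(0) special cases above) can go away because, with UseRegisterOrImmediateZero, a '0 - x' subtraction picks the zero register for its left operand, and on A64 that already is the negate instruction:

    //   sub w0, wzr, w1     ; what the generic path now emits for 0 - x
    //   neg w0, w1          ; the architectural alias for the same encoding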
@@ -1197,6 +1239,74 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
+}
+
+
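Each TryTruncate* node has one mandatory output (the converted value) and an optional Projection(1) that reports success; the code generator sets it via the compare/Cset sequences shown earlier. The contract, modeled as a standalone function (on failure the real instruction leaves the saturated Fcvtzs result in output 0; this sketch returns 0):

    #include <cmath>
    #include <cstdint>

    int64_t TryTruncateFloat64ToInt64(double x, bool* success) {
      *success = !std::isnan(x) &&
                 x >= static_cast<double>(INT64_MIN) &&  // exactly -2^63
                 x < 9223372036854775808.0;              // 2^63
      return *success ? static_cast<int64_t>(x) : 0;
    }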
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
VisitRR(this, kArm64Sxtw, node);
}
@@ -1276,6 +1386,26 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kArm64Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Int64ToFloat64, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kArm64Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Uint64ToFloat64, node);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kArm64Float64ExtractLowWord32, node);
}
@@ -1363,16 +1493,24 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRR(this, kArm64Float32Max, node);
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRR(this, kArm64Float64Max, node);
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRR(this, kArm64Float32Min, node);
+}
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRR(this, kArm64Float64Min, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -1395,11 +1533,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kArm64Float32RoundDown, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kArm64Float64RoundDown, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kArm64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kArm64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kArm64Float32RoundTruncate, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kArm64Float64RoundTruncate, node);
}
@@ -1410,197 +1568,57 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- Arm64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kArm64Float32RoundTiesEven, node);
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kArm64Float64RoundTiesEven, node);
+}
+
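Editorial note: each of the new Float32/Float64 rounding visitors above maps one rounding mode onto a single VisitRR emission, presumably lowering to the ARM64 frintm/frintp/frintz/frintn/frinta family. A small standalone illustration of the modes' semantics using the C++ library equivalents (the frint* pairing in the comments is an assumption, not taken from this patch):

#include <cmath>
#include <cstdio>

int main() {
  double x = -2.5;
  std::printf("RoundDown      %.1f\n", std::floor(x));      // -3.0 (frintm)
  std::printf("RoundUp        %.1f\n", std::ceil(x));       // -2.0 (frintp)
  std::printf("RoundTruncate  %.1f\n", std::trunc(x));      // -2.0 (frintz)
  std::printf("RoundTiesEven  %.1f\n", std::nearbyint(x));  // -2.0 (frintn)
  std::printf("RoundTiesAway  %.1f\n", std::round(x));      // -3.0 (frinta)
  return 0;
}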
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ Arm64OperandGenerator g(this);
// Push the arguments to the stack.
- int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
+ int aligned_push_count = static_cast<int>(arguments->size());
+
bool pushed_count_uneven = aligned_push_count & 1;
+ int claim_count = aligned_push_count;
+ if (pushed_count_uneven && descriptor->UseNativeStack()) {
+ // We can only claim for an even number of call arguments when we use the
+ // native stack.
+ claim_count++;
+ }
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
if (aligned_push_count > 0) {
// TODO(dcarney): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
- Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
- }
- // Move arguments to the stack.
- {
- int slot = aligned_push_count - 1;
- // Emit the uneven pushes.
- if (pushed_count_uneven) {
- Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot));
- slot--;
- }
- // Now all pushes can be done in pairs.
- for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair, g.NoOutput(),
- g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]),
- g.TempImmediate(slot));
- }
+ Emit(kArm64ClaimForCallArguments, g.NoOutput(),
+ g.TempImmediate(claim_count));
}
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
+ // Move arguments to the stack.
+ int slot = aligned_push_count - 1;
+ while (slot >= 0) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+ g.TempImmediate(slot));
+ slot--;
+ // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
+ // same type.
+ // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
+ // g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
+ // slot -= 2;
}
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
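Editorial note: the claim adjustment in EmitPrepareArguments above exists because the ARM64 native stack pointer (csp) must stay 16-byte aligned while argument slots are 8 bytes wide, so an odd push count claims one extra padding slot. A self-contained sketch of the arithmetic; ClaimCount is an illustrative name, not V8 code:

#include <cstdio>

// Odd push counts on the native stack round up to the next even count;
// the JS stack (jssp) has no such alignment requirement.
static int ClaimCount(int push_count, bool use_native_stack) {
  bool uneven = (push_count & 1) != 0;
  return push_count + ((uneven && use_native_stack) ? 1 : 0);
}

int main() {
  std::printf("%d\n", ClaimCount(3, true));   // 4: padded to an even count
  std::printf("%d\n", ClaimCount(3, false));  // 3: jssp needs no padding
  return 0;
}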
-void InstructionSelector::VisitTailCall(Node* node) {
- Arm64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push the arguments to the stack.
- int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
- bool pushed_count_uneven = aligned_push_count & 1;
- // TODO(dcarney): claim and poke probably take small immediates,
- // loop here or whatever.
- // Bump the stack pointer(s).
- if (aligned_push_count > 0) {
- // TODO(dcarney): it would be better to bump the csp here only
- // and emit paired stores with increment for non c frames.
- Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
- }
- // Move arguments to the stack.
- {
- int slot = aligned_push_count - 1;
- // Emit the uneven pushes.
- if (pushed_count_uneven) {
- Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot));
- slot--;
- }
- // Now all pushes can be done in pairs.
- for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair, g.NoOutput(),
- g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]),
- g.TempImmediate(slot));
- }
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
namespace {
@@ -1646,8 +1664,29 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitBinop<Int32BinopMatcher>(selector, node, kArm64Cmp32, kArithmeticImm,
- cont);
+ Int32BinopMatcher m(node);
+ ArchOpcode opcode = kArm64Cmp32;
+
+ // Select negated compare for comparisons with negated right input.
+ if (m.right().IsInt32Sub()) {
+ Node* sub = m.right().node();
+ Int32BinopMatcher msub(sub);
+ if (msub.left().Is(0)) {
+ bool can_cover = selector->CanCover(node, sub);
+ node->ReplaceInput(1, msub.right().node());
+ // Even if the comparison node covers the subtraction, after the input
+ // replacement above, the node still won't cover the input to the
+ // subtraction; the subtraction still uses it.
+ // In order to get shifted operations to work, we must remove the rhs
+ // input to the subtraction, as TryMatchAnyShift requires this node to
+ // cover the input shift. We do this by setting it to the lhs input,
+ // as we know it's zero, and the result of the subtraction isn't used by
+ // any other node.
+ if (can_cover) sub->ReplaceInput(1, msub.left().node());
+ opcode = kArm64Cmn32;
+ }
+ }
+ VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
}
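Editorial note: the rewrite above turns a compare against a negation, cmp a, (0 - b), into cmn a, b. Both test a + b, so the explicit negation disappears and the freed-up right input of the subtraction can still feed shift matching. A small check of the identity for the equality case, on a range small enough to avoid signed overflow:

#include <cassert>
#include <cstdint>

int main() {
  // cmp a, (0 - b) tests a - (0 - b) == a + b against zero,
  // which is exactly what cmn a, b computes via the Z flag.
  for (int32_t a = -3; a <= 3; ++a) {
    for (int32_t b = -3; b <= 3; ++b) {
      bool cmp_eq = (a == 0 - b);
      bool cmn_eq = (a + b == 0);
      assert(cmp_eq == cmn_eq);
    }
  }
  return 0;
}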
@@ -1793,12 +1832,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+      // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -1808,6 +1847,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
+ kArithmeticImm, &cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
+ kArithmeticImm, &cont);
default:
break;
}
@@ -1995,6 +2042,28 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
+}
+
+
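Editorial note: the two visitors above fuse the flag computation into the arithmetic itself; when the overflow projection is live it becomes a FlagsContinuation on kArm64Add/kArm64Sub, so no separate compare is emitted. A sketch of the value/overflow pair such a node produces; the Int64AddWithOverflow helper below is illustrative, not V8 code:

#include <cstdint>
#include <cstdio>

// The wrapped sum corresponds to projection 0, the overflow bit to
// projection 1; on ARM64 one flag-setting add yields both.
static bool Int64AddWithOverflow(int64_t a, int64_t b, int64_t* out) {
  uint64_t sum = static_cast<uint64_t>(a) + static_cast<uint64_t>(b);
  *out = static_cast<int64_t>(sum);
  // Signed overflow iff both operands differ in sign from the result.
  return ((a ^ *out) & (b ^ *out)) < 0;
}

int main() {
  int64_t r;
  bool ovf = Int64AddWithOverflow(INT64_MAX, 1, &r);
  std::printf("overflow=%d result=%lld\n", ovf, static_cast<long long>(r));
  return 0;
}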
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
@@ -2107,9 +2176,19 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
diff --git a/chromium/v8/src/compiler/ast-graph-builder.cc b/chromium/v8/src/compiler/ast-graph-builder.cc
index f8f010d816a..c70dfbf6506 100644
--- a/chromium/v8/src/compiler/ast-graph-builder.cc
+++ b/chromium/v8/src/compiler/ast-graph-builder.cc
@@ -4,10 +4,10 @@
#include "src/compiler/ast-graph-builder.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
-#include "src/compiler/js-type-feedback.h"
#include "src/compiler/linkage.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/machine-operator.h"
@@ -15,9 +15,8 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
-#include "src/full-codegen/full-codegen.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/compiler/type-hint-analyzer.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -95,11 +94,14 @@ class AstGraphBuilder::AstValueContext final : public AstContext {
// Context to evaluate expression for a condition value (and side effects).
class AstGraphBuilder::AstTestContext final : public AstContext {
public:
- explicit AstTestContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kTest) {}
+ AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
+ : AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
~AstTestContext() final;
void ProduceValue(Node* value) final;
Node* ConsumeValue() final;
+
+ private:
+ TypeFeedbackId const feedback_id_;
};
@@ -212,12 +214,12 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
public:
explicit DeferredCommands(AstGraphBuilder* owner)
- : owner_(owner), deferred_(owner->zone()) {}
+ : owner_(owner), deferred_(owner->local_zone()) {}
// One recorded control-flow command.
struct Entry {
Command command; // The command type being applied on this path.
- Statement* statement; // The target statement for the command or {NULL}.
+ Statement* statement; // The target statement for the command or {nullptr}.
Node* token; // A token identifying this particular path.
};
@@ -279,7 +281,7 @@ class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
: ControlScope(owner), target_(target), control_(control) {}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
if (target != target_) return false; // We are not the command target.
switch (cmd) {
case CMD_BREAK:
@@ -307,7 +309,7 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
: ControlScope(owner), target_(target), control_(control) {}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
if (target != target_) return false; // We are not the command target.
switch (cmd) {
case CMD_BREAK:
@@ -343,7 +345,7 @@ class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
switch (cmd) {
case CMD_THROW:
control_->Throw(value);
@@ -374,7 +376,7 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
}
protected:
- virtual bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node* value) override {
Node* token = commands_->RecordCommand(cmd, target, value);
control_->LeaveTry(token, value);
return true;
@@ -396,8 +398,9 @@ class AstGraphBuilder::FrameStateBeforeAndAfter {
: builder_->environment()->Checkpoint(id_before);
}
- void AddToNode(Node* node, BailoutId id_after,
- OutputFrameStateCombine combine) {
+ void AddToNode(
+ Node* node, BailoutId id_after,
+ OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore()) {
int count = OperatorProperties::GetFrameStateInputCount(node->op());
DCHECK_LE(count, 2);
@@ -430,8 +433,9 @@ class AstGraphBuilder::FrameStateBeforeAndAfter {
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
- JSTypeFeedbackTable* js_type_feedback)
- : local_zone_(local_zone),
+ TypeHintAnalysis* type_hint_analysis)
+ : isolate_(info->isolate()),
+ local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
environment_(nullptr),
@@ -445,15 +449,15 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
input_buffer_(nullptr),
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
+ type_hint_analysis_(type_hint_analysis),
state_values_cache_(jsgraph),
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
local_zone),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
info->scope()->num_stack_slots(), info->shared_info(),
- CALL_MAINTAINS_NATIVE_CONTEXT)),
- js_type_feedback_(js_type_feedback) {
- InitializeAstVisitor(info->isolate(), local_zone);
+ CALL_MAINTAINS_NATIVE_CONTEXT)) {
+ InitializeAstVisitor(info->isolate());
}
@@ -463,8 +467,7 @@ Node* AstGraphBuilder::GetFunctionClosureForContext() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
- // Pass a SMI sentinel and let the runtime look up the empty function.
- return jsgraph()->SmiConstant(0);
+ return BuildLoadNativeContextField(Context::CLOSURE_INDEX);
} else {
DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
@@ -474,8 +477,8 @@ Node* AstGraphBuilder::GetFunctionClosureForContext() {
Node* AstGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
- const Operator* op = common()->Parameter(
- Linkage::kJSFunctionCallClosureParamIndex, "%closure");
+ int index = Linkage::kJSCallClosureParamIndex;
+ const Operator* op = common()->Parameter(index, "%closure");
Node* node = NewNode(op, graph()->start());
function_closure_.set(node);
}
@@ -485,9 +488,9 @@ Node* AstGraphBuilder::GetFunctionClosure() {
Node* AstGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op = common()->Parameter(
- info()->num_parameters_including_this(), "%context");
+ int params = info()->num_parameters_including_this();
+ int index = Linkage::GetJSCallContextParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%context");
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
@@ -495,13 +498,26 @@ Node* AstGraphBuilder::GetFunctionContext() {
}
+Node* AstGraphBuilder::GetNewTarget() {
+ if (!new_target_.is_set()) {
+ int params = info()->num_parameters_including_this();
+ int index = Linkage::GetJSCallNewTargetParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%new.target");
+ Node* node = NewNode(op, graph()->start());
+ new_target_.set(node);
+ }
+ return new_target_.get();
+}
+
+
bool AstGraphBuilder::CreateGraph(bool stack_check) {
Scope* scope = info()->scope();
- DCHECK(graph() != NULL);
+ DCHECK_NOT_NULL(graph());
// Set up the basic structure of the graph. Outputs for {Start} are the formal
- // parameters (including the receiver) plus context and closure.
- int actual_parameter_count = info()->num_parameters_including_this() + 2;
+ // parameters (including the receiver) plus new target, number of arguments,
+ // context and closure.
+ int actual_parameter_count = info()->num_parameters_including_this() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
// Initialize the top-level environment.
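Editorial note: with the bump from +2 to +4, the Start node now also carries the new.target and argument-count values that GetNewTarget and GetFunctionContext above locate through the Linkage helpers. A hedged sketch of the resulting layout for a function f(a, b), assuming the +0/+1/+2 offsets those helpers appear to compute here (the closure parameter uses its own dedicated constant index, not shown):

#include <cassert>

int main() {
  int params = 3;                     // receiver, a, b: num_parameters_including_this()
  int new_target_index = params;      // %new.target    -> output 3 (assumed)
  int arg_count_index  = params + 1;  // argument count -> output 4 (assumed)
  int context_index    = params + 2;  // %context       -> output 5 (assumed)
  assert(params + 4 == 7);            // actual_parameter_count passed to Start
  assert(new_target_index == 3 && arg_count_index == 4 && context_index == 5);
  return 0;
}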
@@ -527,18 +543,10 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
env.RawParameterBind(0, jsgraph()->TheHoleConstant());
}
- // Build receiver check for sloppy mode if necessary.
- // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
- if (scope->has_this_declaration()) {
- Node* original_receiver = env.RawParameterLookup(0);
- Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
- env.RawParameterBind(0, patched_receiver);
- }
-
- // Build function context only if there are context allocated variables.
+ // Build local context only if there are context allocated variables.
if (info()->num_heap_slots() > 0) {
- // Push a new inner context scope for the function.
- Node* inner_context = BuildLocalFunctionContext(GetFunctionContext());
+ // Push a new inner context scope for the current activation.
+ Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
ContextScope top_context(this, scope, inner_context);
CreateGraphBody(stack_check);
} else {
@@ -568,6 +576,11 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
+ // Build rest arguments array if it is used.
+ int rest_index;
+ Variable* rest_parameter = scope->rest_parameter(&rest_index);
+ BuildRestArgumentsArray(rest_parameter, rest_index);
+
// Build assignment to {.this_function} variable if it is used.
BuildThisFunctionVariable(scope->this_function_var());
@@ -816,7 +829,7 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
int offset, int count) {
bool should_update = false;
Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
- if (*state_values == NULL || (*state_values)->InputCount() != count) {
+ if (*state_values == nullptr || (*state_values)->InputCount() != count) {
should_update = true;
} else {
DCHECK(static_cast<size_t>(offset + count) <= values()->size());
@@ -922,11 +935,11 @@ void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
- environment()->Push(owner()->BuildToBoolean(value));
+ environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
}
-Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return nullptr; }
Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
@@ -954,14 +967,14 @@ void AstGraphBuilder::ControlScope::PerformCommand(Command command,
Node* value) {
Environment* env = environment()->CopyAsUnreachable();
ControlScope* current = this;
- while (current != NULL) {
+ while (current != nullptr) {
environment()->TrimStack(current->stack_height());
environment()->TrimContextChain(current->context_length());
if (current->Execute(command, target, value)) break;
current = current->outer_;
}
builder()->set_environment(env);
- DCHECK(current != NULL); // Always handled (unless stack is malformed).
+ DCHECK_NOT_NULL(current); // Always handled (unless stack is malformed).
}
@@ -986,7 +999,7 @@ void AstGraphBuilder::ControlScope::ThrowValue(Node* exception_value) {
void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
- if (expr == NULL) {
+ if (expr == nullptr) {
return environment()->Push(jsgraph()->NullConstant());
}
VisitForValue(expr);
@@ -994,7 +1007,7 @@ void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
void AstGraphBuilder::VisitForValueOrTheHole(Expression* expr) {
- if (expr == NULL) {
+ if (expr == nullptr) {
return environment()->Push(jsgraph()->TheHoleConstant());
}
VisitForValue(expr);
@@ -1029,7 +1042,7 @@ void AstGraphBuilder::VisitForEffect(Expression* expr) {
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this);
+ AstTestContext for_condition(this, expr->test_id());
if (!CheckStackOverflow()) {
expr->Accept(this);
} else {
@@ -1128,8 +1141,8 @@ void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
void AstGraphBuilder::VisitBlock(Block* stmt) {
BlockBuilder block(this);
ControlScopeForBreakable scope(this, stmt, &block);
- if (stmt->labels() != NULL) block.BeginBlock();
- if (stmt->scope() == NULL) {
+ if (stmt->labels() != nullptr) block.BeginBlock();
+ if (stmt->scope() == nullptr) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
} else {
@@ -1144,7 +1157,7 @@ void AstGraphBuilder::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
}
- if (stmt->labels() != NULL) block.EndBlock();
+ if (stmt->labels() != nullptr) block.EndBlock();
}
@@ -1197,8 +1210,9 @@ void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
VisitForValue(stmt->expression());
Node* value = environment()->Pop();
+ Node* object = BuildToObject(value, stmt->ToObjectId());
const Operator* op = javascript()->CreateWithContext();
- Node* context = NewNode(op, value, GetFunctionClosureForContext());
+ Node* context = NewNode(op, object, GetFunctionClosureForContext());
PrepareFrameState(context, stmt->EntryId());
VisitInScope(stmt->statement(), stmt->scope(), context);
}
@@ -1213,7 +1227,6 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForValue(stmt->tag());
- Node* tag = environment()->Top();
// Iterate over all cases and create nodes for label comparison.
for (int i = 0; i < clauses->length(); i++) {
@@ -1229,6 +1242,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// value is still on the operand stack while the label is evaluated.
VisitForValue(clause->label());
Node* label = environment()->Pop();
+ Node* tag = environment()->Top();
const Operator* op = javascript()->StrictEqual();
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -1284,7 +1298,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
LoopBuilder for_loop(this);
VisitIfNotNull(stmt->init());
for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- if (stmt->cond() != NULL) {
+ if (stmt->cond() != nullptr) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
for_loop.BreakUnless(condition);
@@ -1359,7 +1373,7 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Bind value and do loop body.
VectorSlotPair feedback =
CreateVectorSlotPair(stmt->EachFeedbackSlot());
- VisitForInAssignment(stmt->each(), value, feedback,
+ VisitForInAssignment(stmt->each(), value, feedback, stmt->FilterId(),
stmt->AssignmentId());
VisitIterationBody(stmt, &for_loop);
}
@@ -1395,8 +1409,6 @@ void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TryCatchBuilder try_control(this);
- ExternalReference message_object =
- ExternalReference::address_of_pending_message_obj(isolate());
// Evaluate the try-block inside a control scope. This simulates a handler
// that is intercepting 'throw' control commands.
@@ -1410,14 +1422,17 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control.EndTry();
- // TODO(mstarzinger): We are only using a runtime call to get a lazy bailout
- // point, there is no need to really emit an actual call. Optimize this!
- Node* guard = NewNode(javascript()->CallRuntime(Runtime::kMaxSmi, 0));
- PrepareFrameState(guard, stmt->HandlerId());
+ // Insert lazy bailout point.
+ // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
+  // point. Ideally, we should not re-enter optimized code when deoptimized
+ // lazily. Tracked by issue v8:4195.
+ NewNode(common()->LazyBailout(),
+ jsgraph()->ZeroConstant(), // dummy target.
+ environment()->Checkpoint(stmt->HandlerId())); // frame state.
// Clear message object as we enter the catch block.
Node* the_hole = jsgraph()->TheHoleConstant();
- BuildStoreExternal(message_object, kMachAnyTagged, the_hole);
+ NewNode(javascript()->StoreMessage(), the_hole);
// Create a catch scope that binds the exception.
Node* exception = try_control.GetExceptionNode();
@@ -1433,8 +1448,6 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
TryFinallyBuilder try_control(this);
- ExternalReference message_object =
- ExternalReference::address_of_pending_message_obj(isolate());
// We keep a record of all paths that enter the finally-block to be able to
// dispatch to the correct continuation point after the statements in the
@@ -1447,7 +1460,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// 3. By exiting the try-block with a thrown exception.
Node* fallthrough_result = jsgraph()->TheHoleConstant();
ControlScope::DeferredCommands* commands =
- new (zone()) ControlScope::DeferredCommands(this);
+ new (local_zone()) ControlScope::DeferredCommands(this);
// Evaluate the try-block inside a control scope. This simulates a handler
// that is intercepting all control commands.
@@ -1461,10 +1474,13 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
- // TODO(mstarzinger): We are only using a runtime call to get a lazy bailout
- // point, there is no need to really emit an actual call. Optimize this!
- Node* guard = NewNode(javascript()->CallRuntime(Runtime::kMaxSmi, 0));
- PrepareFrameState(guard, stmt->HandlerId());
+ // Insert lazy bailout point.
+ // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
+  // point. Ideally, we should not re-enter optimized code when deoptimized
+ // lazily. Tracked by issue v8:4195.
+ NewNode(common()->LazyBailout(),
+ jsgraph()->ZeroConstant(), // dummy target.
+ environment()->Checkpoint(stmt->HandlerId())); // frame state.
// The result value semantics depend on how the block was entered:
// - ReturnStatement: It represents the return value being returned.
@@ -1476,14 +1492,14 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
  // The result value, dispatch token and message are expected on the operand
// stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
- Node* message = BuildLoadExternal(message_object, kMachAnyTagged);
+ Node* message = NewNode(javascript()->LoadMessage());
environment()->Push(token); // TODO(mstarzinger): Cook token!
environment()->Push(result);
environment()->Push(message);
// Clear message object as we enter the finally block.
Node* the_hole = jsgraph()->TheHoleConstant();
- BuildStoreExternal(message_object, kMachAnyTagged, the_hole);
+ NewNode(javascript()->StoreMessage(), the_hole);
// Evaluate the finally-block.
Visit(stmt->finally_block());
@@ -1494,7 +1510,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
message = environment()->Pop();
result = environment()->Pop();
token = environment()->Pop(); // TODO(mstarzinger): Uncook token!
- BuildStoreExternal(message_object, kMachAnyTagged, message);
+ NewNode(javascript()->StoreMessage(), message);
// Dynamic dispatch after the finally-block.
commands->ApplyDeferredCommands(token, result);
@@ -1527,20 +1543,15 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- if (expr->scope() == NULL) {
- // Visit class literal in the same scope, no declarations.
+ // Visit declarations and class literal in a block scope.
+ if (expr->scope()->ContextLocalCount() > 0) {
+ Node* context = BuildLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope(), context);
+ VisitDeclarations(expr->scope()->declarations());
VisitClassLiteralContents(expr);
} else {
- // Visit declarations and class literal in a block scope.
- if (expr->scope()->ContextLocalCount() > 0) {
- Node* context = BuildLocalBlockContext(expr->scope());
- ContextScope scope(this, expr->scope(), context);
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- } else {
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- }
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
}
}
@@ -1567,18 +1578,18 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
// The prototype is ensured to exist by Runtime_DefineClass. No access check
// is needed here since the constructor is created by the class literal.
- Node* proto =
+ Node* prototype =
BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
// The class literal and the prototype are both expected on the operand stack
// during evaluation of the method values.
environment()->Push(literal);
- environment()->Push(proto);
+ environment()->Push(prototype);
// Create nodes to store method values into the literal.
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
- environment()->Push(property->is_static() ? literal : proto);
+ environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
VisitForValue(property->key());
Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
@@ -1631,21 +1642,20 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- environment()->Pop(); // proto
- environment()->Pop(); // literal
+ prototype = environment()->Pop();
+ literal = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
- literal = NewNode(op, literal, proto);
+ literal = NewNode(op, literal, prototype);
// Assign to class variable.
- if (expr->scope() != NULL) {
- DCHECK_NOT_NULL(expr->class_variable_proxy());
+ if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback = CreateVectorSlotPair(
expr->NeedsProxySlot() ? expr->ProxySlot()
- : FeedbackVectorICSlot::Invalid());
- BuildVariableAssignment(var, literal, Token::INIT_CONST, feedback,
+ : FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(var, literal, Token::INIT, feedback,
BailoutId::None(), states);
}
ast_context()->ProduceValue(literal);
@@ -1657,6 +1667,13 @@ void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
+void AstGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ VisitBlock(expr->block());
+ VisitVariableProxy(expr->result());
+ ast_context()->ReplaceValue();
+}
+
+
void AstGraphBuilder::VisitConditional(Conditional* expr) {
IfBuilder compare_if(this);
VisitForTest(expr->condition());
@@ -1690,14 +1707,9 @@ void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to materialize a regular expression literal.
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* pattern = jsgraph()->Constant(expr->pattern());
- Node* flags = jsgraph()->Constant(expr->flags());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ expr->pattern(), expr->flags(), expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(literal);
}
@@ -1707,13 +1719,10 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* constants = jsgraph()->Constant(expr->constant_properties());
- const Operator* op =
- javascript()->CreateLiteralObject(expr->ComputeFlags(true));
- Node* literal = NewNode(op, literals_array, literal_index, constants);
+ const Operator* op = javascript()->CreateLiteralObject(
+ expr->constant_properties(), expr->ComputeFlags(true),
+ expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1723,7 +1732,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create nodes to store computed values into the literal.
int property_index = 0;
- AccessorTable accessor_table(zone());
+ AccessorTable accessor_table(local_zone());
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1744,11 +1753,11 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForValue(property->value());
FrameStateBeforeAndAfter states(this, property->value()->id());
Node* value = environment()->Pop();
+ Node* literal = environment()->Top();
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
- Node* store = BuildNamedStore(literal, name, value, feedback,
- TypeFeedbackId::None());
+ Node* store = BuildNamedStore(literal, name, value, feedback);
states.AddToNode(store, key->id(),
OutputFrameStateCombine::Ignore());
BuildSetHomeObject(value, literal, property, 1);
@@ -1757,7 +1766,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->key());
VisitForValue(property->value());
Node* value = environment()->Pop();
@@ -1775,7 +1784,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
@@ -1784,7 +1793,8 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
Node* set_prototype = NewNode(op, receiver, value);
// SetPrototype should not lazy deopt on an object literal.
- PrepareFrameState(set_prototype, BailoutId::None());
+ PrepareFrameState(set_prototype,
+ expr->GetIdForPropertySet(property_index));
break;
}
case ObjectLiteral::Property::GETTER:
@@ -1802,6 +1812,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create nodes to define accessors, using only a single call to the runtime
// for each pair of corresponding getters and setters.
+ literal = environment()->Top(); // Reload from operand stack.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
VisitForValue(it->first);
@@ -1831,21 +1842,21 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
Node* call = NewNode(op, receiver, value);
- PrepareFrameState(call, BailoutId::None());
+ PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
continue;
}
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->key());
Node* name = BuildToName(environment()->Pop(),
- expr->GetIdForProperty(property_index));
+ expr->GetIdForPropertyName(property_index));
environment()->Push(name);
VisitForValue(property->value());
Node* value = environment()->Pop();
@@ -1886,6 +1897,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
// Transform literals that contain functions to fast properties.
+ literal = environment()->Top(); // Reload from operand stack.
if (expr->has_function()) {
const Operator* op =
javascript()->CallRuntime(Runtime::kToFastProperties, 1);
@@ -1911,21 +1923,16 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- expr->BuildConstantElements(isolate());
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* constants = jsgraph()->Constant(expr->constant_elements());
- const Operator* op =
- javascript()->CreateLiteralArray(expr->ComputeFlags(true));
- Node* literal = NewNode(op, literals_array, literal_index, constants);
+ const Operator* op = javascript()->CreateLiteralArray(
+ expr->constant_elements(), expr->ComputeFlags(true),
+ expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
- // The array and the literal index are both expected on the operand stack
- // during computation of the element values.
+ // The array is expected on the operand stack during computation of the
+ // element values.
environment()->Push(literal);
- environment()->Push(literal_index);
// Create nodes to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
@@ -1938,13 +1945,11 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForValue(subexpr);
{
FrameStateBeforeAndAfter states(this, subexpr->id());
+ VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
- // TODO(turbofan): More efficient code could be generated here. Consider
- // that the store will be generic because we don't have a feedback vector
- // slot.
- Node* store = BuildKeyedStore(literal, index, value, VectorSlotPair(),
- TypeFeedbackId::None());
+ Node* literal = environment()->Top();
+ Node* store = BuildKeyedStore(literal, index, value, pair);
states.AddToNode(store, expr->GetIdForElement(array_index),
OutputFrameStateCombine::Ignore());
}
@@ -1955,29 +1960,31 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// above. The second part is the part after the first spread expression
  // (inclusive) and these elements get appended to the array. Note that the
  // number of elements an iterable produces is unknown ahead of time.
- environment()->Pop(); // Array literal index.
for (; array_index < expr->values()->length(); array_index++) {
Expression* subexpr = expr->values()->at(array_index);
- Node* array = environment()->Pop();
Node* result;
if (subexpr->IsSpread()) {
VisitForValue(subexpr->AsSpread()->expression());
+ FrameStateBeforeAndAfter states(this,
+ subexpr->AsSpread()->expression()->id());
Node* iterable = environment()->Pop();
+ Node* array = environment()->Pop();
Node* function = BuildLoadNativeContextField(
Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
- result = NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS,
- language_mode()),
- function, array, iterable);
+ result = NewNode(javascript()->CallFunction(3, language_mode()), function,
+ array, iterable);
+ states.AddToNode(result, expr->GetIdForElement(array_index));
} else {
VisitForValue(subexpr);
Node* value = environment()->Pop();
+ Node* array = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kAppendElement, 2);
result = NewNode(op, array, value);
+ PrepareFrameState(result, expr->GetIdForElement(array_index));
}
- PrepareFrameState(result, expr->GetIdForElement(array_index));
environment()->Push(result);
}
@@ -1987,7 +1994,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
- BailoutId bailout_id) {
+ BailoutId bailout_id_before,
+ BailoutId bailout_id_after) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
@@ -1998,9 +2006,11 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id,
- states);
+ environment()->Push(value);
+ FrameStateBeforeAndAfter states(this, bailout_id_before);
+ value = environment()->Pop();
+ BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
+ bailout_id_after, states);
break;
}
case NAMED_PROPERTY: {
@@ -2010,9 +2020,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* object = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback,
- TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildNamedStore(object, name, value, feedback);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_PROPERTY: {
@@ -2023,9 +2033,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store =
- BuildKeyedStore(object, key, value, feedback, TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2037,9 +2047,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* receiver = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
- TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2052,9 +2062,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
- TypeFeedbackId::None());
- states.AddToNode(store, bailout_id, OutputFrameStateCombine::Ignore());
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
+ states.AddToNode(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
}
@@ -2102,7 +2112,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
- Node* old_value = NULL;
+ Node* old_value = nullptr;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
@@ -2168,7 +2178,9 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
FrameStateBeforeAndAfter states(this, expr->value()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- value = BuildBinaryOp(left, right, expr->binary_op());
+ value =
+ BuildBinaryOp(left, right, expr->binary_op(),
+ expr->binary_operation()->BinaryOperationFeedbackId());
states.AddToNode(value, expr->binary_operation()->id(),
OutputFrameStateCombine::Push());
}
@@ -2197,8 +2209,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback,
- expr->AssignmentFeedbackId());
+ Node* store = BuildNamedStore(object, name, value, feedback);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2206,8 +2217,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback,
- expr->AssignmentFeedbackId());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2216,8 +2226,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
- expr->AssignmentFeedbackId());
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2226,8 +2235,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
- expr->AssignmentFeedbackId());
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
store_states.AddToNode(store, expr->id(),
ast_context()->GetStateCombine());
break;
@@ -2314,9 +2322,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
- Node* receiver_value = NULL;
- Node* callee_value = NULL;
+ ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+ Node* receiver_value = nullptr;
+ Node* callee_value = nullptr;
bool possibly_eval = false;
switch (call_type) {
case Call::GLOBAL_CALL: {
@@ -2326,6 +2334,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
callee_value =
BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
pair, OutputFrameStateCombine::Push());
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
}
@@ -2338,65 +2347,88 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* pair = NewNode(op, current_context(), name);
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
-
PrepareFrameState(pair, expr->LookupId(),
OutputFrameStateCombine::Push(2));
break;
}
- case Call::PROPERTY_CALL: {
+ case Call::NAMED_PROPERTY_CALL: {
Property* property = callee->AsProperty();
- VectorSlotPair pair =
+ VectorSlotPair feedback =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- if (!property->IsSuperAccess()) {
- VisitForValue(property->obj());
- Node* object = environment()->Top();
-
- if (property->key()->IsPropertyName()) {
- FrameStateBeforeAndAfter states(this, property->obj()->id());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value = BuildNamedLoad(object, name, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- } else {
- VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
- Node* key = environment()->Pop();
- callee_value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- }
- receiver_value = environment()->Pop();
- // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
- // object for sloppy callees. This could also be modeled explicitly
- // here,
- // thereby obsoleting the need for a flag to the call operator.
- flags = CALL_AS_METHOD;
-
- } else {
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Pop();
- receiver_value = environment()->Pop();
- if (property->key()->IsPropertyName()) {
- FrameStateBeforeAndAfter states(this, property->obj()->id());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value =
- BuildNamedSuperLoad(receiver_value, home_object, name, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
-
- } else {
- VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
- Node* key = environment()->Pop();
- callee_value =
- BuildKeyedSuperLoad(receiver_value, home_object, key, pair);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- }
- }
-
+ VisitForValue(property->obj());
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* object = environment()->Top();
+ callee_value = BuildNamedLoad(object, name, feedback);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Top();
+ callee_value = BuildKeyedLoad(object, key, feedback);
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ Node* home = environment()->Peek(1);
+ Node* object = environment()->Top();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ FrameStateBeforeAndAfter states(this, property->obj()->id());
+ callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
+ break;
+ }
+ case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ environment()->Push(environment()->Top()); // Duplicate this_var.
+ environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* home = environment()->Pop();
+ Node* object = environment()->Pop();
+ FrameStateBeforeAndAfter states(this, property->key()->id());
+ callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
+ states.AddToNode(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
break;
}
case Call::SUPER_CALL:
@@ -2419,6 +2451,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
case Call::OTHER_CALL:
VisitForValue(callee);
callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
}
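// Aside (illustrative sketch, not part of this patch; names suffixed with
// "Model" are hypothetical): the receiver_hint recorded in the cases above
// tells the call operator how much receiver conversion the callee still
// needs. Only the unhinted case must test the receiver at run time.
enum class ConvertReceiverModeModel { kNullOrUndefined, kNotNullOrUndefined, kAny };
bool NeedsRuntimeReceiverCheck(ConvertReceiverModeModel hint) {
  // kNullOrUndefined: statically undefined, so sloppy callees can take the
  // global proxy directly. kNotNullOrUndefined: the receiver came from a
  // property load, so at most a ToObject wrap is needed, never a null test.
  return hint == ConvertReceiverModeModel::kAny;
}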
@@ -2432,8 +2465,8 @@ void AstGraphBuilder::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Resolve callee and receiver for a potential direct eval call. This block
- // will mutate the callee and receiver values pushed onto the environment.
+ // Resolve callee for a potential direct eval call. This block will mutate the
+ // callee value pushed onto the environment.
if (possibly_eval && args->length() > 0) {
int arg_count = args->length();
@@ -2442,7 +2475,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* source = environment()->Peek(arg_count - 1);
// Create node to ask for help resolving potential eval call. This will
- // provide a fully resolved callee and the corresponding receiver.
+ // provide a fully resolved callee to patch into the environment.
Node* function = GetFunctionClosure();
Node* language = jsgraph()->Constant(language_mode());
Node* position = jsgraph()->Constant(current_scope()->start_position());
@@ -2459,10 +2492,13 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Create node to perform the function call.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
- const Operator* call = javascript()->CallFunction(args->length() + 2, flags,
- language_mode(), feedback);
+ const Operator* call = javascript()->CallFunction(
+ args->length() + 2, language_mode(), feedback, receiver_hint);
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ environment()->Push(value->InputAt(0)); // The callee passed to the call.
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ environment()->Drop(1);
ast_context()->ProduceValue(value);
}
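// Aside (illustrative sketch, not part of this patch): after VisitForValues
// the operand stack holds [callee, receiver, arg0 .. argN-1] with argN-1 on
// top, so Peek(arg_count - 1) in the eval-resolution block above reaches
// arg0, the source string a potential direct eval call has to inspect.
// A self-contained model of the stack access:
#include <vector>
int PeekModel(const std::vector<int>& stack, int depth) {
  return stack[stack.size() - 1 - depth];  // depth 0 is the top of stack
}
// With stack {callee, receiver, arg0, arg1}: PeekModel(stack, 1) == arg0.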
@@ -2471,27 +2507,27 @@ void AstGraphBuilder::VisitCallSuper(Call* expr) {
SuperCallReference* super = expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super);
- // Prepare the callee to the super call. The super constructor is stored as
- // the prototype of the constructor we are currently executing.
+  // Prepare the callee of the super call.
VisitForValue(super->this_function_var());
Node* this_function = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kGetPrototype, 1);
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
Node* super_function = NewNode(op, this_function);
- // TODO(mstarzinger): This probably needs a proper bailout id.
- PrepareFrameState(super_function, BailoutId::None());
environment()->Push(super_function);
// Evaluate all arguments to the super call.
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Original constructor is loaded from the {new.target} variable.
+ // The new target is loaded from the {new.target} variable.
VisitForValue(super->new_target_var());
// Create node to perform the super call.
- const Operator* call = javascript()->CallConstruct(args->length() + 2);
+ const Operator* call =
+ javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
+ FrameStateBeforeAndAfter states(this, super->new_target_var()->id());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
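// Aside (illustrative sketch, not part of this patch): the super constructor
// is the prototype of the function currently executing, i.e. roughly
// Object.getPrototypeOf(this_function) in JS terms. A minimal model:
struct FunctionModel { const FunctionModel* proto; };
const FunctionModel* GetSuperConstructorModel(const FunctionModel* fn) {
  return fn->proto;  // for 'class B extends A', B's prototype parent is A
}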
@@ -2503,13 +2539,20 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Original constructor is the same as the callee.
+ // The baseline compiler doesn't push the new.target, so we need to record
+ // the frame state before the push.
+ FrameStateBeforeAndAfter states(
+ this, args->is_empty() ? expr->expression()->id() : args->last()->id());
+
+ // The new target is the same as the callee.
environment()->Push(environment()->Peek(args->length()));
// Create node to perform the construct call.
- const Operator* call = javascript()->CallConstruct(args->length() + 2);
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
+ const Operator* call =
+ javascript()->CallConstruct(args->length() + 2, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
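// Aside (illustrative sketch, not part of this patch): for an ordinary
// 'new f(...)' the new.target is f itself, so the builder re-pushes the
// callee that sits beneath the arguments. A minimal model of the stack move:
#include <vector>
void PushNewTargetModel(std::vector<int>& stack, int arg_count) {
  // [.. f arg0 .. argN-1]  ->  [.. f arg0 .. argN-1 f]
  stack.push_back(stack[stack.size() - 1 - arg_count]);
}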
@@ -2517,7 +2560,6 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// The callee and the receiver both have to be pushed onto the operand stack
// before arguments are being evaluated.
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
Node* callee_value = BuildLoadNativeContextField(expr->context_index());
Node* receiver_value = jsgraph()->UndefinedConstant();
@@ -2530,9 +2572,10 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// Create node to perform the JS runtime call.
const Operator* call =
- javascript()->CallFunction(args->length() + 2, flags, language_mode());
+ javascript()->CallFunction(args->length() + 2, language_mode());
+ FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2592,10 +2635,12 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
- if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
+ if (is_postfix && assign_type != VARIABLE) {
+ environment()->Push(jsgraph()->ZeroConstant());
+ }
// Evaluate LHS expression and get old value.
- Node* old_value = NULL;
+ Node* old_value = nullptr;
int stack_depth = -1;
switch (assign_type) {
case VARIABLE: {
@@ -2675,20 +2720,27 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
OutputFrameStateCombine::Push());
}
- // TODO(titzer): combine this framestate with the above?
- FrameStateBeforeAndAfter store_states(this, assign_type == KEYED_PROPERTY
- ? expr->ToNumberId()
- : BailoutId::None());
+ // Create a proper eager frame state for the stores.
+ environment()->Push(old_value);
+ FrameStateBeforeAndAfter store_states(this, expr->ToNumberId());
+ old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
- if (is_postfix) environment()->Poke(stack_depth, old_value);
+ if (is_postfix) {
+ if (assign_type != VARIABLE) {
+ environment()->Poke(stack_depth, old_value);
+ } else {
+ environment()->Push(old_value);
+ }
+ }
// Create node to perform +1/-1 operation.
Node* value;
{
+ // TODO(bmeurer): Cleanup this feedback/bailout mess!
FrameStateBeforeAndAfter states(this, BailoutId::None());
- value =
- BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+ value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
+ expr->binary_op(), TypeFeedbackId::None());
// This should never deoptimize outside strong mode, because the operand
// has already been converted to a number before this point.
states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
@@ -2710,8 +2762,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback,
- expr->CountStoreFeedbackId());
+ Node* store = BuildNamedStore(object, name, value, feedback);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2721,8 +2772,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback,
- expr->CountStoreFeedbackId());
+ Node* store = BuildKeyedStore(object, key, value, feedback);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2733,8 +2783,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value,
- expr->CountStoreFeedbackId());
+ Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2745,8 +2794,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value,
- expr->CountStoreFeedbackId());
+ Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
environment()->Push(value);
store_states.AddToNode(store, expr->AssignmentId(),
OutputFrameStateCombine::Ignore());
@@ -2775,7 +2823,8 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->op());
+ Node* value = BuildBinaryOp(left, right, expr->op(),
+ expr->BinaryOperationFeedbackId());
states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2817,7 +2866,7 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
op = javascript()->HasProperty();
break;
default:
- op = NULL;
+ op = nullptr;
UNREACHABLE();
}
VisitForValue(expr->left());
@@ -2889,7 +2938,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
- if (stmt == NULL) return;
+ if (stmt == nullptr) return;
Visit(stmt);
}
@@ -2969,8 +3018,9 @@ void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
VisitForValue(expr->expression());
Node* operand = environment()->Pop();
- // TODO(mstarzinger): Possible optimization when we are in effect context.
- Node* value = NewNode(javascript()->UnaryNot(), operand);
+ Node* input = BuildToBoolean(operand, expr->expression()->test_id());
+ Node* value = NewNode(common()->Select(MachineRepresentation::kTagged), input,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
ast_context()->ProduceValue(value);
}
@@ -2987,7 +3037,7 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
IfBuilder compare_if(this);
VisitForValue(expr->left());
Node* condition = environment()->Top();
- compare_if.If(BuildToBoolean(condition));
+ compare_if.If(BuildToBoolean(condition, expr->left()->test_id()));
compare_if.Then();
if (is_logical_and) {
environment()->Pop();
@@ -3017,11 +3067,28 @@ LanguageMode AstGraphBuilder::language_mode() const {
VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
- FeedbackVectorICSlot slot) const {
+ FeedbackVectorSlot slot) const {
return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
}
+void AstGraphBuilder::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
+namespace {
+
+// Limit of the context chain length up to which an inline check is possible.
+const int kMaxCheckDepth = 30;
+
+// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
+const uint32_t kFullCheckRequired = -1;
+
+} // namespace
+
+
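// Aside (illustrative sketch, not part of this patch): the bitsets computed
// below use bit d to mean "the context at chain depth d may carry an
// extension object and must be checked"; anything deeper than kMaxCheckDepth
// degrades to the all-ones kFullCheckRequired sentinel.
#include <cstdint>
uint32_t AddCheckDepthModel(uint32_t bits, int depth) {
  if (depth > 30) return static_cast<uint32_t>(-1);  // kFullCheckRequired
  return bits | (1u << depth);  // e.g. depths {0, 2} encode as 0b101
}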
uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
bool found_eval_scope = false;
@@ -3034,9 +3101,7 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
if (s->is_eval_scope()) found_eval_scope = true;
if (!s->calls_sloppy_eval() && !found_eval_scope) continue;
int depth = current_scope()->ContextChainLength(s);
- if (depth > DynamicGlobalAccess::kMaxCheckDepth) {
- return DynamicGlobalAccess::kFullCheckRequired;
- }
+ if (depth > kMaxCheckDepth) return kFullCheckRequired;
check_depths |= 1 << depth;
}
return check_depths;
@@ -3050,9 +3115,7 @@ uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
if (s->num_heap_slots() <= 0) continue;
if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
int depth = current_scope()->ContextChainLength(s);
- if (depth > DynamicContextAccess::kMaxCheckDepth) {
- return DynamicContextAccess::kFullCheckRequired;
- }
+ if (depth > kMaxCheckDepth) return kFullCheckRequired;
check_depths |= 1 << depth;
if (s == variable->scope()) break;
}
@@ -3071,37 +3134,13 @@ Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
}
-Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object). Otherwise there is nothing left to do here.
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- IfBuilder receiver_check(this);
- Node* undefined = jsgraph()->UndefinedConstant();
- Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
- receiver_check.If(check);
- receiver_check.Then();
- Node* proxy = BuildLoadGlobalProxy();
- environment()->Push(proxy);
- receiver_check.Else();
- environment()->Push(receiver);
- receiver_check.End();
- return environment()->Pop();
- } else {
- return receiver;
- }
-}
-
-
-Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context) {
+Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
Scope* scope = info()->scope();
- Node* closure = GetFunctionClosure();
// Allocate a new local context.
- Node* local_context =
- scope->is_script_scope()
- ? BuildLocalScriptContext(scope)
- : NewNode(javascript()->CreateFunctionContext(), closure);
+ Node* local_context = scope->is_script_scope()
+ ? BuildLocalScriptContext(scope)
+ : BuildLocalFunctionContext(scope);
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
Node* receiver = environment()->RawParameterLookup(0);
@@ -3128,6 +3167,18 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context) {
}
+Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
+ DCHECK(scope->is_function_scope());
+
+ // Allocate a new local context.
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ const Operator* op = javascript()->CreateFunctionContext(slot_count);
+ Node* local_context = NewNode(op, GetFunctionClosure());
+
+ return local_context;
+}
+
+
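// Aside (illustrative sketch, not part of this patch; the size of the fixed
// context header is an assumption): only the slots beyond the fixed header
// are passed to the operator, e.g. with num_heap_slots() of 10 and a header
// of 4 slots the CreateFunctionContext operator is created for 6 user slots.
int UserSlotCountModel(int num_heap_slots, int min_context_slots) {
  return num_heap_slots - min_context_slots;
}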
Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
DCHECK(scope->is_script_scope());
@@ -3135,7 +3186,8 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
const Operator* op = javascript()->CreateScriptContext(scope_info);
Node* local_context = NewNode(op, GetFunctionClosure());
- PrepareFrameState(local_context, BailoutId::Prologue());
+ PrepareFrameState(local_context, BailoutId::ScriptContext(),
+ OutputFrameStateCombine::Push());
return local_context;
}
@@ -3154,7 +3206,7 @@ Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
- if (arguments == NULL) return NULL;
+ if (arguments == nullptr) return nullptr;
// Allocate and initialize a new arguments object.
CreateArgumentsParameters::Type type =
@@ -3175,6 +3227,25 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
}
+Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
+ if (rest == nullptr) return nullptr;
+
+ // Allocate and initialize a new arguments object.
+ CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
+ const Operator* op = javascript()->CreateArguments(type, index);
+ Node* object = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(object, BailoutId::None());
+
+ // Assign the object to the {rest} variable. This should never lazy
+  // deopt, so it is fine to send an invalid bailout id.
+ DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
+ BailoutId::None(), states);
+ return object;
+}
+
+
Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
if (this_function_var == nullptr) return nullptr;
@@ -3184,7 +3255,7 @@ Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
// Assign the object to the {.this_function} variable. This should never lazy
// deopt, so it is fine to send an invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(this_function_var, this_function, Token::INIT_CONST,
+ BuildVariableAssignment(this_function_var, this_function, Token::INIT,
VectorSlotPair(), BailoutId::None(), states);
return this_function;
}
@@ -3193,16 +3264,14 @@ Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
if (new_target_var == nullptr) return nullptr;
- // Retrieve the original constructor in case we are called as a constructor.
- const Operator* op =
- javascript()->CallRuntime(Runtime::kGetOriginalConstructor, 0);
- Node* object = NewNode(op);
+ // Retrieve the new target we were called with.
+ Node* object = GetNewTarget();
// Assign the object to the {new.target} variable. This should never lazy
// deopt, so it is fine to send an invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(new_target_var, object, Token::INIT_CONST,
- VectorSlotPair(), BailoutId::None(), states);
+ BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
+ BailoutId::None(), states);
return object;
}
@@ -3211,8 +3280,9 @@ Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
Node* not_hole) {
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
- return NewNode(common()->Select(kMachAnyTagged, BranchHint::kFalse), check,
- for_hole, not_hole);
+ return NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, for_hole, not_hole);
}
@@ -3279,22 +3349,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
- Node* script_context = current_context();
- int slot_index = -1;
- if (variable->index() > 0) {
- DCHECK(variable->IsStaticGlobalObjectProperty());
- slot_index = variable->index();
- int depth = current_scope()->ContextChainLength(variable->scope());
- if (depth > 0) {
- const Operator* op = javascript()->LoadContext(
- depth - 1, Context::PREVIOUS_INDEX, true);
- script_context = NewNode(op, current_context());
- }
- }
- Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* value = BuildGlobalLoad(script_context, global, name, feedback,
- typeof_mode, slot_index);
+ if (Node* node = TryLoadGlobalConstant(name)) return node;
+ Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
states.AddToNode(value, bailout_id, combine);
return value;
}
@@ -3342,45 +3399,20 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
- Node* value = jsgraph()->TheHoleConstant();
Handle<String> name = variable->name();
- if (mode == DYNAMIC_GLOBAL) {
- uint32_t check_bitset = ComputeBitsetForDynamicGlobal(variable);
- const Operator* op = javascript()->LoadDynamicGlobal(
- name, check_bitset, feedback, typeof_mode);
- value = NewNode(op, BuildLoadFeedbackVector(), current_context());
- states.AddToNode(value, bailout_id, combine);
- } else if (mode == DYNAMIC_LOCAL) {
- Variable* local = variable->local_if_not_shadowed();
- DCHECK(local->location() ==
- VariableLocation::CONTEXT); // Must be context.
- int depth = current_scope()->ContextChainLength(local->scope());
- uint32_t check_bitset = ComputeBitsetForDynamicContext(variable);
- const Operator* op = javascript()->LoadDynamicContext(
- name, check_bitset, depth, local->index());
- value = NewNode(op, current_context());
- PrepareFrameState(value, bailout_id, combine);
- VariableMode local_mode = local->mode();
- if (local_mode == CONST_LEGACY) {
- // Perform check for uninitialized legacy const variables.
- Node* undefined = jsgraph()->UndefinedConstant();
- value = BuildHoleCheckSilent(value, undefined, value);
- } else if (local_mode == LET || local_mode == CONST) {
- // Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThenThrow(value, local, value, bailout_id);
- }
- } else if (mode == DYNAMIC) {
- uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
- const Operator* op = javascript()->LoadDynamicGlobal(
- name, check_bitset, feedback, typeof_mode);
- value = NewNode(op, BuildLoadFeedbackVector(), current_context());
- states.AddToNode(value, bailout_id, combine);
+ if (Node* node =
+ TryLoadDynamicVariable(variable, name, bailout_id, states,
+ feedback, combine, typeof_mode)) {
+ return node;
}
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(value, bailout_id, combine);
return value;
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
@@ -3415,7 +3447,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
@@ -3429,36 +3461,21 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
- Node* script_context = current_context();
- int slot_index = -1;
- if (variable->index() > 0) {
- DCHECK(variable->IsStaticGlobalObjectProperty());
- slot_index = variable->index();
- int depth = current_scope()->ContextChainLength(variable->scope());
- if (depth > 0) {
- const Operator* op = javascript()->LoadContext(
- depth - 1, Context::PREVIOUS_INDEX, true);
- script_context = NewNode(op, current_context());
- }
- }
- Node* global = BuildLoadGlobalObject();
Handle<Name> name = variable->name();
- Node* store =
- BuildGlobalStore(script_context, global, name, value, feedback,
- TypeFeedbackId::None(), slot_index);
+ Node* store = BuildGlobalStore(name, value, feedback);
states.AddToNode(store, bailout_id, combine);
return store;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
// Local var, const, or let variable.
- if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ if (mode == CONST_LEGACY && op == Token::INIT) {
// Perform an initialization check for legacy const variables.
Node* current = environment()->Lookup(variable);
if (current->op() != the_hole->op()) {
value = BuildHoleCheckSilent(current, value, current);
}
- } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3466,15 +3483,21 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op != Token::INIT_LET) {
+ } else if (mode == LET && op == Token::INIT) {
+ // No initialization check needed because scoping guarantees it. Note
+ // that we still perform a lookup to keep the variable live, because
+ // baseline code might contain debug code that inspects the variable.
+ Node* current = environment()->Lookup(variable);
+ CHECK_NOT_NULL(current);
+ } else if (mode == LET && op != Token::INIT) {
// Perform an initialization check for let declared variables.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- value = BuildThrowReferenceError(variable, bailout_id);
+ return BuildThrowReferenceError(variable, bailout_id);
} else if (current->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op == Token::INIT_CONST) {
+ } else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
// Note that the {this} variable is the only const variable able to
// trigger bind operations outside the TDZ, via {super} calls.
@@ -3482,7 +3505,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (current->op() != the_hole->op() && variable->is_this()) {
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op != Token::INIT_CONST) {
+ } else if (mode == CONST && op != Token::INIT) {
// Assignment to const is exception in all modes.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
@@ -3497,13 +3520,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ if (mode == CONST_LEGACY && op == Token::INIT) {
// Perform an initialization check for legacy const variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckSilent(current, value, current);
- } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3511,13 +3534,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op != Token::INIT_LET) {
+ } else if (mode == LET && op != Token::INIT) {
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
- } else if (mode == CONST && op == Token::INIT_CONST) {
+ } else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
// Note that the {this} variable is the only const variable able to
// trigger bind operations outside the TDZ, via {super} calls.
@@ -3527,7 +3550,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op != Token::INIT_CONST) {
+ } else if (mode == CONST && op != Token::INIT) {
// Assignment to const is exception in all modes.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
@@ -3552,67 +3575,41 @@ Node* AstGraphBuilder::BuildVariableAssignment(
}
}
UNREACHABLE();
- return NULL;
-}
-
-
-static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
- FeedbackVectorICSlot slot) {
- if (js_type_feedback) {
- js_type_feedback->Record(node, slot);
- }
- return node;
-}
-
-
-static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
- TypeFeedbackId id) {
- if (js_type_feedback) {
- js_type_feedback->Record(node, id);
- }
- return node;
+ return nullptr;
}
Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
const VectorSlotPair& feedback) {
- const Operator* op = javascript()->LoadProperty(feedback, language_mode());
+ const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
- const Operator* op = javascript()->LoadNamed(name, feedback, language_mode());
+ const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
Node* node = NewNode(op, object, BuildLoadFeedbackVector());
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
- const VectorSlotPair& feedback,
- TypeFeedbackId id) {
+ const VectorSlotPair& feedback) {
const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
- if (FLAG_vector_stores) {
- return Record(js_type_feedback_, node, feedback.slot());
- }
- return Record(js_type_feedback_, node, id);
+ return node;
}
Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
Node* value,
- const VectorSlotPair& feedback,
- TypeFeedbackId id) {
+ const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreNamed(language_mode(), name, feedback);
Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
- if (FLAG_vector_stores) {
- return Record(js_type_feedback_, node, feedback.slot());
- }
- return Record(js_type_feedback_, node, id);
+ return node;
}
@@ -3623,7 +3620,7 @@ Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
Node* language = jsgraph()->Constant(language_mode());
const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
Node* node = NewNode(op, receiver, home_object, name_node, language);
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
@@ -3634,97 +3631,78 @@ Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
const Operator* op =
javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
Node* node = NewNode(op, receiver, home_object, key, language);
- return Record(js_type_feedback_, node, feedback.slot());
+ return node;
}
Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
- Node* key, Node* value,
- TypeFeedbackId id) {
+ Node* key, Node* value) {
Runtime::FunctionId function_id = is_strict(language_mode())
? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy;
const Operator* op = javascript()->CallRuntime(function_id, 4);
Node* node = NewNode(op, receiver, home_object, key, value);
- return Record(js_type_feedback_, node, id);
+ return node;
}
Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value,
- TypeFeedbackId id) {
+ Handle<Name> name, Node* value) {
Node* name_node = jsgraph()->Constant(name);
Runtime::FunctionId function_id = is_strict(language_mode())
? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy;
const Operator* op = javascript()->CallRuntime(function_id, 4);
Node* node = NewNode(op, receiver, home_object, name_node, value);
- return Record(js_type_feedback_, node, id);
+ return node;
}
-Node* AstGraphBuilder::BuildGlobalLoad(Node* script_context, Node* global,
- Handle<Name> name,
+Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
const VectorSlotPair& feedback,
- TypeofMode typeof_mode, int slot_index) {
- const Operator* op =
- javascript()->LoadGlobal(name, feedback, typeof_mode, slot_index);
- Node* node = NewNode(op, script_context, global, BuildLoadFeedbackVector());
- return Record(js_type_feedback_, node, feedback.slot());
+ TypeofMode typeof_mode) {
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+ Node* node = NewNode(op, BuildLoadFeedbackVector());
+ return node;
}
-Node* AstGraphBuilder::BuildGlobalStore(Node* script_context, Node* global,
- Handle<Name> name, Node* value,
- const VectorSlotPair& feedback,
- TypeFeedbackId id, int slot_index) {
+Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback) {
const Operator* op =
- javascript()->StoreGlobal(language_mode(), name, feedback, slot_index);
- Node* node =
- NewNode(op, script_context, global, value, BuildLoadFeedbackVector());
- if (FLAG_vector_stores) {
- return Record(js_type_feedback_, node, feedback.slot());
- }
- return Record(js_type_feedback_, node, id);
+ javascript()->StoreGlobal(language_mode(), name, feedback);
+ Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ return node;
}
Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
- return NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
}
Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
- return graph()->NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ object,
jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
graph()->start(), graph()->start());
}
Node* AstGraphBuilder::BuildLoadGlobalObject() {
- const Operator* load_op =
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
- return NewNode(load_op, GetFunctionContext());
+ return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
}
Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
- Node* global = BuildLoadGlobalObject();
- Node* native_context =
- BuildLoadObjectField(global, GlobalObject::kNativeContextOffset);
+ const Operator* op =
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
+ Node* native_context = NewNode(op, current_context());
return NewNode(javascript()->LoadContext(0, index, true), native_context);
}
-Node* AstGraphBuilder::BuildLoadGlobalProxy() {
- Node* global = BuildLoadGlobalObject();
- Node* proxy =
- BuildLoadObjectField(global, JSGlobalObject::kGlobalProxyOffset);
- return proxy;
-}
-
-
Node* AstGraphBuilder::BuildLoadFeedbackVector() {
if (!feedback_vector_.is_set()) {
Node* closure = GetFunctionClosure();
@@ -3738,60 +3716,19 @@ Node* AstGraphBuilder::BuildLoadFeedbackVector() {
}
-Node* AstGraphBuilder::BuildLoadExternal(ExternalReference reference,
- MachineType type) {
- return NewNode(jsgraph()->machine()->Load(type),
- jsgraph()->ExternalConstant(reference),
- jsgraph()->IntPtrConstant(0));
-}
-
-
-Node* AstGraphBuilder::BuildStoreExternal(ExternalReference reference,
- MachineType type, Node* value) {
- StoreRepresentation representation(type, kNoWriteBarrier);
- return NewNode(jsgraph()->machine()->Store(representation),
- jsgraph()->ExternalConstant(reference),
- jsgraph()->IntPtrConstant(0), value);
-}
-
-
-Node* AstGraphBuilder::BuildToBoolean(Node* input) {
- // TODO(bmeurer, mstarzinger): Refactor this into a separate optimization
- // method.
- switch (input->opcode()) {
- case IrOpcode::kNumberConstant: {
- NumberMatcher m(input);
- return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
- }
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> object = HeapObjectMatcher(input).Value();
- return jsgraph_->BooleanConstant(object->BooleanValue());
- }
- case IrOpcode::kJSEqual:
- case IrOpcode::kJSNotEqual:
- case IrOpcode::kJSStrictEqual:
- case IrOpcode::kJSStrictNotEqual:
- case IrOpcode::kJSLessThan:
- case IrOpcode::kJSLessThanOrEqual:
- case IrOpcode::kJSGreaterThan:
- case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSUnaryNot:
- case IrOpcode::kJSToBoolean:
- case IrOpcode::kJSDeleteProperty:
- case IrOpcode::kJSHasProperty:
- case IrOpcode::kJSInstanceOf:
- return input;
- default:
- break;
+Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
+ if (Node* node = TryFastToBoolean(input)) return node;
+ ToBooleanHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
+ hints = ToBooleanHint::kAny;
}
- return NewNode(javascript()->ToBoolean(), input);
+ return NewNode(javascript()->ToBoolean(hints), input);
}
Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
- // TODO(turbofan): Possible optimization is to NOP on name constants. But the
- // same caveat as with BuildToBoolean applies, and it should be factored out
- // into a JSOperatorReducer.
+ if (Node* node = TryFastToName(input)) return node;
Node* name = NewNode(javascript()->ToName(), input);
PrepareFrameState(name, bailout_id);
return name;
@@ -3814,8 +3751,7 @@ Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(slot_number));
- Node* store = BuildNamedStore(value, name, home_object, feedback,
- TypeFeedbackId::None());
+ Node* store = BuildNamedStore(value, name, home_object, feedback);
states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
return store;
}
@@ -3892,50 +3828,208 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
}
-Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
+ TypeFeedbackId feedback_id) {
const Operator* js_op;
+ BinaryOperationHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetBinaryOperationHints(feedback_id, &hints)) {
+ hints = BinaryOperationHints::Any();
+ }
switch (op) {
case Token::BIT_OR:
- js_op = javascript()->BitwiseOr(language_mode());
+ js_op = javascript()->BitwiseOr(language_mode(), hints);
break;
case Token::BIT_AND:
- js_op = javascript()->BitwiseAnd(language_mode());
+ js_op = javascript()->BitwiseAnd(language_mode(), hints);
break;
case Token::BIT_XOR:
- js_op = javascript()->BitwiseXor(language_mode());
+ js_op = javascript()->BitwiseXor(language_mode(), hints);
break;
case Token::SHL:
- js_op = javascript()->ShiftLeft(language_mode());
+ js_op = javascript()->ShiftLeft(language_mode(), hints);
break;
case Token::SAR:
- js_op = javascript()->ShiftRight(language_mode());
+ js_op = javascript()->ShiftRight(language_mode(), hints);
break;
case Token::SHR:
- js_op = javascript()->ShiftRightLogical(language_mode());
+ js_op = javascript()->ShiftRightLogical(language_mode(), hints);
break;
case Token::ADD:
- js_op = javascript()->Add(language_mode());
+ js_op = javascript()->Add(language_mode(), hints);
break;
case Token::SUB:
- js_op = javascript()->Subtract(language_mode());
+ js_op = javascript()->Subtract(language_mode(), hints);
break;
case Token::MUL:
- js_op = javascript()->Multiply(language_mode());
+ js_op = javascript()->Multiply(language_mode(), hints);
break;
case Token::DIV:
- js_op = javascript()->Divide(language_mode());
+ js_op = javascript()->Divide(language_mode(), hints);
break;
case Token::MOD:
- js_op = javascript()->Modulus(language_mode());
+ js_op = javascript()->Modulus(language_mode(), hints);
break;
default:
UNREACHABLE();
- js_op = NULL;
+ js_op = nullptr;
}
return NewNode(js_op, left, right);
}
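// Aside (illustrative sketch, not part of this patch; the enum below is
// hypothetical): the fallback above guarantees that missing type feedback
// degrades to the fully generic hint, so disabling type-hint analysis can
// only cost performance, never correctness.
enum class BinaryHintModel { kAny, kSignedSmall, kNumber };
BinaryHintModel HintOrAnyModel(bool has_feedback, BinaryHintModel recorded) {
  return has_feedback ? recorded : BinaryHintModel::kAny;
}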
+Node* AstGraphBuilder::TryLoadGlobalConstant(Handle<Name> name) {
+ // Optimize global constants like "undefined", "Infinity", and "NaN".
+ Handle<Object> constant_value = isolate()->factory()->GlobalConstantFor(name);
+ if (!constant_value.is_null()) return jsgraph()->Constant(constant_value);
+ return nullptr;
+}
+
+
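// Aside (illustrative sketch, not part of this patch): GlobalConstantFor is
// assumed to yield a value only for the immutable globals "undefined",
// "NaN" and "Infinity", so e.g. a load of the global "NaN" folds straight
// to a number constant and needs no IC or feedback slot. A minimal model:
#include <cmath>
#include <string>
bool TryGlobalConstantModel(const std::string& name, double* out) {
  if (name == "NaN") { *out = std::nan(""); return true; }
  if (name == "Infinity") { *out = HUGE_VAL; return true; }
  return false;  // "undefined" folds too, but has no double encoding here
}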
+Node* AstGraphBuilder::TryLoadDynamicVariable(
+ Variable* variable, Handle<String> name, BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states, const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine, TypeofMode typeof_mode) {
+ VariableMode mode = variable->mode();
+
+ if (mode == DYNAMIC_GLOBAL) {
+ uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
+ if (bitset == kFullCheckRequired) return nullptr;
+
+ // We are using two blocks to model fast and slow cases.
+ BlockBuilder fast_block(this);
+ BlockBuilder slow_block(this);
+ environment()->Push(jsgraph()->TheHoleConstant());
+ slow_block.BeginBlock();
+ environment()->Pop();
+ fast_block.BeginBlock();
+
+    // Check whether the fast path applies by looking for any extension
+    // object that might shadow the optimistic declaration.
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ current_context());
+ Node* check = NewNode(javascript()->StrictEqual(), load,
+ jsgraph()->TheHoleConstant());
+ fast_block.BreakUnless(check, BranchHint::kTrue);
+ }
+
+ // Fast case, because variable is not shadowed. Perform global slot load.
+ Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
+ states.AddToNode(fast, bailout_id, combine);
+ environment()->Push(fast);
+ slow_block.Break();
+ environment()->Pop();
+ fast_block.EndBlock();
+
+    // Slow case, because the variable is potentially shadowed. Perform dynamic lookup.
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(slow, bailout_id, combine);
+ environment()->Push(slow);
+ slow_block.EndBlock();
+
+ return environment()->Pop();
+ }
+
+ if (mode == DYNAMIC_LOCAL) {
+ uint32_t bitset = ComputeBitsetForDynamicContext(variable);
+ if (bitset == kFullCheckRequired) return nullptr;
+
+ // We are using two blocks to model fast and slow cases.
+ BlockBuilder fast_block(this);
+ BlockBuilder slow_block(this);
+ environment()->Push(jsgraph()->TheHoleConstant());
+ slow_block.BeginBlock();
+ environment()->Pop();
+ fast_block.BeginBlock();
+
+    // Check whether the fast path applies by looking for any extension
+    // object that might shadow the optimistic declaration.
+ for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
+ if ((bitset & 1) == 0) continue;
+ Node* load = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ current_context());
+ Node* check = NewNode(javascript()->StrictEqual(), load,
+ jsgraph()->TheHoleConstant());
+ fast_block.BreakUnless(check, BranchHint::kTrue);
+ }
+
+ // Fast case, because variable is not shadowed. Perform context slot load.
+ Variable* local = variable->local_if_not_shadowed();
+ DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
+ Node* fast = BuildVariableLoad(local, bailout_id, states, feedback, combine,
+ typeof_mode);
+ environment()->Push(fast);
+ slow_block.Break();
+ environment()->Pop();
+ fast_block.EndBlock();
+
+    // Slow case, because the variable is potentially shadowed. Perform dynamic lookup.
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ states.AddToNode(slow, bailout_id, combine);
+ environment()->Push(slow);
+ slow_block.EndBlock();
+
+ return environment()->Pop();
+ }
+
+ return nullptr;
+}
+
+
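// Aside (illustrative sketch, not part of this patch): the fast path above
// applies only while every flagged context level still has an empty (hole)
// extension slot, i.e. nothing introduced by 'with' or a sloppy 'eval'
// shadows the variable. A self-contained model of that test:
#include <cstdint>
#include <vector>
bool FastPathAppliesModel(const std::vector<bool>& has_extension,
                          uint32_t bitset) {
  for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
    if ((bitset & 1u) && has_extension[depth]) return false;  // shadowed
  }
  return true;  // safe to use the direct global or context slot load
}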
+Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kNumberConstant: {
+ NumberMatcher m(input);
+ return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
+ }
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
+ return jsgraph_->BooleanConstant(object->BooleanValue());
+ }
+ case IrOpcode::kJSEqual:
+ case IrOpcode::kJSNotEqual:
+ case IrOpcode::kJSStrictEqual:
+ case IrOpcode::kJSStrictNotEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSDeleteProperty:
+ case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSInstanceOf:
+ return input;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+
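// Aside (illustrative sketch, not part of this patch): the NumberConstant
// fold above follows the JS truthiness rule that only 0, -0 and NaN are
// falsy numbers; the comparison and test operators pass through unchanged
// because they already produce booleans. A model of the number case:
#include <cmath>
bool NumberToBooleanModel(double value) {
  return value != 0.0 && !std::isnan(value);  // -0.0 == 0.0, so -0 is falsy
}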
+Node* AstGraphBuilder::TryFastToName(Node* input) {
+ switch (input->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
+ if (object->IsName()) return input;
+ break;
+ }
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSTypeOf:
+ return input;
+ default:
+ break;
+ }
+ return nullptr;
+}
+
+
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
info()->set_osr_expr_stack_height(std::max(
@@ -3961,7 +4055,7 @@ void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
- if (loop_assignment_analysis_ == NULL) return NULL;
+ if (loop_assignment_analysis_ == nullptr) return nullptr;
return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
}
@@ -3988,7 +4082,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK(op->ControlInputCount() < 2);
DCHECK(op->EffectInputCount() < 2);
- Node* result = NULL;
+ Node* result = nullptr;
if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
@@ -4195,7 +4289,7 @@ void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
Node* AstGraphBuilder::NewPhi(int count, Node* input, Node* control) {
- const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kTagged, count);
Node** buffer = EnsureInputBufferSize(count + 1);
MemsetPointer(buffer, input, count);
buffer[count] = control;
@@ -4257,7 +4351,8 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
NodeProperties::GetControlInput(value) == control) {
// Phi already exists, add input.
value->InsertInput(graph_zone(), inputs - 1, other);
- NodeProperties::ChangeOp(value, common()->Phi(kMachAnyTagged, inputs));
+ NodeProperties::ChangeOp(
+ value, common()->Phi(MachineRepresentation::kTagged, inputs));
} else if (value != other) {
// Phi does not exist yet, introduce one.
value = NewPhi(inputs, value, control);
diff --git a/chromium/v8/src/compiler/ast-graph-builder.h b/chromium/v8/src/compiler/ast-graph-builder.h
index 8b90f072a02..3b6302d3ddc 100644
--- a/chromium/v8/src/compiler/ast-graph-builder.h
+++ b/chromium/v8/src/compiler/ast-graph-builder.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
#define V8_COMPILER_AST_GRAPH_BUILDER_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
@@ -13,16 +13,20 @@
namespace v8 {
namespace internal {
+// Forward declarations.
class BitVector;
+
namespace compiler {
+// Forward declarations.
class ControlBuilder;
class Graph;
-class JSTypeFeedbackTable;
class LoopAssignmentAnalysis;
class LoopBuilder;
class Node;
+class TypeHintAnalysis;
+
// The AstGraphBuilder produces a high-level IR graph, based on an
// underlying AST. The produced graph can either be compiled into a
@@ -31,8 +35,8 @@ class Node;
class AstGraphBuilder : public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- LoopAssignmentAnalysis* loop_assignment = NULL,
- JSTypeFeedbackTable* js_type_feedback = NULL);
+ LoopAssignmentAnalysis* loop_assignment = nullptr,
+ TypeHintAnalysis* type_hint_analysis = nullptr);
// Creates a graph by visiting the entire AST.
bool CreateGraph(bool stack_check = true);
@@ -70,6 +74,7 @@ class AstGraphBuilder : public AstVisitor {
class FrameStateBeforeAndAfter;
friend class ControlBuilder;
+ Isolate* isolate_;
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
@@ -88,6 +93,7 @@ class AstGraphBuilder : public AstVisitor {
// Nodes representing values in the activation record.
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
+ SetOncePointer<Node> new_target_;
// Tracks how many try-blocks are currently entered.
int try_catch_nesting_level_;
@@ -106,6 +112,9 @@ class AstGraphBuilder : public AstVisitor {
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
+ // Result of type hint analysis performed before graph creation.
+ TypeHintAnalysis* type_hint_analysis_;
+
// Cache for StateValues nodes for frame states.
StateValuesCache state_values_cache_;
@@ -115,9 +124,6 @@ class AstGraphBuilder : public AstVisitor {
// Function info for frame state construction.
const FrameStateFunctionInfo* const frame_state_function_info_;
- // Type feedback table.
- JSTypeFeedbackTable* js_type_feedback_;
-
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -129,6 +135,7 @@ class AstGraphBuilder : public AstVisitor {
ContextScope* execution_context() const { return execution_context_; }
CommonOperatorBuilder* common() const { return jsgraph_->common(); }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return isolate_; }
LanguageMode language_mode() const;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
@@ -150,16 +157,19 @@ class AstGraphBuilder : public AstVisitor {
// Create the main graph body by visiting the AST.
void CreateGraphBody(bool stack_check);
- // Get or create the node that represents the outer function closure.
+ // Get or create the node that represents the incoming function closure.
Node* GetFunctionClosureForContext();
Node* GetFunctionClosure();
- // Get or create the node that represents the outer function context.
+ // Get or create the node that represents the incoming function context.
Node* GetFunctionContext();
+ // Get or create the node that represents the incoming new target value.
+ Node* GetNewTarget();
+
// Node creation helpers.
Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
}
Node* NewNode(const Operator* op, Node* n1) {
@@ -233,7 +243,7 @@ class AstGraphBuilder : public AstVisitor {
Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
- VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
+ VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
// Determine which contexts need to be checked for extension objects that
// might shadow the optimistic declaration of dynamic lookup variables.
@@ -245,17 +255,18 @@ class AstGraphBuilder : public AstVisitor {
// resulting node. The operand stack height remains the same; variables and
// other dependencies tracked by the environment might be mutated, though.
- // Builder to create a receiver check for sloppy mode.
- Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
-
// Builders to create local function, script and block contexts.
- Node* BuildLocalFunctionContext(Node* context);
+ Node* BuildLocalActivationContext(Node* context);
+ Node* BuildLocalFunctionContext(Scope* scope);
Node* BuildLocalScriptContext(Scope* scope);
Node* BuildLocalBlockContext(Scope* scope);
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
+  // Builder to create an array of rest parameters if used.
+ Node* BuildRestArgumentsArray(Variable* rest, int index);
+
// Builder that assigns to the {.this_function} internal variable if needed.
Node* BuildThisFunctionVariable(Variable* this_function_var);
@@ -283,44 +294,37 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildNamedLoad(Node* receiver, Handle<Name> name,
const VectorSlotPair& feedback);
Node* BuildKeyedStore(Node* receiver, Node* key, Node* value,
- const VectorSlotPair& feedback, TypeFeedbackId id);
+ const VectorSlotPair& feedback);
Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
- const VectorSlotPair& feedback, TypeFeedbackId id);
+ const VectorSlotPair& feedback);
// Builders for super property loads and stores.
Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
- Node* value, TypeFeedbackId id);
+ Node* value);
Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value, TypeFeedbackId id);
+ Handle<Name> name, Node* value);
Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
Handle<Name> name, const VectorSlotPair& feedback);
Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
const VectorSlotPair& feedback);
// Builders for global variable loads and stores.
- Node* BuildGlobalLoad(Node* script_context, Node* global, Handle<Name> name,
- const VectorSlotPair& feedback, TypeofMode typeof_mode,
- int slot_index);
- Node* BuildGlobalStore(Node* script_context, Node* global, Handle<Name> name,
- Node* value, const VectorSlotPair& feedback,
- TypeFeedbackId id, int slot_index);
+ Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
+ TypeofMode typeof_mode);
+ Node* BuildGlobalStore(Handle<Name> name, Node* value,
+ const VectorSlotPair& feedback);
// Builders for accessing the function context.
Node* BuildLoadGlobalObject();
Node* BuildLoadNativeContextField(int index);
- Node* BuildLoadGlobalProxy();
Node* BuildLoadFeedbackVector();
// Builder for accessing a (potentially immutable) object field.
Node* BuildLoadObjectField(Node* object, int offset);
Node* BuildLoadImmutableObjectField(Node* object, int offset);
- // Builders for accessing external references.
- Node* BuildLoadExternal(ExternalReference ref, MachineType type);
- Node* BuildStoreExternal(ExternalReference ref, MachineType type, Node* val);
-
// Builders for automatic type conversion.
- Node* BuildToBoolean(Node* input);
+ Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
Node* BuildToName(Node* input, BailoutId bailout_id);
Node* BuildToObject(Node* input, BailoutId bailout_id);
@@ -352,13 +356,36 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildThrow(Node* exception_value);
// Builders for binary operations.
- Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op,
+ TypeFeedbackId feedback_id);
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
Node* ProcessArguments(const Operator* op, int arity);
// ===========================================================================
+ // The following build methods have the same contract as the above ones, but
+ // they can also return {nullptr} to indicate that no fragment was built. Note
+ // that these are optimizations; disabling any of them should still produce
+ // correct graphs.
+
+ // Optimization for variable load from global object.
+ Node* TryLoadGlobalConstant(Handle<Name> name);
+
+ // Optimization for variable load of dynamic lookup slot that is most likely
+ // to resolve to a global slot or context slot (inferred from scope chain).
+ Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
+ BailoutId bailout_id,
+ FrameStateBeforeAndAfter& states,
+ const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine,
+ TypeofMode typeof_mode);
+
+ // Optimizations for automatic type conversion.
+ Node* TryFastToBoolean(Node* input);
+ Node* TryFastToName(Node* input);
+
+ // ===========================================================================
// The following visitation methods all recursively visit a subtree of the
// underlying AST and extend the graph. The operand stack is mutated in a way
// consistent with other compilers:
@@ -401,7 +428,8 @@ class AstGraphBuilder : public AstVisitor {
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
- BailoutId bailout_id);
+ BailoutId bailout_id_before,
+ BailoutId bailout_id_after);
// Dispatched from VisitObjectLiteral.
void VisitObjectLiteralAccessor(Node* home_object,
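A note on the Try* contract introduced above: callers must treat {nullptr} as "no fragment built" and fall back to the corresponding Build* method. A minimal sketch of that calling pattern (the wrapper name and the feedback_id local are hypothetical, not part of this patch):

Node* BuildToBooleanAny(Node* input, TypeFeedbackId feedback_id) {
  // Fast path is an optimization only; it may decline by returning nullptr.
  Node* node = TryFastToBoolean(input);
  if (node == nullptr) {
    // General builder; always produces a fragment (may consume feedback).
    node = BuildToBoolean(input, feedback_id);
  }
  return node;
}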
diff --git a/chromium/v8/src/compiler/ast-loop-assignment-analyzer.cc b/chromium/v8/src/compiler/ast-loop-assignment-analyzer.cc
index d9ec109e40e..2074c944e62 100644
--- a/chromium/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/chromium/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -4,7 +4,7 @@
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -13,16 +13,16 @@ namespace compiler {
typedef class AstLoopAssignmentAnalyzer ALAA; // for code brevity.
ALAA::AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info)
- : info_(info), loop_stack_(zone) {
- InitializeAstVisitor(info->isolate(), zone);
+ : info_(info), zone_(zone), loop_stack_(zone) {
+ InitializeAstVisitor(info->isolate());
}
LoopAssignmentAnalysis* ALAA::Analyze() {
- LoopAssignmentAnalysis* a = new (zone()) LoopAssignmentAnalysis(zone());
+ LoopAssignmentAnalysis* a = new (zone_) LoopAssignmentAnalysis(zone_);
result_ = a;
VisitStatements(info()->literal()->body());
- result_ = NULL;
+ result_ = nullptr;
return a;
}
@@ -30,7 +30,7 @@ LoopAssignmentAnalysis* ALAA::Analyze() {
void ALAA::Enter(IterationStatement* loop) {
int num_variables = 1 + info()->scope()->num_parameters() +
info()->scope()->num_stack_slots();
- BitVector* bits = new (zone()) BitVector(num_variables, zone());
+ BitVector* bits = new (zone_) BitVector(num_variables, zone_);
if (info()->is_osr() && info()->osr_ast_id() == loop->OsrEntryId())
bits->AddAll();
loop_stack_.push_back(bits);
@@ -77,6 +77,12 @@ void ALAA::VisitSuperCallReference(SuperCallReference* leaf) {}
void ALAA::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
+void ALAA::VisitDoExpression(DoExpression* expr) {
+ Visit(expr->block());
+ Visit(expr->result());
+}
+
+
void ALAA::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
@@ -120,6 +126,7 @@ void ALAA::VisitClassLiteral(ClassLiteral* e) {
VisitIfNotNull(e->constructor());
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
}
}
@@ -135,6 +142,7 @@ void ALAA::VisitConditional(Conditional* e) {
void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
}
}
@@ -255,7 +263,9 @@ void ALAA::VisitForInStatement(ForInStatement* loop) {
void ALAA::VisitForOfStatement(ForOfStatement* loop) {
+ Visit(loop->assign_iterator());
Enter(loop);
+ Visit(loop->assign_each());
Visit(loop->each());
Visit(loop->subject());
Visit(loop->body());
@@ -278,6 +288,12 @@ void ALAA::VisitCountOperation(CountOperation* e) {
}
+void ALAA::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void ALAA::AnalyzeAssignment(Variable* var) {
if (!loop_stack_.empty() && var->IsStackAllocated()) {
loop_stack_.back()->Add(GetVariableIndex(info()->scope(), var));
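For orientation, the analyzer's bookkeeping reduces to one bit vector per open loop, indexed by variable, with assignments recorded while inside the loop. A stand-alone sketch of that shape (hypothetical types, plain std containers instead of V8's Zone allocation; the sketch marks every enclosing loop directly, whereas the real analyzer records into the innermost vector and propagates on Exit):

#include <vector>

// Hypothetical stand-alone sketch: which stack-allocated variables are
// assigned somewhere inside each currently-open loop.
struct LoopAssignmentSketch {
  std::vector<std::vector<bool>> loop_stack;  // One bit per variable.

  void Enter(int num_variables) {
    loop_stack.emplace_back(num_variables, false);
  }
  void Exit() { loop_stack.pop_back(); }

  void RecordAssignment(int variable_index) {
    // The assignment is visible to every enclosing loop.
    for (auto& bits : loop_stack) bits[variable_index] = true;
  }
};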
diff --git a/chromium/v8/src/compiler/ast-loop-assignment-analyzer.h b/chromium/v8/src/compiler/ast-loop-assignment-analyzer.h
index cd56d0a7ef1..169691135a9 100644
--- a/chromium/v8/src/compiler/ast-loop-assignment-analyzer.h
+++ b/chromium/v8/src/compiler/ast-loop-assignment-analyzer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
#define V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/bit-vector.h"
#include "src/zone-containers.h"
@@ -26,7 +26,7 @@ class LoopAssignmentAnalysis : public ZoneObject {
if (list_[i].first == loop) return list_[i].second;
}
UNREACHABLE(); // should never ask for loops that aren't here!
- return NULL;
+ return nullptr;
}
int GetAssignmentCountForTesting(Scope* scope, Variable* var);
@@ -53,6 +53,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
private:
CompilationInfo* info_;
+ Zone* zone_;
ZoneDeque<BitVector*> loop_stack_;
LoopAssignmentAnalysis* result_;
@@ -62,7 +63,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
void Exit(IterationStatement* loop);
void VisitIfNotNull(AstNode* node) {
- if (node != NULL) Visit(node);
+ if (node != nullptr) Visit(node);
}
void AnalyzeAssignment(Variable* var);
@@ -70,8 +71,8 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstLoopAssignmentAnalyzer);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
diff --git a/chromium/v8/src/compiler/basic-block-instrumentor.cc b/chromium/v8/src/compiler/basic-block-instrumentor.cc
index 23170e701b3..a966a5b2626 100644
--- a/chromium/v8/src/compiler/basic-block-instrumentor.cc
+++ b/chromium/v8/src/compiler/basic-block-instrumentor.cc
@@ -81,12 +81,13 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
// Construct increment operation.
Node* base = graph->NewNode(
PointerConstant(&common, data->GetCounterAddress(block_number)));
- Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero,
+ Node* load = graph->NewNode(machine.Load(MachineType::Uint32()), base, zero,
graph->start(), graph->start());
Node* inc = graph->NewNode(machine.Int32Add(), load, one);
- Node* store = graph->NewNode(
- machine.Store(StoreRepresentation(kMachUint32, kNoWriteBarrier)), base,
- zero, inc, graph->start(), graph->start());
+ Node* store =
+ graph->NewNode(machine.Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ base, zero, inc, graph->start(), graph->start());
// Insert the new nodes.
static const int kArraySize = 6;
Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
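The three nodes stitched in above implement nothing more than an unsynchronized 32-bit counter bump; as plain C++ (a sketch, with counter standing in for data->GetCounterAddress(block_number)):

#include <cstdint>

// What the Load / Int32Add / Store triple computes per basic block.
inline void IncrementBlockCounter(uint32_t* counter) {
  uint32_t value = *counter;  // machine.Load(MachineType::Uint32())
  value += 1;                 // machine.Int32Add()
  *counter = value;           // machine.Store(kWord32, kNoWriteBarrier)
}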
diff --git a/chromium/v8/src/compiler/branch-elimination.cc b/chromium/v8/src/compiler/branch-elimination.cc
new file mode 100644
index 00000000000..bc56e73a081
--- /dev/null
+++ b/chromium/v8/src/compiler/branch-elimination.cc
@@ -0,0 +1,269 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/branch-elimination.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ node_conditions_(zone, js_graph->graph()->NodeCount()),
+ zone_(zone),
+ dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+
+
+BranchElimination::~BranchElimination() {}
+
+
+Reduction BranchElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kDead:
+ return NoChange();
+ case IrOpcode::kMerge:
+ return ReduceMerge(node);
+ case IrOpcode::kLoop:
+ return ReduceLoop(node);
+ case IrOpcode::kBranch:
+ return ReduceBranch(node);
+ case IrOpcode::kIfFalse:
+ return ReduceIf(node, false);
+ case IrOpcode::kIfTrue:
+ return ReduceIf(node, true);
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ if (node->op()->ControlOutputCount() > 0) {
+ return ReduceOtherControl(node);
+ }
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction BranchElimination::ReduceBranch(Node* node) {
+ Node* condition = node->InputAt(0);
+ Node* control_input = NodeProperties::GetControlInput(node, 0);
+ const ControlPathConditions* from_input = node_conditions_.Get(control_input);
+ if (from_input != nullptr) {
+ Maybe<bool> condition_value = from_input->LookupCondition(condition);
+ // If we know the condition we can discard the branch.
+ if (condition_value.IsJust()) {
+ bool known_value = condition_value.FromJust();
+ for (Node* const use : node->uses()) {
+ switch (use->opcode()) {
+ case IrOpcode::kIfTrue:
+ Replace(use, known_value ? control_input : dead());
+ break;
+ case IrOpcode::kIfFalse:
+ Replace(use, known_value ? dead() : control_input);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return Replace(dead());
+ }
+ }
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
+ // Add the condition to the list arriving from the input branch.
+ Node* branch = NodeProperties::GetControlInput(node, 0);
+ const ControlPathConditions* from_branch = node_conditions_.Get(branch);
+ // If we do not know anything about the predecessor, do not propagate just
+ // yet because we will have to recompute anyway once we compute the
+ // predecessor.
+ if (from_branch == nullptr) {
+ DCHECK(node_conditions_.Get(node) == nullptr);
+ return NoChange();
+ }
+ Node* condition = branch->InputAt(0);
+ return UpdateConditions(
+ node, from_branch->AddCondition(zone_, condition, is_true_branch));
+}
+
+
+Reduction BranchElimination::ReduceLoop(Node* node) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just use
+ // the information from the loop entry edge.
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::ReduceMerge(Node* node) {
+ // Shortcut for the case when we do not know anything about some
+ // input.
+ for (int i = 0; i < node->InputCount(); i++) {
+ if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+ DCHECK(node_conditions_.Get(node) == nullptr);
+ return NoChange();
+ }
+ }
+
+ const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+ // Make a copy of the first input's conditions and merge with the conditions
+ // from other inputs.
+ ControlPathConditions* conditions =
+ new (zone_->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(*first);
+ for (int i = 1; i < node->InputCount(); i++) {
+ conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+ }
+
+ return UpdateConditions(node, conditions);
+}
+
+
+Reduction BranchElimination::ReduceStart(Node* node) {
+ return UpdateConditions(node, ControlPathConditions::Empty(zone_));
+}
+
+
+const BranchElimination::ControlPathConditions*
+BranchElimination::PathConditionsForControlNodes::Get(Node* node) {
+ if (static_cast<size_t>(node->id()) < info_for_node_.size()) {
+ return info_for_node_[node->id()];
+ }
+ return nullptr;
+}
+
+
+void BranchElimination::PathConditionsForControlNodes::Set(
+ Node* node, const ControlPathConditions* conditions) {
+ size_t index = static_cast<size_t>(node->id());
+ if (index >= info_for_node_.size()) {
+ info_for_node_.resize(index + 1, nullptr);
+ }
+ info_for_node_[index] = conditions;
+}
+
+
+Reduction BranchElimination::ReduceOtherControl(Node* node) {
+ DCHECK_EQ(1, node->op()->ControlInputCount());
+ return TakeConditionsFromFirstControl(node);
+}
+
+
+Reduction BranchElimination::TakeConditionsFromFirstControl(Node* node) {
+ // We just propagate the information from the control input (ideally,
+ // we would only revisit control uses if there is a change).
+ const ControlPathConditions* from_input =
+ node_conditions_.Get(NodeProperties::GetControlInput(node, 0));
+ return UpdateConditions(node, from_input);
+}
+
+
+Reduction BranchElimination::UpdateConditions(
+ Node* node, const ControlPathConditions* conditions) {
+ const ControlPathConditions* original = node_conditions_.Get(node);
+ // Only signal that the node has Changed if the condition information has
+ // changed.
+ if (conditions != original) {
+ if (original == nullptr || *conditions != *original) {
+ node_conditions_.Set(node, conditions);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+// static
+const BranchElimination::ControlPathConditions*
+BranchElimination::ControlPathConditions::Empty(Zone* zone) {
+ return new (zone->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(nullptr, 0);
+}
+
+
+void BranchElimination::ControlPathConditions::Merge(
+ const ControlPathConditions& other) {
+ // Change the current condition list to the longest common tail
+ // of this condition list and the other list. (The common tail
+ // should correspond to the list from the common dominator.)
+
+ // First, we throw away the prefix of the longer list, so that
+ // we have lists of the same length.
+ size_t other_size = other.condition_count_;
+ BranchCondition* other_condition = other.head_;
+ while (other_size > condition_count_) {
+ other_condition = other_condition->next;
+ other_size--;
+ }
+ while (condition_count_ > other_size) {
+ head_ = head_->next;
+ condition_count_--;
+ }
+
+ // Then we go through both lists in lock-step until we find
+ // the common tail.
+ while (head_ != other_condition) {
+ DCHECK(condition_count_ > 0);
+ condition_count_--;
+ other_condition = other_condition->next;
+ head_ = head_->next;
+ }
+}
+
+
+const BranchElimination::ControlPathConditions*
+BranchElimination::ControlPathConditions::AddCondition(Zone* zone,
+ Node* condition,
+ bool is_true) const {
+ DCHECK(LookupCondition(condition).IsNothing());
+
+ BranchCondition* new_head = new (zone->New(sizeof(BranchCondition)))
+ BranchCondition(condition, is_true, head_);
+
+ ControlPathConditions* conditions =
+ new (zone->New(sizeof(ControlPathConditions)))
+ ControlPathConditions(new_head, condition_count_ + 1);
+ return conditions;
+}
+
+
+Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
+ Node* condition) const {
+ for (BranchCondition* current = head_; current != nullptr;
+ current = current->next) {
+ if (current->condition == condition) {
+ return Just<bool>(current->is_true);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
+bool BranchElimination::ControlPathConditions::operator==(
+ const ControlPathConditions& other) const {
+ if (condition_count_ != other.condition_count_) return false;
+ BranchCondition* this_condition = head_;
+ BranchCondition* other_condition = other.head_;
+ while (true) {
+ if (this_condition == other_condition) return true;
+ if (this_condition->condition != other_condition->condition ||
+ this_condition->is_true != other_condition->is_true) {
+ return false;
+ }
+ this_condition = this_condition->next;
+ other_condition = other_condition->next;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
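The net effect of the reducer is easiest to see on a dominated re-test of the same condition. In source-level terms (an illustration, not compiler IR; c, DoTrue and DoFalse are hypothetical):

// Before branch elimination: the inner test of c is dominated by the outer
// IfTrue projection, so its outcome is already known on this path.
if (c) {
  if (c) DoTrue(); else DoFalse();  // else-arm can never execute
}

// After: the inner branch is replaced by the known outcome, and the
// unreachable IfFalse arm is replaced by Dead and cut away.
if (c) {
  DoTrue();
}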
diff --git a/chromium/v8/src/compiler/branch-elimination.h b/chromium/v8/src/compiler/branch-elimination.h
new file mode 100644
index 00000000000..a7ac926c7ab
--- /dev/null
+++ b/chromium/v8/src/compiler/branch-elimination.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSGraph;
+
+
+class BranchElimination final : public AdvancedReducer {
+ public:
+ BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
+ ~BranchElimination() final;
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct BranchCondition {
+ Node* condition;
+ bool is_true;
+ BranchCondition* next;
+
+ BranchCondition(Node* condition, bool is_true, BranchCondition* next)
+ : condition(condition), is_true(is_true), next(next) {}
+ };
+
+ // Class for tracking information about branch conditions.
+ // At the moment it is a linked list of conditions and their values
+ // (true or false).
+ class ControlPathConditions {
+ public:
+ Maybe<bool> LookupCondition(Node* condition) const;
+
+ const ControlPathConditions* AddCondition(Zone* zone, Node* condition,
+ bool is_true) const;
+ static const ControlPathConditions* Empty(Zone* zone);
+ void Merge(const ControlPathConditions& other);
+
+ bool operator==(const ControlPathConditions& other) const;
+ bool operator!=(const ControlPathConditions& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ ControlPathConditions(BranchCondition* head, size_t condition_count)
+ : head_(head), condition_count_(condition_count) {}
+
+ BranchCondition* head_;
+ // We keep track of the list length so that we can find the longest
+ // common tail easily.
+ size_t condition_count_;
+ };
+
+ // Maps each control node to the condition information known about the node.
+ // If the information is nullptr, then we have not calculated the information
+ // yet.
+ class PathConditionsForControlNodes {
+ public:
+ PathConditionsForControlNodes(Zone* zone, size_t size_hint)
+ : info_for_node_(size_hint, nullptr, zone) {}
+ const ControlPathConditions* Get(Node* node);
+ void Set(Node* node, const ControlPathConditions* conditions);
+
+ private:
+ ZoneVector<const ControlPathConditions*> info_for_node_;
+ };
+
+ Reduction ReduceBranch(Node* node);
+ Reduction ReduceIf(Node* node, bool is_true_branch);
+ Reduction ReduceLoop(Node* node);
+ Reduction ReduceMerge(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherControl(Node* node);
+
+ Reduction TakeConditionsFromFirstControl(Node* node);
+ Reduction UpdateConditions(Node* node,
+ const ControlPathConditions* conditions);
+
+ Node* dead() const { return dead_; }
+
+ PathConditionsForControlNodes node_conditions_;
+ Zone* zone_;
+ Node* dead_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
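ControlPathConditions is, in effect, an immutable cons-list: AddCondition allocates a fresh head and shares the tail, which keeps per-node condition sets cheap and makes Merge's longest-common-tail computation well-defined. A minimal generic sketch of the same structure (hypothetical, with plain new instead of Zone allocation and leaks ignored for brevity):

// Immutable, tail-sharing condition list in the style of
// ControlPathConditions: prepending never mutates the existing list, so
// two diverging paths still share the tail from their common dominator.
struct CondNode {
  int condition_id;
  bool is_true;
  const CondNode* next;
};

const CondNode* AddCondition(const CondNode* head, int id, bool value) {
  return new CondNode{id, value, head};  // Old list stays valid and shared.
}

bool Lookup(const CondNode* head, int id, bool* is_true_out) {
  for (const CondNode* n = head; n != nullptr; n = n->next) {
    if (n->condition_id == id) {
      *is_true_out = n->is_true;
      return true;
    }
  }
  return false;  // Condition not known on this control path.
}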
diff --git a/chromium/v8/src/compiler/bytecode-branch-analysis.cc b/chromium/v8/src/compiler/bytecode-branch-analysis.cc
new file mode 100644
index 00000000000..27699a1b9a9
--- /dev/null
+++ b/chromium/v8/src/compiler/bytecode-branch-analysis.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-branch-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Records the offsets of all bytecodes that branch to a particular
+// target (bytecode offset).
+class BytecodeBranchInfo final : public ZoneObject {
+ public:
+ explicit BytecodeBranchInfo(Zone* zone)
+ : back_edge_offsets_(zone), fore_edge_offsets_(zone) {}
+
+ void AddBranch(int source_offset, int target_offset);
+
+ // The offsets of bytecodes that refer to this bytecode as
+ // a back-edge predecessor.
+ const ZoneVector<int>* back_edge_offsets() { return &back_edge_offsets_; }
+
+ // The offsets of bytecodes that refer to this bytecode as
+ // a forwards-edge predecessor.
+ const ZoneVector<int>* fore_edge_offsets() { return &fore_edge_offsets_; }
+
+ private:
+ ZoneVector<int> back_edge_offsets_;
+ ZoneVector<int> fore_edge_offsets_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeBranchInfo);
+};
+
+
+void BytecodeBranchInfo::AddBranch(int source_offset, int target_offset) {
+ if (source_offset < target_offset) {
+ fore_edge_offsets_.push_back(source_offset);
+ } else {
+ back_edge_offsets_.push_back(source_offset);
+ }
+}
+
+
+BytecodeBranchAnalysis::BytecodeBranchAnalysis(
+ Handle<BytecodeArray> bytecode_array, Zone* zone)
+ : branch_infos_(zone),
+ bytecode_array_(bytecode_array),
+ reachable_(bytecode_array->length(), zone),
+ zone_(zone) {}
+
+
+void BytecodeBranchAnalysis::Analyze() {
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ bool reachable = true;
+ while (!iterator.done()) {
+ interpreter::Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+ // All bytecode basic blocks are generated to be forward reachable
+ // and may also be backward reachable. Hence, if there is a forward
+ // branch targeting this offset, the code becomes reachable.
+ reachable = reachable || forward_branches_target(current_offset);
+ if (reachable) {
+ reachable_.Add(current_offset);
+ if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ // Only the branch is recorded; the forward path falls through
+ // and is handled as normal bytecode data flow.
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+ // Unless the branch targets the next bytecode it's not
+ // reachable. If it targets the next bytecode the check at the
+ // start of the loop will set the reachable flag.
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ reachable = false;
+ } else if (interpreter::Bytecodes::IsJumpOrReturn(bytecode)) {
+ DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
+ reachable = false;
+ }
+ }
+ iterator.Advance();
+ }
+}
+
+
+const ZoneVector<int>* BytecodeBranchAnalysis::BackwardBranchesTargetting(
+ int offset) const {
+ auto iterator = branch_infos_.find(offset);
+ if (branch_infos_.end() != iterator) {
+ return iterator->second->back_edge_offsets();
+ } else {
+ return nullptr;
+ }
+}
+
+
+const ZoneVector<int>* BytecodeBranchAnalysis::ForwardBranchesTargetting(
+ int offset) const {
+ auto iterator = branch_infos_.find(offset);
+ if (branch_infos_.end() != iterator) {
+ return iterator->second->fore_edge_offsets();
+ } else {
+ return nullptr;
+ }
+}
+
+
+void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
+ BytecodeBranchInfo* branch_info = nullptr;
+ auto iterator = branch_infos_.find(target_offset);
+ if (branch_infos_.end() == iterator) {
+ branch_info = new (zone()) BytecodeBranchInfo(zone());
+ branch_infos_.insert(std::make_pair(target_offset, branch_info));
+ } else {
+ branch_info = iterator->second;
+ }
+ branch_info->AddBranch(source_offset, target_offset);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
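Edge direction falls out of a single offset comparison, relying on the invariant that the interpreter's only backward branches are loop back-edges. A sketch with hypothetical offsets:

// Mirror of the classification in BytecodeBranchInfo::AddBranch: a branch
// is a back edge iff it does not move forward in the bytecode array.
bool IsBackEdge(int source_offset, int target_offset) {
  return source_offset >= target_offset;
}
// e.g. IsBackEdge(20, 4) == true   (loop back-edge to the header at 4)
//      IsBackEdge(8, 30) == false  (forward branch over dead code)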
diff --git a/chromium/v8/src/compiler/bytecode-branch-analysis.h b/chromium/v8/src/compiler/bytecode-branch-analysis.h
new file mode 100644
index 00000000000..0ef33b640c3
--- /dev/null
+++ b/chromium/v8/src/compiler/bytecode-branch-analysis.h
@@ -0,0 +1,79 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
+
+#include "src/bit-vector.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class BytecodeBranchInfo;
+
+// A class for identifying the branch targets and their branch sites
+// within a bytecode array and also identifying which bytecodes are
+// reachable. This information can be used to construct the local
+// control flow logic for high-level IR graphs built from bytecode.
+//
+// NB: This class relies on the only backward branches in bytecode
+// being jumps back to loop headers.
+class BytecodeBranchAnalysis BASE_EMBEDDED {
+ public:
+ BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
+
+ // Analyze the bytecodes to find the branch sites and their
+ // targets. No other methods in this class return valid information
+ // until this has been called.
+ void Analyze();
+
+ // Offsets of bytecodes having a backward branch to the bytecode at |offset|.
+ const ZoneVector<int>* BackwardBranchesTargetting(int offset) const;
+
+ // Offsets of bytecodes having a forward branch to the bytecode at |offset|.
+ const ZoneVector<int>* ForwardBranchesTargetting(int offset) const;
+
+ // Returns true if the bytecode at |offset| is reachable.
+ bool is_reachable(int offset) const { return reachable_.Contains(offset); }
+
+ // Returns true if there are any forward branches to the bytecode at
+ // |offset|.
+ bool forward_branches_target(int offset) const {
+ const ZoneVector<int>* sites = ForwardBranchesTargetting(offset);
+ return sites != nullptr && sites->size() > 0;
+ }
+
+ // Returns true if there are any backward branches to the bytecode
+ // at |offset|.
+ bool backward_branches_target(int offset) const {
+ const ZoneVector<int>* sites = BackwardBranchesTargetting(offset);
+ return sites != nullptr && sites->size() > 0;
+ }
+
+ private:
+ void AddBranch(int origin_offset, int target_offset);
+
+ Zone* zone() const { return zone_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ ZoneMap<int, BytecodeBranchInfo*> branch_infos_;
+ Handle<BytecodeArray> bytecode_array_;
+ BitVector reachable_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.cc b/chromium/v8/src/compiler/bytecode-graph-builder.cc
index 5ba18ffc979..cf0b6ab4381 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.cc
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.cc
@@ -4,16 +4,78 @@
#include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
-#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Helper for generating frame states for before and after a bytecode.
+class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
+ public:
+ FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder,
+ const interpreter::BytecodeArrayIterator& iterator)
+ : builder_(builder),
+ id_after_(BailoutId::None()),
+ added_to_node_(false),
+ output_poke_offset_(0),
+ output_poke_count_(0) {
+ BailoutId id_before(iterator.current_offset());
+ frame_state_before_ = builder_->environment()->Checkpoint(
+ id_before, OutputFrameStateCombine::Ignore());
+ id_after_ = BailoutId(id_before.ToInt() + iterator.current_bytecode_size());
+ }
+
+ ~FrameStateBeforeAndAfter() {
+ DCHECK(added_to_node_);
+ DCHECK(builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
+ output_poke_count_));
+ }
+
+ private:
+ friend class Environment;
+
+ void AddToNode(Node* node, OutputFrameStateCombine combine) {
+ DCHECK(!added_to_node_);
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ DCHECK_LE(count, 2);
+ if (count >= 1) {
+ // Add the frame state for after the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ Node* frame_state_after =
+ builder_->environment()->Checkpoint(id_after_, combine);
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
+ }
+
+ if (count >= 2) {
+ // Add the frame state for before the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 1)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+ }
+
+ if (!combine.IsOutputIgnored()) {
+ output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
+ output_poke_count_ = node->op()->ValueOutputCount();
+ }
+ added_to_node_ = true;
+ }
+
+ BytecodeGraphBuilder* builder_;
+ Node* frame_state_before_;
+ BailoutId id_after_;
+
+ bool added_to_node_;
+ int output_poke_offset_;
+ int output_poke_count_;
+};
+
+
// Issues:
-// - Need to deal with FrameState / FrameStateBeforeAndAfter / StateValue.
// - Scopes - intimately tied to AST. Need to evaluate what is needed.
// - Need to resolve closure parameter treatment.
BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
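Worth noting about FrameStateBeforeAndAfter above: both checkpoints are keyed by bytecode offsets, the before-state by the current offset and the after-state by the offset of the next bytecode. A worked example with hypothetical numbers:

// For a bytecode encoded in 3 bytes at offset 17:
int current_offset = 17;      // iterator.current_offset()
int bytecode_size = 3;        // iterator.current_bytecode_size()
BailoutId id_before(current_offset);                 // BailoutId(17)
BailoutId id_after(current_offset + bytecode_size);  // BailoutId(20)
// Deoptimizing against id_after resumes at the following bytecode.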
@@ -27,10 +89,13 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
context_(context),
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
- values_(builder->local_zone()) {
+ values_(builder->local_zone()),
+ parameters_state_values_(nullptr),
+ registers_state_values_(nullptr),
+ accumulator_state_values_(nullptr) {
// The layout of values_ is:
//
- // [receiver] [parameters] [registers]
+ // [receiver] [parameters] [registers] [accumulator]
//
// parameter[0] is the receiver (this), parameters 1..N are the
// parameters supplied to the method (arg0..argN-1). The accumulator
@@ -50,7 +115,26 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
values()->insert(values()->end(), register_count, undefined_constant);
// Accumulator
- accumulator_ = undefined_constant;
+ accumulator_base_ = static_cast<int>(values()->size());
+ values()->push_back(undefined_constant);
+}
+
+
+BytecodeGraphBuilder::Environment::Environment(
+ const BytecodeGraphBuilder::Environment* other)
+ : builder_(other->builder_),
+ register_count_(other->register_count_),
+ parameter_count_(other->parameter_count_),
+ context_(other->context_),
+ control_dependency_(other->control_dependency_),
+ effect_dependency_(other->effect_dependency_),
+ values_(other->zone()),
+ parameters_state_values_(nullptr),
+ registers_state_values_(nullptr),
+ accumulator_state_values_(nullptr),
+ register_base_(other->register_base_),
+ accumulator_base_(other->accumulator_base_) {
+ values_ = other->values_;
}
@@ -64,27 +148,75 @@ int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
}
-void BytecodeGraphBuilder::Environment::BindRegister(
- interpreter::Register the_register, Node* node) {
- int values_index = RegisterToValuesIndex(the_register);
- values()->at(values_index) = node;
+Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
+ return values()->at(accumulator_base_);
}
Node* BytecodeGraphBuilder::Environment::LookupRegister(
interpreter::Register the_register) const {
+ if (the_register.is_function_context()) {
+ return builder()->GetFunctionContext();
+ } else if (the_register.is_function_closure()) {
+ return builder()->GetFunctionClosure();
+ } else if (the_register.is_new_target()) {
+ return builder()->GetNewTarget();
+ } else {
+ int values_index = RegisterToValuesIndex(the_register);
+ return values()->at(values_index);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::ExchangeRegisters(
+ interpreter::Register reg0, interpreter::Register reg1) {
+ int reg0_index = RegisterToValuesIndex(reg0);
+ int reg1_index = RegisterToValuesIndex(reg1);
+ Node* saved_reg0_value = values()->at(reg0_index);
+ values()->at(reg0_index) = values()->at(reg1_index);
+ values()->at(reg1_index) = saved_reg0_value;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindAccumulator(
+ Node* node, FrameStateBeforeAndAfter* states) {
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(0));
+ }
+ values()->at(accumulator_base_) = node;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindRegister(
+ interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states) {
int values_index = RegisterToValuesIndex(the_register);
- return values()->at(values_index);
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
+ values_index));
+ }
+ values()->at(values_index) = node;
}
-void BytecodeGraphBuilder::Environment::BindAccumulator(Node* node) {
- accumulator_ = node;
+void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
+ interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states) {
+ int values_index = RegisterToValuesIndex(first_reg);
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
+ values_index));
+ }
+ for (int i = 0; i < node->op()->ValueOutputCount(); i++) {
+ values()->at(values_index + i) =
+ builder()->NewNode(common()->Projection(i), node);
+ }
}
-Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
- return accumulator_;
+void BytecodeGraphBuilder::Environment::RecordAfterState(
+ Node* node, FrameStateBeforeAndAfter* states) {
+ states->AddToNode(node, OutputFrameStateCombine::Ignore());
}
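The PokeAt arithmetic in BindRegister and BindRegistersToProjections counts down from the accumulator, which sits at the top of the environment's value list. A worked example with a hypothetical layout:

// values_ = [receiver | parameters | registers | accumulator]
int accumulator_base = 10;  // hypothetical accumulator_base_
int values_index = 7;       // register slot being bound
int poke_offset = accumulator_base - values_index;  // PokeAt(3)
// Binding the accumulator itself is the degenerate case PokeAt(0).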
@@ -98,24 +230,188 @@ void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
}
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForLoop() {
+ PrepareForLoop();
+ return new (zone()) Environment(this);
+}
+
+
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForConditional() const {
+ return new (zone()) Environment(this);
+}
+
+
+void BytecodeGraphBuilder::Environment::Merge(
+ BytecodeGraphBuilder::Environment* other) {
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) {
+ return;
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder()->MergeControl(GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder()->MergeEffect(GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing inputs at merge points,
+ // potentially extending an existing Phi node if possible.
+ context_ = builder()->MergeValue(context_, other->context_, control);
+ for (size_t i = 0; i < values_.size(); i++) {
+ values_[i] = builder()->MergeValue(values_[i], other->values_[i], control);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+ // Create a control node for the loop header.
+ Node* control = builder()->NewLoop();
+
+ // Create a Phi for external effects.
+ Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Assume everything in the loop is updated.
+ context_ = builder()->NewPhi(1, context_, control);
+ int size = static_cast<int>(values()->size());
+ for (int i = 0; i < size; i++) {
+ values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+ }
+
+ // Connect to the loop end.
+ Node* terminate = builder()->graph()->NewNode(
+ builder()->common()->Terminate(), effect, control);
+ builder()->exit_controls_.push_back(terminate);
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
+ Node** state_values, int offset, int count) {
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return false;
+ }
+ if (*state_values == nullptr) {
+ return true;
+ }
+ DCHECK_EQ((*state_values)->InputCount(), count);
+ DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ for (int i = 0; i < count; i++) {
+ if ((*state_values)->InputAt(i) != env_values[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+ int offset,
+ int count) {
+ if (StateValuesRequireUpdate(state_values, offset, count)) {
+ const Operator* op = common()->StateValues(count);
+ (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+ }
+}
+
+
+Node* BytecodeGraphBuilder::Environment::Checkpoint(
+ BailoutId bailout_id, OutputFrameStateCombine combine) {
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return builder()->jsgraph()->EmptyFrameState();
+ }
+
+ // TODO(rmcilroy): Consider using StateValuesCache for some state values.
+ UpdateStateValues(&parameters_state_values_, 0, parameter_count());
+ UpdateStateValues(&registers_state_values_, register_base(),
+ register_count());
+ UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+
+ const Operator* op = common()->FrameState(
+ bailout_id, combine, builder()->frame_state_function_info());
+ Node* result = graph()->NewNode(
+ op, parameters_state_values_, registers_state_values_,
+ accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
+ builder()->graph()->start());
+
+ return result;
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
+ Node** state_values, int offset, int count, int output_poke_start,
+ int output_poke_end) {
+ DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
+ for (int i = 0; i < count; i++, offset++) {
+ if (offset < output_poke_start || offset >= output_poke_end) {
+ if ((*state_values)->InputAt(i) != values()->at(offset)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
+ int output_poke_offset, int output_poke_count) {
+ // Poke offset is relative to the top of the stack (i.e., the accumulator).
+ int output_poke_start = accumulator_base() - output_poke_offset;
+ int output_poke_end = output_poke_start + output_poke_count;
+ return StateValuesAreUpToDate(&parameters_state_values_, 0, parameter_count(),
+ output_poke_start, output_poke_end) &&
+ StateValuesAreUpToDate(&registers_state_values_, register_base(),
+ register_count(), output_poke_start,
+ output_poke_end) &&
+ StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
+ 1, output_poke_start, output_poke_end);
+}
+
+
BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
CompilationInfo* compilation_info,
JSGraph* jsgraph)
: local_zone_(local_zone),
info_(compilation_info),
jsgraph_(jsgraph),
+ bytecode_array_(handle(info()->shared_info()->bytecode_array())),
+ frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kInterpretedFunction,
+ bytecode_array()->parameter_count(),
+ bytecode_array()->register_count(), info()->shared_info(),
+ CALL_MAINTAINS_NATIVE_CONTEXT)),
+ merge_environments_(local_zone),
+ loop_header_environments_(local_zone),
input_buffer_size_(0),
input_buffer_(nullptr),
- exit_controls_(local_zone) {
- bytecode_array_ = handle(info()->shared_info()->bytecode_array());
+ exit_controls_(local_zone) {}
+
+
+Node* BytecodeGraphBuilder::GetNewTarget() {
+ if (!new_target_.is_set()) {
+ int params = bytecode_array()->parameter_count();
+ int index = Linkage::GetJSCallNewTargetParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%new.target");
+ Node* node = NewNode(op, graph()->start());
+ new_target_.set(node);
+ }
+ return new_target_.get();
}
Node* BytecodeGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op =
- common()->Parameter(bytecode_array()->parameter_count(), "%context");
+ int params = bytecode_array()->parameter_count();
+ int index = Linkage::GetJSCallContextParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%context");
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
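The move from a hard-coded parameter index to the Linkage helpers reflects the Start-node output layout described in CreateGraph below; a summary of the arithmetic (derived from this patch's own comments, with P the parameter count including the receiver):

int P = bytecode_array()->parameter_count();  // receiver + formals
// Start outputs: P parameters, then new target
// (Linkage::GetJSCallNewTargetParamIndex), number of arguments, context
// (Linkage::GetJSCallContextParamIndex) and closure
// (Linkage::kJSCallClosureParamIndex).
int actual_parameter_count = P + 4;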
@@ -123,13 +419,72 @@ Node* BytecodeGraphBuilder::GetFunctionContext() {
}
+Node* BytecodeGraphBuilder::GetFunctionClosure() {
+ if (!function_closure_.is_set()) {
+ int index = Linkage::kJSCallClosureParamIndex;
+ const Operator* op = common()->Parameter(index, "%closure");
+ Node* node = NewNode(op, graph()->start());
+ function_closure_.set(node);
+ }
+ return function_closure_.get();
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
+ return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadImmutableObjectField(Node* object,
+ int offset) {
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
+ graph()->start(), graph()->start());
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
+ const Operator* op =
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
+ Node* native_context = NewNode(op, environment()->Context());
+ return NewNode(javascript()->LoadContext(0, index, true), native_context);
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
+ if (!feedback_vector_.is_set()) {
+ Node* closure = GetFunctionClosure();
+ Node* shared = BuildLoadImmutableObjectField(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector = BuildLoadImmutableObjectField(
+ shared, SharedFunctionInfo::kFeedbackVectorOffset);
+ feedback_vector_.set(vector);
+ }
+ return feedback_vector_.get();
+}
+
+
+VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
+ Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
+ FeedbackVectorSlot slot;
+ if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+ slot = feedback_vector->ToSlot(slot_id);
+ }
+ return VectorSlotPair(feedback_vector, slot);
+}
+
+
bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
- // Set up the basic structure of the graph. Outputs for {Start} are
- // the formal parameters (including the receiver) plus context and
- // closure.
- // The additional count items are for the context and closure.
- int actual_parameter_count = bytecode_array()->parameter_count() + 2;
+ // Set up the basic structure of the graph. Outputs for {Start} are the formal
+ // parameters (including the receiver) plus new target, number of arguments,
+ // context and closure.
+ int actual_parameter_count = bytecode_array()->parameter_count() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
Environment env(this, bytecode_array()->register_count(),
@@ -137,13 +492,7 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
GetFunctionContext());
set_environment(&env);
- // Build function context only if there are context allocated variables.
- if (info()->num_heap_slots() > 0) {
- UNIMPLEMENTED(); // TODO(oth): Write ast-graph-builder equivalent.
- } else {
- // Simply use the outer function context in building the graph.
- CreateGraphBody(stack_check);
- }
+ CreateGraphBody(stack_check);
// Finish the basic structure of the graph.
DCHECK_NE(0u, exit_controls_.size());
@@ -159,23 +508,41 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
// TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
// object setup, this function variable if used, tracing hooks.
+
+ if (stack_check) {
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareEntryFrameState(node);
+ }
+
VisitBytecodes();
}
void BytecodeGraphBuilder::VisitBytecodes() {
+ BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
+ analysis.Analyze();
+ set_branch_analysis(&analysis);
interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ set_bytecode_iterator(&iterator);
while (!iterator.done()) {
- switch (iterator.current_bytecode()) {
+ int current_offset = iterator.current_offset();
+ if (analysis.is_reachable(current_offset)) {
+ MergeEnvironmentsOfForwardBranches(current_offset);
+ BuildLoopHeaderForBackwardBranches(current_offset);
+
+ switch (iterator.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(iterator); \
break;
- BYTECODE_LIST(BYTECODE_CASE)
+ BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
+ }
}
iterator.Advance();
}
+ set_branch_analysis(nullptr);
+ set_bytecode_iterator(nullptr);
}
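Because dispatch is gated on analysis.is_reachable, bytecodes downstream of an unconditional jump or return build no graph nodes until some forward branch targets them again. A hypothetical trace:

// Hypothetical bytecode stream (offsets on the left):
//   8: Jump @30   // unconditional: reachable becomes false
//   9: LdaZero    // skipped entirely, no nodes built
//  ...
//  30: LdaSmi8 1  // forward-branch target: reachable flips back to true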
@@ -188,7 +555,14 @@ void BytecodeGraphBuilder::VisitLdaZero(
void BytecodeGraphBuilder::VisitLdaSmi8(
const interpreter::BytecodeArrayIterator& iterator) {
- Node* node = jsgraph()->Constant(iterator.GetSmi8Operand(0));
+ Node* node = jsgraph()->Constant(iterator.GetImmediateOperand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
environment()->BindAccumulator(node);
}
@@ -249,188 +623,1093 @@ void BytecodeGraphBuilder::VisitStar(
}
-void BytecodeGraphBuilder::VisitLdaGlobal(
+void BytecodeGraphBuilder::VisitMov(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->BindRegister(iterator.GetRegisterOperand(1), value);
+}
+
+
+void BytecodeGraphBuilder::VisitExchange(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
+ iterator.GetRegisterOperand(1));
+}
+
+
+void BytecodeGraphBuilder::VisitExchangeWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
+ iterator.GetRegisterOperand(1));
+}
+
+
+void BytecodeGraphBuilder::BuildLoadGlobal(
+ const interpreter::BytecodeArrayIterator& iterator,
+ TypeofMode typeof_mode) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+ Node* node = NewNode(op, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::BuildStoreGlobal(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+ Node* value = environment()->LookupAccumulator();
+
+ const Operator* op =
+ javascript()->StoreGlobal(language_mode(), name, feedback);
+ Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildStoreGlobal(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaContextSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ // TODO(mythria): LoadContextSlots are unrolled by the required depth when
+ // generating bytecode. Hence the value of depth is always 0. Update this
+ // code when the implementation changes.
+ // TODO(mythria): The immutable flag is also set to false. This information
+ // is not available in the bytecode array. Update this code when the
+ // implementation changes.
+ const Operator* op =
+ javascript()->LoadContext(0, iterator.GetIndexOperand(1), false);
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* node = NewNode(op, context);
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaContextSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaContextSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaContextSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ // TODO(mythria): LoadContextSlots are unrolled by the required depth when
+ // generating bytecode. Hence the value of depth is always 0. Update this
+ // code when the implementation changes.
+ const Operator* op =
+ javascript()->StoreContext(0, iterator.GetIndexOperand(1));
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* value = environment()->LookupAccumulator();
+ NewNode(op, context, value);
+}
+
+
+void BytecodeGraphBuilder::VisitStaContextSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaContextSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildLdaLookupSlot(
+ TypeofMode typeof_mode,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<String> name =
+ Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* value =
+ NewNode(op, BuildLoadFeedbackVector(), environment()->Context());
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF, iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildStaLookupSlot(
+ LanguageMode language_mode,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* name = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+ Node* language = jsgraph()->Constant(language_mode);
+ const Operator* op = javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
+ Node* store = NewNode(op, value, environment()->Context(), name, language);
+ environment()->BindAccumulator(store, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaLookupSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaLookupSlotInsideTypeof(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildStaLookupSlot(LanguageMode::SLOPPY, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildStaLookupSlot(LanguageMode::STRICT, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaLookupSlotSloppy(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaLookupSlotStrict(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildNamedLoad(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildKeyedLoad(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* key = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+
+ const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildNamedStore(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op =
+ javascript()->StoreNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStoreICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildKeyedStore(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* key = environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitPushContext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* context = environment()->LookupAccumulator();
+ environment()->BindRegister(iterator.GetRegisterOperand(0), context);
+ environment()->SetContext(context);
+}
+
+
+void BytecodeGraphBuilder::VisitPopContext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->SetContext(context);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateClosure(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<SharedFunctionInfo> shared_info =
+ Handle<SharedFunctionInfo>::cast(iterator.GetConstantForIndexOperand(0));
+ PretenureFlag tenured =
+ iterator.GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+ const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+ Node* closure = NewNode(op);
+ environment()->BindAccumulator(closure);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateClosureWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitCreateClosure(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateArguments(
+ CreateArgumentsParameters::Type type,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* op = javascript()->CreateArguments(type, 0);
+ Node* object = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(object, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateMappedArguments(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArguments(CreateArgumentsParameters::kMappedArguments, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArguments(CreateArgumentsParameters::kUnmappedArguments, iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateLiteral(
+ const Operator* op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* literal = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(literal, &states);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<String> constant_pattern =
+ Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ constant_pattern, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateRegExpLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateRegExpLiteral(iterator);
}
-void BytecodeGraphBuilder::VisitLoadIC(
+void BytecodeGraphBuilder::BuildCreateArrayLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Handle<FixedArray> constant_elements =
+ Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralArray(
+ constant_elements, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
}
-void BytecodeGraphBuilder::VisitKeyedLoadIC(
+void BytecodeGraphBuilder::VisitCreateArrayLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateArrayLiteral(iterator);
}
-void BytecodeGraphBuilder::VisitStoreIC(
+void BytecodeGraphBuilder::VisitCreateArrayLiteralWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateArrayLiteral(iterator);
}
-void BytecodeGraphBuilder::VisitKeyedStoreIC(
+void BytecodeGraphBuilder::BuildCreateObjectLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Handle<FixedArray> constant_properties =
+ Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralObject(
+ constant_properties, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateObjectLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateObjectLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateObjectLiteral(iterator);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
+ Node* callee,
+ interpreter::Register receiver,
+ size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(static_cast<int>(arity));
+ all[0] = callee;
+ all[1] = environment()->LookupRegister(receiver);
+ int receiver_index = receiver.index();
+ for (int i = 2; i < static_cast<int>(arity); ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(receiver_index + i - 1));
+ }
+ Node* value = MakeNode(call_op, static_cast<int>(arity), all, false);
+ return value;
+}
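The index arithmetic above relies on the interpreter's calling convention: the receiver sits in one register and the arguments occupy the registers immediately after it, so input slot i >= 2 of the call node maps to register receiver_index + i - 1. A minimal standalone sketch of the same layout with string labels in place of nodes (hypothetical types, not the V8 API):

#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> ProcessCallArguments(
    const std::string& callee, const std::vector<std::string>& registers,
    int receiver_index, size_t arity) {
  std::vector<std::string> all(arity);
  all[0] = callee;                     // input 0: the function to call
  all[1] = registers[receiver_index];  // input 1: the receiver
  for (int i = 2; i < static_cast<int>(arity); ++i) {
    // Argument i - 2 lives in the register directly after the receiver.
    all[i] = registers[receiver_index + i - 1];
  }
  return all;
}

int main() {
  std::vector<std::string> registers = {"recv", "arg0", "arg1"};
  // arity = callee + receiver + two arguments = 4
  for (const std::string& input :
       ProcessCallArguments("closure", registers, 0, 4)) {
    std::printf("%s\n", input.c_str());  // closure recv arg0 arg1
  }
}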
+
+
+void BytecodeGraphBuilder::BuildCall(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
+ // register has been loaded with null / undefined explicitly or we are sure it
+ // is not null / undefined.
+ ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+ Node* callee = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ interpreter::Register receiver = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(3));
+
+ const Operator* call = javascript()->CallFunction(
+ arg_count + 2, language_mode(), feedback, receiver_hint);
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
}
void BytecodeGraphBuilder::VisitCall(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCall(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCallWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCall(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCallJSRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* callee = BuildLoadNativeContextField(iterator.GetIndexOperand(0));
+ interpreter::Register receiver = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // Create node to perform the JS runtime call.
+ const Operator* call =
+ javascript()->CallFunction(arg_count + 2, language_mode());
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
+ const Operator* call_runtime_op, interpreter::Register first_arg,
+ size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(arity);
+ int first_arg_index = first_arg.index();
+ for (int i = 0; i < static_cast<int>(arity); ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i));
+ }
+ Node* value = MakeNode(call_runtime_op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+
+void BytecodeGraphBuilder::VisitCallRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Runtime::FunctionId functionId =
+ static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitCallRuntimeForPair(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Runtime::FunctionId functionId =
+ static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+ interpreter::Register first_return = iterator.GetRegisterOperand(3);
+
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* return_pair = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindRegistersToProjections(first_return, return_pair, &states);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallNewArguments(
+ const Operator* call_new_op, interpreter::Register callee,
+ interpreter::Register first_arg, size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(arity);
+ all[0] = environment()->LookupRegister(callee);
+ int first_arg_index = first_arg.index();
+ for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i - 1));
+ }
+ // Original constructor is the same as the callee.
+ all[arity - 1] = environment()->LookupRegister(callee);
+ Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
+ return value;
+}
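For construct calls the flattened input list is [callee, args..., new.target]; since this builder has no separate new.target value, the callee is duplicated into the last slot. A companion sketch under the same assumptions as the call-argument example above:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::string callee = "ctor";
  std::vector<std::string> args = {"arg0", "arg1"};
  size_t arity = args.size() + 2;  // callee + args + new.target

  std::vector<std::string> all(arity);
  all[0] = callee;
  for (size_t i = 1; i < arity - 1; ++i) all[i] = args[i - 1];
  all[arity - 1] = callee;  // new.target: same node as the callee here

  assert(all[1] == "arg0" && all.back() == "ctor");
}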
+
+
+void BytecodeGraphBuilder::VisitNew(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ interpreter::Register callee = iterator.GetRegisterOperand(0);
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // TODO(turbofan): Pass the feedback here.
+ const Operator* call = javascript()->CallConstruct(
+ static_cast<int>(arg_count) + 2, VectorSlotPair());
+ Node* value = ProcessCallNewArguments(call, callee, first_arg, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitThrow(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ // TODO(mythria): Change to Runtime::kThrow when we have deoptimization
+ // information support in the interpreter.
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), value);
+ Node* control = NewNode(common()->Throw(), value);
+ environment()->RecordAfterState(control, &states);
+ UpdateControlDependencyToLeaveFunction(control);
}
void BytecodeGraphBuilder::BuildBinaryOp(
const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
Node* node = NewNode(js_op, left, right);
-
- // TODO(oth): Real frame state and environment check pointing.
- int frame_state_count =
- OperatorProperties::GetFrameStateInputCount(node->op());
- for (int i = 0; i < frame_state_count; i++) {
- NodeProperties::ReplaceFrameStateInput(node, i,
- jsgraph()->EmptyFrameState());
- }
- environment()->BindAccumulator(node);
+ environment()->BindAccumulator(node, &states);
}
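The hunk above replaces the placeholder empty frame states with the FrameStateBeforeAndAfter helper, which snapshots the interpreter frame before the JS operator executes and records a second snapshot when the result is bound, so deoptimization can resume on either side of the operation. A toy sketch of the pattern (Env and States are hypothetical stand-ins, not the V8 classes):

#include <cassert>

struct Env { int version = 0; };

struct States {
  explicit States(Env& e) : env(e), before(e.version) {}
  void RecordAfter() { after = env.version; }
  Env& env;
  int before;   // checkpoint taken on construction
  int after = -1;
};

int main() {
  Env env;
  States states(env);    // FrameStateBeforeAndAfter states(this, iterator);
  ++env.version;         // NewNode(js_op, ...) may update the environment
  states.RecordAfter();  // BindAccumulator(node, &states)
  assert(states.before == 0 && states.after == 1);
}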
void BytecodeGraphBuilder::VisitAdd(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Add(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Add(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitSub(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Subtract(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Subtract(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitMul(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Multiply(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Multiply(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitDiv(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Divide(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Divide(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitMod(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Modulus(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Modulus(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseOr(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseOr(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseXor(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseXor(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitBitwiseAnd(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseAnd(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftLeft(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftLeft(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftRight(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftRight(language_mode(), hints), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitShiftRightLogical(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftRightLogical(language_mode(), hints),
+ iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitInc(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* js_op =
+ javascript()->Add(language_mode(), BinaryOperationHints::Any());
+ Node* node = NewNode(js_op, environment()->LookupAccumulator(),
+ jsgraph()->OneConstant());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitDec(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* js_op =
+ javascript()->Subtract(language_mode(), BinaryOperationHints::Any());
+ Node* node = NewNode(js_op, environment()->LookupAccumulator(),
+ jsgraph()->OneConstant());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLogicalNot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ environment()->LookupAccumulator());
+ Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ environment()->BindAccumulator(node);
+}
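VisitLogicalNot avoids an explicit branch: it converts the accumulator with ToBoolean and feeds the result into a Select whose constant inputs are swapped. The same trick over plain doubles (a deliberately simplified ToBoolean that models numbers only; strings and objects are out of scope for this sketch):

#include <cassert>

// NaN and 0 are falsy; everything else is truthy in this number-only model.
bool ToBoolean(double x) { return x == x && x != 0.0; }

bool LogicalNot(double x) {
  bool value = ToBoolean(x);
  // Select(value, FalseConstant, TrueConstant): true selects false and vice
  // versa, so negation needs no control flow in the generated graph.
  return value ? false : true;
}

int main() {
  assert(LogicalNot(0.0));
  assert(!LogicalNot(3.14));
}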
+
+
+void BytecodeGraphBuilder::VisitTypeOf(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node =
+ NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::BuildDelete(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* key = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* node =
+ NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitDeletePropertyStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildDelete(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDeletePropertySloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildDelete(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDeleteLookupSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* name = environment()->LookupAccumulator();
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ Node* result = NewNode(op, environment()->Context(), name);
+ environment()->BindAccumulator(result, &states);
+}
+
+
+void BytecodeGraphBuilder::BuildCompareOp(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+ Node* node = NewNode(js_op, left, right);
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitTestEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->Equal(), iterator);
}
void BytecodeGraphBuilder::VisitTestNotEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->NotEqual(), iterator);
}
void BytecodeGraphBuilder::VisitTestEqualStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->StrictEqual(), iterator);
}
void BytecodeGraphBuilder::VisitTestNotEqualStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->StrictNotEqual(), iterator);
}
void BytecodeGraphBuilder::VisitTestLessThan(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->LessThan(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestGreaterThan(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->GreaterThan(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->LessThanOrEqual(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->GreaterThanOrEqual(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestIn(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->HasProperty(), iterator);
}
void BytecodeGraphBuilder::VisitTestInstanceOf(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->InstanceOf(), iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCastOperator(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* node = NewNode(js_op, environment()->LookupAccumulator());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitToName(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCastOperator(javascript()->ToName(), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitToObject(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCastOperator(javascript()->ToObject(), iterator);
}
-void BytecodeGraphBuilder::VisitToBoolean(
- const interpreter::BytecodeArrayIterator& ToBoolean) {
- UNIMPLEMENTED();
+void BytecodeGraphBuilder::VisitToNumber(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCastOperator(javascript()->ToNumber(), iterator);
}
void BytecodeGraphBuilder::VisitJump(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJump();
}
void BytecodeGraphBuilder::VisitJumpConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJump();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJump();
}
void BytecodeGraphBuilder::VisitJumpIfTrue(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::VisitJumpIfFalse(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNull(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNullConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNullConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefined(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
@@ -439,6 +1718,156 @@ void BytecodeGraphBuilder::VisitReturn(
Node* control =
NewNode(common()->Return(), environment()->LookupAccumulator());
UpdateControlDependencyToLeaveFunction(control);
+ set_environment(nullptr);
+}
+
+
+void BytecodeGraphBuilder::VisitForInPrepare(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* prepare = nullptr;
+ {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* receiver = environment()->LookupAccumulator();
+ prepare = NewNode(javascript()->ForInPrepare(), receiver);
+ environment()->RecordAfterState(prepare, &states);
+ }
+  // Project cache_type, cache_array, and cache_length into register
+  // operands 0, 1, and 2.
+ for (int i = 0; i < 3; i++) {
+ environment()->BindRegister(iterator.GetRegisterOperand(i),
+ NewNode(common()->Projection(i), prepare));
+ }
+}
+
+
+void BytecodeGraphBuilder::VisitForInDone(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* cache_length =
+ environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+ environment()->BindAccumulator(exit_cond, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitForInNext(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* receiver =
+ environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* cache_type =
+ environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ Node* cache_array =
+ environment()->LookupRegister(iterator.GetRegisterOperand(2));
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(3));
+ Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
+ cache_type, index);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitForInStep(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ index = NewNode(javascript()->ForInStep(), index);
+ environment()->BindAccumulator(index, &states);
+}
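Taken together, the four visitors above implement the interpreter's for-in protocol: ForInPrepare materializes the enumeration cache, then the loop repeatedly tests ForInDone, loads a key with ForInNext, and advances with ForInStep. The control flow maps onto an ordinary indexed loop, sketched here over a plain key vector (hypothetical data, no V8 types; cache_type, which ForInNext uses to revalidate the receiver's map, is elided):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // ForInPrepare: projection 1 is the cache array, projection 2 its length.
  std::vector<std::string> cache_array = {"a", "b", "c"};
  size_t cache_length = cache_array.size();

  std::vector<std::string> visited;
  for (size_t index = 0;
       index < cache_length;                  // !ForInDone(index, cache_length)
       ++index) {                             // index = ForInStep(index)
    visited.push_back(cache_array[index]);    // ForInNext(...)
  }
  assert(visited.size() == 3 && visited.front() == "a");
}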
+
+
+void BytecodeGraphBuilder::MergeEnvironmentsOfBackwardBranches(
+ int source_offset, int target_offset) {
+ DCHECK_GE(source_offset, target_offset);
+ const ZoneVector<int>* branch_sites =
+ branch_analysis()->BackwardBranchesTargetting(target_offset);
+ if (branch_sites->back() == source_offset) {
+ // The set of back branches is complete, merge them.
+ DCHECK_GE(branch_sites->at(0), target_offset);
+ Environment* merged = merge_environments_[branch_sites->at(0)];
+ for (size_t i = 1; i < branch_sites->size(); i++) {
+ DCHECK_GE(branch_sites->at(i), target_offset);
+ merged->Merge(merge_environments_[branch_sites->at(i)]);
+ }
+    // Now merge with the loop header environment created when the loop
+    // header was visited.

+ loop_header_environments_[target_offset]->Merge(merged);
+ }
+}
+
+
+void BytecodeGraphBuilder::MergeEnvironmentsOfForwardBranches(
+ int source_offset) {
+ if (branch_analysis()->forward_branches_target(source_offset)) {
+ // Merge environments of branches that reach this bytecode.
+ auto branch_sites =
+ branch_analysis()->ForwardBranchesTargetting(source_offset);
+ DCHECK_LT(branch_sites->at(0), source_offset);
+ Environment* merged = merge_environments_[branch_sites->at(0)];
+ for (size_t i = 1; i < branch_sites->size(); i++) {
+ DCHECK_LT(branch_sites->at(i), source_offset);
+ merged->Merge(merge_environments_[branch_sites->at(i)]);
+ }
+ if (environment()) {
+ merged->Merge(environment());
+ }
+ set_environment(merged);
+ }
+}
+
+
+void BytecodeGraphBuilder::BuildLoopHeaderForBackwardBranches(
+ int source_offset) {
+ if (branch_analysis()->backward_branches_target(source_offset)) {
+    // Add a loop header and store a copy so we can connect merged
+    // back-edge inputs to the loop header.
+ loop_header_environments_[source_offset] = environment()->CopyForLoop();
+ }
+}
+
+
+void BytecodeGraphBuilder::BuildJump(int source_offset, int target_offset) {
+ DCHECK_NULL(merge_environments_[source_offset]);
+ merge_environments_[source_offset] = environment();
+ if (source_offset >= target_offset) {
+ MergeEnvironmentsOfBackwardBranches(source_offset, target_offset);
+ }
+ set_environment(nullptr);
+}
+
+
+void BytecodeGraphBuilder::BuildJump() {
+ int source_offset = bytecode_iterator()->current_offset();
+ int target_offset = bytecode_iterator()->GetJumpTargetOffset();
+ BuildJump(source_offset, target_offset);
+}
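BuildJump parks the current environment under the jump's own bytecode offset; only once the last recorded back branch to a target has been seen does MergeEnvironmentsOfBackwardBranches fold all of the parked environments into the loop header. A plain-data sketch of that bookkeeping (Env is a hypothetical stand-in):

#include <cassert>
#include <map>

struct Env {};
std::map<int, Env*> merge_environments;

void BuildJump(Env*& current, int source_offset, int target_offset) {
  merge_environments[source_offset] = current;
  if (source_offset >= target_offset) {
    // Backward branch: MergeEnvironmentsOfBackwardBranches(source, target)
    // runs here and completes the loop-header merge if this was the last
    // recorded back edge.
  }
  current = nullptr;  // control never falls through an unconditional jump
}

int main() {
  Env env;
  Env* current = &env;
  BuildJump(current, /*source_offset=*/40, /*target_offset=*/10);
  assert(current == nullptr && merge_environments.count(40) == 1);
}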
+
+
+void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
+ int source_offset = bytecode_iterator()->current_offset();
+ NewBranch(condition);
+ Environment* if_false_environment = environment()->CopyForConditional();
+ NewIfTrue();
+ BuildJump(source_offset, bytecode_iterator()->GetJumpTargetOffset());
+ set_environment(if_false_environment);
+ NewIfFalse();
+}
+
+
+void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition =
+ NewNode(javascript()->StrictEqual(), accumulator, comperand);
+ BuildConditionalJump(condition);
+}
+
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comperand) {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* to_boolean =
+ NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+ Node* condition = NewNode(javascript()->StrictEqual(), to_boolean, comperand);
+ BuildConditionalJump(condition);
}
@@ -452,6 +1881,16 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
+void BytecodeGraphBuilder::PrepareEntryFrameState(Node* node) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(
+ node, 0, environment()->Checkpoint(BailoutId(0),
+ OutputFrameStateCombine::Ignore()));
+}
+
+
Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
Node** value_inputs, bool incomplete) {
DCHECK_EQ(op->ValueInputCount(), value_input_count);
@@ -464,7 +1903,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK_LT(op->ControlInputCount(), 2);
DCHECK_LT(op->EffectInputCount(), 2);
- Node* result = NULL;
+ Node* result = nullptr;
if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
@@ -514,6 +1953,25 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
}
+Node* BytecodeGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kTagged, count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+Node* BytecodeGraphBuilder::NewEffectPhi(int count, Node* input,
+ Node* control) {
+ const Operator* phi_op = common()->EffectPhi(count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
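Both phi constructors use the same input layout: count copies of the initial value followed by the controlling Merge or Loop node, with MemsetPointer doing the bulk fill. A standalone sketch with string labels instead of nodes:

#include <cstdio>
#include <vector>

std::vector<const char*> NewPhi(int count, const char* input,
                                const char* control) {
  // MemsetPointer(buffer, input, count): all value slots start identical.
  std::vector<const char*> buffer(count + 1, input);
  buffer[count] = control;  // trailing control input
  return buffer;
}

int main() {
  for (const char* s : NewPhi(2, "x", "loop_header"))
    std::printf("%s\n", s);  // x x loop_header
}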
+
+
Node* BytecodeGraphBuilder::MergeControl(Node* control, Node* other) {
int inputs = control->op()->ControlInputCount() + 1;
if (control->opcode() == IrOpcode::kLoop) {
@@ -536,6 +1994,41 @@ Node* BytecodeGraphBuilder::MergeControl(Node* control, Node* other) {
}
+Node* BytecodeGraphBuilder::MergeEffect(Node* value, Node* other,
+ Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(value, common()->EffectPhi(inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* BytecodeGraphBuilder::MergeValue(Node* value, Node* other,
+ Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(
+ value, common()->Phi(MachineRepresentation::kTagged, inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
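MergeEffect and MergeValue grow phis incrementally: if a phi governed by this control node already exists it simply gains an input, and a fresh phi is introduced only when the merged values actually differ. A plain-data model of the value case (Fake is a hypothetical stand-in for Node; the real code also widens the operator's input count via ChangeOp):

#include <cassert>
#include <string>
#include <vector>

struct Fake {
  std::string op;
  std::vector<Fake*> in;  // value inputs followed by one control input
  Fake* ctrl = nullptr;
};

Fake* MergeValue(Fake* value, Fake* other, Fake* control, int inputs) {
  if (value->op == "Phi" && value->ctrl == control) {
    // Phi already exists: splice the new value in before the control input.
    value->in.insert(value->in.end() - 1, other);
    return value;
  }
  if (value != other) {
    // No phi yet and the values differ: build one whose last value slot
    // carries the incoming value (ReplaceInput(inputs - 1, other)).
    Fake* phi = new Fake{"Phi", std::vector<Fake*>(inputs, value), control};
    phi->in.back() = other;
    phi->in.push_back(control);
    return phi;  // leaked deliberately; this is a sketch, not a zone
  }
  return value;  // both sides agree; no phi required
}

int main() {
  Fake merge{"Merge", {}, nullptr};
  Fake a{"a"}, b{"b"};
  Fake* phi = MergeValue(&a, &b, &merge, 2);
  assert(phi->op == "Phi" && phi->in.size() == 3);  // a, b, then the merge
}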
+
+
void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
if (environment()->IsMarkedAsUnreachable()) return;
environment()->MarkAsUnreachable();
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.h b/chromium/v8/src/compiler/bytecode-graph-builder.h
index 4e479ba3e64..94a278c3cf4 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.h
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
#include "src/compiler.h"
+#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/js-graph.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
@@ -28,21 +29,43 @@ class BytecodeGraphBuilder {
private:
class Environment;
+ class FrameStateBeforeAndAfter;
void CreateGraphBody(bool stack_check);
void VisitBytecodes();
Node* LoadAccumulator(Node* value);
+ // Get or create the node that represents the outer function closure.
+ Node* GetFunctionClosure();
+
+ // Get or create the node that represents the outer function context.
Node* GetFunctionContext();
+ // Get or create the node that represents the incoming new target value.
+ Node* GetNewTarget();
+
+ // Builder for accessing a (potentially immutable) object field.
+ Node* BuildLoadObjectField(Node* object, int offset);
+ Node* BuildLoadImmutableObjectField(Node* object, int offset);
+
+ // Builder for accessing type feedback vector.
+ Node* BuildLoadFeedbackVector();
+
+  // Builder for loading a native context field.
+ Node* BuildLoadNativeContextField(int index);
+
+  // Helper function for creating a pair containing the type feedback vector
+  // and a feedback slot.
+ VectorSlotPair CreateVectorSlotPair(int slot_id);
+
void set_environment(Environment* env) { environment_ = env; }
const Environment* environment() const { return environment_; }
Environment* environment() { return environment_; }
// Node creation helpers
Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
}
Node* NewNode(const Operator* op, Node* n1) {
@@ -55,17 +78,98 @@ class BytecodeGraphBuilder {
return MakeNode(op, arraysize(buffer), buffer, false);
}
- Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete);
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
+ }
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* effect, Node* other_effect, Node* control);
+ Node* MergeValue(Node* value, Node* other_value, Node* control);
- Node** EnsureInputBufferSize(int size);
+ // The main node creation chokepoint. Adds context, frame state, effect,
+ // and control dependencies depending on the operator.
+ Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete);
+  // Helper to indicate that a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
+ Node** EnsureInputBufferSize(int size);
+
+ Node* ProcessCallArguments(const Operator* call_op, Node* callee,
+ interpreter::Register receiver, size_t arity);
+ Node* ProcessCallNewArguments(const Operator* call_new_op,
+ interpreter::Register callee,
+ interpreter::Register first_arg, size_t arity);
+ Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
+ interpreter::Register first_arg,
+ size_t arity);
+
+ void BuildCreateLiteral(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateArguments(CreateArgumentsParameters::Type type,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildLoadGlobal(const interpreter::BytecodeArrayIterator& iterator,
+ TypeofMode typeof_mode);
+ void BuildStoreGlobal(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildNamedLoad(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildKeyedLoad(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildNamedStore(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildKeyedStore(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildLdaLookupSlot(TypeofMode typeof_mode,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildStaLookupSlot(LanguageMode language_mode,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCall(const interpreter::BytecodeArrayIterator& iterator);
void BuildBinaryOp(const Operator* op,
const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCompareOp(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildDelete(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCastOperator(const Operator* js_op,
+ const interpreter::BytecodeArrayIterator& iterator);
+
+ // Control flow plumbing.
+ void BuildJump(int source_offset, int target_offset);
+ void BuildJump();
+ void BuildConditionalJump(Node* condition);
+ void BuildJumpIfEqual(Node* comperand);
+ void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+
+ // Constructing merge and loop headers.
+ void MergeEnvironmentsOfBackwardBranches(int source_offset,
+ int target_offset);
+ void MergeEnvironmentsOfForwardBranches(int source_offset);
+ void BuildLoopHeaderForBackwardBranches(int source_offset);
+
+ // Attaches a frame state to |node| for the entry to the function.
+ void PrepareEntryFrameState(Node* node);
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
@@ -81,10 +185,30 @@ class BytecodeGraphBuilder {
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
+ const FrameStateFunctionInfo* frame_state_function_info() const {
+ return frame_state_function_info_;
+ }
LanguageMode language_mode() const {
- // TODO(oth): need to propagate language mode through
- return LanguageMode::SLOPPY;
+ // TODO(mythria): Don't rely on parse information to get language mode.
+ return info()->language_mode();
+ }
+
+ const interpreter::BytecodeArrayIterator* bytecode_iterator() const {
+ return bytecode_iterator_;
+ }
+
+ void set_bytecode_iterator(
+ const interpreter::BytecodeArrayIterator* bytecode_iterator) {
+ bytecode_iterator_ = bytecode_iterator;
+ }
+
+ const BytecodeBranchAnalysis* branch_analysis() const {
+ return branch_analysis_;
+ }
+
+ void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
+ branch_analysis_ = branch_analysis;
}
#define DECLARE_VISIT_BYTECODE(name, ...) \
@@ -96,14 +220,31 @@ class BytecodeGraphBuilder {
CompilationInfo* info_;
JSGraph* jsgraph_;
Handle<BytecodeArray> bytecode_array_;
+ const FrameStateFunctionInfo* frame_state_function_info_;
+ const interpreter::BytecodeArrayIterator* bytecode_iterator_;
+ const BytecodeBranchAnalysis* branch_analysis_;
Environment* environment_;
+
+ // Merge environments are snapshots of the environment at a particular
+ // bytecode offset to be merged into a later environment.
+ ZoneMap<int, Environment*> merge_environments_;
+
+  // Loop header environments are environments created for bytecodes that
+  // are known to be the target of back branches, i.e. loop headers.
+ ZoneMap<int, Environment*> loop_header_environments_;
+
// Temporary storage for building node input lists.
int input_buffer_size_;
Node** input_buffer_;
// Nodes representing values in the activation record.
SetOncePointer<Node> function_context_;
+ SetOncePointer<Node> function_closure_;
+ SetOncePointer<Node> new_target_;
+
+  // Optimization to cache the loaded feedback vector.
+ SetOncePointer<Node> feedback_vector_;
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
@@ -120,11 +261,18 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
int parameter_count() const { return parameter_count_; }
int register_count() const { return register_count_; }
- void BindRegister(interpreter::Register the_register, Node* node);
+ Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
- void BindAccumulator(Node* node);
- Node* LookupAccumulator() const;
+ void ExchangeRegisters(interpreter::Register reg0,
+ interpreter::Register reg1);
+
+ void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegister(interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
bool IsMarkedAsUnreachable() const;
void MarkAsUnreachable();
@@ -135,6 +283,14 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
effect_dependency_ = dependency;
}
+ // Preserve a checkpoint of the environment for the IR graph. Any
+ // further mutation of the environment will not affect checkpoints.
+ Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+
+ // Returns true if the state values are up to date with the current
+ // environment.
+ bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
+
// Control dependency tracked by this environment.
Node* GetControlDependency() const { return control_dependency_; }
void UpdateControlDependency(Node* dependency) {
@@ -142,8 +298,20 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
}
Node* Context() const { return context_; }
+ void SetContext(Node* new_context) { context_ = new_context; }
+
+ Environment* CopyForConditional() const;
+ Environment* CopyForLoop();
+ void Merge(Environment* other);
private:
+ explicit Environment(const Environment* copy);
+ void PrepareForLoop();
+ bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
+ int output_poke_start, int output_poke_end);
+ bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
+ void UpdateStateValues(Node** state_values, int offset, int count);
+
int RegisterToValuesIndex(interpreter::Register the_register) const;
Zone* zone() const { return builder_->local_zone(); }
@@ -152,21 +320,23 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
BytecodeGraphBuilder* builder() const { return builder_; }
const NodeVector* values() const { return &values_; }
NodeVector* values() { return &values_; }
- Node* accumulator() { return accumulator_; }
int register_base() const { return register_base_; }
+ int accumulator_base() const { return accumulator_base_; }
BytecodeGraphBuilder* builder_;
int register_count_;
int parameter_count_;
- Node* accumulator_;
Node* context_;
Node* control_dependency_;
Node* effect_dependency_;
NodeVector values_;
+ Node* parameters_state_values_;
+ Node* registers_state_values_;
+ Node* accumulator_state_values_;
int register_base_;
+ int accumulator_base_;
};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/c-linkage.cc b/chromium/v8/src/compiler/c-linkage.cc
index 76ddd2ed7d1..44e0bf1672c 100644
--- a/chromium/v8/src/compiler/c-linkage.cc
+++ b/chromium/v8/src/compiler/c-linkage.cc
@@ -15,7 +15,7 @@ namespace compiler {
namespace {
LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(Register::ToAllocationIndex(reg));
+ return LinkageLocation::ForRegister(reg.code());
}
@@ -208,7 +208,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
#endif
// The target for C calls is always an address (i.e. machine pointer).
- MachineType target_type = kMachPtr;
+ MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallAddress, // kind
@@ -223,6 +223,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
CallDescriptor::kNoFlags, // flags
"c-call");
}
-}
-}
-}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/change-lowering.cc b/chromium/v8/src/compiler/change-lowering.cc
index 4421c4f3e30..f791db1fdc8 100644
--- a/chromium/v8/src/compiler/change-lowering.cc
+++ b/chromium/v8/src/compiler/change-lowering.cc
@@ -4,12 +4,14 @@
#include "src/compiler/change-lowering.h"
+#include "src/address-map.h"
#include "src/code-factory.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -37,6 +39,16 @@ Reduction ChangeLowering::Reduce(Node* node) {
return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
case IrOpcode::kChangeUint32ToTagged:
return ChangeUint32ToTagged(node->InputAt(0), control);
+ case IrOpcode::kLoadField:
+ return LoadField(node);
+ case IrOpcode::kStoreField:
+ return StoreField(node);
+ case IrOpcode::kLoadElement:
+ return LoadElement(node);
+ case IrOpcode::kStoreElement:
+ return StoreElement(node);
+ case IrOpcode::kAllocate:
+ return Allocate(node);
default:
return NoChange();
}
@@ -66,7 +78,7 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
Callable callable = CodeFactory::AllocateHeapNumber(isolate());
Node* target = jsgraph()->HeapConstant(callable.code());
Node* context = jsgraph()->NoContextConstant();
- Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
+ Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
if (!allocate_heap_number_operator_.is_set()) {
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -76,9 +88,10 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
target, context, effect, control);
Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+ machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier)),
heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
- return graph()->NewNode(common()->Finish(1), heap_number, store);
+ return graph()->NewNode(common()->FinishRegion(), heap_number, store);
}
@@ -123,7 +136,7 @@ Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
- return graph()->NewNode(machine()->Load(kMachFloat64), value,
+ return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
HeapNumberValueIndexConstant(), graph()->start(),
control);
}
@@ -138,9 +151,9 @@ Node* ChangeLowering::TestNotSmi(Node* value) {
Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
- return Replace(graph()->NewNode(common()->Select(kMachAnyTagged), value,
- jsgraph()->TrueConstant(),
- jsgraph()->FalseConstant()));
+ return Replace(
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
}
@@ -151,7 +164,78 @@ Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
- return Replace(AllocateHeapNumberWithValue(value, control));
+ Type* const value_type = NodeProperties::GetType(value);
+ Node* const value32 = graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
+ // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
+ // support in the generic JavaScript pipeline, because LoadBuffer is lying
+ // about its result.
+ // if (value_type->Is(Type::Signed32())) {
+ // return ChangeInt32ToTagged(value32, control);
+ // }
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
+
+ Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
+ Node* vbox;
+
+ // We only need to check for -0 if the {value} can potentially contain -0.
+ if (value_type->Maybe(Type::MinusZero())) {
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+ Node* branch_negative = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+ Node* if_notnegative =
+ graph()->NewNode(common()->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+ }
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a smi; on 32-bit
+  // machines we need to deal with potential overflow and fall back to boxing.
+ if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
+ vsmi = ChangeInt32ToSmi(value32);
+ } else {
+ Node* smi_tag =
+ graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+
+ Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+ Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+ }
+
+ // Allocate the box for the {value}.
+ vbox = AllocateHeapNumberWithValue(value, if_box);
+
+ control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, vbox, control);
+ return Replace(value);
}
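The fast path above admits a double into a Smi only if it round-trips through a truncating int32 conversion and is not the IEEE -0, which differs from +0 solely in the sign bit of the high word. The same checks in portable C++, sketching the 64-bit case where any int32 fits in a Smi (the 32-bit path additionally tags via Int32AddWithOverflow, i.e. value + value):

#include <cassert>
#include <cstdint>
#include <cstring>

// Well-defined only for doubles whose truncation fits in int32;
// TruncateFloat64ToInt32 pins down the edge cases that plain static_cast
// leaves undefined.
bool CanBeSmi(double value) {
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) return false;  // check_same
  if (value32 == 0) {
    // -0 and +0 compare equal; they differ only in the top bit of the high
    // word (the Float64ExtractHighWord32 < 0 test above).
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    if (static_cast<int32_t>(bits >> 32) < 0) return false;  // box -0
  }
  return true;
}

int main() {
  assert(CanBeSmi(42.0));
  assert(CanBeSmi(0.0));
  assert(!CanBeSmi(0.5));   // fractional part lost by truncation
  assert(!CanBeSmi(-0.0));  // must be boxed as a heap number
}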
@@ -175,8 +259,8 @@ Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
Node* vfalse = graph()->NewNode(common()->Projection(0), add);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
return Replace(phi);
}
@@ -188,7 +272,6 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
return Replace(ChangeSmiToInt32(value));
}
- const MachineType type = (signedness == kSigned) ? kMachInt32 : kMachUint32;
const Operator* op = (signedness == kSigned)
? machine()->ChangeFloat64ToInt32()
: machine()->ChangeFloat64ToUint32();
@@ -208,7 +291,8 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
Node* vfalse = ChangeSmiToInt32(value);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(type, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, merge);
return Replace(phi);
}
@@ -247,7 +331,7 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
const Operator* merge_op = common()->Merge(2);
const Operator* ephi_op = common()->EffectPhi(2);
- const Operator* phi_op = common()->Phi(kMachFloat64, 2);
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);
Node* check1 = TestNotSmi(object);
Node* branch1 =
@@ -304,8 +388,8 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
Node* vfalse = ChangeSmiToFloat64(value);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachFloat64, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
return Replace(phi);
}
@@ -329,13 +413,176 @@ Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
return Replace(phi);
}
+namespace {
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineRepresentation representation,
+ Type* field_type, Type* input_type) {
+ if (field_type->Is(Type::TaggedSigned()) ||
+ input_type->Is(Type::TaggedSigned())) {
+ // Write barriers are only for writes of heap objects.
+ return kNoWriteBarrier;
+ }
+ if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // Write barriers are not necessary when storing true, false, null or
+ // undefined, because these special oddballs are always in the root set.
+ return kNoWriteBarrier;
+ }
+ if (base_is_tagged == kTaggedBase &&
+ representation == MachineRepresentation::kTagged) {
+ if (input_type->IsConstant() &&
+ input_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<HeapObject> input =
+ Handle<HeapObject>::cast(input_type->AsConstant()->Value());
+ if (input->IsMap()) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ Isolate* const isolate = input->GetIsolate();
+ RootIndexMap root_index_map(isolate);
+ int root_index = root_index_map.Lookup(*input);
+ if (root_index != RootIndexMap::kInvalidRootIndex &&
+ isolate->heap()->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
+ }
+ if (field_type->Is(Type::TaggedPointer()) ||
+ input_type->Is(Type::TaggedPointer())) {
+ // Write barriers for heap objects don't need a Smi check.
+ return kPointerWriteBarrier;
+ }
+ // Write barriers are only for writes into heap objects (i.e. tagged base).
+ return kFullWriteBarrier;
+ }
+ return kNoWriteBarrier;
+}
+
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineRepresentation representation,
+ int field_offset, Type* field_type,
+ Type* input_type) {
+ if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
+ input_type);
+}
+
+} // namespace
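[Editorial note: the kinds chosen above drop progressively more of a generational write barrier: a Smi store needs none, a known heap pointer can skip the Smi check, a map store can use a specialized barrier for the fixed map slot, and immortal immovable roots never need recording. A schematic sketch of the full barrier these checks whittle down, with hypothetical helpers InNewSpace and RecordSlot standing in for the real heap internals:]

    // Schematic only: shows which test each cheaper WriteBarrierKind elides.
    void SketchFullWriteBarrier(HeapObject* host, Object* value, Object** slot) {
      if (value->IsSmi()) return;      // kPointerWriteBarrier proves this statically.
      if (!InNewSpace(value)) return;  // Immortal immovable roots never pass this.
      RecordSlot(host, slot);          // Remember the old-to-new reference.
    }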
+
+
+Reduction ChangeLowering::LoadField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::StoreField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ Type* type = NodeProperties::GetType(node->InputAt(1));
+ WriteBarrierKind kind = ComputeWriteBarrierKind(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.offset, access.type, type);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node,
+ machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), kind)));
+ return Changed(node);
+}
+
+
+Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
+ Node* const key) {
+ Node* index = key;
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = graph()->NewNode(machine()->Word32Shl(), index,
+ jsgraph()->Int32Constant(element_size_shift));
+ }
+ const int fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = graph()->NewNode(machine()->Int32Add(), index,
+ jsgraph()->Int32Constant(fixed_offset));
+ }
+ if (machine()->Is64()) {
+ // TODO(turbofan): This is probably only correct for typed arrays, and only
+ // if the typed arrays are at most 2GiB in size, which happens to match
+ // exactly our current situation.
+ index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+ }
+ return index;
+}
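[Editorial note: concretely, for a FixedArray of tagged elements on a 32-bit target the shift is 2 and the fixed offset is the header size minus the tag; a worked sketch, with 8 and 1 assumed for kHeaderSize and kHeapObjectTag:]

    // Maps an element key to the untagged byte offset added to the tagged base.
    int32_t ComputeIndexSketch(int32_t key) {
      const int element_size_shift = 2;  // log2 of a 4-byte tagged slot.
      const int fixed_offset = 8 - 1;    // header_size - tag.
      return (key << element_size_shift) + fixed_offset;  // key * 4 + 7.
    }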
+
+
+Reduction ChangeLowering::LoadElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::StoreElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ Type* type = NodeProperties::GetType(node->InputAt(2));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(),
+ ComputeWriteBarrierKind(access.base_is_tagged,
+ access.machine_type.representation(),
+ access.type, type))));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::Allocate(Node* node) {
+ PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+ if (pretenure == NOT_TENURED) {
+ Callable callable = CodeFactory::AllocateInNewSpace(isolate());
+ Node* target = jsgraph()->HeapConstant(callable.code());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ const Operator* op = common()->Call(descriptor);
+ node->InsertInput(graph()->zone(), 0, target);
+ node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, op);
+ } else {
+ DCHECK_EQ(TENURED, pretenure);
+ AllocationSpace space = OLD_SPACE;
+ Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
+ Operator::Properties props = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
+ ExternalReference ref(f, jsgraph()->isolate());
+ int32_t flags = AllocateTargetSpace::encode(space);
+ node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
+ node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ }
+ return Changed(node);
+}
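[Editorial note: tracing the TENURED path above, the successive InsertInput calls leave the node's inputs, in order, as: the CEntryStub code object, the original size input, the Smi-encoded target-space flags, the external reference to Runtime::kAllocateInTargetSpace, the argument count 2, and the no-context sentinel, which is the operand layout a runtime CallDescriptor with two arguments expects.]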
+
+
Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
diff --git a/chromium/v8/src/compiler/change-lowering.h b/chromium/v8/src/compiler/change-lowering.h
index ead41b1a004..6d607768d9c 100644
--- a/chromium/v8/src/compiler/change-lowering.h
+++ b/chromium/v8/src/compiler/change-lowering.h
@@ -13,6 +13,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+struct ElementAccess;
class JSGraph;
class Linkage;
class MachineOperatorBuilder;
@@ -49,6 +50,13 @@ class ChangeLowering final : public Reducer {
Signedness signedness);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
+ Reduction LoadField(Node* node);
+ Reduction StoreField(Node* node);
+ Reduction LoadElement(Node* node);
+ Reduction StoreElement(Node* node);
+ Reduction Allocate(Node* node);
+
+ Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/chromium/v8/src/compiler/coalesced-live-ranges.cc b/chromium/v8/src/compiler/coalesced-live-ranges.cc
index 44dd336c83f..4ac3e2118de 100644
--- a/chromium/v8/src/compiler/coalesced-live-ranges.cc
+++ b/chromium/v8/src/compiler/coalesced-live-ranges.cc
@@ -27,7 +27,7 @@ LiveRange* LiveRangeConflictIterator::Current() const {
void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
- DCHECK(query_ != nullptr);
+ DCHECK_NOT_NULL(query_);
auto end = intervals_->end();
LifetimePosition q_start = query_->start();
LifetimePosition q_end = query_->end();
diff --git a/chromium/v8/src/compiler/coalesced-live-ranges.h b/chromium/v8/src/compiler/coalesced-live-ranges.h
index e617c0a2519..54bbce20558 100644
--- a/chromium/v8/src/compiler/coalesced-live-ranges.h
+++ b/chromium/v8/src/compiler/coalesced-live-ranges.h
@@ -89,7 +89,7 @@ class LiveRangeConflictIterator {
}
bool QueryIntersectsAllocatedInterval() const {
- DCHECK(query_ != nullptr);
+ DCHECK_NOT_NULL(query_);
return pos_ != intervals_->end() &&
Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
}
diff --git a/chromium/v8/src/compiler/code-generator-impl.h b/chromium/v8/src/compiler/code-generator-impl.h
index 83cbd22604a..72959483991 100644
--- a/chromium/v8/src/compiler/code-generator-impl.h
+++ b/chromium/v8/src/compiler/code-generator-impl.h
@@ -43,6 +43,10 @@ class InstructionOperandConverter {
return ToConstant(instr_->InputAt(index)).ToInt32();
}
+ int64_t InputInt64(size_t index) {
+ return ToConstant(instr_->InputAt(index)).ToInt64();
+ }
+
int8_t InputInt8(size_t index) {
return static_cast<int8_t>(InputInt32(index));
}
@@ -96,12 +100,11 @@ class InstructionOperandConverter {
}
Register ToRegister(InstructionOperand* op) {
- return Register::FromAllocationIndex(RegisterOperand::cast(op)->index());
+ return LocationOperand::cast(op)->GetRegister();
}
DoubleRegister ToDoubleRegister(InstructionOperand* op) {
- return DoubleRegister::FromAllocationIndex(
- DoubleRegisterOperand::cast(op)->index());
+ return LocationOperand::cast(op)->GetDoubleRegister();
}
Constant ToConstant(InstructionOperand* op) {
@@ -125,6 +128,9 @@ class InstructionOperandConverter {
}
Frame* frame() const { return gen_->frame(); }
+ FrameAccessState* frame_access_state() const {
+ return gen_->frame_access_state();
+ }
Isolate* isolate() const { return gen_->isolate(); }
Linkage* linkage() const { return gen_->linkage(); }
@@ -144,12 +150,15 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
+ Frame* frame() const { return frame_; }
+ Isolate* isolate() const { return masm()->isolate(); }
MacroAssembler* masm() const { return masm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
+ Frame* const frame_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
diff --git a/chromium/v8/src/compiler/code-generator.cc b/chromium/v8/src/compiler/code-generator.cc
index 91602a02a3c..313567ed87f 100644
--- a/chromium/v8/src/compiler/code-generator.cc
+++ b/chromium/v8/src/compiler/code-generator.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/address-map.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/frames-inl.h"
-#include "src/snapshot/serialize.h" // TODO(turbofan): RootIndexMap
namespace v8 {
namespace internal {
@@ -34,14 +34,14 @@ class CodeGenerator::JumpTable final : public ZoneObject {
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
- : frame_(frame),
+ : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
linkage_(linkage),
code_(code),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
current_source_position_(SourcePosition::Unknown()),
- masm_(info->isolate(), NULL, 0),
+ masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kYes),
resolver_(this),
safepoints_(code->zone()),
handlers_(code->zone()),
@@ -52,11 +52,13 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
- osr_pc_offset_(-1),
- needs_frame_(frame->GetSpillSlotCount() > 0 || code->ContainsCall()) {
+ osr_pc_offset_(-1) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
+ if (code->ContainsCall()) {
+ frame->MarkNeedsFrame();
+ }
}
@@ -83,13 +85,21 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
- for (auto shared_info : info->inlined_functions()) {
- if (!shared_info.is_identical_to(info->shared_info())) {
- DefineDeoptimizationLiteral(shared_info);
+ for (auto& inlined : info->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.shared_info);
}
}
inlined_function_count_ = deoptimization_literals_.size();
+ // Define deoptimization literals for all unoptimized code objects of inlined
+ // functions. This ensures unoptimized code is kept alive by optimized code.
+ for (auto& inlined : info->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+ }
+ }
+
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (auto const block : code()->instruction_blocks()) {
@@ -193,7 +203,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
- if (!info->ShouldEnsureSpaceForLazyDeopt()) {
+ if (info->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
@@ -206,8 +216,10 @@ Handle<Code> CodeGenerator::GenerateCode() {
bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
- return code()->InstructionBlockAt(current_block_)->ao_number().IsNext(
- code()->InstructionBlockAt(block)->ao_number());
+ return code()
+ ->InstructionBlockAt(current_block_)
+ ->ao_number()
+ .IsNext(code()->InstructionBlockAt(block)->ao_number());
}
@@ -220,15 +232,14 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (auto& operand : references->reference_operands()) {
if (operand.IsStackSlot()) {
- int index = StackSlotOperand::cast(operand).index();
+ int index = LocationOperand::cast(operand).index();
DCHECK(index >= 0);
// Safepoint table indices are 0-based from the beginning of the spill
// slot area; adjust appropriately.
index -= stackSlotToSpillSlotDelta;
safepoint.DefinePointerSlot(index, zone());
} else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
- Register reg =
- Register::FromAllocationIndex(RegisterOperand::cast(operand).index());
+ Register reg = LocationOperand::cast(operand).GetRegister();
safepoint.DefinePointerRegister(reg, zone());
}
}
@@ -480,62 +491,84 @@ FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
}
-namespace {
-
-struct OperandAndType {
- InstructionOperand* const operand;
- MachineType const type;
-};
+void CodeGenerator::TranslateStateValueDescriptor(
+ StateValueDescriptor* desc, Translation* translation,
+ InstructionOperandIterator* iter) {
+ if (desc->IsNested()) {
+ translation->BeginCapturedObject(static_cast<int>(desc->size()));
+ for (size_t index = 0; index < desc->fields().size(); index++) {
+ TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
+ }
+ } else if (desc->IsDuplicate()) {
+ translation->DuplicateObject(static_cast<int>(desc->id()));
+ } else {
+ DCHECK(desc->IsPlain());
+ AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
+ desc->type());
+ }
+}
-OperandAndType TypedOperandForFrameState(FrameStateDescriptor* descriptor,
- Instruction* instr,
- size_t frame_state_offset,
- size_t index,
- OutputFrameStateCombine combine) {
- DCHECK(index < descriptor->GetSize(combine));
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput: {
- DCHECK(combine.GetPushCount() <= instr->OutputCount());
- size_t size_without_output =
- descriptor->GetSize(OutputFrameStateCombine::Ignore());
- // If the index is past the existing stack items, return the output.
- if (index >= size_without_output) {
- return {instr->OutputAt(index - size_without_output), kMachAnyTagged};
+void CodeGenerator::TranslateFrameStateDescriptorOperands(
+ FrameStateDescriptor* desc, InstructionOperandIterator* iter,
+ OutputFrameStateCombine combine, Translation* translation) {
+ for (size_t index = 0; index < desc->GetSize(combine); index++) {
+ switch (combine.kind()) {
+ case OutputFrameStateCombine::kPushOutput: {
+ DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+ size_t size_without_output =
+ desc->GetSize(OutputFrameStateCombine::Ignore());
+ // Check if the index is past the existing stack items in values_.
+ if (index >= size_without_output) {
+ // Materialize the result of the call instruction in this slot.
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - size_without_output),
+ MachineType::AnyTagged());
+ continue;
+ }
+ break;
}
- break;
+ case OutputFrameStateCombine::kPokeAt:
+ // The result of the call should be placed at position
+ // [index_from_top] in the stack (overwriting whatever was
+ // previously there).
+ size_t index_from_top =
+ desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ if (index >= index_from_top &&
+ index < index_from_top + iter->instruction()->OutputCount()) {
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - index_from_top),
+ MachineType::AnyTagged());
+ iter->Advance(); // We do not use this input, but we need to
+ // advance, as the input got replaced.
+ continue;
+ }
+ break;
}
- case OutputFrameStateCombine::kPokeAt:
- size_t index_from_top =
- descriptor->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
- if (index >= index_from_top &&
- index < index_from_top + instr->OutputCount()) {
- return {instr->OutputAt(index - index_from_top), kMachAnyTagged};
- }
- break;
+ StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
+ TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
+ iter);
}
- return {instr->InputAt(frame_state_offset + index),
- descriptor->GetType(index)};
}
-} // namespace
-
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Instruction* instr,
- Translation* translation, size_t frame_state_offset,
- OutputFrameStateCombine state_combine) {
+ FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
+ Translation* translation, OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
if (descriptor->outer_state() != nullptr) {
- BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
- translation, frame_state_offset,
+ BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
+ translation,
OutputFrameStateCombine::Ignore());
}
- frame_state_offset += descriptor->outer_state()->GetTotalSize();
Handle<SharedFunctionInfo> shared_info;
if (!descriptor->shared_info().ToHandle(&shared_info)) {
- if (!info()->has_shared_info()) return; // Stub with no SharedFunctionInfo.
+ if (!info()->has_shared_info()) {
+ return; // Stub with no SharedFunctionInfo.
+ }
shared_info = info()->shared_info();
}
int shared_info_id = DefineDeoptimizationLiteral(shared_info);
@@ -547,18 +580,25 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
static_cast<unsigned int>(descriptor->GetSize(state_combine) -
(1 + descriptor->parameters_count())));
break;
+ case FrameStateType::kInterpretedFunction:
+ translation->BeginInterpretedFrame(
+ descriptor->bailout_id(), shared_info_id,
+ static_cast<unsigned int>(descriptor->locals_count()));
+ break;
case FrameStateType::kArgumentsAdaptor:
translation->BeginArgumentsAdaptorFrame(
shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
+ case FrameStateType::kConstructStub:
+ translation->BeginConstructStubFrame(
+ shared_info_id,
+ static_cast<unsigned int>(descriptor->parameters_count()));
+ break;
}
- for (size_t i = 0; i < descriptor->GetSize(state_combine); i++) {
- OperandAndType op = TypedOperandForFrameState(
- descriptor, instr, frame_state_offset, i, state_combine);
- AddTranslationForOperand(translation, instr, op.operand, op.type);
- }
+ TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
+ translation);
}
@@ -572,8 +612,9 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
Translation translation(
&translations_, static_cast<int>(descriptor->GetFrameCount()),
static_cast<int>(descriptor->GetJSFrameCount()), zone());
- BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
- frame_state_offset, state_combine);
+ InstructionOperandIterator iter(instr, frame_state_offset);
+ BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
+ state_combine);
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
@@ -589,38 +630,39 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
InstructionOperand* op,
MachineType type) {
if (op->IsStackSlot()) {
- if (type == kMachBool || type == kRepBit) {
- translation->StoreBoolStackSlot(StackSlotOperand::cast(op)->index());
- } else if (type == kMachInt32 || type == kMachInt8 || type == kMachInt16) {
- translation->StoreInt32StackSlot(StackSlotOperand::cast(op)->index());
- } else if (type == kMachUint32 || type == kMachUint16 ||
- type == kMachUint8) {
- translation->StoreUint32StackSlot(StackSlotOperand::cast(op)->index());
- } else if ((type & kRepMask) == kRepTagged) {
- translation->StoreStackSlot(StackSlotOperand::cast(op)->index());
+ if (type.representation() == MachineRepresentation::kBit) {
+ translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
+ } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
+ type == MachineType::Int32()) {
+ translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
+ } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
+ type == MachineType::Uint32()) {
+ translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
+ } else if (type.representation() == MachineRepresentation::kTagged) {
+ translation->StoreStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK(false);
}
} else if (op->IsDoubleStackSlot()) {
- DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
- translation->StoreDoubleStackSlot(
- DoubleStackSlotOperand::cast(op)->index());
+ DCHECK(IsFloatingPoint(type.representation()));
+ translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
- if (type == kMachBool || type == kRepBit) {
+ if (type.representation() == MachineRepresentation::kBit) {
translation->StoreBoolRegister(converter.ToRegister(op));
- } else if (type == kMachInt32 || type == kMachInt8 || type == kMachInt16) {
+ } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
+ type == MachineType::Int32()) {
translation->StoreInt32Register(converter.ToRegister(op));
- } else if (type == kMachUint32 || type == kMachUint16 ||
- type == kMachUint8) {
+ } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
+ type == MachineType::Uint32()) {
translation->StoreUint32Register(converter.ToRegister(op));
- } else if ((type & kRepMask) == kRepTagged) {
+ } else if (type.representation() == MachineRepresentation::kTagged) {
translation->StoreRegister(converter.ToRegister(op));
} else {
CHECK(false);
}
} else if (op->IsDoubleRegister()) {
- DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
+ DCHECK(IsFloatingPoint(type.representation()));
InstructionOperandConverter converter(this, instr);
translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
} else if (op->IsImmediate()) {
@@ -629,16 +671,23 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- DCHECK(type == kMachInt32 || type == kMachUint32 || type == kRepBit);
+ DCHECK(type == MachineType::Int32() || type == MachineType::Uint32() ||
+ type.representation() == MachineRepresentation::kBit);
constant_object =
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
+ case Constant::kFloat32:
+ DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+ type.representation() == MachineRepresentation::kTagged);
+ constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
+ break;
case Constant::kFloat64:
- DCHECK((type & (kRepFloat64 | kRepTagged)) != 0);
+ DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+ type.representation() == MachineRepresentation::kTagged);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
- DCHECK((type & kRepMask) == kRepTagged);
+ DCHECK(type.representation() == MachineRepresentation::kTagged);
constant_object = constant.ToHeapObject();
break;
default:
@@ -661,8 +710,22 @@ void CodeGenerator::MarkLazyDeoptSite() {
}
+int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int spill_slots = frame()->GetSpillSlotCount();
+ bool has_frame = descriptor->IsJSFunctionCall() || spill_slots > 0;
+ // Leave the PC on the stack on platforms whose ABI keeps it there.
+ int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
+ int sp_slot_delta =
+ has_frame ? (frame()->GetTotalFrameSlotCount() - pc_slots) : 0;
+ // Discard only slots that won't be used by new parameters.
+ sp_slot_delta += stack_param_delta;
+ return sp_slot_delta;
+}
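[Editorial note: as a worked example, assume an x64 caller (the return address lives on the stack, so pc_slots is 1) with a 6-slot frame tail-calling a target that takes two fewer stack parameters (stack_param_delta = -2): sp_slot_delta = (6 - 1) + (-2) = 3, so three pointer-sized slots are discarded before the jump.]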
+
+
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
- : masm_(gen->masm()), next_(gen->ools_) {
+ : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
}
diff --git a/chromium/v8/src/compiler/code-generator.h b/chromium/v8/src/compiler/code-generator.h
index d1545d10b9c..70bf81f5afe 100644
--- a/chromium/v8/src/compiler/code-generator.h
+++ b/chromium/v8/src/compiler/code-generator.h
@@ -16,6 +16,7 @@ namespace internal {
namespace compiler {
// Forward declarations.
+class FrameAccessState;
class Linkage;
class OutOfLineCode;
@@ -27,6 +28,20 @@ struct BranchInfo {
};
+class InstructionOperandIterator {
+ public:
+ InstructionOperandIterator(Instruction* instr, size_t pos)
+ : instr_(instr), pos_(pos) {}
+
+ Instruction* instruction() const { return instr_; }
+ InstructionOperand* Advance() { return instr_->InputAt(pos_++); }
+
+ private:
+ Instruction* instr_;
+ size_t pos_;
+};
+
+
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
public:
@@ -37,7 +52,8 @@ class CodeGenerator final : public GapResolver::Assembler {
Handle<Code> GenerateCode();
InstructionSequence* code() const { return code_; }
- Frame* frame() const { return frame_; }
+ FrameAccessState* frame_access_state() const { return frame_access_state_; }
+ Frame* frame() const { return frame_access_state_->frame(); }
Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
@@ -94,7 +110,10 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleReturn();
// Generates code to deconstruct the caller's frame, including arguments.
- void AssembleDeconstructActivationRecord();
+ void AssembleDeconstructActivationRecord(int stack_param_delta);
+
+ // Generates code to manipulate the stack in preparation for a tail call.
+ void AssemblePrepareTailCall(int stack_param_delta);
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
@@ -125,21 +144,33 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
- size_t frame_state_offset);
+ FrameStateDescriptor* GetFrameStateDescriptor(
+ Instruction* instr, size_t frame_access_state_offset);
int BuildTranslation(Instruction* instr, int pc_offset,
- size_t frame_state_offset,
+ size_t frame_access_state_offset,
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Instruction* instr,
- Translation* translation, size_t frame_state_offset,
- OutputFrameStateCombine state_combine);
+ FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
+ Translation* translation, OutputFrameStateCombine state_combine);
+ void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+ Translation* translation,
+ InstructionOperandIterator* iter);
+ void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
+ InstructionOperandIterator* iter,
+ OutputFrameStateCombine combine,
+ Translation* translation);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op, MachineType type);
void AddNopForSmiCodeInlining();
void EnsureSpaceForLazyDeopt();
void MarkLazyDeoptSite();
+ // Converts the delta in the number of stack parameters passed from a tail
+ // caller to the callee into the distance (in pointers) by which the SP must
+ // be adjusted, taking frame elision and other relevant factors into
+ // consideration.
+ int TailCallFrameStackSlotDelta(int stack_param_delta);
+
// ===========================================================================
struct DeoptimizationState : ZoneObject {
@@ -167,7 +198,7 @@ class CodeGenerator final : public GapResolver::Assembler {
friend class OutOfLineCode;
- Frame* const frame_;
+ FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
CompilationInfo* const info_;
@@ -187,7 +218,6 @@ class CodeGenerator final : public GapResolver::Assembler {
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
- bool needs_frame_;
};
} // namespace compiler
diff --git a/chromium/v8/src/compiler/code-stub-assembler.cc b/chromium/v8/src/compiler/code-stub-assembler.cc
new file mode 100644
index 00000000000..b2a05b64f86
--- /dev/null
+++ b/chromium/v8/src/compiler/code-stub-assembler.cc
@@ -0,0 +1,176 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-stub-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Kind kind, const char* name)
+ : raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone),
+ Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
+ CallDescriptor::kNoFlags))),
+ kind_(kind),
+ name_(name),
+ code_generated_(false) {}
+
+
+CodeStubAssembler::~CodeStubAssembler() {}
+
+
+Handle<Code> CodeStubAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ Schedule* schedule = raw_assembler_->Export();
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
+ name_);
+
+ code_generated_ = true;
+ return code;
+}
+
+
+Node* CodeStubAssembler::Int32Constant(int value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+
+Node* CodeStubAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+
+Node* CodeStubAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+
+Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+
+Node* CodeStubAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+
+Node* CodeStubAssembler::Parameter(int value) {
+ return raw_assembler_->Parameter(value);
+}
+
+
+void CodeStubAssembler::Return(Node* value) {
+ return raw_assembler_->Return(value);
+}
+
+
+Node* CodeStubAssembler::SmiShiftBitsConstant() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
+
+Node* CodeStubAssembler::SmiTag(Node* value) {
+ return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
+}
+
+
+Node* CodeStubAssembler::SmiUntag(Node* value) {
+ return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+}
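[Editorial note: on a 64-bit target the shift returned by SmiShiftBitsConstant() is typically 32 (assuming kSmiShiftSize = 31 and kSmiTagSize = 1), so SmiTag parks the payload in the upper word and SmiUntag recovers it, sign and all, with an arithmetic shift. A standalone round-trip sketch:]

    #include <cassert>
    #include <cstdint>

    int main() {
      const int shift = 31 + 1;  // kSmiShiftSize + kSmiTagSize, assumed 64-bit values.
      int64_t value = -7;
      // WordShl: payload in the upper 32 bits, tag bits zero.
      int64_t tagged = static_cast<int64_t>(static_cast<uint64_t>(value) << shift);
      // WordSar: the arithmetic shift restores the sign along with the payload.
      assert((tagged >> shift) == value);
      return 0;
    }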
+
+
+Node* CodeStubAssembler::IntPtrAdd(Node* a, Node* b) {
+ return raw_assembler_->IntPtrAdd(a, b);
+}
+
+
+Node* CodeStubAssembler::IntPtrSub(Node* a, Node* b) {
+ return raw_assembler_->IntPtrSub(a, b);
+}
+
+
+Node* CodeStubAssembler::WordShl(Node* value, int shift) {
+ return raw_assembler_->WordShl(value, Int32Constant(shift));
+}
+
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(), object,
+ IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ return raw_assembler_->CallN(descriptor, code_target, args);
+}
+
+
+Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
+ Node* code_target, Node** args) {
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->CallRuntime1(function_id, arg1, context);
+}
+
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2) {
+ return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+}
+
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
+}
+
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1,
+ Node* arg2) {
+ return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+}
+
+
+// RawMachineAssembler delegate helpers:
+Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
+
+
+Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
+
+
+Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/code-stub-assembler.h b/chromium/v8/src/compiler/code-stub-assembler.h
new file mode 100644
index 00000000000..3c4ae05eaa5
--- /dev/null
+++ b/chromium/v8/src/compiler/code-stub-assembler.h
@@ -0,0 +1,96 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/builtins.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+class CallInterfaceDescriptor;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class Schedule;
+
+class CodeStubAssembler {
+ public:
+ CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor, Code::Kind kind,
+ const char* name);
+ virtual ~CodeStubAssembler();
+
+ Handle<Code> GenerateCode();
+
+ // Constants.
+ Node* Int32Constant(int value);
+ Node* IntPtrConstant(intptr_t value);
+ Node* NumberConstant(double value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
+
+ Node* Parameter(int value);
+ void Return(Node* value);
+
+ // Tag and untag Smi values.
+ Node* SmiTag(Node* value);
+ Node* SmiUntag(Node* value);
+
+ // Basic arithmetic operations.
+ Node* IntPtrAdd(Node* a, Node* b);
+ Node* IntPtrSub(Node* a, Node* b);
+ Node* WordShl(Node* value, int shift);
+
+ // Load a field from an object on the heap.
+ Node* LoadObjectField(Node* object, int offset);
+
+ // Call runtime function.
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2);
+
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2);
+
+ private:
+ friend class CodeStubAssemblerTester;
+
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+
+ Node* SmiShiftBitsConstant();
+
+ // Private helpers which delegate to RawMachineAssembler.
+ Graph* graph();
+ Isolate* isolate();
+ Zone* zone();
+
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+ Code::Kind kind_;
+ const char* name_;
+ bool code_generated_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_
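[Editorial note: to see the interface in use, a hypothetical stub built purely from the methods declared above; the isolate, zone, and descriptor are assumed to come from the embedding code stub. It untags a Smi argument, adds one, and returns the re-tagged result.]

    CodeStubAssembler assembler(isolate, zone, descriptor, Code::STUB, "AddOneStub");
    Node* arg = assembler.Parameter(0);                       // Incoming tagged Smi.
    Node* sum = assembler.IntPtrAdd(assembler.SmiUntag(arg),
                                    assembler.IntPtrConstant(1));
    assembler.Return(assembler.SmiTag(sum));                  // Re-tag before returning.
    Handle<Code> code = assembler.GenerateCode();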
diff --git a/chromium/v8/src/compiler/common-node-cache.cc b/chromium/v8/src/compiler/common-node-cache.cc
index e7f7436a0bd..a0ae6e8ad74 100644
--- a/chromium/v8/src/compiler/common-node-cache.cc
+++ b/chromium/v8/src/compiler/common-node-cache.cc
@@ -16,6 +16,11 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
}
+Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
+ return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
+}
+
+
void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
int32_constants_.GetCachedNodes(nodes);
int64_constants_.GetCachedNodes(nodes);
@@ -23,6 +28,7 @@ void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
float64_constants_.GetCachedNodes(nodes);
external_constants_.GetCachedNodes(nodes);
number_constants_.GetCachedNodes(nodes);
+ heap_constants_.GetCachedNodes(nodes);
}
} // namespace compiler
diff --git a/chromium/v8/src/compiler/common-node-cache.h b/chromium/v8/src/compiler/common-node-cache.h
index b0100aaac6c..720bc1531dd 100644
--- a/chromium/v8/src/compiler/common-node-cache.h
+++ b/chromium/v8/src/compiler/common-node-cache.h
@@ -12,6 +12,9 @@ namespace internal {
// Forward declarations.
class ExternalReference;
+class HeapObject;
+template <typename>
+class Handle;
namespace compiler {
@@ -47,6 +50,8 @@ class CommonNodeCache final {
return number_constants_.Find(zone(), bit_cast<int64_t>(value));
}
+ Node** FindHeapConstant(Handle<HeapObject> value);
+
// Return all nodes from the cache.
void GetCachedNodes(ZoneVector<Node*>* nodes);
@@ -59,7 +64,8 @@ class CommonNodeCache final {
Int64NodeCache float64_constants_;
IntPtrNodeCache external_constants_;
Int64NodeCache number_constants_;
- Zone* zone_;
+ IntPtrNodeCache heap_constants_;
+ Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
};
diff --git a/chromium/v8/src/compiler/common-operator-reducer.cc b/chromium/v8/src/compiler/common-operator-reducer.cc
index e4af2ad1f9b..2334541f8ad 100644
--- a/chromium/v8/src/compiler/common-operator-reducer.cc
+++ b/chromium/v8/src/compiler/common-operator-reducer.cc
@@ -67,6 +67,8 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kGuard:
+ return ReduceGuard(node);
default:
break;
}
@@ -202,6 +204,8 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
if_false->opcode() == IrOpcode::kIfFalse &&
if_true->InputAt(0) == if_false->InputAt(0)) {
Node* const branch = if_true->InputAt(0);
+ // Check that the branch is not dead already.
+ if (branch->opcode() != IrOpcode::kBranch) return NoChange();
Node* const cond = branch->InputAt(0);
if (cond->opcode() == IrOpcode::kFloat32LessThan) {
Float32BinopMatcher mcond(cond);
@@ -358,6 +362,16 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
}
+Reduction CommonOperatorReducer::ReduceGuard(Node* node) {
+ DCHECK_EQ(IrOpcode::kGuard, node->opcode());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetTypeOrAny(input);
+ Type* const guard_type = OpParameter<Type*>(node);
+ if (input_type->Is(guard_type)) return Replace(input);
+ return NoChange();
+}
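[Editorial note: the reduction is a pure subsumption check. A hypothetical trace: with x typed Type::SignedSmall(), a node Guard[Type::Number](x) satisfies input_type->Is(guard_type), so Replace(x) rewires every use of the guard to x and the guard drops out of the graph.]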
+
+
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
node->ReplaceInput(0, a);
diff --git a/chromium/v8/src/compiler/common-operator-reducer.h b/chromium/v8/src/compiler/common-operator-reducer.h
index 8582d6b633f..7184755885d 100644
--- a/chromium/v8/src/compiler/common-operator-reducer.h
+++ b/chromium/v8/src/compiler/common-operator-reducer.h
@@ -35,6 +35,7 @@ class CommonOperatorReducer final : public AdvancedReducer {
Reduction ReducePhi(Node* node);
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
+ Reduction ReduceGuard(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/chromium/v8/src/compiler/common-operator.cc b/chromium/v8/src/compiler/common-operator.cc
index bacaae980f9..be7730962f1 100644
--- a/chromium/v8/src/compiler/common-operator.cc
+++ b/chromium/v8/src/compiler/common-operator.cc
@@ -36,6 +36,27 @@ BranchHint BranchHintOf(const Operator* const op) {
}
+size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
+
+
+std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return os << "Eager";
+ case DeoptimizeKind::kSoft:
+ return os << "Soft";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+DeoptimizeKind DeoptimizeKindOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+ return OpParameter<DeoptimizeKind>(op);
+}
+
+
size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
@@ -52,7 +73,8 @@ std::ostream& operator<<(std::ostream& os, IfExceptionHint hint) {
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
- return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
+ return lhs.representation() == rhs.representation() &&
+ lhs.hint() == rhs.hint();
}
@@ -62,12 +84,12 @@ bool operator!=(SelectParameters const& lhs, SelectParameters const& rhs) {
size_t hash_value(SelectParameters const& p) {
- return base::hash_combine(p.type(), p.hint());
+ return base::hash_combine(p.representation(), p.hint());
}
std::ostream& operator<<(std::ostream& os, SelectParameters const& p) {
- return os << p.type() << "|" << p.hint();
+ return os << p.representation() << "|" << p.hint();
}
@@ -83,6 +105,12 @@ size_t ProjectionIndexOf(const Operator* const op) {
}
+MachineRepresentation PhiRepresentationOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kPhi, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
+
int ParameterIndexOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kParameter, op->opcode());
return OpParameter<ParameterInfo>(op).index();
@@ -122,11 +150,17 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
- V(Deoptimize, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
- V(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)
+ V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
+ V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
+
+
+#define CACHED_RETURN_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3)
#define CACHED_END_LIST(V) \
@@ -176,15 +210,15 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
#define CACHED_PHI_LIST(V) \
- V(kMachAnyTagged, 1) \
- V(kMachAnyTagged, 2) \
- V(kMachAnyTagged, 3) \
- V(kMachAnyTagged, 4) \
- V(kMachAnyTagged, 5) \
- V(kMachAnyTagged, 6) \
- V(kMachBool, 2) \
- V(kMachFloat64, 2) \
- V(kMachInt32, 2)
+ V(kTagged, 1) \
+ V(kTagged, 2) \
+ V(kTagged, 3) \
+ V(kTagged, 4) \
+ V(kTagged, 5) \
+ V(kTagged, 6) \
+ V(kBit, 2) \
+ V(kFloat64, 2) \
+ V(kWord32, 2)
#define CACHED_PROJECTION_LIST(V) \
@@ -224,6 +258,18 @@ struct CommonOperatorGlobalCache final {
CACHED_OP_LIST(CACHED)
#undef CACHED
+ template <DeoptimizeKind kKind>
+ struct DeoptimizeOperator final : public Operator1<DeoptimizeKind> {
+ DeoptimizeOperator()
+ : Operator1<DeoptimizeKind>( // --
+ IrOpcode::kDeoptimize, Operator::kNoThrow, // opcode
+ "Deoptimize", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ kKind) {} // parameter
+ };
+ DeoptimizeOperator<DeoptimizeKind::kEager> kDeoptimizeEagerOperator;
+ DeoptimizeOperator<DeoptimizeKind::kSoft> kDeoptimizeSoftOperator;
+
template <IfExceptionHint kCaughtLocally>
struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
IfExceptionOperator()
@@ -249,6 +295,19 @@ struct CommonOperatorGlobalCache final {
CACHED_END_LIST(CACHED_END)
#undef CACHED_END
+ template <size_t kInputCount>
+ struct ReturnOperator final : public Operator {
+ ReturnOperator()
+ : Operator( // --
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ kInputCount, 1, 1, 0, 0, 1) {} // counts
+ };
+#define CACHED_RETURN(input_count) \
+ ReturnOperator<input_count> kReturn##input_count##Operator;
+ CACHED_RETURN_LIST(CACHED_RETURN)
+#undef CACHED_RETURN
+
template <BranchHint kBranchHint>
struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
@@ -301,17 +360,18 @@ struct CommonOperatorGlobalCache final {
CACHED_MERGE_LIST(CACHED_MERGE)
#undef CACHED_MERGE
- template <MachineType kType, int kInputCount>
- struct PhiOperator final : public Operator1<MachineType> {
+ template <MachineRepresentation kRep, int kInputCount>
+ struct PhiOperator final : public Operator1<MachineRepresentation> {
PhiOperator()
- : Operator1<MachineType>( //--
+ : Operator1<MachineRepresentation>( //--
IrOpcode::kPhi, Operator::kPure, // opcode
"Phi", // name
kInputCount, 0, 1, 1, 0, 0, // counts
- kType) {} // parameter
+ kRep) {} // parameter
};
-#define CACHED_PHI(type, input_count) \
- PhiOperator<type, input_count> kPhi##type##input_count##Operator;
+#define CACHED_PHI(rep, input_count) \
+ PhiOperator<MachineRepresentation::rep, input_count> \
+ kPhi##rep##input_count##Operator;
CACHED_PHI_LIST(CACHED_PHI)
#undef CACHED_PHI
@@ -379,7 +439,6 @@ CACHED_OP_LIST(CACHED)
const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
- DCHECK_NE(0u, control_input_count); // Disallow empty ends.
switch (control_input_count) {
#define CACHED_END(input_count) \
case input_count: \
@@ -397,6 +456,24 @@ const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
}
+const Operator* CommonOperatorBuilder::Return(int value_input_count) {
+ switch (value_input_count) {
+#define CACHED_RETURN(input_count) \
+ case input_count: \
+ return &cache_.kReturn##input_count##Operator;
+ CACHED_RETURN_LIST(CACHED_RETURN)
+#undef CACHED_RETURN
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone()) Operator( //--
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ value_input_count, 1, 1, 0, 0, 1); // counts
+}
+
+
const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
switch (hint) {
case BranchHint::kNone:
@@ -411,6 +488,18 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
}
+const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return &cache_.kDeoptimizeEagerOperator;
+ case DeoptimizeKind::kSoft:
+ return &cache_.kDeoptimizeSoftOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
switch (hint) {
case IfExceptionHint::kLocallyCaught:
@@ -424,7 +513,6 @@ const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
- DCHECK_GE(control_output_count, 3u); // Disallow trivial switches.
return new (zone()) Operator( // --
IrOpcode::kSwitch, Operator::kKontrol, // opcode
"Switch", // name
@@ -535,22 +623,20 @@ const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
- return new (zone())
- Operator1<float, base::bit_equal_to<float>, base::bit_hash<float>>( // --
- IrOpcode::kFloat32Constant, Operator::kPure, // opcode
- "Float32Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<float>( // --
+ IrOpcode::kFloat32Constant, Operator::kPure, // opcode
+ "Float32Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
- return new (zone()) Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>( // --
- IrOpcode::kFloat64Constant, Operator::kPure, // opcode
- "Float64Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<double>( // --
+ IrOpcode::kFloat64Constant, Operator::kPure, // opcode
+ "Float64Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
@@ -565,52 +651,50 @@ const Operator* CommonOperatorBuilder::ExternalConstant(
const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
- return new (zone()) Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>( // --
- IrOpcode::kNumberConstant, Operator::kPure, // opcode
- "NumberConstant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<double>( // --
+ IrOpcode::kNumberConstant, Operator::kPure, // opcode
+ "NumberConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
const Operator* CommonOperatorBuilder::HeapConstant(
const Handle<HeapObject>& value) {
- return new (zone())
- Operator1<Handle<HeapObject>, Handle<HeapObject>::equal_to,
- Handle<HeapObject>::hash>( // --
- IrOpcode::kHeapConstant, Operator::kPure, // opcode
- "HeapConstant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<Handle<HeapObject>>( // --
+ IrOpcode::kHeapConstant, Operator::kPure, // opcode
+ "HeapConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
-const Operator* CommonOperatorBuilder::Select(MachineType type,
+const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
BranchHint hint) {
return new (zone()) Operator1<SelectParameters>( // --
IrOpcode::kSelect, Operator::kPure, // opcode
"Select", // name
3, 0, 0, 1, 0, 0, // counts
- SelectParameters(type, hint)); // parameter
+ SelectParameters(rep, hint)); // parameter
}
-const Operator* CommonOperatorBuilder::Phi(MachineType type,
+const Operator* CommonOperatorBuilder::Phi(MachineRepresentation rep,
int value_input_count) {
DCHECK(value_input_count > 0); // Disallow empty phis.
-#define CACHED_PHI(kType, kValueInputCount) \
- if (kType == type && kValueInputCount == value_input_count) { \
- return &cache_.kPhi##kType##kValueInputCount##Operator; \
+#define CACHED_PHI(kRep, kValueInputCount) \
+ if (MachineRepresentation::kRep == rep && \
+ kValueInputCount == value_input_count) { \
+ return &cache_.kPhi##kRep##kValueInputCount##Operator; \
}
CACHED_PHI_LIST(CACHED_PHI)
#undef CACHED_PHI
// Uncached.
- return new (zone()) Operator1<MachineType>( // --
- IrOpcode::kPhi, Operator::kPure, // opcode
- "Phi", // name
- value_input_count, 0, 1, 1, 0, 0, // counts
- type); // parameter
+ return new (zone()) Operator1<MachineRepresentation>( // --
+ IrOpcode::kPhi, Operator::kPure, // opcode
+ "Phi", // name
+ value_input_count, 0, 1, 1, 0, 0, // counts
+ rep); // parameter
}
@@ -633,6 +717,15 @@ const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
}
+const Operator* CommonOperatorBuilder::Guard(Type* type) {
+ return new (zone()) Operator1<Type*>( // --
+ IrOpcode::kGuard, Operator::kKontrol, // opcode
+ "Guard", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ type); // parameter
+}
+
+
const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
DCHECK(arguments > 1); // Disallow empty/singleton sets.
return new (zone()) Operator( // --
@@ -642,24 +735,6 @@ const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
}
-const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
- DCHECK(arguments > 0); // Disallow empty value effects.
- return new (zone()) Operator( // --
- IrOpcode::kValueEffect, Operator::kPure, // opcode
- "ValueEffect", // name
- arguments, 0, 0, 0, 1, 0); // counts
-}
-
-
-const Operator* CommonOperatorBuilder::Finish(int arguments) {
- DCHECK(arguments > 0); // Disallow empty finishes.
- return new (zone()) Operator( // --
- IrOpcode::kFinish, Operator::kPure, // opcode
- "Finish", // name
- 1, arguments, 0, 1, 0, 0); // counts
-}
-
-
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
switch (arguments) {
#define CACHED_STATE_VALUES(arguments) \
@@ -678,6 +753,14 @@ const Operator* CommonOperatorBuilder::StateValues(int arguments) {
}
+const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots, int id) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kObjectState, Operator::kPure, // opcode
+ "ObjectState", // name
+ pointer_slots, 0, 0, 1, 0, 0, id); // counts
+}
+
+
const Operator* CommonOperatorBuilder::TypedStateValues(
const ZoneVector<MachineType>* types) {
return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
@@ -720,6 +803,11 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
}
+const Operator* CommonOperatorBuilder::LazyBailout() {
+ return Call(Linkage::GetLazyBailoutDescriptor(zone()));
+}
+
+
const Operator* CommonOperatorBuilder::TailCall(
const CallDescriptor* descriptor) {
class TailCallOperator final : public Operator1<const CallDescriptor*> {
@@ -761,7 +849,7 @@ const Operator* CommonOperatorBuilder::Projection(size_t index) {
const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
int size) {
if (op->opcode() == IrOpcode::kPhi) {
- return Phi(OpParameter<MachineType>(op), size);
+ return Phi(PhiRepresentationOf(op), size);
} else if (op->opcode() == IrOpcode::kEffectPhi) {
return EffectPhi(size);
} else if (op->opcode() == IrOpcode::kMerge) {
diff --git a/chromium/v8/src/compiler/common-operator.h b/chromium/v8/src/compiler/common-operator.h
index 22490f7fe1a..83cb5b2c66b 100644
--- a/chromium/v8/src/compiler/common-operator.h
+++ b/chromium/v8/src/compiler/common-operator.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_COMMON_OPERATOR_H_
#include "src/compiler/frame-states.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -14,6 +14,10 @@ namespace internal {
// Forward declarations.
class ExternalReference;
+template <class>
+class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
namespace compiler {
@@ -47,6 +51,16 @@ std::ostream& operator<<(std::ostream&, BranchHint);
BranchHint BranchHintOf(const Operator* const);
+// Deoptimize bailout kind.
+enum class DeoptimizeKind : uint8_t { kEager, kSoft };
+
+size_t hash_value(DeoptimizeKind kind);
+
+std::ostream& operator<<(std::ostream&, DeoptimizeKind);
+
+DeoptimizeKind DeoptimizeKindOf(const Operator* const);
+
+
// Prediction whether throw-site is surrounded by any local catch-scope.
enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
@@ -57,15 +71,15 @@ std::ostream& operator<<(std::ostream&, IfExceptionHint);
class SelectParameters final {
public:
- explicit SelectParameters(MachineType type,
+ explicit SelectParameters(MachineRepresentation representation,
BranchHint hint = BranchHint::kNone)
- : type_(type), hint_(hint) {}
+ : representation_(representation), hint_(hint) {}
- MachineType type() const { return type_; }
+ MachineRepresentation representation() const { return representation_; }
BranchHint hint() const { return hint_; }
private:
- const MachineType type_;
+ const MachineRepresentation representation_;
const BranchHint hint_;
};
@@ -81,6 +95,8 @@ SelectParameters const& SelectParametersOf(const Operator* const);
size_t ProjectionIndexOf(const Operator* const);
+MachineRepresentation PhiRepresentationOf(const Operator* const);
+
// The {IrOpcode::kParameter} opcode represents an incoming parameter to the
// function. This class bundles the index and a debug name for such operators.
@@ -120,8 +136,8 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
- const Operator* Deoptimize();
- const Operator* Return();
+ const Operator* Deoptimize(DeoptimizeKind kind);
+ const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
const Operator* Start(int value_output_count);
@@ -141,13 +157,16 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* NumberConstant(volatile double);
const Operator* HeapConstant(const Handle<HeapObject>&);
- const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
- const Operator* Phi(MachineType type, int value_input_count);
+ const Operator* Select(MachineRepresentation, BranchHint = BranchHint::kNone);
+ const Operator* Phi(MachineRepresentation representation,
+ int value_input_count);
const Operator* EffectPhi(int effect_input_count);
const Operator* EffectSet(int arguments);
- const Operator* ValueEffect(int arguments);
- const Operator* Finish(int arguments);
+ const Operator* Guard(Type* type);
+ const Operator* BeginRegion();
+ const Operator* FinishRegion();
const Operator* StateValues(int arguments);
+ const Operator* ObjectState(int pointer_slots, int id);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
OutputFrameStateCombine state_combine,
@@ -155,6 +174,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Call(const CallDescriptor* descriptor);
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
+ const Operator* LazyBailout();
// Constructs a new merge or phi operator with the same opcode as {op}, but
// with {size} inputs.
diff --git a/chromium/v8/src/compiler/control-builders.cc b/chromium/v8/src/compiler/control-builders.cc
index bb0ed140d9b..6905ef589ff 100644
--- a/chromium/v8/src/compiler/control-builders.cc
+++ b/chromium/v8/src/compiler/control-builders.cc
@@ -153,6 +153,16 @@ void BlockBuilder::BreakWhen(Node* condition, BranchHint hint) {
}
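+// Like BreakWhen, but breaks out of the block when {condition} is false.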
+void BlockBuilder::BreakUnless(Node* condition, BranchHint hint) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition, hint);
+ control_if.Then();
+ control_if.Else();
+ Break();
+ control_if.End();
+}
+
+
void BlockBuilder::EndBlock() {
break_environment_->Merge(environment());
set_environment(break_environment_);
diff --git a/chromium/v8/src/compiler/control-builders.h b/chromium/v8/src/compiler/control-builders.h
index 9f3afce8366..6ff00be5961 100644
--- a/chromium/v8/src/compiler/control-builders.h
+++ b/chromium/v8/src/compiler/control-builders.h
@@ -41,8 +41,8 @@ class IfBuilder final : public ControlBuilder {
public:
explicit IfBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- then_environment_(NULL),
- else_environment_(NULL) {}
+ then_environment_(nullptr),
+ else_environment_(nullptr) {}
// Primitive control commands.
void If(Node* condition, BranchHint hint = BranchHint::kNone);
@@ -61,9 +61,9 @@ class LoopBuilder final : public ControlBuilder {
public:
explicit LoopBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- loop_environment_(NULL),
- continue_environment_(NULL),
- break_environment_(NULL) {}
+ loop_environment_(nullptr),
+ continue_environment_(nullptr),
+ break_environment_(nullptr) {}
// Primitive control commands.
void BeginLoop(BitVector* assigned, bool is_osr = false);
@@ -90,9 +90,9 @@ class SwitchBuilder final : public ControlBuilder {
public:
explicit SwitchBuilder(AstGraphBuilder* builder, int case_count)
: ControlBuilder(builder),
- body_environment_(NULL),
- label_environment_(NULL),
- break_environment_(NULL),
+ body_environment_(nullptr),
+ label_environment_(nullptr),
+ break_environment_(nullptr),
body_environments_(case_count, zone()) {}
// Primitive control commands.
@@ -122,7 +122,7 @@ class SwitchBuilder final : public ControlBuilder {
class BlockBuilder final : public ControlBuilder {
public:
explicit BlockBuilder(AstGraphBuilder* builder)
- : ControlBuilder(builder), break_environment_(NULL) {}
+ : ControlBuilder(builder), break_environment_(nullptr) {}
// Primitive control commands.
void BeginBlock();
@@ -133,6 +133,7 @@ class BlockBuilder final : public ControlBuilder {
// Compound control commands for conditional break.
void BreakWhen(Node* condition, BranchHint = BranchHint::kNone);
+ void BreakUnless(Node* condition, BranchHint hint = BranchHint::kNone);
private:
Environment* break_environment_; // Environment after the block exits.
@@ -144,9 +145,9 @@ class TryCatchBuilder final : public ControlBuilder {
public:
explicit TryCatchBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- catch_environment_(NULL),
- exit_environment_(NULL),
- exception_node_(NULL) {}
+ catch_environment_(nullptr),
+ exit_environment_(nullptr),
+ exception_node_(nullptr) {}
// Primitive control commands.
void BeginTry();
@@ -169,9 +170,9 @@ class TryFinallyBuilder final : public ControlBuilder {
public:
explicit TryFinallyBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- finally_environment_(NULL),
- token_node_(NULL),
- value_node_(NULL) {}
+ finally_environment_(nullptr),
+ token_node_(nullptr),
+ value_node_(nullptr) {}
// Primitive control commands.
void BeginTry();
diff --git a/chromium/v8/src/compiler/control-equivalence.cc b/chromium/v8/src/compiler/control-equivalence.cc
index 718de4cb12d..af1a11565c4 100644
--- a/chromium/v8/src/compiler/control-equivalence.cc
+++ b/chromium/v8/src/compiler/control-equivalence.cc
@@ -71,7 +71,7 @@ void ControlEquivalence::VisitPost(Node* node, Node* parent_node,
BracketListDelete(blist, node, direction);
// Propagate bracket list up the DFS tree [line:13].
- if (parent_node != NULL) {
+ if (parent_node != nullptr) {
BracketList& parent_blist = GetBracketList(parent_node);
parent_blist.splice(parent_blist.end(), blist);
}
@@ -91,7 +91,7 @@ void ControlEquivalence::VisitBackedge(Node* from, Node* to,
void ControlEquivalence::RunUndirectedDFS(Node* exit) {
ZoneStack<DFSStackEntry> stack(zone_);
- DFSPush(stack, exit, NULL, kInputDirection);
+ DFSPush(stack, exit, nullptr, kInputDirection);
VisitPre(exit);
while (!stack.empty()) { // Undirected depth-first backwards traversal.
diff --git a/chromium/v8/src/compiler/diamond.h b/chromium/v8/src/compiler/diamond.h
index f562092a8a4..e1333052d74 100644
--- a/chromium/v8/src/compiler/diamond.h
+++ b/chromium/v8/src/compiler/diamond.h
@@ -49,8 +49,8 @@ struct Diamond {
}
}
- Node* Phi(MachineType machine_type, Node* tv, Node* fv) {
- return graph->NewNode(common->Phi(machine_type, 2), tv, fv, merge);
+ Node* Phi(MachineRepresentation rep, Node* tv, Node* fv) {
+ return graph->NewNode(common->Phi(rep, 2), tv, fv, merge);
}
};
diff --git a/chromium/v8/src/compiler/escape-analysis-reducer.cc b/chromium/v8/src/compiler/escape-analysis-reducer.cc
new file mode 100644
index 00000000000..df8b65dab2e
--- /dev/null
+++ b/chromium/v8/src/compiler/escape-analysis-reducer.cc
@@ -0,0 +1,313 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/escape-analysis-reducer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/counters.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysis* escape_analysis,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ escape_analysis_(escape_analysis),
+ zone_(zone),
+ visited_(static_cast<int>(jsgraph->graph()->NodeCount()), zone) {}
+
+
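+// Entry point of the reducer: loads, stores, allocations, region markers and
+// identity checks on tracked objects are simplified directly; any other
+// effectful node may carry a frame state, so its deopt inputs are rewritten.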
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kLoadField:
+ case IrOpcode::kLoadElement:
+ return ReduceLoad(node);
+ case IrOpcode::kStoreField:
+ case IrOpcode::kStoreElement:
+ return ReduceStore(node);
+ case IrOpcode::kAllocate:
+ return ReduceAllocate(node);
+ case IrOpcode::kFinishRegion:
+ return ReduceFinishRegion(node);
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
+ case IrOpcode::kObjectIsSmi:
+ return ReduceObjectIsSmi(node);
+ default:
+ // TODO(sigurds): Change this to GetFrameStateInputCount once
+ // it is working. For now we use EffectInputCount > 0 to determine
+ // whether a node might have a frame state input.
+ if (node->op()->EffectInputCount() > 0) {
+ return ReduceFrameStateUses(node);
+ }
+ break;
+ }
+ return NoChange();
+}
+
+
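+// Replaces a load from a non-escaping object with the value the analysis
+// determined is stored in that field.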
+Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kLoadField ||
+ node->opcode() == IrOpcode::kLoadElement);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (Node* rep = escape_analysis()->GetReplacement(node)) {
+ counters()->turbo_escape_loads_replaced()->Increment();
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+ node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
+ }
+ ReplaceWithValue(node, rep);
+ return Changed(rep);
+ }
+ return NoChange();
+}
+
+
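+// A store into an object that never escapes is unobservable once all loads
+// have been rewritten, so it is removed from the effect chain.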
+Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kStoreField ||
+ node->opcode() == IrOpcode::kStoreElement);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed #%d (%s) from effect chain\n", node->id(),
+ node->op()->mnemonic());
+ }
+ RelaxEffectsAndControls(node);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
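+// Removes a non-escaping allocation from the effect chain; its contents
+// survive only as virtual state and, at deopt points, as ObjectState nodes.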
+Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (escape_analysis()->IsVirtual(node)) {
+ RelaxEffectsAndControls(node);
+ counters()->turbo_escape_allocs_replaced()->Increment();
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed allocate #%d from effect chain\n", node->id());
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
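+// Collapses an empty BeginRegion/FinishRegion pair: when the region markers
+// directly follow each other on the effect chain, both can be relaxed away.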
+Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ if (effect->opcode() == IrOpcode::kBeginRegion) {
+ RelaxEffectsAndControls(effect);
+ RelaxEffectsAndControls(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
+ node->id());
+ PrintF(" %d user(s) of #%d remain(s):", node->UseCount(), node->id());
+ for (Edge edge : node->use_edges()) {
+ PrintF(" #%d", edge.from()->id());
+ }
+ PrintF("\n");
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
+ Node* left = NodeProperties::GetValueInput(node, 0);
+ Node* right = NodeProperties::GetValueInput(node, 1);
+ if (escape_analysis()->IsVirtual(left)) {
+ if (escape_analysis()->IsVirtual(right) &&
+ escape_analysis()->CompareVirtualObjects(left, right)) {
+ ReplaceWithValue(node, jsgraph()->TrueConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with true\n", node->id());
+ }
+ return Replace(node);
+ }
+ // Right-hand side is not a virtual object, or a different one.
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with false\n", node->id());
+ }
+ return Replace(node);
+ } else if (escape_analysis()->IsVirtual(right)) {
+ // Left-hand side is not a virtual object.
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with false\n", node->id());
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ if (escape_analysis()->IsVirtual(input)) {
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ObjectIsSmi #%d with false\n", node->id());
+ }
+ return Replace(node);
+ }
+ return NoChange();
+}
+
+
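+// Frame states can keep otherwise dead allocations alive. Rewrite every
+// FrameState input of an effectful node so references to virtual objects
+// become ObjectState nodes that the deoptimizer can rematerialize from.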
+Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ DCHECK_GE(node->op()->EffectInputCount(), 1);
+ bool changed = false;
+ for (int i = 0; i < node->InputCount(); ++i) {
+ Node* input = node->InputAt(i);
+ if (input->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret = ReduceFrameState(input, node, false)) {
+ node->ReplaceInput(i, ret);
+ changed = true;
+ }
+ }
+ }
+ if (changed) {
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceFrameState(Node* node, Node* effect,
+ bool multiple_users) {
+ DCHECK(node->opcode() == IrOpcode::kFrameState);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing FrameState %d\n", node->id());
+ }
+ Node* clone = nullptr;
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Node* ret =
+ input->opcode() == IrOpcode::kStateValues
+ ? ReduceStateValueInputs(input, effect, node->UseCount() > 1)
+ : ReduceStateValueInput(node, i, effect, node->UseCount() > 1);
+ if (ret) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ multiple_users = false; // Don't clone anymore.
+ }
+ NodeProperties::ReplaceValueInput(node, ret, i);
+ }
+ }
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret =
+ ReduceFrameState(outer_frame_state, effect, node->UseCount() > 1)) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ multiple_users = false;
+ }
+ NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+ }
+ }
+ return clone;
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
+ bool multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing StateValue #%d\n", node->id());
+ }
+ DCHECK(node->opcode() == IrOpcode::kStateValues);
+ DCHECK_NOT_NULL(effect);
+ Node* clone = nullptr;
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Node* ret = nullptr;
+ if (input->opcode() == IrOpcode::kStateValues) {
+ ret = ReduceStateValueInputs(input, effect, multiple_users);
+ } else {
+ ret = ReduceStateValueInput(node, i, effect, multiple_users);
+ }
+ if (ret) {
+ node = ret;
+ DCHECK_NULL(clone);
+ clone = ret;
+ multiple_users = false;
+ }
+ }
+ return clone;
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
+ Node* effect,
+ bool multiple_users) {
+ Node* input = NodeProperties::GetValueInput(node, node_index);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing State Input #%d (%s)\n", input->id(),
+ input->op()->mnemonic());
+ }
+ Node* clone = nullptr;
+ if (input->opcode() == IrOpcode::kFinishRegion ||
+ input->opcode() == IrOpcode::kAllocate) {
+ if (escape_analysis()->IsVirtual(input)) {
+ if (Node* object_state =
+ escape_analysis()->GetOrCreateObjectState(effect, input)) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ }
+ NodeProperties::ReplaceValueInput(node, object_state, node_index);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced state #%d input #%d with object state #%d\n",
+ node->id(), input->id(), object_state->id());
+ }
+ } else {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("No object state replacement available.\n");
+ }
+ }
+ }
+ }
+ return clone;
+}
+
+
+Counters* EscapeAnalysisReducer::counters() const {
+ return jsgraph_->isolate()->counters();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/escape-analysis-reducer.h b/chromium/v8/src/compiler/escape-analysis-reducer.h
new file mode 100644
index 00000000000..1c0da165fb6
--- /dev/null
+++ b/chromium/v8/src/compiler/escape-analysis-reducer.h
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+#define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+
+#include "src/bit-vector.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/graph-reducer.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Counters;
+
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+
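+// Graph reducer that consumes the results of EscapeAnalysis: it removes
+// non-escaping allocations, forwards loads, kills stores, and rewrites
+// frame states to use ObjectState nodes.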
+class EscapeAnalysisReducer final : public AdvancedReducer {
+ public:
+ EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysis* escape_analysis, Zone* zone);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceLoad(Node* node);
+ Reduction ReduceStore(Node* node);
+ Reduction ReduceAllocate(Node* node);
+ Reduction ReduceFinishRegion(Node* node);
+ Reduction ReduceReferenceEqual(Node* node);
+ Reduction ReduceObjectIsSmi(Node* node);
+ Reduction ReduceFrameStateUses(Node* node);
+ Node* ReduceFrameState(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceStateValueInputs(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
+ bool multiple_users);
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
+ Zone* zone() const { return zone_; }
+ Counters* counters() const;
+
+ JSGraph* const jsgraph_;
+ EscapeAnalysis* escape_analysis_;
+ Zone* const zone_;
+ BitVector visited_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
diff --git a/chromium/v8/src/compiler/escape-analysis.cc b/chromium/v8/src/compiler/escape-analysis.cc
new file mode 100644
index 00000000000..af0ba6a6398
--- /dev/null
+++ b/chromium/v8/src/compiler/escape-analysis.cc
@@ -0,0 +1,1471 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/escape-analysis.h"
+
+#include <limits>
+
+#include "src/base/flags.h"
+#include "src/bootstrapper.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects-inl.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const EscapeAnalysis::Alias EscapeAnalysis::kNotReachable =
+ std::numeric_limits<Alias>::max();
+const EscapeAnalysis::Alias EscapeAnalysis::kUntrackable =
+ std::numeric_limits<Alias>::max() - 1;
+
+
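+// A VirtualObject models the tracked state of one allocation: a slot per
+// pointer-sized field and a flag per slot recording whether the current
+// value is a phi created during state merging.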
+class VirtualObject : public ZoneObject {
+ public:
+ enum Status { kUntracked = 0, kTracked = 1 };
+ VirtualObject(NodeId id, Zone* zone)
+ : id_(id),
+ status_(kUntracked),
+ fields_(zone),
+ phi_(zone),
+ object_state_(nullptr) {}
+
+ VirtualObject(const VirtualObject& other)
+ : id_(other.id_),
+ status_(other.status_),
+ fields_(other.fields_),
+ phi_(other.phi_),
+ object_state_(other.object_state_) {}
+
+ VirtualObject(NodeId id, Zone* zone, size_t field_number)
+ : id_(id),
+ status_(kTracked),
+ fields_(zone),
+ phi_(zone),
+ object_state_(nullptr) {
+ fields_.resize(field_number);
+ phi_.resize(field_number, false);
+ }
+
+ Node* GetField(size_t offset) {
+ if (offset < fields_.size()) {
+ return fields_[offset];
+ }
+ return nullptr;
+ }
+
+ bool IsCreatedPhi(size_t offset) {
+ if (offset < phi_.size()) {
+ return phi_[offset];
+ }
+ return false;
+ }
+
+ bool SetField(size_t offset, Node* node, bool created_phi = false) {
+ bool changed = fields_[offset] != node || phi_[offset] != created_phi;
+ fields_[offset] = node;
+ phi_[offset] = created_phi;
+ if (changed && FLAG_trace_turbo_escape && node) {
+ PrintF("Setting field %zu of #%d to #%d (%s)\n", offset, id(), node->id(),
+ node->op()->mnemonic());
+ }
+ return changed;
+ }
+ bool IsVirtual() const { return status_ == kTracked; }
+ bool IsTracked() const { return status_ != kUntracked; }
+
+ Node** fields_array() { return &fields_.front(); }
+ size_t field_count() { return fields_.size(); }
+ bool ResizeFields(size_t field_count) {
+ if (field_count != fields_.size()) {
+ fields_.resize(field_count);
+ phi_.resize(field_count);
+ return true;
+ }
+ return false;
+ }
+ bool ClearAllFields() {
+ bool changed = false;
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ if (fields_[i] != nullptr) {
+ fields_[i] = nullptr;
+ changed = true;
+ }
+ phi_[i] = false;
+ }
+ return changed;
+ }
+ bool UpdateFrom(const VirtualObject& other);
+ void SetObjectState(Node* node) { object_state_ = node; }
+ Node* GetObjectState() const { return object_state_; }
+
+ NodeId id() const { return id_; }
+ void id(NodeId id) { id_ = id; }
+
+ private:
+ NodeId id_;
+ Status status_;
+ ZoneVector<Node*> fields_;
+ ZoneVector<bool> phi_;
+ Node* object_state_;
+};
+
+
+bool VirtualObject::UpdateFrom(const VirtualObject& other) {
+ bool changed = status_ != other.status_;
+ status_ = other.status_;
+ if (fields_.size() != other.fields_.size()) {
+ fields_ = other.fields_;
+ return true;
+ }
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ if (fields_[i] != other.fields_[i]) {
+ changed = true;
+ fields_[i] = other.fields_[i];
+ }
+ }
+ return changed;
+}
+
+
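+// A VirtualState is the snapshot of all virtual objects at one point on the
+// effect chain, indexed by alias; states are copied at effect branch points
+// and merged at effect phis.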
+class VirtualState : public ZoneObject {
+ public:
+ VirtualState(Zone* zone, size_t size);
+ VirtualState(const VirtualState& states);
+
+ VirtualObject* VirtualObjectFromAlias(size_t alias);
+ VirtualObject* GetOrCreateTrackedVirtualObject(EscapeAnalysis::Alias alias,
+ NodeId id, Zone* zone);
+ void SetVirtualObject(EscapeAnalysis::Alias alias, VirtualObject* state);
+ void LastChangedAt(Node* node) { last_changed_ = node; }
+ Node* GetLastChanged() { return last_changed_; }
+ bool UpdateFrom(VirtualState* state, Zone* zone);
+ bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
+ CommonOperatorBuilder* common, Node* control);
+ size_t size() const { return info_.size(); }
+
+ private:
+ ZoneVector<VirtualObject*> info_;
+ Node* last_changed_;
+};
+
+
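+// Scratch space reused while merging states at effect phis: the incoming
+// states, the virtual objects selected from them, and the candidate field
+// values being compared.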
+class MergeCache : public ZoneObject {
+ public:
+ explicit MergeCache(Zone* zone)
+ : states_(zone), objects_(zone), fields_(zone) {
+ states_.reserve(4);
+ objects_.reserve(4);
+ fields_.reserve(4);
+ }
+ ZoneVector<VirtualState*>& states() { return states_; }
+ ZoneVector<VirtualObject*>& objects() { return objects_; }
+ ZoneVector<Node*>& fields() { return fields_; }
+ void Clear() {
+ states_.clear();
+ objects_.clear();
+ fields_.clear();
+ }
+ size_t LoadVirtualObjectsFromStatesFor(EscapeAnalysis::Alias alias);
+ void LoadVirtualObjectsForFieldsFrom(
+ VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases);
+ Node* GetFields(size_t pos);
+
+ private:
+ ZoneVector<VirtualState*> states_;
+ ZoneVector<VirtualObject*> objects_;
+ ZoneVector<Node*> fields_;
+};
+
+
+size_t MergeCache::LoadVirtualObjectsFromStatesFor(
+ EscapeAnalysis::Alias alias) {
+ objects_.clear();
+ DCHECK_GT(states_.size(), 0u);
+ size_t min = std::numeric_limits<size_t>::max();
+ for (VirtualState* state : states_) {
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ objects_.push_back(obj);
+ min = std::min(obj->field_count(), min);
+ }
+ }
+ return min;
+}
+
+
+void MergeCache::LoadVirtualObjectsForFieldsFrom(
+ VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases) {
+ objects_.clear();
+ size_t max_alias = state->size();
+ for (Node* field : fields_) {
+ EscapeAnalysis::Alias alias = aliases[field->id()];
+ if (alias >= max_alias) continue;
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ objects_.push_back(obj);
+ }
+ }
+}
+
+
+Node* MergeCache::GetFields(size_t pos) {
+ fields_.clear();
+ Node* rep = objects_.front()->GetField(pos);
+ for (VirtualObject* obj : objects_) {
+ Node* field = obj->GetField(pos);
+ if (field) {
+ fields_.push_back(field);
+ }
+ if (field != rep) {
+ rep = nullptr;
+ }
+ }
+ return rep;
+}
+
+
+VirtualState::VirtualState(Zone* zone, size_t size)
+ : info_(size, nullptr, zone), last_changed_(nullptr) {}
+
+
+VirtualState::VirtualState(const VirtualState& state)
+ : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+ last_changed_(state.last_changed_) {
+ for (size_t i = 0; i < state.info_.size(); ++i) {
+ if (state.info_[i]) {
+ info_[i] =
+ new (info_.get_allocator().zone()) VirtualObject(*state.info_[i]);
+ }
+ }
+}
+
+
+VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
+ return info_[alias];
+}
+
+
+VirtualObject* VirtualState::GetOrCreateTrackedVirtualObject(
+ EscapeAnalysis::Alias alias, NodeId id, Zone* zone) {
+ if (VirtualObject* obj = VirtualObjectFromAlias(alias)) {
+ return obj;
+ }
+ VirtualObject* obj = new (zone) VirtualObject(id, zone, 0);
+ SetVirtualObject(alias, obj);
+ return obj;
+}
+
+
+void VirtualState::SetVirtualObject(EscapeAnalysis::Alias alias,
+ VirtualObject* obj) {
+ info_[alias] = obj;
+}
+
+
+bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
+ bool changed = false;
+ for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ VirtualObject* ls = VirtualObjectFromAlias(alias);
+ VirtualObject* rs = from->VirtualObjectFromAlias(alias);
+
+ if (rs == nullptr) {
+ continue;
+ }
+
+ if (ls == nullptr) {
+ ls = new (zone) VirtualObject(*rs);
+ SetVirtualObject(alias, ls);
+ changed = true;
+ continue;
+ }
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Updating fields of @%d\n", alias);
+ }
+
+ changed = ls->UpdateFrom(*rs) || changed;
+ }
+ return changed;
+}
+
+
+namespace {
+
+bool IsEquivalentPhi(Node* node1, Node* node2) {
+ if (node1 == node2) return true;
+ if (node1->opcode() != IrOpcode::kPhi || node2->opcode() != IrOpcode::kPhi ||
+ node1->op()->ValueInputCount() != node2->op()->ValueInputCount()) {
+ return false;
+ }
+ for (int i = 0; i < node1->op()->ValueInputCount(); ++i) {
+ Node* input1 = NodeProperties::GetValueInput(node1, i);
+ Node* input2 = NodeProperties::GetValueInput(node2, i);
+ if (!IsEquivalentPhi(input1, input2)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
+ if (phi->opcode() != IrOpcode::kPhi) return false;
+ if (phi->op()->ValueInputCount() != static_cast<int>(inputs.size())) {
+ return false;
+ }
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ Node* input = NodeProperties::GetValueInput(phi, static_cast<int>(i));
+ if (!IsEquivalentPhi(input, inputs[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+
+Node* EscapeAnalysis::GetReplacementIfSame(ZoneVector<VirtualObject*>& objs) {
+ Node* rep = GetReplacement(objs.front()->id());
+ for (VirtualObject* obj : objs) {
+ if (GetReplacement(obj->id()) != rep) {
+ return nullptr;
+ }
+ }
+ return rep;
+}
+
+
+bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
+ CommonOperatorBuilder* common, Node* control) {
+ DCHECK_GT(cache->states().size(), 0u);
+ bool changed = false;
+ for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ size_t fields = cache->LoadVirtualObjectsFromStatesFor(alias);
+ if (cache->objects().size() == cache->states().size()) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Merging virtual objects of @%d\n", alias);
+ }
+ VirtualObject* mergeObject = GetOrCreateTrackedVirtualObject(
+ alias, cache->objects().front()->id(), zone);
+ changed = mergeObject->ResizeFields(fields) || changed;
+ for (size_t i = 0; i < fields; ++i) {
+ if (Node* field = cache->GetFields(i)) {
+ changed = mergeObject->SetField(i, field) || changed;
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Field %zu agree on rep #%d\n", i, field->id());
+ }
+ } else {
+ int value_input_count = static_cast<int>(cache->fields().size());
+ if (cache->fields().size() == cache->objects().size()) {
+ Node* rep = mergeObject->GetField(i);
+ if (!rep || !mergeObject->IsCreatedPhi(i)) {
+ cache->fields().push_back(control);
+ Node* phi = graph->NewNode(
+ common->Phi(MachineRepresentation::kTagged,
+ value_input_count),
+ value_input_count + 1, &cache->fields().front());
+ mergeObject->SetField(i, phi, true);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Creating Phi #%d as merge of", phi->id());
+ for (int j = 0; j < value_input_count; j++) {
+ PrintF(" #%d (%s)", cache->fields()[j]->id(),
+ cache->fields()[j]->op()->mnemonic());
+ }
+ PrintF("\n");
+ }
+ changed = true;
+ } else {
+ DCHECK(rep->opcode() == IrOpcode::kPhi);
+ for (int n = 0; n < value_input_count; ++n) {
+ if (n < rep->op()->ValueInputCount()) {
+ Node* old = NodeProperties::GetValueInput(rep, n);
+ if (old != cache->fields()[n]) {
+ changed = true;
+ NodeProperties::ReplaceValueInput(rep, cache->fields()[n],
+ n);
+ }
+ } else {
+ changed = true;
+ rep->InsertInput(graph->zone(), n, cache->fields()[n]);
+ }
+ }
+ if (rep->op()->ValueInputCount() != value_input_count) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Widening Phi #%d of arity %d to %d", rep->id(),
+ rep->op()->ValueInputCount(), value_input_count);
+ }
+ NodeProperties::ChangeOp(
+ rep, common->Phi(MachineRepresentation::kTagged,
+ value_input_count));
+ }
+ }
+ } else {
+ changed = mergeObject->SetField(i, nullptr) || changed;
+ }
+ }
+ }
+ } else {
+ SetVirtualObject(alias, nullptr);
+ }
+ }
+ return changed;
+}
+
+
+EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
+ Graph* graph, Zone* zone)
+ : object_analysis_(object_analysis),
+ graph_(graph),
+ zone_(zone),
+ status_(graph->NodeCount(), kUnknown, zone),
+ queue_(zone) {}
+
+
+EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
+
+
+bool EscapeStatusAnalysis::HasEntry(Node* node) {
+ return status_[node->id()] & (kTracked | kEscaped);
+}
+
+
+bool EscapeStatusAnalysis::IsVirtual(Node* node) {
+ return (status_[node->id()] & kTracked) && !(status_[node->id()] & kEscaped);
+}
+
+
+bool EscapeStatusAnalysis::IsEscaped(Node* node) {
+ return status_[node->id()] & kEscaped;
+}
+
+
+bool EscapeStatusAnalysis::IsAllocation(Node* node) {
+ return node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion;
+}
+
+
+bool EscapeStatusAnalysis::SetEscaped(Node* node) {
+ bool changed = !(status_[node->id()] & kEscaped);
+ status_[node->id()] |= kEscaped | kTracked;
+ return changed;
+}
+
+
+void EscapeStatusAnalysis::Resize() {
+ status_.resize(graph()->NodeCount(), kUnknown);
+}
+
+
+size_t EscapeStatusAnalysis::size() { return status_.size(); }
+
+
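+// Backwards fixed-point iteration from the end node: Process() updates the
+// escape status of each node, and any change re-enqueues the affected
+// inputs and uses until the statuses stabilize.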
+void EscapeStatusAnalysis::Run() {
+ Resize();
+ queue_.push_back(graph()->end());
+ status_[graph()->end()->id()] |= kOnStack;
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop_front();
+ status_[node->id()] &= ~kOnStack;
+ Process(node);
+ status_[node->id()] |= kVisited;
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!(status_[input->id()] & (kVisited | kOnStack))) {
+ queue_.push_back(input);
+ status_[input->id()] |= kOnStack;
+ }
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::RevisitInputs(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!(status_[input->id()] & kOnStack)) {
+ queue_.push_back(input);
+ status_[input->id()] |= kOnStack;
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::RevisitUses(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (!(status_[use->id()] & kOnStack)) {
+ queue_.push_back(use);
+ status_[use->id()] |= kOnStack;
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::Process(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ ProcessAllocate(node);
+ break;
+ case IrOpcode::kFinishRegion:
+ ProcessFinishRegion(node);
+ break;
+ case IrOpcode::kStoreField:
+ ProcessStoreField(node);
+ break;
+ case IrOpcode::kStoreElement:
+ ProcessStoreElement(node);
+ break;
+ case IrOpcode::kLoadField:
+ case IrOpcode::kLoadElement: {
+ if (Node* rep = object_analysis_->GetReplacement(node)) {
+ if (IsAllocation(rep) && CheckUsesForEscape(node, rep)) {
+ RevisitInputs(rep);
+ RevisitUses(rep);
+ }
+ }
+ break;
+ }
+ case IrOpcode::kPhi:
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ if (!IsAllocationPhi(node)) {
+ SetEscaped(node);
+ RevisitUses(node);
+ }
+ }
+ CheckUsesForEscape(node);
+ break;
+ default:
+ break;
+ }
+}
+
+
+bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (input->opcode() == IrOpcode::kPhi && !IsEscaped(input)) continue;
+ if (IsAllocation(input)) continue;
+ return false;
+ }
+ return true;
+}
+
+
+void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 1);
+ if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
+ RevisitUses(val);
+ RevisitInputs(val);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 2);
+ if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
+ RevisitUses(val);
+ RevisitInputs(val);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Created status entry for node #%d (%s)\n", node->id(),
+ node->op()->mnemonic());
+ }
+ NumberMatcher size(node->InputAt(0));
+ DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+ if (!size.HasValue() && SetEscaped(node)) {
+ RevisitUses(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d to escaped because of non-const alloc\n",
+ node->id());
+ }
+ // This node is known to escape, uses do not have to be checked.
+ return;
+ }
+ }
+ if (CheckUsesForEscape(node, true)) {
+ RevisitUses(node);
+ }
+}
+
+
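+// Inspects every value use of {uses} and marks {rep} as escaping if some use
+// can observe or leak the object; with {phi_escaping} set, use by a phi
+// already counts as an escape.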
+bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
+ bool phi_escaping) {
+ for (Edge edge : uses->use_edges()) {
+ Node* use = edge.from();
+ if (edge.index() >= use->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(use->op()))
+ continue;
+ switch (use->opcode()) {
+ case IrOpcode::kPhi:
+ if (phi_escaping && SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because of use by phi node "
+ "#%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ // Fallthrough.
+ case IrOpcode::kStoreField:
+ case IrOpcode::kLoadField:
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kLoadElement:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kReferenceEqual:
+ case IrOpcode::kFinishRegion:
+ if (IsEscaped(use) && SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because of use by escaping node "
+ "#%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ break;
+ case IrOpcode::kObjectIsSmi:
+ if (!IsAllocation(rep) && SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ break;
+ default:
+ if (use->op()->EffectInputCount() == 0 &&
+ uses->op()->EffectInputCount() > 0) {
+ PrintF("Encountered unaccounted use by #%d (%s)\n", use->id(),
+ use->op()->mnemonic());
+ UNREACHABLE();
+ }
+ if (SetEscaped(rep)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
+void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ RevisitUses(node);
+ }
+ if (CheckUsesForEscape(node, true)) {
+ RevisitInputs(node);
+ }
+}
+
+
+void EscapeStatusAnalysis::DebugPrint() {
+ for (NodeId id = 0; id < status_.size(); id++) {
+ if (status_[id] & kTracked) {
+ PrintF("Node #%d is %s\n", id,
+ (status_[id] & kEscaped) ? "escaping" : "virtual");
+ }
+ }
+}
+
+
+EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
+ Zone* zone)
+ : graph_(graph),
+ common_(common),
+ zone_(zone),
+ virtual_states_(zone),
+ replacements_(zone),
+ escape_status_(this, graph, zone),
+ cache_(new (zone) MergeCache(zone)),
+ aliases_(zone),
+ next_free_alias_(0) {}
+
+
+EscapeAnalysis::~EscapeAnalysis() {}
+
+
+void EscapeAnalysis::Run() {
+ replacements_.resize(graph()->NodeCount());
+ AssignAliases();
+ RunObjectAnalysis();
+ escape_status_.Run();
+}
+
+
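+// Assigns a dense alias to every allocation (and to the FinishRegion that
+// wraps it) so per-object state can live in flat, alias-indexed vectors;
+// all other reachable nodes are marked untrackable.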
+void EscapeAnalysis::AssignAliases() {
+ ZoneVector<Node*> stack(zone());
+ stack.push_back(graph()->end());
+ CHECK_LT(graph()->NodeCount(), kUntrackable);
+ aliases_.resize(graph()->NodeCount(), kNotReachable);
+ aliases_[graph()->end()->id()] = kUntrackable;
+ while (!stack.empty()) {
+ Node* node = stack.back();
+ stack.pop_back();
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ if (aliases_[node->id()] >= kUntrackable) {
+ aliases_[node->id()] = NextAlias();
+ }
+ break;
+ case IrOpcode::kFinishRegion: {
+ Node* allocate = NodeProperties::GetValueInput(node, 0);
+ if (allocate->opcode() == IrOpcode::kAllocate) {
+ if (aliases_[allocate->id()] >= kUntrackable) {
+ if (aliases_[allocate->id()] == kNotReachable) {
+ stack.push_back(allocate);
+ }
+ aliases_[allocate->id()] = NextAlias();
+ }
+ aliases_[node->id()] = aliases_[allocate->id()];
+ } else {
+ aliases_[node->id()] = NextAlias();
+ }
+ break;
+ }
+ default:
+ DCHECK_EQ(aliases_[node->id()], kUntrackable);
+ break;
+ }
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (aliases_[input->id()] == kNotReachable) {
+ stack.push_back(input);
+ aliases_[input->id()] = kUntrackable;
+ }
+ }
+ }
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Discovered trackable nodes");
+ for (NodeId id = 0; id < graph()->NodeCount(); ++id) {
+ if (aliases_[id] < kUntrackable) {
+ PrintF(" #%u", id);
+ }
+ }
+ PrintF("\n");
+ }
+}
+
+
+void EscapeAnalysis::RunObjectAnalysis() {
+ virtual_states_.resize(graph()->NodeCount());
+ ZoneVector<Node*> stack(zone());
+ stack.push_back(graph()->start());
+ while (!stack.empty()) {
+ Node* node = stack.back();
+ stack.pop_back();
+ if (aliases_[node->id()] != kNotReachable && Process(node)) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ Node* use = edge.from();
+ if ((use->opcode() != IrOpcode::kLoadField &&
+ use->opcode() != IrOpcode::kLoadElement) ||
+ !IsDanglingEffectNode(use)) {
+ stack.push_back(use);
+ }
+ }
+ }
+ // First process loads: dangling loads are a problem otherwise.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ Node* use = edge.from();
+ if ((use->opcode() == IrOpcode::kLoadField ||
+ use->opcode() == IrOpcode::kLoadElement) &&
+ IsDanglingEffectNode(use)) {
+ stack.push_back(use);
+ }
+ }
+ }
+ }
+ }
+ if (FLAG_trace_turbo_escape) {
+ DebugPrint();
+ }
+}
+
+
+bool EscapeAnalysis::IsDanglingEffectNode(Node* node) {
+ if (node->op()->EffectInputCount() == 0) return false;
+ if (node->op()->EffectOutputCount() == 0) return false;
+ if (node->op()->EffectInputCount() == 1 &&
+ NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart) {
+ // The start node is used as sentinel for nodes that are in general
+ // effectful, but of which an analysis has determined that they do not
+ // produce effects in this instance. We don't consider these nodes dangling.
+ return false;
+ }
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool EscapeAnalysis::Process(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ ProcessAllocation(node);
+ break;
+ case IrOpcode::kBeginRegion:
+ ForwardVirtualState(node);
+ break;
+ case IrOpcode::kFinishRegion:
+ ProcessFinishRegion(node);
+ break;
+ case IrOpcode::kStoreField:
+ ProcessStoreField(node);
+ break;
+ case IrOpcode::kLoadField:
+ ProcessLoadField(node);
+ break;
+ case IrOpcode::kStoreElement:
+ ProcessStoreElement(node);
+ break;
+ case IrOpcode::kLoadElement:
+ ProcessLoadElement(node);
+ break;
+ case IrOpcode::kStart:
+ ProcessStart(node);
+ break;
+ case IrOpcode::kEffectPhi:
+ return ProcessEffectPhi(node);
+ break;
+ default:
+ if (node->op()->EffectInputCount() > 0) {
+ ForwardVirtualState(node);
+ }
+ ProcessAllocationUsers(node);
+ break;
+ }
+ return true;
+}
+
+
+void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!NodeProperties::IsValueEdge(edge) &&
+ !NodeProperties::IsContextEdge(edge))
+ continue;
+ switch (node->opcode()) {
+ case IrOpcode::kStoreField:
+ case IrOpcode::kLoadField:
+ case IrOpcode::kStoreElement:
+ case IrOpcode::kLoadElement:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kReferenceEqual:
+ case IrOpcode::kFinishRegion:
+ case IrOpcode::kPhi:
+ break;
+ default:
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* obj = ResolveVirtualObject(state, input)) {
+ if (obj->ClearAllFields()) {
+ state->LastChangedAt(node);
+ }
+ }
+ break;
+ }
+ }
+}
+
+
+bool EscapeAnalysis::IsEffectBranchPoint(Node* node) {
+ int count = 0;
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ if (++count > 1) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
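+// Propagates virtual state along the effect chain into {node}: at effect
+// branch points the predecessor state is copied so branches can diverge,
+// otherwise it is shared.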
+void EscapeAnalysis::ForwardVirtualState(Node* node) {
+ DCHECK_EQ(node->op()->EffectInputCount(), 1);
+ if (node->opcode() != IrOpcode::kLoadField &&
+ node->opcode() != IrOpcode::kLoadElement &&
+ node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
+ PrintF("Dangeling effect node: #%d (%s)\n", node->id(),
+ node->op()->mnemonic());
+ UNREACHABLE();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Break the cycle for effect phis.
+ if (effect->opcode() == IrOpcode::kEffectPhi) {
+ if (virtual_states_[effect->id()] == nullptr) {
+ virtual_states_[effect->id()] =
+ new (zone()) VirtualState(zone(), AliasCount());
+ }
+ }
+ DCHECK_NOT_NULL(virtual_states_[effect->id()]);
+ if (IsEffectBranchPoint(effect)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Copying object state %p from #%d (%s) to #%d (%s)\n",
+ static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
+ effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+ }
+ if (!virtual_states_[node->id()]) {
+ virtual_states_[node->id()] =
+ new (zone()) VirtualState(*virtual_states_[effect->id()]);
+ } else {
+ virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
+ zone());
+ }
+ } else {
+ virtual_states_[node->id()] = virtual_states_[effect->id()];
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Forwarding object state %p from #%d (%s) to #%d (%s)\n",
+ static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
+ effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessStart(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStart);
+ virtual_states_[node->id()] = new (zone()) VirtualState(zone(), AliasCount());
+}
+
+
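+// Merges the states of all effect inputs into the phi's own state; fields
+// whose values disagree across the incoming paths get a value phi created
+// (or widened) by MergeFrom.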
+bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ bool changed = false;
+
+ VirtualState* mergeState = virtual_states_[node->id()];
+ if (!mergeState) {
+ mergeState = new (zone()) VirtualState(zone(), AliasCount());
+ virtual_states_[node->id()] = mergeState;
+ changed = true;
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Effect Phi #%d got new states map %p.\n", node->id(),
+ static_cast<void*>(mergeState));
+ }
+ } else if (mergeState->GetLastChanged() != node) {
+ changed = true;
+ }
+
+ cache_->Clear();
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF("At Effect Phi #%d, merging states into %p:", node->id(),
+ static_cast<void*>(mergeState));
+ }
+
+ for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+ Node* input = NodeProperties::GetEffectInput(node, i);
+ VirtualState* state = virtual_states_[input->id()];
+ if (state) {
+ cache_->states().push_back(state);
+ }
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" %p (from %d %s)", static_cast<void*>(state), input->id(),
+ input->op()->mnemonic());
+ }
+ }
+ if (FLAG_trace_turbo_escape) {
+ PrintF("\n");
+ }
+
+ if (cache_->states().size() == 0) {
+ return changed;
+ }
+
+ changed = mergeState->MergeFrom(cache_, zone(), graph(), common(),
+ NodeProperties::GetControlInput(node)) ||
+ changed;
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Merge %s the node.\n", changed ? "changed" : "did not change");
+ }
+
+ if (changed) {
+ mergeState->LastChangedAt(node);
+ escape_status_.Resize();
+ }
+ return changed;
+}
+
+
+void EscapeAnalysis::ProcessAllocation(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ ForwardVirtualState(node);
+
+ // Check if we have already processed this node.
+ if (virtual_states_[node->id()]->VirtualObjectFromAlias(
+ aliases_[node->id()])) {
+ return;
+ }
+
+ NumberMatcher size(node->InputAt(0));
+ DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+ if (size.HasValue()) {
+ virtual_states_[node->id()]->SetVirtualObject(
+ aliases_[node->id()],
+ new (zone())
+ VirtualObject(node->id(), zone(), size.Value() / kPointerSize));
+ } else {
+ virtual_states_[node->id()]->SetVirtualObject(
+ aliases_[node->id()], new (zone()) VirtualObject(node->id(), zone()));
+ }
+ virtual_states_[node->id()]->LastChangedAt(node);
+}
+
+
+void EscapeAnalysis::ProcessFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ ForwardVirtualState(node);
+ Node* allocation = NodeProperties::GetValueInput(node, 0);
+ if (allocation->opcode() == IrOpcode::kAllocate) {
+ VirtualState* state = virtual_states_[node->id()];
+ if (!state->VirtualObjectFromAlias(aliases_[node->id()])) {
+ VirtualObject* vobj_alloc =
+ state->VirtualObjectFromAlias(aliases_[allocation->id()]);
+ DCHECK_NOT_NULL(vobj_alloc);
+ state->SetVirtualObject(aliases_[node->id()], vobj_alloc);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Linked finish region node #%d to node #%d\n", node->id(),
+ allocation->id());
+ }
+ state->LastChangedAt(node);
+ }
+ }
+}
+
+
+Node* EscapeAnalysis::replacement(NodeId id) {
+ if (id >= replacements_.size()) return nullptr;
+ return replacements_[id];
+}
+
+
+Node* EscapeAnalysis::replacement(Node* node) {
+ return replacement(node->id());
+}
+
+
+bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
+ bool changed = replacements_[node->id()] != rep;
+ replacements_[node->id()] = rep;
+ return changed;
+}
+
+
+bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
+ Node* rep) {
+ if (SetReplacement(node, rep)) {
+ state->LastChangedAt(node);
+ if (FLAG_trace_turbo_escape) {
+ if (rep) {
+ PrintF("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
+ rep->op()->mnemonic());
+ } else {
+ PrintF("Replacement of #%d cleared\n", node->id());
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+
+Node* EscapeAnalysis::ResolveReplacement(Node* node) {
+ while (replacement(node)) {
+ node = replacement(node);
+ }
+ return node;
+}
+
+
+Node* EscapeAnalysis::GetReplacement(Node* node) {
+ return GetReplacement(node->id());
+}
+
+
+Node* EscapeAnalysis::GetReplacement(NodeId id) {
+ Node* node = nullptr;
+ while (replacement(id)) {
+ node = replacement(id);
+ id = node->id();
+ }
+ return node;
+}
+
+
+bool EscapeAnalysis::IsVirtual(Node* node) {
+ if (node->id() >= escape_status_.size()) {
+ return false;
+ }
+ return escape_status_.IsVirtual(node);
+}
+
+
+bool EscapeAnalysis::IsEscaped(Node* node) {
+ if (node->id() >= escape_status_.size()) {
+ return false;
+ }
+ return escape_status_.IsEscaped(node);
+}
+
+
+bool EscapeAnalysis::SetEscaped(Node* node) {
+ return escape_status_.SetEscaped(node);
+}
+
+
+VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
+ if (VirtualState* states = virtual_states_[at->id()]) {
+ return states->VirtualObjectFromAlias(aliases_[id]);
+ }
+ return nullptr;
+}
+
+
+VirtualObject* EscapeAnalysis::ResolveVirtualObject(VirtualState* state,
+ Node* node) {
+ VirtualObject* obj = GetVirtualObject(state, ResolveReplacement(node));
+ while (obj && replacement(obj->id())) {
+ if (VirtualObject* next = GetVirtualObject(state, replacement(obj->id()))) {
+ obj = next;
+ } else {
+ break;
+ }
+ }
+ return obj;
+}
+
+
+bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
+ DCHECK(IsVirtual(left) && IsVirtual(right));
+ left = ResolveReplacement(left);
+ right = ResolveReplacement(right);
+ return IsEquivalentPhi(left, right);
+}
+
+
+int EscapeAnalysis::OffsetFromAccess(Node* node) {
+ DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
+ return OpParameter<FieldAccess>(node).offset / kPointerSize;
+}
+
+
+void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* node,
+ VirtualState* state) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Load #%d from phi #%d", node->id(), from->id());
+ }
+
+ cache_->fields().clear();
+ for (int i = 0; i < from->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(from, i);
+ cache_->fields().push_back(input);
+ }
+
+ cache_->LoadVirtualObjectsForFieldsFrom(state, aliases_);
+ if (cache_->objects().size() == cache_->fields().size()) {
+ cache_->GetFields(offset);
+ if (cache_->fields().size() == cache_->objects().size()) {
+ Node* rep = replacement(node);
+ if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
+ int value_input_count = static_cast<int>(cache_->fields().size());
+ cache_->fields().push_back(NodeProperties::GetControlInput(from));
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, value_input_count),
+ value_input_count + 1, &cache_->fields().front());
+ escape_status_.Resize();
+ SetReplacement(node, phi);
+ state->LastChangedAt(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" got phi created.\n");
+ }
+ } else if (FLAG_trace_turbo_escape) {
+ PrintF(" has already phi #%d.\n", rep->id());
+ }
+ } else if (FLAG_trace_turbo_escape) {
+ PrintF(" has incomplete field info.\n");
+ }
+ } else if (FLAG_trace_turbo_escape) {
+ PrintF(" has incomplete virtual object info.\n");
+ }
+}
+
+
+void EscapeAnalysis::ProcessLoadField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
+ ForwardVirtualState(node);
+ Node* from = NodeProperties::GetValueInput(node, 0);
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+ int offset = OffsetFromAccess(node);
+ if (!object->IsTracked()) return;
+ Node* value = object->GetField(offset);
+ if (value) {
+ value = ResolveReplacement(value);
+ }
+ // Record that the load has this alias.
+ UpdateReplacement(state, node, value);
+ } else {
+ if (from->opcode() == IrOpcode::kPhi &&
+ OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
+ int offset = OffsetFromAccess(node);
+ // Only binary phis are supported for now.
+ ProcessLoadFromPhi(offset, from, node, state);
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessLoadElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
+ ForwardVirtualState(node);
+ Node* from = NodeProperties::GetValueInput(node, 0);
+ VirtualState* state = virtual_states_[node->id()];
+ Node* index_node = node->InputAt(1);
+ NumberMatcher index(index_node);
+ DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
+ index_node->opcode() != IrOpcode::kInt64Constant &&
+ index_node->opcode() != IrOpcode::kFloat32Constant &&
+ index_node->opcode() != IrOpcode::kFloat64Constant);
+ ElementAccess access = OpParameter<ElementAccess>(node);
+ if (index.HasValue()) {
+ int offset = index.Value() + access.header_size / kPointerSize;
+ if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+ CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ CHECK_EQ(access.header_size % kPointerSize, 0);
+
+ if (!object->IsTracked()) return;
+ Node* value = object->GetField(offset);
+ if (value) {
+ value = ResolveReplacement(value);
+ }
+ // Record that the load has this alias.
+ UpdateReplacement(state, node, value);
+ } else if (from->opcode() == IrOpcode::kPhi) {
+ ElementAccess access = OpParameter<ElementAccess>(node);
+ int offset = index.Value() + access.header_size / kPointerSize;
+ ProcessLoadFromPhi(offset, from, node, state);
+ }
+ } else {
+ // We have a load from a non-const index, cannot eliminate object.
+ if (SetEscaped(from)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because store element #%d to "
+ "non-const "
+ "index #%d (%s)\n",
+ from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
+ index_node->op()->mnemonic());
+ }
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessStoreField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
+ ForwardVirtualState(node);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 1);
+ VirtualState* state = virtual_states_[node->id()];
+ if (VirtualObject* obj = ResolveVirtualObject(state, to)) {
+ if (!obj->IsTracked()) return;
+ int offset = OffsetFromAccess(node);
+ if (obj->SetField(offset, ResolveReplacement(val))) {
+ state->LastChangedAt(node);
+ }
+ }
+}
+
+
+void EscapeAnalysis::ProcessStoreElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
+ ForwardVirtualState(node);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* index_node = node->InputAt(1);
+ NumberMatcher index(index_node);
+ DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
+ index_node->opcode() != IrOpcode::kInt64Constant &&
+ index_node->opcode() != IrOpcode::kFloat32Constant &&
+ index_node->opcode() != IrOpcode::kFloat64Constant);
+ ElementAccess access = OpParameter<ElementAccess>(node);
+ Node* val = NodeProperties::GetValueInput(node, 2);
+ if (index.HasValue()) {
+ int offset = index.Value() + access.header_size / kPointerSize;
+ VirtualState* states = virtual_states_[node->id()];
+ if (VirtualObject* obj = ResolveVirtualObject(states, to)) {
+ if (!obj->IsTracked()) return;
+ CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ CHECK_EQ(access.header_size % kPointerSize, 0);
+ if (obj->SetField(offset, ResolveReplacement(val))) {
+ states->LastChangedAt(node);
+ }
+ }
+ } else {
+ // We have a store to a non-const index, cannot eliminate object.
+ if (SetEscaped(to)) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Setting #%d (%s) to escaped because store element #%d to "
+ "non-const "
+ "index #%d (%s)\n",
+ to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
+ index_node->op()->mnemonic());
+ }
+ }
+ }
+}
+
+
+Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
+ if ((node->opcode() == IrOpcode::kFinishRegion ||
+ node->opcode() == IrOpcode::kAllocate) &&
+ IsVirtual(node)) {
+ if (VirtualObject* vobj =
+ ResolveVirtualObject(virtual_states_[effect->id()], node)) {
+ if (Node* object_state = vobj->GetObjectState()) {
+ return object_state;
+ } else {
+ cache_->fields().clear();
+ for (size_t i = 0; i < vobj->field_count(); ++i) {
+ if (Node* field = vobj->GetField(i)) {
+ cache_->fields().push_back(field);
+ }
+ }
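+ // Snapshot the current field values in an ObjectState node. This is
+ // presumably what lets an eliminated allocation be rematerialized if
+ // execution later deoptimizes.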
+ int input_count = static_cast<int>(cache_->fields().size());
+ Node* new_object_state =
+ graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
+ input_count, &cache_->fields().front());
+ vobj->SetObjectState(new_object_state);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(
+ "Creating object state #%d for vobj %p (from node #%d) at effect "
+ "#%d\n",
+ new_object_state->id(), static_cast<void*>(vobj), node->id(),
+ effect->id());
+ }
+ // Now fix uses of other objects.
+ for (size_t i = 0; i < vobj->field_count(); ++i) {
+ if (Node* field = vobj->GetField(i)) {
+ if (Node* field_object_state =
+ GetOrCreateObjectState(effect, field)) {
+ NodeProperties::ReplaceValueInput(
+ new_object_state, field_object_state, static_cast<int>(i));
+ }
+ }
+ }
+ return new_object_state;
+ }
+ }
+ }
+ return nullptr;
+}
+
+
+void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
+ PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
+ object->field_count());
+ for (size_t i = 0; i < object->field_count(); ++i) {
+ if (Node* f = object->GetField(i)) {
+ PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
+ }
+ }
+}
+
+
+void EscapeAnalysis::DebugPrintState(VirtualState* state) {
+ PrintF("Dumping object state %p\n", static_cast<void*>(state));
+ for (Alias alias = 0; alias < AliasCount(); ++alias) {
+ if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
+ DebugPrintObject(object, alias);
+ }
+ }
+}
+
+
+void EscapeAnalysis::DebugPrint() {
+ ZoneVector<VirtualState*> object_states(zone());
+ for (NodeId id = 0; id < virtual_states_.size(); id++) {
+ if (VirtualState* states = virtual_states_[id]) {
+ if (std::find(object_states.begin(), object_states.end(), states) ==
+ object_states.end()) {
+ object_states.push_back(states);
+ }
+ }
+ }
+ for (size_t n = 0; n < object_states.size(); n++) {
+ DebugPrintState(object_states[n]);
+ }
+}
+
+
+VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
+ Node* node) {
+ if (node->id() >= aliases_.size()) return nullptr;
+ Alias alias = aliases_[node->id()];
+ if (alias >= state->size()) return nullptr;
+ return state->VirtualObjectFromAlias(alias);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/escape-analysis.h b/chromium/v8/src/compiler/escape-analysis.h
new file mode 100644
index 00000000000..ea7b11ecdf0
--- /dev/null
+++ b/chromium/v8/src/compiler/escape-analysis.h
@@ -0,0 +1,169 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
+#define V8_COMPILER_ESCAPE_ANALYSIS_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class EscapeAnalysis;
+class VirtualState;
+class VirtualObject;
+
+
+// EscapeStatusAnalysis determines for each allocation whether it escapes.
+class EscapeStatusAnalysis {
+ public:
+ ~EscapeStatusAnalysis();
+
+ enum EscapeStatusFlag {
+ kUnknown = 0u,
+ kTracked = 1u << 0,
+ kEscaped = 1u << 1,
+ kOnStack = 1u << 2,
+ kVisited = 1u << 3,
+ };
+ typedef base::Flags<EscapeStatusFlag, unsigned char> EscapeStatusFlags;
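+
+ // A minimal sketch of how these flags compose (DEFINE_OPERATORS_FOR_FLAGS
+ // below supplies the bitwise operators):
+ //   EscapeStatusFlags s = kTracked | kVisited;
+ //   if (s & kEscaped) { /* ... */ }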
+
+ void Run();
+
+ bool IsVirtual(Node* node);
+ bool IsEscaped(Node* node);
+ bool IsAllocation(Node* node);
+
+ void DebugPrint();
+
+ friend class EscapeAnalysis;
+
+ private:
+ EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
+ Zone* zone);
+ void Process(Node* node);
+ void ProcessAllocate(Node* node);
+ void ProcessFinishRegion(Node* node);
+ void ProcessStoreField(Node* node);
+ void ProcessStoreElement(Node* node);
+ bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
+ return CheckUsesForEscape(node, node, phi_escaping);
+ }
+ bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
+ void RevisitUses(Node* node);
+ void RevisitInputs(Node* node);
+ bool SetEscaped(Node* node);
+ bool HasEntry(Node* node);
+ void Resize();
+ size_t size();
+ bool IsAllocationPhi(Node* node);
+
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
+
+ EscapeAnalysis* object_analysis_;
+ Graph* const graph_;
+ Zone* const zone_;
+ ZoneVector<EscapeStatusFlags> status_;
+ ZoneDeque<Node*> queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::EscapeStatusFlags)
+
+
+// Forward Declaration.
+class MergeCache;
+
+
+// EscapeAnalysis simulates stores to determine the values of loads if an
+// object is virtual and can be eliminated.
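+//
+// A minimal usage sketch (hypothetical driver code, using only the API
+// declared below):
+//   EscapeAnalysis escape_analysis(graph, common, zone);
+//   escape_analysis.Run();
+//   if (escape_analysis.IsVirtual(allocate_node) &&
+//       !escape_analysis.IsEscaped(allocate_node)) {
+//     Node* replacement = escape_analysis.GetReplacement(load_node);
+//   }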
+class EscapeAnalysis {
+ public:
+ typedef NodeId Alias;
+
+ EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
+ ~EscapeAnalysis();
+
+ void Run();
+
+ Node* GetReplacement(Node* node);
+ bool IsVirtual(Node* node);
+ bool IsEscaped(Node* node);
+ bool CompareVirtualObjects(Node* left, Node* right);
+ Node* GetOrCreateObjectState(Node* effect, Node* node);
+
+ private:
+ void RunObjectAnalysis();
+ void AssignAliases();
+ bool Process(Node* node);
+ void ProcessLoadField(Node* node);
+ void ProcessStoreField(Node* node);
+ void ProcessLoadElement(Node* node);
+ void ProcessStoreElement(Node* node);
+ void ProcessAllocationUsers(Node* node);
+ void ProcessAllocation(Node* node);
+ void ProcessFinishRegion(Node* node);
+ void ProcessCall(Node* node);
+ void ProcessStart(Node* node);
+ bool ProcessEffectPhi(Node* node);
+ void ProcessLoadFromPhi(int offset, Node* from, Node* node,
+ VirtualState* states);
+
+ void ForwardVirtualState(Node* node);
+ bool IsEffectBranchPoint(Node* node);
+ bool IsDanglingEffectNode(Node* node);
+ int OffsetFromAccess(Node* node);
+
+ VirtualObject* GetVirtualObject(Node* at, NodeId id);
+ VirtualObject* ResolveVirtualObject(VirtualState* state, Node* node);
+ Node* GetReplacementIfSame(ZoneVector<VirtualObject*>& objs);
+
+ bool SetEscaped(Node* node);
+ Node* replacement(NodeId id);
+ Node* replacement(Node* node);
+ Node* ResolveReplacement(Node* node);
+ Node* GetReplacement(NodeId id);
+ bool SetReplacement(Node* node, Node* rep);
+ bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
+
+ VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
+
+ void DebugPrint();
+ void DebugPrintState(VirtualState* state);
+ void DebugPrintObject(VirtualObject* state, Alias id);
+
+ Alias NextAlias() { return next_free_alias_++; }
+ Alias AliasCount() const { return next_free_alias_; }
+
+ Graph* graph() const { return graph_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ Zone* zone() const { return zone_; }
+
+ static const Alias kNotReachable;
+ static const Alias kUntrackable;
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ Zone* const zone_;
+ ZoneVector<VirtualState*> virtual_states_;
+ ZoneVector<Node*> replacements_;
+ EscapeStatusAnalysis escape_status_;
+ MergeCache* cache_;
+ ZoneVector<Alias> aliases_;
+ Alias next_free_alias_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ESCAPE_ANALYSIS_H_
diff --git a/chromium/v8/src/compiler/fast-accessor-assembler.cc b/chromium/v8/src/compiler/fast-accessor-assembler.cc
new file mode 100644
index 00000000000..09d513fdc62
--- /dev/null
+++ b/chromium/v8/src/compiler/fast-accessor-assembler.cc
@@ -0,0 +1,220 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/fast-accessor-assembler.h"
+
+#include "src/base/logging.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/verifier.h"
+#include "src/handles-inl.h"
+#include "src/objects.h" // For FAA::GetInternalField impl.
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
+ : zone_(),
+ assembler_(new RawMachineAssembler(
+ isolate, new (zone()) Graph(zone()),
+ Linkage::GetJSCallDescriptor(&zone_, false, 1,
+ CallDescriptor::kNoFlags))),
+ state_(kBuilding) {}
+
+
+FastAccessorAssembler::~FastAccessorAssembler() {}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
+ int const_value) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(assembler_->NumberConstant(const_value));
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
+ CHECK_EQ(kBuilding, state_);
+
+ // For the JS call descriptor, the receiver is parameter 0. If we use other
+ // call descriptors, this may or may not hold. So let's check.
+ CHECK(assembler_->call_descriptor()->IsJSFunctionCall());
+ return FromRaw(assembler_->Parameter(0));
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
+ ValueId value, int field_no) {
+ CHECK_EQ(kBuilding, state_);
+ // Determine the 'value' object's instance type.
+ Node* object_map =
+ assembler_->Load(MachineType::Pointer(), FromId(value),
+ assembler_->IntPtrConstant(
+ Internals::kHeapObjectMapOffset - kHeapObjectTag));
+ Node* instance_type = assembler_->WordAnd(
+ assembler_->Load(
+ MachineType::Uint16(), object_map,
+ assembler_->IntPtrConstant(
+ Internals::kMapInstanceTypeAndBitFieldOffset - kHeapObjectTag)),
+ assembler_->IntPtrConstant(0xff));
+
+ // Check whether we have a proper JSObject.
+ RawMachineLabel is_jsobject, is_not_jsobject, merge;
+ assembler_->Branch(
+ assembler_->WordEqual(
+ instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+ &is_jsobject, &is_not_jsobject);
+
+ // JSObject? Then load the internal field field_no.
+ assembler_->Bind(&is_jsobject);
+ Node* internal_field = assembler_->Load(
+ MachineType::Pointer(), FromId(value),
+ assembler_->IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag +
+ kPointerSize * field_no));
+ assembler_->Goto(&merge);
+
+ // Not a JSObject? Return undefined.
+ // TODO(vogelheim): Check whether this is the appropriate action, or whether
+ // the method should take a label instead.
+ assembler_->Bind(&is_not_jsobject);
+ Node* fail_value = assembler_->UndefinedConstant();
+ assembler_->Goto(&merge);
+
+ // Return.
+ assembler_->Bind(&merge);
+ Node* phi = assembler_->Phi(MachineRepresentation::kTagged, internal_field,
+ fail_value);
+ return FromRaw(phi);
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
+ int offset) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(assembler_->Load(MachineType::IntPtr(), FromId(value),
+ assembler_->IntPtrConstant(offset)));
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
+ int offset) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(
+ assembler_->Load(MachineType::AnyTagged(),
+ assembler_->Load(MachineType::Pointer(), FromId(value),
+ assembler_->IntPtrConstant(offset))));
+}
+
+
+void FastAccessorAssembler::ReturnValue(ValueId value) {
+ CHECK_EQ(kBuilding, state_);
+ assembler_->Return(FromId(value));
+}
+
+
+void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel pass, fail;
+ // If (value & mask) == 0 the flag is not set, so return null; otherwise
+ // fall through.
+ assembler_->Branch(
+ assembler_->Word32Equal(
+ assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
+ assembler_->Int32Constant(0)),
+ &fail, &pass);
+ assembler_->Bind(&fail);
+ assembler_->Return(assembler_->NullConstant());
+ assembler_->Bind(&pass);
+}
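+
+// Usage sketch (hypothetical): after loading a bit field with LoadValue(),
+// CheckFlagSetOrReturnNull(flags, 1 << kSomeFlagBit) makes the generated
+// accessor return null unless that flag bit is set.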
+
+
+void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel is_null, not_null;
+ assembler_->Branch(
+ assembler_->IntPtrEqual(FromId(value), assembler_->IntPtrConstant(0)),
+ &is_null, &not_null);
+ assembler_->Bind(&is_null);
+ assembler_->Return(assembler_->NullConstant());
+ assembler_->Bind(&not_null);
+}
+
+
+FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel* label =
+ new (zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
+ return FromRaw(label);
+}
+
+
+void FastAccessorAssembler::SetLabel(LabelId label_id) {
+ CHECK_EQ(kBuilding, state_);
+ assembler_->Bind(FromId(label_id));
+}
+
+
+void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
+ LabelId label_id) {
+ CHECK_EQ(kBuilding, state_);
+ RawMachineLabel pass;
+ // Jump to the label when the value is zero; otherwise fall through.
+ assembler_->Branch(
+ assembler_->IntPtrEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+ FromId(label_id), &pass);
+ assembler_->Bind(&pass);
+}
+
+
+MaybeHandle<Code> FastAccessorAssembler::Build() {
+ CHECK_EQ(kBuilding, state_);
+
+ // Cleanup: we no longer need these mappings.
+ nodes_.clear();
+ labels_.clear();
+
+ // Export the schedule and call the compiler.
+ Schedule* schedule = assembler_->Export();
+ MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
+ schedule, Code::STUB, "FastAccessorAssembler");
+
+ // Update state & return.
+ state_ = !code.is_null() ? kBuilt : kError;
+ return code;
+}
+
+
+FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
+ nodes_.push_back(node);
+ ValueId value = {nodes_.size() - 1};
+ return value;
+}
+
+
+FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
+ RawMachineLabel* label) {
+ labels_.push_back(label);
+ LabelId label_id = {labels_.size() - 1};
+ return label_id;
+}
+
+
+Node* FastAccessorAssembler::FromId(ValueId value) const {
+ CHECK_LT(value.value_id, nodes_.size());
+ CHECK_NOT_NULL(nodes_.at(value.value_id));
+ return nodes_.at(value.value_id);
+}
+
+
+RawMachineLabel* FastAccessorAssembler::FromId(LabelId label) const {
+ CHECK_LT(label.label_id, labels_.size());
+ CHECK_NOT_NULL(labels_.at(label.label_id));
+ return labels_.at(label.label_id);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/fast-accessor-assembler.h b/chromium/v8/src/compiler/fast-accessor-assembler.h
new file mode 100644
index 00000000000..a9df3f0749c
--- /dev/null
+++ b/chromium/v8/src/compiler/fast-accessor-assembler.h
@@ -0,0 +1,106 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#define V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+
+#include <stdint.h>
+#include <vector>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "include/v8-experimental.h"
+#include "src/base/macros.h"
+#include "src/base/smart-pointers.h"
+#include "src/handles.h"
+
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class Node;
+class RawMachineAssembler;
+class RawMachineLabel;
+
+
+// This interface "exports" an aggregated subset of RawMachineAssembler, for
+// use by the API to implement fast DOM accessors.
+//
+// This interface is made for this single purpose only and does not attempt
+// to implement a general purpose solution. If you need one, please look at
+// RawMachineAssembler instead.
+//
+// The life cycle of a FastAccessorAssembler has two phases:
+// - After creating the instance, you can call an arbitrary sequence of
+// builder functions to build the desired function.
+// - When done, you can Build() the accessor and query for the build results.
+//
+// You cannot call any result getters before Build() has been called and
+// succeeded, and you cannot call any builder functions after Build() has
+// been called.
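+//
+// A minimal usage sketch (hypothetical, using only the builder functions
+// declared below): build an accessor that returns internal field 0 of the
+// receiver, or null when that field is zero:
+//   FastAccessorAssembler faa(isolate);
+//   FastAccessorAssembler::ValueId field =
+//       faa.LoadInternalField(faa.GetReceiver(), 0);
+//   faa.CheckNotZeroOrReturnNull(field);
+//   faa.ReturnValue(field);
+//   MaybeHandle<Code> code = faa.Build();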
+class FastAccessorAssembler {
+ public:
+ typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
+ typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
+
+ explicit FastAccessorAssembler(Isolate* isolate);
+ ~FastAccessorAssembler();
+
+ // Builder / assembler functions:
+ ValueId IntegerConstant(int int_constant);
+ ValueId GetReceiver();
+ ValueId LoadInternalField(ValueId value_id, int field_no);
+ ValueId LoadValue(ValueId value_id, int offset);
+ ValueId LoadObject(ValueId value_id, int offset);
+
+ // Builder / assembler functions for control flow.
+ void ReturnValue(ValueId value_id);
+ void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
+ void CheckNotZeroOrReturnNull(ValueId value_id);
+
+ // TODO(vogelheim): Implement a C++ callback.
+ // void CheckNotNullOrCallback(ValueId value_id, ..c++-callback type...,
+ // ValueId arg1, ValueId arg2, ...);
+
+ LabelId MakeLabel();
+ void SetLabel(LabelId label_id);
+ void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+
+ // Assemble the code.
+ MaybeHandle<Code> Build();
+
+ private:
+ ValueId FromRaw(Node* node);
+ LabelId FromRaw(RawMachineLabel* label);
+ Node* FromId(ValueId value) const;
+ RawMachineLabel* FromId(LabelId value) const;
+
+ Zone* zone() { return &zone_; }
+
+ Zone zone_;
+ base::SmartPointer<RawMachineAssembler> assembler_;
+
+ // To prevent exposing the RMA internals to the outside world, we map Node
+ // and Label pointers to integers wrapped in ValueId and LabelId instances.
+ // These vectors maintain this mapping.
+ std::vector<Node*> nodes_;
+ std::vector<RawMachineLabel*> labels_;
+
+ // Remember the current state for easy error checking. (We prefer to be
+ // strict, as this class will be exposed through the API.)
+ enum { kBuilding, kBuilt, kError } state_;
+
+ DISALLOW_COPY_AND_ASSIGN(FastAccessorAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
diff --git a/chromium/v8/src/compiler/frame-elider.cc b/chromium/v8/src/compiler/frame-elider.cc
index f800b7786f0..7c3f9b2741f 100644
--- a/chromium/v8/src/compiler/frame-elider.cc
+++ b/chromium/v8/src/compiler/frame-elider.cc
@@ -22,7 +22,8 @@ void FrameElider::MarkBlocks() {
for (auto block : instruction_blocks()) {
if (block->needs_frame()) continue;
for (auto i = block->code_start(); i < block->code_end(); ++i) {
- if (InstructionAt(i)->IsCall()) {
+ if (InstructionAt(i)->IsCall() ||
+ InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
block->mark_needs_frame();
break;
}
diff --git a/chromium/v8/src/compiler/frame-states.cc b/chromium/v8/src/compiler/frame-states.cc
index 7170a845f78..387d6a9bbb0 100644
--- a/chromium/v8/src/compiler/frame-states.cc
+++ b/chromium/v8/src/compiler/frame-states.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/functional.h"
#include "src/compiler/frame-states.h"
+
+#include "src/base/functional.h"
#include "src/handles-inl.h"
namespace v8 {
@@ -51,9 +52,15 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kJavaScriptFunction:
os << "JS_FRAME";
break;
+ case FrameStateType::kInterpretedFunction:
+ os << "INTERPRETED_FRAME";
+ break;
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
break;
+ case FrameStateType::kConstructStub:
+ os << "CONSTRUCT_STUB";
+ break;
}
return os;
}
diff --git a/chromium/v8/src/compiler/frame-states.h b/chromium/v8/src/compiler/frame-states.h
index 0684f112aa9..ddb55c35d24 100644
--- a/chromium/v8/src/compiler/frame-states.h
+++ b/chromium/v8/src/compiler/frame-states.h
@@ -76,8 +76,10 @@ class OutputFrameStateCombine {
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
- kArgumentsAdaptor // Represents an ArgumentsAdaptorFrame.
+ kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
+ kInterpretedFunction, // Represents an InterpretedFrame.
+ kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
+ kConstructStub // Represents a ConstructStubFrame.
};
@@ -107,6 +109,11 @@ class FrameStateFunctionInfo {
return context_calling_mode_;
}
+ static bool IsJSFunctionType(FrameStateType type) {
+ return type == FrameStateType::kJavaScriptFunction ||
+ type == FrameStateType::kInterpretedFunction;
+ }
+
private:
FrameStateType const type_;
int const parameter_count_;
diff --git a/chromium/v8/src/compiler/frame.cc b/chromium/v8/src/compiler/frame.cc
index 079fccb71cf..b08030b8c66 100644
--- a/chromium/v8/src/compiler/frame.cc
+++ b/chromium/v8/src/compiler/frame.cc
@@ -12,12 +12,40 @@ namespace v8 {
namespace internal {
namespace compiler {
-Frame::Frame(int fixed_frame_size_in_slots)
- : frame_slot_count_(fixed_frame_size_in_slots),
- spilled_callee_register_slot_count_(0),
- stack_slot_count_(0),
- allocated_registers_(NULL),
- allocated_double_registers_(NULL) {}
+Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
+ : needs_frame_((descriptor != nullptr) &&
+ descriptor->RequiresFrameAsIncoming()),
+ frame_slot_count_(fixed_frame_size_in_slots),
+ callee_saved_slot_count_(0),
+ spill_slot_count_(0),
+ allocated_registers_(nullptr),
+ allocated_double_registers_(nullptr) {}
+
+
+void FrameAccessState::SetFrameAccessToDefault() {
+ if (frame()->needs_frame() && !FLAG_turbo_sp_frame_access) {
+ SetFrameAccessToFP();
+ } else {
+ SetFrameAccessToSP();
+ }
+}
+
+
+FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
+ const int offset =
+ (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
+ kPointerSize;
+ if (access_frame_with_fp()) {
+ DCHECK(frame()->needs_frame());
+ return FrameOffset::FromFramePointer(offset);
+ } else {
+ // No frame. Retrieve all parameters relative to stack pointer.
+ int sp_offset =
+ offset + ((frame()->GetSpToFpSlotCount() + sp_delta()) * kPointerSize);
+ return FrameOffset::FromStackPointer(sp_offset);
+ }
+}
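+
+// Worked example (assuming kFixedSlotCountAboveFp == 2, i.e. return address
+// plus saved frame pointer, and kPointerSize == 8): for spill_slot == 4 the
+// FP-relative offset is (2 - 4 - 1) * 8 == -24, i.e. the first spill slot of
+// the frame diagram in frame.h lives three slots below the frame pointer.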
+
} // namespace compiler
} // namespace internal
diff --git a/chromium/v8/src/compiler/frame.h b/chromium/v8/src/compiler/frame.h
index 0b066783c36..72f756b0dcd 100644
--- a/chromium/v8/src/compiler/frame.h
+++ b/chromium/v8/src/compiler/frame.h
@@ -12,19 +12,27 @@ namespace v8 {
namespace internal {
namespace compiler {
+class CallDescriptor;
+
// Collects the spill slot and other frame slot requirements for a compiled
// function. Frames are usually populated by the register allocator and are used
-// by Linkage to generate code for the prologue and epilogue to compiled code.
+// by Linkage to generate code for the prologue and epilogue to compiled
+// code. Frame objects must be considered immutable once they've been
+// instantiated and the basic information about the frame has been collected
+// into them. Mutable state associated with the frame is stored separately in
+// FrameAccessState.
//
-// Frames are divided up into three regions. The first is the fixed header,
-// which always has a constant size and can be predicted before code generation
-// begins depending on the type of code being generated. The second is the
-// region for spill slots, which is immediately below the fixed header and grows
-// as the register allocator needs to spill to the stack and asks the frame for
-// more space. The third region, which contains the callee-saved registers must
-// be reserved after register allocation, since its size can only be precisely
-// determined after register allocation once the number of used callee-saved
-// register is certain.
+// Frames are divided up into three regions.
+// - The first is the fixed header, which always has a constant size and can be
+// predicted before code generation begins depending on the type of code being
+// generated.
+// - The second is the region for spill slots, which is immediately below the
+// fixed header and grows as the register allocator needs to spill to the
+// stack and asks the frame for more space.
+// - The third region, which contains the callee-saved registers, must be
+// reserved after register allocation, since its size can only be precisely
+// determined once the number of used callee-saved registers is certain.
//
// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
// two slots.
@@ -35,10 +43,10 @@ namespace compiler {
// for example JSFunctions store the function context and marker in the fixed
// header, with slot index 2 corresponding to the current function context and 3
// corresponding to the frame marker/JSFunction. The frame region immediately
-// below the fixed header contains spill slots starting a 4 for JsFunctions. The
-// callee-saved frame region below that starts at 4+spilled_slot_count. Callee
-// stack slots corresponding to parameters are accessible through negative slot
-// ids.
+// below the fixed header contains spill slots starting at 4 for JSFunctions.
+// The callee-saved frame region below that starts at 4+spill_slot_count_.
+// Callee stack slots corresponding to parameters are accessible through
+// negative slot ids.
//
// Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its
@@ -47,95 +55,120 @@ namespace compiler {
// Below an example JSFunction Frame with slot ids, frame regions and contents:
//
// slot JS frame
-// +-----------------+----------------------------
-// -n-1 | parameter 0 | ^
-// |- - - - - - - - -| |
-// -n | | Caller
-// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
-// |- - - - - - - - -| |
-// -1 | parameter n | v
-// -----+-----------------+----------------------------
-// 0 | return addr | ^ ^
-// |- - - - - - - - -| | |
-// 1 | saved frame ptr | Fixed |
-// |- - - - - - - - -| Header <-- frame ptr |
-// 2 | Context | | |
-// |- - - - - - - - -| | |
-// 3 |JSFunction/Marker| v |
-// +-----------------+---- |
-// 4 | spill 1 | ^ Callee
-// |- - - - - - - - -| | frame slots
-// ... | ... | Spill slots (slot >= 0)
-// |- - - - - - - - -| | |
-// m+4 | spill m | v |
-// +-----------------+---- |
-// m+5 | callee-saved 1 | ^ |
-// |- - - - - - - - -| | |
-// | ... | Callee-saved |
-// |- - - - - - - - -| | |
-// m+r+4 | callee-saved r | v v
-// -----+-----------------+----- <-- stack ptr ---------
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | Context | | |
+// |- - - - - - - - -| | |
+// 3 |JSFunction/Marker| v |
+// +-----------------+---- |
+// 4 | spill 1 | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | ... | Spill slots (slot >= 0)
+// |- - - - - - - - -| | |
+// m+4 | spill m | v |
+// +-----------------+---- |
+// m+5 | callee-saved 1 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Callee-saved |
+// |- - - - - - - - -| | |
+// m+r+4 | callee-saved r | v v
+// -----+-----------------+----- <-- stack ptr -------------
//
class Frame : public ZoneObject {
public:
- explicit Frame(int fixed_frame_size_in_slots);
+ explicit Frame(int fixed_frame_size_in_slots,
+ const CallDescriptor* descriptor);
- inline int GetTotalFrameSlotCount() { return frame_slot_count_; }
+ static int FPOffsetToSlot(int frame_offset) {
+ return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
+ frame_offset / kPointerSize;
+ }
- inline int GetSavedCalleeRegisterSlotCount() {
- return spilled_callee_register_slot_count_;
+ static int SlotToFPOffset(int slot) {
+ return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
+ kPointerSize;
}
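+
+ // Round-trip sanity check (assuming kFixedSlotCountAboveFp == 2):
+ // SlotToFPOffset(4) == -3 * kPointerSize and
+ // FPOffsetToSlot(-3 * kPointerSize) == 4.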
- inline int GetSpillSlotCount() { return stack_slot_count_; }
+
+ inline bool needs_frame() const { return needs_frame_; }
+ inline void MarkNeedsFrame() { needs_frame_ = true; }
+
+ inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
+
+ inline int GetSpToFpSlotCount() const {
+ return GetTotalFrameSlotCount() -
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ }
+ inline int GetSavedCalleeRegisterSlotCount() const {
+ return callee_saved_slot_count_;
+ }
+ inline int GetSpillSlotCount() const { return spill_slot_count_; }
inline void SetElidedFrameSizeInSlots(int slots) {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
- DCHECK_EQ(0, stack_slot_count_);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ DCHECK_EQ(0, spill_slot_count_);
frame_slot_count_ = slots;
}
void SetAllocatedRegisters(BitVector* regs) {
- DCHECK(allocated_registers_ == NULL);
+ DCHECK(allocated_registers_ == nullptr);
allocated_registers_ = regs;
}
void SetAllocatedDoubleRegisters(BitVector* regs) {
- DCHECK(allocated_double_registers_ == NULL);
+ DCHECK(allocated_double_registers_ == nullptr);
allocated_double_registers_ = regs;
}
- bool DidAllocateDoubleRegisters() {
+ bool DidAllocateDoubleRegisters() const {
return !allocated_double_registers_->IsEmpty();
}
int AlignSavedCalleeRegisterSlots() {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ needs_frame_ = true;
int delta = frame_slot_count_ & 1;
frame_slot_count_ += delta;
return delta;
}
void AllocateSavedCalleeRegisterSlots(int count) {
+ needs_frame_ = true;
frame_slot_count_ += count;
- spilled_callee_register_slot_count_ += count;
+ callee_saved_slot_count_ += count;
}
int AllocateSpillSlot(int width) {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ needs_frame_ = true;
int frame_slot_count_before = frame_slot_count_;
int slot = AllocateAlignedFrameSlot(width);
- stack_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
+ spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
return slot;
}
int ReserveSpillSlots(size_t slot_count) {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
- DCHECK_EQ(0, stack_slot_count_);
- stack_slot_count_ += static_cast<int>(slot_count);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ DCHECK_EQ(0, spill_slot_count_);
+ needs_frame_ = true;
+ spill_slot_count_ += static_cast<int>(slot_count);
frame_slot_count_ += static_cast<int>(slot_count);
return frame_slot_count_ - 1;
}
+ static const int kContextSlot = 2 + StandardFrameConstants::kCPSlotCount;
+ static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
+
private:
int AllocateAlignedFrameSlot(int width) {
DCHECK(width == 4 || width == 8);
@@ -149,9 +182,10 @@ class Frame : public ZoneObject {
}
private:
+ bool needs_frame_;
int frame_slot_count_;
- int spilled_callee_register_slot_count_;
- int stack_slot_count_;
+ int callee_saved_slot_count_;
+ int spill_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
@@ -184,8 +218,40 @@ class FrameOffset {
static const int kFromSp = 1;
static const int kFromFp = 0;
};
-}
-}
-} // namespace v8::internal::compiler
+
+// Encapsulates the mutable state maintained during code generation about the
+// current function's frame.
+class FrameAccessState : public ZoneObject {
+ public:
+ explicit FrameAccessState(Frame* const frame)
+ : frame_(frame), access_frame_with_fp_(false), sp_delta_(0) {
+ SetFrameAccessToDefault();
+ }
+
+ Frame* frame() const { return frame_; }
+
+ int sp_delta() const { return sp_delta_; }
+ void ClearSPDelta() { sp_delta_ = 0; }
+ void IncreaseSPDelta(int amount) { sp_delta_ += amount; }
+
+ bool access_frame_with_fp() const { return access_frame_with_fp_; }
+ void SetFrameAccessToDefault();
+ void SetFrameAccessToFP() { access_frame_with_fp_ = true; }
+ void SetFrameAccessToSP() { access_frame_with_fp_ = false; }
+
+ // Get the frame offset for a given spill slot. The location depends on the
+ // calling convention and the specific frame layout, and may thus be
+ // architecture-specific. Negative spill slots indicate arguments on the
+ // caller's frame.
+ FrameOffset GetFrameOffset(int spill_slot) const;
+
+ private:
+ Frame* const frame_;
+ bool access_frame_with_fp_;
+ int sp_delta_;
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_FRAME_H_
diff --git a/chromium/v8/src/compiler/gap-resolver.cc b/chromium/v8/src/compiler/gap-resolver.cc
index bad0a922748..4107b0f7bf7 100644
--- a/chromium/v8/src/compiler/gap-resolver.cc
+++ b/chromium/v8/src/compiler/gap-resolver.cc
@@ -75,7 +75,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
InstructionOperand source = move->source();
- if (source.EqualsModuloType(destination)) {
+ if (source.EqualsCanonicalized(destination)) {
move->Eliminate();
return;
}
diff --git a/chromium/v8/src/compiler/graph-reducer.cc b/chromium/v8/src/compiler/graph-reducer.cc
index 1be0b6dec7b..6f583d6b6ac 100644
--- a/chromium/v8/src/compiler/graph-reducer.cc
+++ b/chromium/v8/src/compiler/graph-reducer.cc
@@ -23,6 +23,9 @@ enum class GraphReducer::State : uint8_t {
};
+void Reducer::Finalize() {}
+
+
GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
: graph_(graph),
dead_(dead),
@@ -58,7 +61,11 @@ void GraphReducer::ReduceNode(Node* node) {
Push(node);
}
} else {
- break;
+ // Run all finalizers.
+ for (Reducer* const reducer : reducers_) reducer->Finalize();
+
+ // Check if we have new nodes to revisit.
+ if (revisit_.empty()) break;
}
}
DCHECK(revisit_.empty());
diff --git a/chromium/v8/src/compiler/graph-reducer.h b/chromium/v8/src/compiler/graph-reducer.h
index 39c302f8925..683c345c146 100644
--- a/chromium/v8/src/compiler/graph-reducer.h
+++ b/chromium/v8/src/compiler/graph-reducer.h
@@ -47,6 +47,11 @@ class Reducer {
// Try to reduce a node if possible.
virtual Reduction Reduce(Node* node) = 0;
+ // Invoked by the {GraphReducer} when all nodes are done. Can be used to
+ // do additional reductions at the end, which in turn can cause a new round
+ // of reductions.
+ virtual void Finalize();
+
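+ // A minimal sketch of the intended pattern (hypothetical subclass; Defer()
+ // stands in for whatever bookkeeping the reducer needs):
+ //   class MyReducer final : public AdvancedReducer {
+ //     Reduction Reduce(Node* node) final { Defer(node); return NoChange(); }
+ //     void Finalize() final { /* apply the deferred work here */ }
+ //   };
+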
// Helper functions for subclasses to produce reductions for a node.
static Reduction NoChange() { return Reduction(); }
static Reduction Replace(Node* node) { return Reduction(node); }
@@ -68,7 +73,8 @@ class AdvancedReducer : public Reducer {
// Revisit the {node} again later.
virtual void Revisit(Node* node) = 0;
// Replace value uses of {node} with {value} and effect uses of {node} with
- // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // {effect}. If {effect == nullptr}, then use the effect input to {node}. All
// control uses will be relaxed assuming {node} cannot throw.
virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
Node* control) = 0;
@@ -144,7 +150,7 @@ class GraphReducer : public AdvancedReducer::Editor {
void Replace(Node* node, Node* replacement) final;
// Replace value uses of {node} with {value} and effect uses of {node} with
- // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // {effect}. If {effect == nullptr}, then use the effect input to {node}. All
// control uses will be relaxed assuming {node} cannot throw.
void ReplaceWithValue(Node* node, Node* value, Node* effect,
Node* control) final;
diff --git a/chromium/v8/src/compiler/graph-visualizer.cc b/chromium/v8/src/compiler/graph-visualizer.cc
index 07ca04532bc..07851768b3d 100644
--- a/chromium/v8/src/compiler/graph-visualizer.cc
+++ b/chromium/v8/src/compiler/graph-visualizer.cc
@@ -41,7 +41,7 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
'_');
EmbeddedVector<char, 256> full_filename;
- if (phase == NULL) {
+ if (phase == nullptr) {
SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
} else {
SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
@@ -50,9 +50,9 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
}
-static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+static int SafeId(Node* node) { return node == nullptr ? -1 : node->id(); }
static const char* SafeMnemonic(Node* node) {
- return node == NULL ? "null" : node->op()->mnemonic();
+ return node == nullptr ? "null" : node->op()->mnemonic();
}
#define DEAD_COLOR "#999999"
@@ -158,7 +158,7 @@ class JSONGraphEdgeWriter {
void PrintEdges(Node* node) {
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- if (input == NULL) continue;
+ if (input == nullptr) continue;
PrintEdge(node, i, input);
}
}
@@ -169,7 +169,7 @@ class JSONGraphEdgeWriter {
} else {
os_ << ",\n";
}
- const char* edge_type = NULL;
+ const char* edge_type = nullptr;
if (index < NodeProperties::FirstValueIndex(from)) {
edge_type = "unknown";
} else if (index < NodeProperties::FirstContextIndex(from)) {
@@ -207,190 +207,6 @@ std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
}
-class GraphVisualizer {
- public:
- GraphVisualizer(std::ostream& os, Zone* zone, const Graph* graph)
- : all_(zone, graph), os_(os) {}
-
- void Print();
-
- void PrintNode(Node* node, bool gray);
-
- private:
- void PrintEdge(Edge edge);
-
- AllNodes all_;
- std::ostream& os_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
-};
-
-
-static Node* GetControlCluster(Node* node) {
- if (OperatorProperties::IsBasicBlockBegin(node->op())) {
- return node;
- } else if (node->op()->ControlInputCount() == 1) {
- Node* control = NodeProperties::GetControlInput(node, 0);
- return control != NULL &&
- OperatorProperties::IsBasicBlockBegin(control->op())
- ? control
- : NULL;
- } else {
- return NULL;
- }
-}
-
-
-void GraphVisualizer::PrintNode(Node* node, bool gray) {
- Node* control_cluster = GetControlCluster(node);
- if (control_cluster != NULL) {
- os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
- }
- os_ << " ID" << SafeId(node) << " [\n";
-
- os_ << " shape=\"record\"\n";
- switch (node->opcode()) {
- case IrOpcode::kEnd:
- case IrOpcode::kDead:
- case IrOpcode::kStart:
- os_ << " style=\"diagonals\"\n";
- break;
- case IrOpcode::kMerge:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kLoop:
- os_ << " style=\"rounded\"\n";
- break;
- default:
- break;
- }
-
- if (gray) {
- os_ << " style=\"filled\"\n"
- << " fillcolor=\"" DEAD_COLOR "\"\n";
- }
-
- std::ostringstream label;
- label << *node->op();
- os_ << " label=\"{{#" << SafeId(node) << ":" << Escaped(label);
-
- auto i = node->input_edges().begin();
- for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">#" << SafeId((*i).to());
- }
- for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
- ++i, j--) {
- os_ << "|<I" << (*i).index() << ">X #" << SafeId((*i).to());
- }
- for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
- ++i, j--) {
- os_ << "|<I" << (*i).index() << ">F #" << SafeId((*i).to());
- }
- for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">E #" << SafeId((*i).to());
- }
-
- if (OperatorProperties::IsBasicBlockBegin(node->op()) ||
- GetControlCluster(node) == NULL) {
- for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">C #" << SafeId((*i).to());
- }
- }
- os_ << "}";
-
- if (FLAG_trace_turbo_types && NodeProperties::IsTyped(node)) {
- Type* type = NodeProperties::GetType(node);
- std::ostringstream type_out;
- type->PrintTo(type_out);
- os_ << "|" << Escaped(type_out);
- }
- os_ << "}\"\n";
-
- os_ << " ]\n";
- if (control_cluster != NULL) os_ << " }\n";
-}
-
-
-static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
- if (NodeProperties::IsPhi(from)) {
- Node* control = NodeProperties::GetControlInput(from, 0);
- return control != NULL && control->opcode() != IrOpcode::kMerge &&
- control != to && index != 0;
- } else if (from->opcode() == IrOpcode::kLoop) {
- return index != 0;
- } else {
- return false;
- }
-}
-
-
-void GraphVisualizer::PrintEdge(Edge edge) {
- Node* from = edge.from();
- int index = edge.index();
- Node* to = edge.to();
-
- if (!all_.IsLive(to)) return; // skip inputs that point to dead or NULL.
-
- bool unconstrained = IsLikelyBackEdge(from, index, to);
- os_ << " ID" << SafeId(from);
-
- if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
- GetControlCluster(from) == NULL ||
- (from->op()->ControlInputCount() > 0 &&
- NodeProperties::GetControlInput(from) != to)) {
- os_ << ":I" << index << ":n -> ID" << SafeId(to) << ":s"
- << "[" << (unconstrained ? "constraint=false, " : "")
- << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
- << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
- << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
- } else {
- os_ << " -> ID" << SafeId(to) << ":s [color=transparent, "
- << (unconstrained ? "constraint=false, " : "")
- << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
- }
- os_ << "\n";
-}
-
-
-void GraphVisualizer::Print() {
- os_ << "digraph D {\n"
- << " node [fontsize=8,height=0.25]\n"
- << " rankdir=\"BT\"\n"
- << " ranksep=\"1.2 equally\"\n"
- << " overlap=\"false\"\n"
- << " splines=\"true\"\n"
- << " concentrate=\"true\"\n"
- << " \n";
-
- // Find all nodes that are not reachable from end that use live nodes.
- std::set<Node*> gray;
- for (Node* const node : all_.live) {
- for (Node* const use : node->uses()) {
- if (!all_.IsLive(use)) gray.insert(use);
- }
- }
-
- // Make sure all nodes have been output before writing out the edges.
- for (Node* const node : all_.live) PrintNode(node, false);
- for (Node* const node : gray) PrintNode(node, true);
-
- // With all the nodes written, add the edges.
- for (Node* const node : all_.live) {
- for (Edge edge : node->use_edges()) {
- PrintEdge(edge);
- }
- }
- os_ << "}\n";
-}
-
-
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad) {
- Zone tmp_zone;
- GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
- return os;
-}
-
-
class GraphC1Visualizer {
public:
GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
@@ -581,7 +397,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
PrintIndent();
os_ << "flags\n";
- if (current->dominator() != NULL) {
+ if (current->dominator() != nullptr) {
PrintBlockProperty("dominator", current->dominator()->rpo_number());
}
@@ -639,7 +455,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
os_ << " ";
PrintType(node);
}
- if (positions != NULL) {
+ if (positions != nullptr) {
SourcePosition position = positions->GetSourcePosition(node);
if (position.IsKnown()) {
os_ << " pos:" << position.raw();
@@ -652,7 +468,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (control != BasicBlock::kNone) {
PrintIndent();
os_ << "0 0 ";
- if (current->control_input() != NULL) {
+ if (current->control_input() != nullptr) {
PrintNode(current->control_input());
} else {
os_ << -1 - current->rpo_number() << " Goto";
@@ -661,7 +477,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
for (BasicBlock* successor : current->successors()) {
os_ << " B" << successor->rpo_number();
}
- if (FLAG_trace_turbo_types && current->control_input() != NULL) {
+ if (FLAG_trace_turbo_types && current->control_input() != nullptr) {
os_ << " ";
PrintType(current->control_input());
}
@@ -669,13 +485,14 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
}
}
- if (instructions != NULL) {
+ if (instructions != nullptr) {
Tag LIR_tag(this, "LIR");
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
PrintIndent();
- PrintableInstruction printable = {RegisterConfiguration::ArchDefault(),
- instructions->InstructionAt(j)};
+ PrintableInstruction printable = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ instructions->InstructionAt(j)};
os_ << j << " " << printable << " <|@\n";
}
}
@@ -714,18 +531,18 @@ void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
int vreg) {
- if (range != NULL && !range->IsEmpty()) {
+ if (range != nullptr && !range->IsEmpty()) {
PrintIndent();
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
- int assigned_reg = op.index();
if (op.IsDoubleRegister()) {
- os_ << " \"" << DoubleRegister::AllocationIndexToString(assigned_reg)
- << "\"";
+ DoubleRegister assigned_reg = op.GetDoubleRegister();
+ os_ << " \"" << assigned_reg.ToString() << "\"";
} else {
DCHECK(op.IsRegister());
- os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
+ Register assigned_reg = op.GetRegister();
+ os_ << " \"" << assigned_reg.ToString() << "\"";
}
} else if (range->spilled()) {
auto top = range->TopLevel();
@@ -754,7 +571,7 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
}
UsePosition* current_pos = range->first_pos();
- while (current_pos != NULL) {
+ while (current_pos != nullptr) {
if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
os_ << " " << current_pos->pos().value() << " M";
}
diff --git a/chromium/v8/src/compiler/graph-visualizer.h b/chromium/v8/src/compiler/graph-visualizer.h
index d719540e231..1a971a55ed5 100644
--- a/chromium/v8/src/compiler/graph-visualizer.h
+++ b/chromium/v8/src/compiler/graph-visualizer.h
@@ -24,14 +24,6 @@ class SourcePositionTable;
FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
const char* suffix, const char* mode);
-struct AsDOT {
- explicit AsDOT(const Graph& g) : graph(g) {}
- const Graph& graph;
-};
-
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
-
-
struct AsJSON {
AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
const Graph& graph;
@@ -56,8 +48,8 @@ struct AsC1VCompilation {
struct AsC1V {
AsC1V(const char* phase, const Schedule* schedule,
- const SourcePositionTable* positions = NULL,
- const InstructionSequence* instructions = NULL)
+ const SourcePositionTable* positions = nullptr,
+ const InstructionSequence* instructions = nullptr)
: schedule_(schedule),
instructions_(instructions),
positions_(positions),
@@ -76,7 +68,6 @@ struct AsC1VRegisterAllocationData {
const RegisterAllocationData* data_;
};
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac);
std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
std::ostream& operator<<(std::ostream& os,
diff --git a/chromium/v8/src/compiler/graph.h b/chromium/v8/src/compiler/graph.h
index 28686aa2ca4..b53c7fd3083 100644
--- a/chromium/v8/src/compiler/graph.h
+++ b/chromium/v8/src/compiler/graph.h
@@ -87,9 +87,6 @@ class Graph : public ZoneObject {
// Clone the {node}, and assign a new node id to the copy.
Node* CloneNode(const Node* node);
- template <class Visitor>
- inline void VisitNodeInputsFromEnd(Visitor* visitor);
-
Zone* zone() const { return zone_; }
Node* start() const { return start_; }
Node* end() const { return end_; }
diff --git a/chromium/v8/src/compiler/greedy-allocator.cc b/chromium/v8/src/compiler/greedy-allocator.cc
index e0368bf366f..683b75d49fa 100644
--- a/chromium/v8/src/compiler/greedy-allocator.cc
+++ b/chromium/v8/src/compiler/greedy-allocator.cc
@@ -50,19 +50,6 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
}
-// TODO(mtrofin): explain why splitting in gap START is always OK.
-LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
- int instruction_index) {
- LifetimePosition ret = LifetimePosition::Invalid();
-
- ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
- if (range->Start() >= ret || ret >= range->End()) {
- return LifetimePosition::Invalid();
- }
- return ret;
-}
-
-
} // namespace
@@ -249,7 +236,8 @@ void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
float eviction_weight = group_weight;
int eviction_reg = -1;
int free_reg = -1;
- for (int reg = 0; reg < num_registers(); ++reg) {
+ for (int i = 0; i < num_allocatable_registers(); ++i) {
+ int reg = allocatable_register_code(i);
float weight = GetMaximumConflictingWeight(reg, group, group_weight);
if (weight == LiveRange::kInvalidWeight) {
free_reg = reg;
@@ -313,19 +301,20 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// Seek either the first free register, or, from the set of registers
// where the maximum conflict is lower than the candidate's weight, the one
// with the smallest such weight.
- for (int i = 0; i < num_registers(); i++) {
+ for (int i = 0; i < num_allocatable_registers(); i++) {
+ int reg = allocatable_register_code(i);
// Skip unnecessarily re-visiting the hinted register, if any.
- if (i == hinted_reg) continue;
+ if (reg == hinted_reg) continue;
float max_conflict_weight =
- GetMaximumConflictingWeight(i, range, competing_weight);
+ GetMaximumConflictingWeight(reg, range, competing_weight);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = i;
+ free_reg = reg;
break;
}
if (max_conflict_weight < range->weight() &&
max_conflict_weight < smallest_weight) {
smallest_weight = max_conflict_weight;
- evictable_reg = i;
+ evictable_reg = reg;
}
}
}
@@ -372,43 +361,6 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
}
-void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
- size_t initial_range_count = data()->live_ranges().size();
- for (size_t i = 0; i < initial_range_count; ++i) {
- TopLevelLiveRange* range = data()->live_ranges()[i];
- if (!CanProcessRange(range)) continue;
- if (!range->HasSpillOperand()) continue;
-
- LifetimePosition start = range->Start();
- TRACE("Live range %d:%d is defined by a spill operand.\n",
- range->TopLevel()->vreg(), range->relative_id());
- auto next_pos = start;
- if (next_pos.IsGapPosition()) {
- next_pos = next_pos.NextStart();
- }
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == nullptr) {
- Spill(range);
- } else if (pos->pos() > range->Start().NextStart()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- auto split_pos = GetSplitPositionForInstruction(
- range, pos->pos().ToInstructionIndex());
- // There is no place to split, so we can't split and spill.
- if (!split_pos.IsValid()) continue;
-
- split_pos =
- FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
-
- Split(range, data(), split_pos);
- Spill(range);
- }
- }
-}
-
-
void GreedyAllocator::AllocateRegisters() {
CHECK(scheduler().empty());
CHECK(allocations_.empty());
@@ -416,7 +368,7 @@ void GreedyAllocator::AllocateRegisters() {
TRACE("Begin allocating function %s with the Greedy Allocator\n",
data()->debug_name());
- SplitAndSpillRangesDefinedByMemoryOperand();
+ SplitAndSpillRangesDefinedByMemoryOperand(true);
GroupLiveRanges();
ScheduleAllocationCandidates();
PreallocateFixedRanges();
diff --git a/chromium/v8/src/compiler/greedy-allocator.h b/chromium/v8/src/compiler/greedy-allocator.h
index 45bbd87da89..b61ba4242f3 100644
--- a/chromium/v8/src/compiler/greedy-allocator.h
+++ b/chromium/v8/src/compiler/greedy-allocator.h
@@ -128,18 +128,10 @@ class GreedyAllocator final : public RegisterAllocator {
// Evict and reschedule conflicts of a given range, at a given register.
void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
- // Find the optimal split for ranges defined by a memory operand, e.g.
- // constants or function parameters passed on the stack.
- void SplitAndSpillRangesDefinedByMemoryOperand();
-
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
void TryAllocateGroup(LiveRangeGroup* group);
- bool CanProcessRange(LiveRange* range) const {
- return range != nullptr && !range->IsEmpty() && range->kind() == mode();
- }
-
// Calculate the weight of a candidate for allocation.
void EnsureValidRangeWeight(LiveRange* range);
diff --git a/chromium/v8/src/compiler/ia32/code-generator-ia32.cc b/chromium/v8/src/compiler/ia32/code-generator-ia32.cc
index d4fe21505c4..f63bc22e434 100644
--- a/chromium/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/chromium/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -4,6 +4,7 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -11,7 +12,6 @@
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -48,12 +48,18 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return Operand(offset.from_stack_pointer() ? esp : ebp,
offset.offset() + extra);
}
+ Operand ToMaterializableOperand(int materializable_offset) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ Frame::FPOffsetToSlot(materializable_offset));
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
Operand HighOperand(InstructionOperand* op) {
DCHECK(op->IsDoubleStackSlot());
return ToOperand(op, kPointerSize);
@@ -217,6 +223,46 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
XMMRegister const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
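+ // The mode checks below rely on the RecordWriteMode ordering (assumed:
+ // kValueIsMap < kValueIsPointer < kValueIsAny): only an arbitrary tagged
+ // value needs the Smi check, and only pointer-or-arbitrary values need the
+ // page-flag filter before the write barrier stub is called.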
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ lea(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
@@ -286,13 +332,25 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
} while (false)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(esp, ebp);
- __ pop(ebp);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(esp, Immediate(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
+ if (frame()->needs_frame()) {
+ __ mov(ebp, MemOperand(ebp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
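
Both helpers share one piece of bookkeeping: words pushed below the frame are recorded as an SP delta so that later stack-slot accesses still resolve to the right address, and the delta is cleared once the call is emitted. A toy model of that counter, assuming frame_access_state() behaves roughly like this (the real class is not part of this diff):

// Illustrative model of the SP-delta bookkeeping, not the V8 class itself.
class FrameAccessStateModel {
 public:
  void IncreaseSPDelta(int slots) { sp_delta_ += slots; }
  void ClearSPDelta() { sp_delta_ = 0; }
  // With SP-relative addressing, a slot's distance from esp grows by one
  // word for every word pushed since the frame was set up.
  int SlotToSPOffsetInWords(int slot) const { return slot + sp_delta_; }

 private:
  int sp_delta_ = 0;
};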
@@ -308,13 +366,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -323,6 +384,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -335,6 +397,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -344,15 +407,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -362,6 +437,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -374,12 +451,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -401,6 +481,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ mov(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -508,6 +606,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kIA32Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kIA32Tzcnt:
+ __ Tzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kIA32Popcnt:
+ __ Popcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -549,6 +653,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
+ case kSSEFloat32Round: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -889,14 +1000,51 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kIA32PushFloat32:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ Move(kScratchDoubleReg, i.InputDouble(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ movsd(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
+ break;
+ case kIA32PushFloat64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ Move(kScratchDoubleReg, i.InputDouble(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ movsd(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
+ break;
case kIA32Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
__ push(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kIA32Poke: {
@@ -908,24 +1056,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kIA32StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register value = i.InputRegister(2);
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (HasImmediateInput(instr, 1)) {
- int index = i.InputInt32(1);
- Register scratch = i.TempRegister(1);
- __ mov(Operand(object, index), value);
- __ RecordWriteContextSlot(object, index, value, scratch, mode);
- } else {
- Register index = i.InputRegister(1);
- __ mov(Operand(object, index, times_1, 0), value);
- __ lea(index, Operand(object, index, times_1, 0));
- __ RecordWrite(object, index, value, mode);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1285,20 +1415,20 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
// Assemble a prologue similar to the cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
- // TODO(turbofan): this prologue is redundant with OSR, but needed for
+ // TODO(turbofan): this prologue is redundant with OSR, but still needed for
// code aging.
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1346,10 +1476,10 @@ void CodeGenerator::AssembleReturn() {
}
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -1362,14 +1492,14 @@ void CodeGenerator::AssembleReturn() {
}
size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
// Might need ecx for scratch if pop_size is too big.
- DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & ecx.bit());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
__ Ret(static_cast<int>(pop_size), ecx);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1396,11 +1526,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromFrame(src, &offset)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ mov(dst, Operand(ebp, offset));
+ __ mov(dst, g.ToMaterializableOperand(offset));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- __ push(Operand(ebp, offset));
+ __ push(g.ToMaterializableOperand(offset));
__ pop(dst);
}
} else if (destination->IsRegister()) {
@@ -1479,25 +1609,38 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
- __ xchg(dst, src);
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
} else if (source->IsRegister() && destination->IsStackSlot()) {
// Register-memory.
- __ xchg(g.ToRegister(source), g.ToOperand(destination));
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- __ push(dst);
+ Register src = g.ToRegister(source);
__ push(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand dst = g.ToOperand(destination);
+ __ mov(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
__ pop(dst);
- __ pop(src);
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory.
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
diff --git a/chromium/v8/src/compiler/ia32/instruction-codes-ia32.h b/chromium/v8/src/compiler/ia32/instruction-codes-ia32.h
index 2119947e942..816487db8c4 100644
--- a/chromium/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/chromium/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -31,6 +31,8 @@ namespace compiler {
V(IA32Sar) \
V(IA32Ror) \
V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -41,6 +43,7 @@ namespace compiler {
V(SSEFloat32Abs) \
V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
+ V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
@@ -93,8 +96,9 @@ namespace compiler {
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
+ V(IA32PushFloat32) \
+ V(IA32PushFloat64) \
V(IA32Poke) \
- V(IA32StoreWriteBarrier) \
V(IA32StackCheck)
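
Each V(...) entry in these lists is an X-macro: TARGET_ARCH_OPCODE_LIST is combined with COMMON_ARCH_OPCODE_LIST (see the instruction-codes.h hunk below) to generate both the ArchOpcode enumerators and the switch cases in the scheduler. A self-contained illustration of the pattern, with made-up names:

#define DEMO_OPCODE_LIST(V) \
  V(IA32Add)                \
  V(IA32Push)

enum DemoOpcode {
#define DECLARE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE)
#undef DECLARE
};
// The list expands to the enumerators: kIA32Add, kIA32Push,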
diff --git a/chromium/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/chromium/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
new file mode 100644
index 00000000000..0a8fcac59a3
--- /dev/null
+++ b/chromium/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -0,0 +1,135 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kIA32Add:
+ case kIA32And:
+ case kIA32Cmp:
+ case kIA32Test:
+ case kIA32Or:
+ case kIA32Xor:
+ case kIA32Sub:
+ case kIA32Imul:
+ case kIA32ImulHigh:
+ case kIA32UmulHigh:
+ case kIA32Idiv:
+ case kIA32Udiv:
+ case kIA32Not:
+ case kIA32Neg:
+ case kIA32Shl:
+ case kIA32Shr:
+ case kIA32Sar:
+ case kIA32Ror:
+ case kIA32Lzcnt:
+ case kIA32Tzcnt:
+ case kIA32Popcnt:
+ case kIA32Lea:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Mul:
+ case kSSEFloat32Div:
+ case kSSEFloat32Max:
+ case kSSEFloat32Min:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat32Round:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Mul:
+ case kSSEFloat64Div:
+ case kSSEFloat64Mod:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ case kSSEFloat64Sqrt:
+ case kSSEFloat64Round:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ case kSSEInt32ToFloat64:
+ case kSSEUint32ToFloat64:
+ case kSSEFloat64ExtractLowWord32:
+ case kSSEFloat64ExtractHighWord32:
+ case kSSEFloat64InsertLowWord32:
+ case kSSEFloat64InsertHighWord32:
+ case kSSEFloat64LoadLowWord32:
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ case kAVXFloat32Div:
+ case kAVXFloat32Max:
+ case kAVXFloat32Min:
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ case kAVXFloat64Div:
+ case kAVXFloat64Max:
+ case kAVXFloat64Min:
+ case kAVXFloat64Abs:
+ case kAVXFloat64Neg:
+ case kAVXFloat32Abs:
+ case kAVXFloat32Neg:
+ case kIA32BitcastFI:
+ case kIA32BitcastIF:
+ return (instr->addressing_mode() == kMode_None)
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
+
+ case kIA32Movsxbl:
+ case kIA32Movzxbl:
+ case kIA32Movb:
+ case kIA32Movsxwl:
+ case kIA32Movzxwl:
+ case kIA32Movw:
+ case kIA32Movl:
+ case kIA32Movss:
+ case kIA32Movsd:
+ // Moves are used for memory load/store operations.
+ return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
+
+ case kIA32StackCheck:
+ return kIsLoadOperation;
+
+ case kIA32Push:
+ case kIA32PushFloat32:
+ case kIA32PushFloat64:
+ case kIA32Poke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture-independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
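
In the table above, the ALU opcodes are freely reorderable only in their pure register form; as soon as an addressing mode is attached, the same opcode may touch memory and is treated conservatively. A few illustrative classifications (the assembly forms are only indicative):

// add eax, ebx       -> kNoOpcodeFlags                     (pure ALU)
// add eax, [ebp + 8] -> kIsLoadOperation | kHasSideEffect  (memory operand)
// mov eax, [esi]     -> kIsLoadOperation  (kIA32Movl with an output)
// mov [esi], eax     -> kHasSideEffect    (kIA32Movl without an output)
// push eax           -> kHasSideEffect    (kIA32Push)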
diff --git a/chromium/v8/src/compiler/ia32/instruction-selector-ia32.cc b/chromium/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 792d1d5a47e..090645212e9 100644
--- a/chromium/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/chromium/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -50,18 +50,18 @@ class IA32OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == NULL)
+ int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
- if (base != NULL) {
+ if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
- base = NULL;
+ base = nullptr;
}
}
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
@@ -84,7 +84,7 @@ class IA32OperandGenerator final : public OperandGenerator {
}
} else {
DCHECK(scale >= 0 && scale <= 3);
- if (index != NULL) {
+ if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
@@ -109,7 +109,7 @@ class IA32OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -169,29 +169,29 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -214,95 +214,123 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister()};
- Emit(kIA32StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseImmediate(index), g.UseFixed(value, ecx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
} else {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
- Emit(kIA32StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kIA32Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kIA32Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kIA32Movw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Movl;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
}
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kIA32Movss;
- break;
- case kRepFloat64:
- opcode = kIA32Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kIA32Movb;
- break;
- case kRepWord16:
- opcode = kIA32Movw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kIA32Movl;
- break;
- default:
- UNREACHABLE();
- return;
- }
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
- InstructionOperand val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
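
On the write-barrier path, three pieces of information travel in a single InstructionCode word: the architecture opcode, the addressing mode, and the record-write mode. Both halves of the round trip appear verbatim in this diff; side by side:

// Selector side (above): pack the mode bits into the instruction code.
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));

// Generator side (code-generator-ia32.cc above): unpack them again.
RecordWriteMode mode =
    static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));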
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -322,38 +350,42 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
InstructionOperand value_operand =
- g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
- : g.UseRegister(value));
+ g.CanBeImmediate(value) ? g.UseImmediate(value)
+ : ((rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)
+ ? g.UseByteRegister(value)
+ : g.UseRegister(value));
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -524,8 +556,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
VisitShift(this, node, kIA32Shl);
@@ -553,13 +585,25 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
// Try to match the Add to a lea pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
@@ -596,8 +640,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
IA32OperandGenerator g(this);
@@ -816,11 +860,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
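
Each rounding visitor packs an SSE4.1 rounding mode into the opcode's MiscField, and the generator's kSSEFloat32Round case opens a CpuFeatureScope before decoding the mode into the roundss immediate. The operations are only advertised when the CPU supports them, so the two guards pair up (all lines taken from this diff):

// Selector side: only advertise rounding when SSE4.1 is available.
if (CpuFeatures::IsSupported(SSE4_1)) {
  flags |= MachineOperatorBuilder::kFloat32RoundDown;  // and friends
}

// Generator side: assert the feature, then recover the rounding mode.
CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
    static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);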
@@ -831,20 +895,20 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- IA32OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
+}
+
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ IA32OperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -855,150 +919,41 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input)
- : g.UseRegister(input);
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input == nullptr) continue;
- // TODO(titzer): IA32Push cannot handle stack->stack double moves
- // because there is no way to encode fixed double slots.
+ if (input.node() == nullptr) continue;
InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input))
- ? g.UseRegister(input)
- : g.Use(input);
- Emit(kIA32Push, g.NoOutput(), value);
- }
- }
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
+ if (input.type() == MachineType::Float32()) {
+ Emit(kIA32PushFloat32, g.NoOutput(), value);
+ } else if (input.type() == MachineType::Float64()) {
+ Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else {
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
}
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
}
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- IA32OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
-
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): Handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
- Emit(kIA32Push, g.NoOutput(), value);
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
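
EmitPrepareArguments now dispatches on each PushParameter's machine type, so float arguments get the dedicated kIA32PushFloat32/kIA32PushFloat64 sequences (which reserve kDoubleSize and store via movss/movsd) instead of a plain word push. The choice reduces to the following, with PushOpcodeFor a hypothetical helper for illustration:

// Opcode selection per pushed argument, as in EmitPrepareArguments above.
ArchOpcode PushOpcodeFor(MachineType type) {
  if (type == MachineType::Float32()) return kIA32PushFloat32;
  if (type == MachineType::Float64()) return kIA32PushFloat64;
  return kIA32Push;  // word-sized values and tagged pointers
}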
namespace {
@@ -1150,12 +1105,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1357,10 +1312,20 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kWord32Ctz;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt;
+ }
if (CpuFeatures::IsSupported(SSE4_1)) {
- flags |= MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
diff --git a/chromium/v8/src/compiler/instruction-codes.h b/chromium/v8/src/compiler/instruction-codes.h
index cb47be64467..6c31ac8f9d9 100644
--- a/chromium/v8/src/compiler/instruction-codes.h
+++ b/chromium/v8/src/compiler/instruction-codes.h
@@ -33,38 +33,49 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Modes for ArchStoreWithWriteBarrier below.
+enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
+
+
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchTailCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchTailCallJSFunction) \
- V(ArchPrepareCallCFunction) \
- V(ArchCallCFunction) \
- V(ArchJmp) \
- V(ArchLookupSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchPrepareTailCall) \
+ V(ArchLazyBailout) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchThrowTerminator) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadWord64) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreWord64) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64)
+
+#define ARCH_OPCODE_LIST(V) \
+ COMMON_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
diff --git a/chromium/v8/src/compiler/instruction-scheduler.cc b/chromium/v8/src/compiler/instruction-scheduler.cc
new file mode 100644
index 00000000000..2f329ead415
--- /dev/null
+++ b/chromium/v8/src/compiler/instruction-scheduler.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+#include "src/base/adapters.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
+ Zone* zone,
+ Instruction* instr)
+ : instr_(instr),
+ successors_(zone),
+ unscheduled_predecessors_count_(0),
+ latency_(GetInstructionLatency(instr)),
+ total_latency_(-1),
+ start_cycle_(-1) {
+}
+
+
+void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
+ ScheduleGraphNode* node) {
+ successors_.push_back(node);
+ node->unscheduled_predecessors_count_++;
+}
+
+
+InstructionScheduler::InstructionScheduler(Zone* zone,
+ InstructionSequence* sequence)
+ : zone_(zone),
+ sequence_(sequence),
+ graph_(zone),
+ last_side_effect_instr_(nullptr),
+ pending_loads_(zone),
+ last_live_in_reg_marker_(nullptr) {
+}
+
+
+void InstructionScheduler::StartBlock(RpoNumber rpo) {
+ DCHECK(graph_.empty());
+ DCHECK(last_side_effect_instr_ == nullptr);
+ DCHECK(pending_loads_.empty());
+ DCHECK(last_live_in_reg_marker_ == nullptr);
+ sequence()->StartBlock(rpo);
+}
+
+
+void InstructionScheduler::EndBlock(RpoNumber rpo) {
+ ScheduleBlock();
+ sequence()->EndBlock(rpo);
+ graph_.clear();
+ last_side_effect_instr_ = nullptr;
+ pending_loads_.clear();
+ last_live_in_reg_marker_ = nullptr;
+}
+
+
+void InstructionScheduler::AddInstruction(Instruction* instr) {
+ ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
+
+ if (IsBlockTerminator(instr)) {
+ // Make sure that basic block terminators are not moved by adding them
+ // as a successor of every instruction.
+ for (auto node : graph_) {
+ node->AddSuccessor(new_node);
+ }
+ } else if (IsFixedRegisterParameter(instr)) {
+ if (last_live_in_reg_marker_ != nullptr) {
+ last_live_in_reg_marker_->AddSuccessor(new_node);
+ }
+ last_live_in_reg_marker_ = new_node;
+ } else {
+ if (last_live_in_reg_marker_ != nullptr) {
+ last_live_in_reg_marker_->AddSuccessor(new_node);
+ }
+
+ // Instructions with side effects and memory operations can't be
+ // reordered with respect to each other.
+ if (HasSideEffect(instr)) {
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ for (auto load : pending_loads_) {
+ load->AddSuccessor(new_node);
+ }
+ pending_loads_.clear();
+ last_side_effect_instr_ = new_node;
+ } else if (IsLoadOperation(instr)) {
+ // Load operations can't be reordered with side-effecting instructions,
+ // but independent loads can be reordered with respect to each other.
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ pending_loads_.push_back(new_node);
+ }
+
+ // Look for operand dependencies.
+ for (auto node : graph_) {
+ if (HasOperandDependency(node->instruction(), instr)) {
+ node->AddSuccessor(new_node);
+ }
+ }
+ }
+
+ graph_.push_back(new_node);
+}
+
+
+bool InstructionScheduler::CompareNodes(ScheduleGraphNode* node1,
+ ScheduleGraphNode* node2) const {
+ return node1->total_latency() > node2->total_latency();
+}
+
+
+void InstructionScheduler::ScheduleBlock() {
+ ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
+
+ // Compute total latencies so that we can schedule the critical path first.
+ ComputeTotalLatencies();
+
+ // Add nodes which don't have dependencies to the ready list.
+ for (auto node : graph_) {
+ if (!node->HasUnscheduledPredecessor()) {
+ ready_list.push_back(node);
+ }
+ }
+
+ // Go through the ready list and schedule the instructions.
+ int cycle = 0;
+ while (!ready_list.empty()) {
+ auto candidate = ready_list.end();
+ for (auto iterator = ready_list.begin(); iterator != ready_list.end();
+ ++iterator) {
+ // Look for the best candidate to schedule.
+ // We only consider instructions that have all their operands ready, and
+ // we try to schedule the critical path first (we look for the instruction
+ // with the highest latency on the path to the end of the graph).
+ if (cycle >= (*iterator)->start_cycle()) {
+ if ((candidate == ready_list.end()) ||
+ CompareNodes(*iterator, *candidate)) {
+ candidate = iterator;
+ }
+ }
+ }
+
+ if (candidate != ready_list.end()) {
+ sequence()->AddInstruction((*candidate)->instruction());
+
+ for (auto successor : (*candidate)->successors()) {
+ successor->DropUnscheduledPredecessor();
+ successor->set_start_cycle(
+ std::max(successor->start_cycle(),
+ cycle + (*candidate)->latency()));
+
+ if (!successor->HasUnscheduledPredecessor()) {
+ ready_list.push_back(successor);
+ }
+ }
+
+ ready_list.erase(candidate);
+ }
+
+ cycle++;
+ }
+}
+
+
+int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArchNop:
+ case kArchStackPointer:
+ case kArchFramePointer:
+ case kArchTruncateDoubleToI:
+ return kNoOpcodeFlags;
+
+ case kArchPrepareCallCFunction:
+ case kArchPrepareTailCall:
+ case kArchCallCFunction:
+ case kArchCallCodeObject:
+ case kArchCallJSFunction:
+ case kArchLazyBailout:
+ return kHasSideEffect;
+
+ case kArchTailCallCodeObject:
+ case kArchTailCallJSFunction:
+ return kHasSideEffect | kIsBlockTerminator;
+
+ case kArchDeoptimize:
+ case kArchJmp:
+ case kArchLookupSwitch:
+ case kArchTableSwitch:
+ case kArchRet:
+ case kArchThrowTerminator:
+ return kIsBlockTerminator;
+
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ return kIsLoadOperation;
+
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ case kArchStoreWithWriteBarrier:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ return GetTargetInstructionFlags(instr);
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+bool InstructionScheduler::HasOperandDependency(
+ const Instruction* instr1, const Instruction* instr2) const {
+ for (size_t i = 0; i < instr1->OutputCount(); ++i) {
+ for (size_t j = 0; j < instr2->InputCount(); ++j) {
+ const InstructionOperand* output = instr1->OutputAt(i);
+ const InstructionOperand* input = instr2->InputAt(j);
+
+ if (output->IsUnallocated() && input->IsUnallocated() &&
+ (UnallocatedOperand::cast(output)->virtual_register() ==
+ UnallocatedOperand::cast(input)->virtual_register())) {
+ return true;
+ }
+
+ if (output->IsConstant() && input->IsUnallocated() &&
+ (ConstantOperand::cast(output)->virtual_register() ==
+ UnallocatedOperand::cast(input)->virtual_register())) {
+ return true;
+ }
+ }
+ }
+
+ // TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies?
+
+ return false;
+}
+
+
+bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
+ return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
+ (instr->flags_mode() == kFlags_branch));
+}
+
+
+void InstructionScheduler::ComputeTotalLatencies() {
+ for (auto node : base::Reversed(graph_)) {
+ int max_latency = 0;
+
+ for (auto successor : node->successors()) {
+ DCHECK(successor->total_latency() != -1);
+ if (successor->total_latency() > max_latency) {
+ max_latency = successor->total_latency();
+ }
+ }
+
+ node->set_total_latency(max_latency + node->latency());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
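
A small worked example of the scheduling loop, with assumed latencies (the IA32 GetInstructionLatency above returns 1 for everything today, so real graphs are flatter):

// Block with three instructions:
//   A: latency 3, defines v1 (e.g. a load)
//   B: latency 1, independent of A and C
//   C: latency 1, uses v1 (operand dependency A -> C)
// ComputeTotalLatencies (reverse order): C = 1, B = 1, A = 1 + 3 = 4.
// cycle 0: ready = {A, B}; A wins (total latency 4 > 1) and is emitted.
//          C joins the ready list with start_cycle = 0 + 3 = 3.
// cycle 1: B is the only eligible node and is emitted.
// cycle 2: nothing issues (C's start_cycle of 3 has not been reached).
// cycle 3: C is emitted. Final order: A, B, C.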
diff --git a/chromium/v8/src/compiler/instruction-scheduler.h b/chromium/v8/src/compiler/instruction-scheduler.h
new file mode 100644
index 00000000000..fafbe479080
--- /dev/null
+++ b/chromium/v8/src/compiler/instruction-scheduler.h
@@ -0,0 +1,162 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+
+#include "src/compiler/instruction.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A set of flags describing properties of the instructions so that the
+// scheduler is aware of dependencies between instructions.
+enum ArchOpcodeFlags {
+ kNoOpcodeFlags = 0,
+ kIsBlockTerminator = 1, // The instruction marks the end of a basic block
+ // e.g.: jump and return instructions.
+ kHasSideEffect = 2, // The instruction has some side effects (memory
+ // store, function call...)
+ kIsLoadOperation = 4, // The instruction is a memory load.
+};
+
+
+class InstructionScheduler final : public ZoneObject {
+ public:
+ InstructionScheduler(Zone* zone, InstructionSequence* sequence);
+
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
+
+ void AddInstruction(Instruction* instr);
+
+ static bool SchedulerSupported();
+
+ private:
+ // A scheduling graph node.
+ // Represents an instruction and its dependencies.
+ class ScheduleGraphNode : public ZoneObject {
+ public:
+ ScheduleGraphNode(Zone* zone, Instruction* instr);
+
+ // Mark the instruction represented by 'node' as a dependency of this one.
+ // The current instruction will be registered as an unscheduled predecessor
+ // of 'node' (i.e. it must be scheduled before 'node').
+ void AddSuccessor(ScheduleGraphNode* node);
+
+ // Check if all the predecessors of this instruction have been scheduled.
+ bool HasUnscheduledPredecessor() {
+ return unscheduled_predecessors_count_ != 0;
+ }
+
+ // Record that we have scheduled one of the predecessors of this node.
+ void DropUnscheduledPredecessor() {
+ DCHECK(unscheduled_predecessors_count_ > 0);
+ unscheduled_predecessors_count_--;
+ }
+
+ Instruction* instruction() { return instr_; }
+ ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
+ int latency() const { return latency_; }
+
+ int total_latency() const { return total_latency_; }
+ void set_total_latency(int latency) { total_latency_ = latency; }
+
+ int start_cycle() const { return start_cycle_; }
+ void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }
+
+ private:
+ Instruction* instr_;
+ ZoneDeque<ScheduleGraphNode*> successors_;
+
+ // Number of unscheduled predecessors for this node.
+ int unscheduled_predecessors_count_;
+
+ // Estimate of the instruction latency (the number of cycles it takes
+ // for the instruction to complete).
+ int latency_;
+
+ // The sum of all the latencies on the path from this node to the end of
+ // the graph (i.e. a node with no successor).
+ int total_latency_;
+
+ // The scheduler keeps a nominal cycle count to keep track of when the
+ // result of an instruction is available. This field is updated by the
+ // scheduler to indicate when the value of all the operands of this
+ // instruction will be available.
+ int start_cycle_;
+ };
+
+ // Compare the two nodes and return true if node1 is a better candidate than
+ // node2 (i.e. node1 should be scheduled before node2).
+ bool CompareNodes(ScheduleGraphNode* node1, ScheduleGraphNode* node2) const;
+
+ // Perform scheduling for the current block.
+ void ScheduleBlock();
+
+ // Return the scheduling properties of the given instruction.
+ int GetInstructionFlags(const Instruction* instr) const;
+ int GetTargetInstructionFlags(const Instruction* instr) const;
+
+ // Return true if instr2 uses any value defined by instr1.
+ bool HasOperandDependency(const Instruction* instr1,
+ const Instruction* instr2) const;
+
+ // Return true if the instruction is a basic block terminator.
+ bool IsBlockTerminator(const Instruction* instr) const;
+
+ // Check whether the given instruction has side effects (e.g. function call,
+ // memory store).
+ bool HasSideEffect(const Instruction* instr) const {
+ return GetInstructionFlags(instr) & kHasSideEffect;
+ }
+
+ // Return true if the instruction is a memory load.
+ bool IsLoadOperation(const Instruction* instr) const {
+ return GetInstructionFlags(instr) & kIsLoadOperation;
+ }
+
+ // Identify nops used as definition points for live-in registers at
+ // function entry.
+ bool IsFixedRegisterParameter(const Instruction* instr) const {
+ return (instr->arch_opcode() == kArchNop) &&
+ (instr->OutputCount() == 1) &&
+ (instr->OutputAt(0)->IsUnallocated()) &&
+ UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
+ }
+
+ void ComputeTotalLatencies();
+
+ static int GetInstructionLatency(const Instruction* instr);
+
+ Zone* zone() { return zone_; }
+ InstructionSequence* sequence() { return sequence_; }
+
+ Zone* zone_;
+ InstructionSequence* sequence_;
+ ZoneVector<ScheduleGraphNode*> graph_;
+
+ // Last side effect instruction encountered while building the graph.
+ ScheduleGraphNode* last_side_effect_instr_;
+
+ // Set of load instructions encountered since the last side-effecting
+ // instruction; these will be added as predecessors of the next instruction
+ // with side effects.
+ ZoneVector<ScheduleGraphNode*> pending_loads_;
+
+ // Live-in register markers are nop instructions which are emitted at the
+ // beginning of a basic block so that the register allocator will find a
+ // defining instruction for live-in values. They must not be moved.
+ // All these nops are chained together and added as a predecessor of every
+ // other instruction in the basic block.
+ ScheduleGraphNode* last_live_in_reg_marker_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SCHEDULER_H_
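
The intended call pattern, inferred from the interface above (the block iteration names are placeholders; the InstructionSequence plumbing is outside this diff):

// Hypothetical driver showing the scheduler's lifecycle per basic block.
InstructionScheduler scheduler(zone, sequence);
for (const InstructionBlock* block : blocks) {
  scheduler.StartBlock(block->rpo_number());
  for (Instruction* instr : InstructionsOf(block)) {
    scheduler.AddInstruction(instr);  // build the dependency graph
  }
  scheduler.EndBlock(block->rpo_number());  // schedule and emit the block
}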
diff --git a/chromium/v8/src/compiler/instruction-selector-impl.h b/chromium/v8/src/compiler/instruction-selector-impl.h
index 88283d48987..5cca8880d58 100644
--- a/chromium/v8/src/compiler/instruction-selector-impl.h
+++ b/chromium/v8/src/compiler/instruction-selector-impl.h
@@ -51,15 +51,13 @@ class OperandGenerator {
InstructionOperand DefineAsFixed(Node* node, Register reg) {
return Define(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
}
InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
return Define(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
}
InstructionOperand DefineAsConstant(Node* node) {
@@ -70,8 +68,16 @@ class OperandGenerator {
}
InstructionOperand DefineAsLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Define(node, ToUnallocatedOperand(location, type, GetVReg(node)));
+ MachineRepresentation rep) {
+ return Define(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ }
+
+ InstructionOperand DefineAsDualLocation(Node* node,
+ LinkageLocation primary_location,
+ LinkageLocation secondary_location) {
+ return Define(node,
+ ToDualLocationUnallocatedOperand(
+ primary_location, secondary_location, GetVReg(node)));
}
InstructionOperand Use(Node* node) {
@@ -80,6 +86,12 @@ class OperandGenerator {
GetVReg(node)));
}
+ InstructionOperand UseAny(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+ UnallocatedOperand::USED_AT_START,
+ GetVReg(node)));
+ }
+
InstructionOperand UseRegister(Node* node) {
return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START,
@@ -107,15 +119,24 @@ class OperandGenerator {
InstructionOperand UseFixed(Node* node, Register reg) {
return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
}
InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
return Use(node,
UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg),
- GetVReg(node)));
+ reg.code(), GetVReg(node)));
+ }
+
+ InstructionOperand UseExplicit(LinkageLocation location) {
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+ if (location.IsRegister()) {
+ return ExplicitOperand(LocationOperand::REGISTER, rep,
+ location.AsRegister());
+ } else {
+ return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
+ location.GetLocation());
+ }
}
InstructionOperand UseImmediate(Node* node) {
@@ -123,8 +144,20 @@ class OperandGenerator {
}
InstructionOperand UseLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Use(node, ToUnallocatedOperand(location, type, GetVReg(node)));
+ MachineRepresentation rep) {
+ return Use(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ }
+
+ // Used to force gap moves from the from_location to the to_location
+ // immediately before an instruction.
+ InstructionOperand UsePointerLocation(LinkageLocation to_location,
+ LinkageLocation from_location) {
+ MachineRepresentation rep = MachineType::PointerRepresentation();
+ UnallocatedOperand casted_from_operand =
+ UnallocatedOperand::cast(TempLocation(from_location, rep));
+ selector_->Emit(kArchNop, casted_from_operand);
+ return ToUnallocatedOperand(to_location, rep,
+ casted_from_operand.virtual_register());
}
InstructionOperand TempRegister() {
@@ -137,13 +170,13 @@ class OperandGenerator {
UnallocatedOperand op = UnallocatedOperand(
UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
- sequence()->MarkAsRepresentation(kRepFloat64, op.virtual_register());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kFloat64,
+ op.virtual_register());
return op;
}
InstructionOperand TempRegister(Register reg) {
- return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
- Register::ToAllocationIndex(reg),
+ return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, reg.code(),
InstructionOperand::kInvalidVirtualRegister);
}
@@ -151,8 +184,9 @@ class OperandGenerator {
return sequence()->AddImmediate(Constant(imm));
}
- InstructionOperand TempLocation(LinkageLocation location, MachineType type) {
- return ToUnallocatedOperand(location, type,
+ InstructionOperand TempLocation(LinkageLocation location,
+ MachineRepresentation rep) {
+ return ToUnallocatedOperand(location, rep,
sequence()->NextVirtualRegister());
}
@@ -205,8 +239,20 @@ class OperandGenerator {
return operand;
}
+ UnallocatedOperand ToDualLocationUnallocatedOperand(
+ LinkageLocation primary_location, LinkageLocation secondary_location,
+ int virtual_register) {
+ // We only support the primary location being a register and the secondary
+ // one a slot.
+ DCHECK(primary_location.IsRegister() &&
+ secondary_location.IsCalleeFrameSlot());
+ int reg_id = primary_location.AsRegister();
+ int slot_id = secondary_location.AsCalleeFrameSlot();
+ return UnallocatedOperand(reg_id, slot_id, virtual_register);
+ }
+
UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
- MachineType type,
+ MachineRepresentation rep,
int virtual_register) {
if (location.IsAnyRegister()) {
// any machine register.
@@ -224,8 +270,7 @@ class OperandGenerator {
location.AsCalleeFrameSlot(), virtual_register);
}
// a fixed register.
- MachineType rep = RepresentationOf(type);
- if (rep == kRepFloat64 || rep == kRepFloat32) {
+ if (IsFloatingPoint(rep)) {
return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
location.AsRegister(), virtual_register);
}
@@ -317,33 +362,6 @@ class FlagsContinuation final {
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
};
-
-// An internal helper class for generating the operands to calls.
-// TODO(bmeurer): Get rid of the CallBuffer business and make
-// InstructionSelector::VisitCall platform independent instead.
-struct CallBuffer {
- CallBuffer(Zone* zone, const CallDescriptor* descriptor,
- FrameStateDescriptor* frame_state);
-
- const CallDescriptor* descriptor;
- FrameStateDescriptor* frame_state_descriptor;
- NodeVector output_nodes;
- InstructionOperandVector outputs;
- InstructionOperandVector instruction_args;
- NodeVector pushed_nodes;
-
- size_t input_count() const { return descriptor->InputCount(); }
-
- size_t frame_state_count() const { return descriptor->FrameStateCount(); }
-
- size_t frame_state_value_count() const {
- return (frame_state_descriptor == NULL)
- ? 0
- : (frame_state_descriptor->GetTotalSize() +
- 1); // Include deopt id.
- }
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
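The effect of the MachineType to MachineRepresentation split is easiest to see at a call site. Below is a hypothetical backend visitor, for illustration only: kArchNop and rax are placeholders rather than a real V8 selector, and the point is that fixed operands are now keyed by reg.code() instead of an allocation index.

    // Hypothetical visitor sketch, not a real V8 backend.
    void InstructionSelector::VisitSomeBinop(Node* node) {
      OperandGenerator g(this);
      Emit(kArchNop,                           // placeholder opcode
           g.DefineAsFixed(node, rax),         // result pinned via rax.code()
           g.UseRegister(node->InputAt(0)),    // left input in any register
           g.UseImmediate(node->InputAt(1)));  // right input as an immediate
    }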
diff --git a/chromium/v8/src/compiler/instruction-selector.cc b/chromium/v8/src/compiler/instruction-selector.cc
index 7200bf0e7a2..86868e59eee 100644
--- a/chromium/v8/src/compiler/instruction-selector.cc
+++ b/chromium/v8/src/compiler/instruction-selector.cc
@@ -12,6 +12,7 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
+#include "src/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -29,12 +30,13 @@ InstructionSelector::InstructionSelector(
source_position_mode_(source_position_mode),
features_(features),
schedule_(schedule),
- current_block_(NULL),
+ current_block_(nullptr),
instructions_(zone),
defined_(node_count, false, zone),
used_(node_count, false, zone),
virtual_registers_(node_count,
- InstructionOperand::kInvalidVirtualRegister, zone) {
+ InstructionOperand::kInvalidVirtualRegister, zone),
+ scheduler_(nullptr) {
instructions_.reserve(node_count);
}
@@ -61,17 +63,55 @@ void InstructionSelector::SelectInstructions() {
}
// Schedule the selected instructions.
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
+ }
+
for (auto const block : *blocks) {
InstructionBlock* instruction_block =
sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
DCHECK_LE(end, start);
- sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
+ StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
- sequence()->AddInstruction(instructions_[start]);
+ AddInstruction(instructions_[start]);
}
- sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
+ EndBlock(RpoNumber::FromInt(block->rpo_number()));
+ }
+}
+
+
+void InstructionSelector::StartBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->StartBlock(rpo);
+ } else {
+ sequence()->StartBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::EndBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->EndBlock(rpo);
+ } else {
+ sequence()->EndBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::AddInstruction(Instruction* instr) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->AddInstruction(instr);
+ } else {
+ sequence()->AddInstruction(instr);
}
}
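StartBlock, EndBlock and AddInstruction deliberately mirror the InstructionSequence interface, so the scheduler can buffer one basic block at a time, reorder it, and flush. A hedged sketch of the scheduler-side flush, assuming ScheduleBlock() emits the reordered instructions into sequence() and that per-block state is reset afterwards (neither is shown in this patch):

    // Assumed counterpart in instruction-scheduler.cc, illustration only.
    void InstructionScheduler::EndBlock(RpoNumber rpo) {
      ScheduleBlock();              // order graph_ and emit into sequence()
      sequence()->EndBlock(rpo);
      graph_.clear();               // reset per-block state
      last_side_effect_instr_ = nullptr;
      pending_loads_.clear();
      last_live_in_reg_marker_ = nullptr;
    }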
@@ -81,7 +121,7 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
size_t temp_count,
InstructionOperand* temps) {
size_t output_count = output.IsInvalid() ? 0 : 1;
- return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+ return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
}
@@ -240,42 +280,221 @@ void InstructionSelector::MarkAsUsed(Node* node) {
}
-void InstructionSelector::MarkAsRepresentation(MachineType rep,
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
const InstructionOperand& op) {
UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
- rep = RepresentationOf(rep);
sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
}
-void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
- rep = RepresentationOf(rep);
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ Node* node) {
sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
+namespace {
+
+enum class FrameStateInputKind { kAny, kStackSlot };
+
+
+InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
+ FrameStateInputKind kind) {
+ switch (input->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kHeapConstant:
+ return g->UseImmediate(input);
+ case IrOpcode::kObjectState:
+ UNREACHABLE();
+ break;
+ default:
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ return g->UseAny(input);
+ }
+ }
+ UNREACHABLE();
+ return InstructionOperand();
+}
+
+
+class StateObjectDeduplicator {
+ public:
+ explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
+ static const size_t kNotDuplicated = SIZE_MAX;
+
+ size_t GetObjectId(Node* node) {
+ for (size_t i = 0; i < objects_.size(); ++i) {
+ if (objects_[i] == node) {
+ return i;
+ }
+ }
+ return kNotDuplicated;
+ }
+
+ size_t InsertObject(Node* node) {
+ size_t id = objects_.size();
+ objects_.push_back(node);
+ return id;
+ }
+
+ private:
+ ZoneVector<Node*> objects_;
+};
+
+
+// Returns the number of instruction operands added to inputs.
+size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* input, MachineType type,
+ FrameStateInputKind kind, Zone* zone) {
+ switch (input->opcode()) {
+ case IrOpcode::kObjectState: {
+ size_t id = deduplicator->GetObjectId(input);
+ if (id == StateObjectDeduplicator::kNotDuplicated) {
+ size_t entries = 0;
+ id = deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Recursive(zone, id));
+ StateValueDescriptor* new_desc = &descriptor->fields().back();
+ for (Edge edge : input->input_edges()) {
+ entries += AddOperandToStateValueDescriptor(
+ new_desc, inputs, g, deduplicator, edge.to(),
+ MachineType::AnyTagged(), kind, zone);
+ }
+ return entries;
+ } else {
+ // Crankshaft counts duplicate objects for the running id, so we have
+ // to push the input again.
+ deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Duplicate(zone, id));
+ return 0;
+ }
+ break;
+ }
+ default: {
+ inputs->push_back(OperandForDeopt(g, input, kind));
+ descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
+ return 1;
+ }
+ }
+}
+
+
+// Returns the number of instruction operands added to inputs.
+size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+ Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone) {
+ DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+
+ size_t entries = 0;
+ size_t initial_size = inputs->size();
+ USE(initial_size); // initial_size is only used for debug.
+
+ if (descriptor->outer_state()) {
+ entries += AddInputsToFrameStateDescriptor(
+ descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
+ g, deduplicator, inputs, kind, zone);
+ }
+
+ Node* parameters = state->InputAt(kFrameStateParametersInput);
+ Node* locals = state->InputAt(kFrameStateLocalsInput);
+ Node* stack = state->InputAt(kFrameStateStackInput);
+ Node* context = state->InputAt(kFrameStateContextInput);
+ Node* function = state->InputAt(kFrameStateFunctionInput);
+
+ DCHECK_EQ(descriptor->parameters_count(),
+ StateValuesAccess(parameters).size());
+ DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
+ DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
+
+ StateValueDescriptor* values_descriptor =
+ descriptor->GetStateValueDescriptor();
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, function,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
+ for (StateValuesAccess::TypedNode input_node :
+ StateValuesAccess(parameters)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ if (descriptor->HasContext()) {
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, context,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ DCHECK_EQ(initial_size + entries, inputs->size());
+ return entries;
+}
+
+} // namespace
+
+
+// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
- FrameStateDescriptor* frame_desc)
- : descriptor(d),
- frame_state_descriptor(frame_desc),
- output_nodes(zone),
- outputs(zone),
- instruction_args(zone),
- pushed_nodes(zone) {
- output_nodes.reserve(d->ReturnCount());
- outputs.reserve(d->ReturnCount());
- pushed_nodes.reserve(input_count());
- instruction_args.reserve(input_count() + frame_state_value_count());
-}
+struct CallBuffer {
+ CallBuffer(Zone* zone, const CallDescriptor* descriptor,
+ FrameStateDescriptor* frame_state)
+ : descriptor(descriptor),
+ frame_state_descriptor(frame_state),
+ output_nodes(zone),
+ outputs(zone),
+ instruction_args(zone),
+ pushed_nodes(zone) {
+ output_nodes.reserve(descriptor->ReturnCount());
+ outputs.reserve(descriptor->ReturnCount());
+ pushed_nodes.reserve(input_count());
+ instruction_args.reserve(input_count() + frame_state_value_count());
+ }
+
+ const CallDescriptor* descriptor;
+ FrameStateDescriptor* frame_state_descriptor;
+ NodeVector output_nodes;
+ InstructionOperandVector outputs;
+ InstructionOperandVector instruction_args;
+ ZoneVector<PushParameter> pushed_nodes;
+
+ size_t input_count() const { return descriptor->InputCount(); }
+
+ size_t frame_state_count() const { return descriptor->FrameStateCount(); }
+
+ size_t frame_state_value_count() const {
+ return (frame_state_descriptor == nullptr)
+ ? 0
+ : (frame_state_descriptor->GetTotalSize() +
+ 1); // Include deopt id.
+ }
+};
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate) {
+ CallBufferFlags flags,
+ int stack_param_delta) {
OperandGenerator g(this);
DCHECK_LE(call->op()->ValueOutputCount(),
static_cast<int>(buffer->descriptor->ReturnCount()));
@@ -300,13 +519,13 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// Filter out the outputs that aren't live because no projection uses them.
size_t outputs_needed_by_framestate =
- buffer->frame_state_descriptor == NULL
+ buffer->frame_state_descriptor == nullptr
? 0
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live =
- buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+ bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ i < outputs_needed_by_framestate;
if (output_is_live) {
MachineType type =
buffer->descriptor->GetReturnType(static_cast<int>(i));
@@ -315,9 +534,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* output = buffer->output_nodes[i];
InstructionOperand op =
- output == NULL ? g.TempLocation(location, type)
- : g.DefineAsLocation(output, location, type);
- MarkAsRepresentation(type, op);
+ output == nullptr
+ ? g.TempLocation(location, type.representation())
+ : g.DefineAsLocation(output, location, type.representation());
+ MarkAsRepresentation(type.representation(), op);
buffer->outputs.push_back(op);
}
@@ -326,6 +546,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// The first argument is always the callee code.
Node* callee = call->InputAt(0);
+ bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
+ bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
buffer->instruction_args.push_back(
@@ -343,7 +565,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
- buffer->descriptor->GetInputType(0)));
+ buffer->descriptor->GetInputType(0).representation()));
+ break;
+ case CallDescriptor::kLazyBailout:
+ // The target is ignored, but we still need to pass a value here.
+ buffer->instruction_args.push_back(g.UseImmediate(callee));
break;
}
DCHECK_EQ(1u, buffer->instruction_args.size());
@@ -352,19 +578,26 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// follows (n is the number of value inputs to the frame state):
// arg 1 : deoptimization id.
// arg 2 - arg (n + 1) : value inputs to the frame state.
- if (buffer->frame_state_descriptor != NULL) {
+ size_t frame_state_entries = 0;
+ USE(frame_state_entries); // frame_state_entries is only used for debug.
+ if (buffer->frame_state_descriptor != nullptr) {
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
- AddFrameStateInputs(frame_state, &buffer->instruction_args,
- buffer->frame_state_descriptor,
- FrameStateInputKind::kStackSlot);
+
+ StateObjectDeduplicator deduplicator(instruction_zone());
+
+ frame_state_entries =
+ 1 + AddInputsToFrameStateDescriptor(
+ buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
+ &buffer->instruction_args, FrameStateInputKind::kStackSlot,
+ instruction_zone());
+
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
- DCHECK(1 + buffer->frame_state_value_count() ==
- buffer->instruction_args.size());
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -374,27 +607,47 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
size_t pushed_count = 0;
+ bool call_tail = (flags & kCallTail) != 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
+
+ LinkageLocation location = buffer->descriptor->GetInputLocation(index);
+ if (call_tail) {
+ location = LinkageLocation::ConvertToTailCallerLocation(
+ location, stack_param_delta);
+ }
InstructionOperand op =
- g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
- buffer->descriptor->GetInputType(index));
- if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+ g.UseLocation(*iter, location,
+ buffer->descriptor->GetInputType(index).representation());
+ if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
- buffer->pushed_nodes.resize(stack_index + 1, NULL);
+ buffer->pushed_nodes.resize(stack_index + 1);
}
- DCHECK(!buffer->pushed_nodes[stack_index]);
- buffer->pushed_nodes[stack_index] = *iter;
+ PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
+ buffer->pushed_nodes[stack_index] = parameter;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- buffer->frame_state_value_count());
+ frame_state_entries);
+ if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
+ stack_param_delta != 0) {
+ // For tail calls that change the size of their parameter list and keep
+ // their return address on the stack, move the return address to just above
+ // the parameters.
+ LinkageLocation saved_return_location =
+ LinkageLocation::ForSavedCallerReturnAddress();
+ InstructionOperand return_address =
+ g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
+ saved_return_location, stack_param_delta),
+ saved_return_location);
+ buffer->instruction_args.push_back(return_address);
+ }
}
@@ -435,7 +688,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
instruction_block->set_code_start(static_cast<int>(instructions_.size()));
instruction_block->set_code_end(current_block_end);
- current_block_ = NULL;
+ current_block_ = nullptr;
}
@@ -481,7 +734,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
// All other successors must be cases.
sw.case_count = block->SuccessorCount() - 1;
- DCHECK_LE(1u, sw.case_count);
sw.case_branches = &block->successors().front();
// Determine case values and their min/max.
sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
@@ -504,15 +756,12 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kReturn: {
DCHECK_EQ(IrOpcode::kReturn, input->opcode());
- return VisitReturn(input->InputAt(0));
+ return VisitReturn(input);
}
case BasicBlock::kDeoptimize: {
- // If the result itself is a return, return its input.
- Node* value =
- (input != nullptr && input->opcode() == IrOpcode::kDeoptimize)
- ? input->InputAt(0)
- : input;
- return VisitDeoptimize(value);
+ DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+ Node* value = input->InputAt(0);
+ return VisitDeoptimize(kind, value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -545,23 +794,26 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kTerminate:
+ case IrOpcode::kBeginRegion:
// No code needed for these graph artifacts.
return;
case IrOpcode::kIfException:
return MarkAsReference(node), VisitIfException(node);
- case IrOpcode::kFinish:
- return MarkAsReference(node), VisitFinish(node);
+ case IrOpcode::kFinishRegion:
+ return MarkAsReference(node), VisitFinishRegion(node);
+ case IrOpcode::kGuard:
+ return MarkAsReference(node), VisitGuard(node);
case IrOpcode::kParameter: {
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
- MarkAsRepresentation(type, node);
+ MarkAsRepresentation(type.representation(), node);
return VisitParameter(node);
}
case IrOpcode::kOsrValue:
return MarkAsReference(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
- MachineType type = OpParameter<MachineType>(node);
- MarkAsRepresentation(type, node);
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
case IrOpcode::kProjection:
@@ -585,10 +837,11 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitCall(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
return;
case IrOpcode::kLoad: {
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- MarkAsRepresentation(rep, node);
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
case IrOpcode::kStore:
@@ -611,6 +864,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord32Equal(node);
case IrOpcode::kWord32Clz:
return MarkAsWord32(node), VisitWord32Clz(node);
+ case IrOpcode::kWord32Ctz:
+ return MarkAsWord32(node), VisitWord32Ctz(node);
+ case IrOpcode::kWord32Popcnt:
+ return MarkAsWord32(node), VisitWord32Popcnt(node);
+ case IrOpcode::kWord64Popcnt:
+ return MarkAsWord32(node), VisitWord64Popcnt(node);
case IrOpcode::kWord64And:
return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
@@ -625,6 +884,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Ror:
return MarkAsWord64(node), VisitWord64Ror(node);
+ case IrOpcode::kWord64Clz:
+ return MarkAsWord64(node), VisitWord64Clz(node);
+ case IrOpcode::kWord64Ctz:
+ return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
@@ -659,8 +922,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return MarkAsWord64(node), VisitInt64Add(node);
+ case IrOpcode::kInt64AddWithOverflow:
+ return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
return MarkAsWord64(node), VisitInt64Sub(node);
+ case IrOpcode::kInt64SubWithOverflow:
+ return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
@@ -689,6 +956,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kChangeInt32ToInt64:
return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
@@ -699,8 +974,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kRoundInt64ToFloat32:
+ return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+ case IrOpcode::kRoundInt64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kRoundUint64ToFloat32:
+ return MarkAsFloat32(node), VisitRoundUint64ToFloat32(node);
+ case IrOpcode::kRoundUint64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
case IrOpcode::kBitcastFloat64ToInt64:
return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
case IrOpcode::kBitcastInt32ToFloat32:
@@ -753,12 +1036,24 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
+ case IrOpcode::kFloat32RoundDown:
+ return MarkAsFloat32(node), VisitFloat32RoundDown(node);
case IrOpcode::kFloat64RoundDown:
return MarkAsFloat64(node), VisitFloat64RoundDown(node);
+ case IrOpcode::kFloat32RoundUp:
+ return MarkAsFloat32(node), VisitFloat32RoundUp(node);
+ case IrOpcode::kFloat64RoundUp:
+ return MarkAsFloat64(node), VisitFloat64RoundUp(node);
+ case IrOpcode::kFloat32RoundTruncate:
+ return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
+ case IrOpcode::kFloat32RoundTiesEven:
+ return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
+ case IrOpcode::kFloat64RoundTiesEven:
+ return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
case IrOpcode::kFloat64ExtractLowWord32:
return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
case IrOpcode::kFloat64ExtractHighWord32:
@@ -772,7 +1067,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kLoadFramePointer:
return VisitLoadFramePointer(node);
case IrOpcode::kCheckedLoad: {
- MachineType rep = OpParameter<MachineType>(node);
+ MachineRepresentation rep =
+ CheckedLoadRepresentationOf(node->op()).representation();
MarkAsRepresentation(rep, node);
return VisitCheckedLoad(node);
}
@@ -858,15 +1154,34 @@ void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
+
+
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
@@ -908,11 +1223,51 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -925,7 +1280,14 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
#endif // V8_TARGET_ARCH_32_BIT
-void InstructionSelector::VisitFinish(Node* node) {
+void InstructionSelector::VisitFinishRegion(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitGuard(Node* node) {
OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
@@ -935,9 +1297,16 @@ void InstructionSelector::VisitFinish(Node* node) {
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
int index = ParameterIndexOf(node->op());
- Emit(kArchNop,
- g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
- linkage()->GetParameterType(index)));
+ InstructionOperand op =
+ linkage()->ParameterHasSecondaryLocation(index)
+ ? g.DefineAsDualLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterSecondaryLocation(index))
+ : g.DefineAsLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterType(index).representation());
+
+ Emit(kArchNop, op);
}
@@ -946,8 +1315,9 @@ void InstructionSelector::VisitIfException(Node* node) {
Node* call = node->InputAt(1);
DCHECK_EQ(IrOpcode::kCall, call->opcode());
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
- Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
- descriptor->GetReturnType(0)));
+ Emit(kArchNop,
+ g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
+ descriptor->GetReturnType(0).representation()));
}
@@ -955,7 +1325,7 @@ void InstructionSelector::VisitOsrValue(Node* node) {
OperandGenerator g(this);
int index = OpParameter<int>(node);
Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
- kMachAnyTagged));
+ MachineRepresentation::kTagged));
}
@@ -981,6 +1351,12 @@ void InstructionSelector::VisitProjection(Node* node) {
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
@@ -1002,6 +1378,156 @@ void InstructionSelector::VisitConstant(Node* node) {
}
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+ OperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on some architectures it's probably better to use
+ // the code object in a register if there are multiple uses of it.
+ // Improve constant pool and the heuristics in the register allocator
+ // for where to emit constants.
+ CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
+ InitializeCallBuffer(node, &buffer, call_buffer_flags);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode = kArchNop;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kLazyBailout:
+ opcode = kArchLazyBailout | MiscField::encode(flags);
+ break;
+ }
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ OperandGenerator g(this);
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+
+ int stack_param_delta = 0;
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
+ &stack_param_delta)) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+
+ Emit(kArchPrepareTailCall, g.NoOutput(),
+ g.TempImmediate(stack_param_delta));
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags = kCallCodeImmediate;
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
+}
+
+
void InstructionSelector::VisitGoto(BasicBlock* target) {
// jump to the next block.
OperandGenerator g(this);
@@ -1009,43 +1535,57 @@ void InstructionSelector::VisitGoto(BasicBlock* target) {
}
-void InstructionSelector::VisitReturn(Node* value) {
- DCHECK_NOT_NULL(value);
+void InstructionSelector::VisitReturn(Node* ret) {
OperandGenerator g(this);
if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
Emit(kArchRet, g.NoOutput());
} else {
- Emit(kArchRet, g.NoOutput(),
- g.UseLocation(value, linkage()->GetReturnLocation(),
- linkage()->GetReturnType()));
+ const int ret_count = ret->op()->ValueInputCount();
+ auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
+ for (int i = 0; i < ret_count; ++i) {
+ value_locations[i] =
+ g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
+ linkage()->GetReturnType(i).representation());
+ }
+ Emit(kArchRet, 0, nullptr, ret_count, value_locations);
}
}
-void InstructionSelector::VisitDeoptimize(Node* value) {
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
OperandGenerator g(this);
FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
- size_t arg_count = desc->GetTotalSize() + 1; // Include deopt id.
InstructionOperandVector args(instruction_zone());
- args.reserve(arg_count);
+ args.reserve(desc->GetTotalSize() + 1); // Include deopt id.
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(desc);
args.push_back(g.TempImmediate(state_id.ToInt()));
- AddFrameStateInputs(value, &args, desc, FrameStateInputKind::kAny);
+ StateObjectDeduplicator deduplicator(instruction_zone());
- DCHECK_EQ(args.size(), arg_count);
+ AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
+ FrameStateInputKind::kAny,
+ instruction_zone());
- Emit(kArchDeoptimize, 0, nullptr, arg_count, &args.front(), 0, nullptr);
+ InstructionCode opcode = kArchDeoptimize;
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ opcode |= MiscField::encode(Deoptimizer::EAGER);
+ break;
+ case DeoptimizeKind::kSoft:
+ opcode |= MiscField::encode(Deoptimizer::SOFT);
+ break;
+ }
+ Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
}
void InstructionSelector::VisitThrow(Node* value) {
OperandGenerator g(this);
- Emit(kArchNop, g.NoOutput()); // TODO(titzer)
+ Emit(kArchThrowTerminator, g.NoOutput()); // TODO(titzer)
}
@@ -1065,7 +1605,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
DCHECK_EQ(parameters, state_info.parameter_count());
DCHECK_EQ(locals, state_info.local_count());
- FrameStateDescriptor* outer_state = NULL;
+ FrameStateDescriptor* outer_state = nullptr;
Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
if (outer_node->opcode() == IrOpcode::kFrameState) {
outer_state = GetFrameStateDescriptor(outer_node);
@@ -1078,76 +1618,6 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
}
-InstructionOperand InstructionSelector::OperandForDeopt(
- OperandGenerator* g, Node* input, FrameStateInputKind kind) {
- switch (input->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kHeapConstant:
- return g->UseImmediate(input);
- default:
- switch (kind) {
- case FrameStateInputKind::kStackSlot:
- return g->UseUniqueSlot(input);
- case FrameStateInputKind::kAny:
- return g->Use(input);
- }
- UNREACHABLE();
- return InstructionOperand();
- }
-}
-
-
-void InstructionSelector::AddFrameStateInputs(Node* state,
- InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor,
- FrameStateInputKind kind) {
- DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
-
- if (descriptor->outer_state()) {
- AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), inputs,
- descriptor->outer_state(), kind);
- }
-
- Node* parameters = state->InputAt(kFrameStateParametersInput);
- Node* locals = state->InputAt(kFrameStateLocalsInput);
- Node* stack = state->InputAt(kFrameStateStackInput);
- Node* context = state->InputAt(kFrameStateContextInput);
- Node* function = state->InputAt(kFrameStateFunctionInput);
-
- DCHECK_EQ(descriptor->parameters_count(),
- StateValuesAccess(parameters).size());
- DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
- DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
-
- ZoneVector<MachineType> types(instruction_zone());
- types.reserve(descriptor->GetSize());
-
- OperandGenerator g(this);
- size_t value_index = 0;
- inputs->push_back(OperandForDeopt(&g, function, kind));
- descriptor->SetType(value_index++, kMachAnyTagged);
- for (StateValuesAccess::TypedNode input_node :
- StateValuesAccess(parameters)) {
- inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
- }
- if (descriptor->HasContext()) {
- inputs->push_back(OperandForDeopt(&g, context, kind));
- descriptor->SetType(value_index++, kMachAnyTagged);
- }
- for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
- inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
- }
- for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
- inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
- }
- DCHECK(value_index == descriptor->GetSize());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
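The StateObjectDeduplicator contract used above is small enough to state as code. A minimal usage sketch, where the zone and the kObjectState node are assumed to come from the surrounding compilation:

    // First occurrence of an object gets a fresh id and expands recursively;
    // later occurrences become Duplicate(id) markers that add no operands.
    StateObjectDeduplicator dedup(zone);                  // zone assumed
    DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
              dedup.GetObjectId(object_state_node));      // node assumed
    size_t id = dedup.InsertObject(object_state_node);    // first visit
    DCHECK_EQ(id, dedup.GetObjectId(object_state_node));  // later visits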
diff --git a/chromium/v8/src/compiler/instruction-selector.h b/chromium/v8/src/compiler/instruction-selector.h
index b8354fcfd1e..52aea70eb6f 100644
--- a/chromium/v8/src/compiler/instruction-selector.h
+++ b/chromium/v8/src/compiler/instruction-selector.h
@@ -9,6 +9,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/zone-containers.h"
@@ -25,8 +26,20 @@ class Linkage;
class OperandGenerator;
struct SwitchInfo;
-typedef ZoneVector<InstructionOperand> InstructionOperandVector;
+// This class connects the nodes of parameters which are going to be pushed on
+// the call stack with their parameter index in the call descriptor of the
+// callee.
+class PushParameter {
+ public:
+ PushParameter() : node_(nullptr), type_(MachineType::None()) {}
+ PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
+
+ Node* node() const { return node_; }
+ MachineType type() const { return type_; }
+
+ private:
+ Node* node_;
+ MachineType type_;
+};
// Instruction selection generates an InstructionSequence for a given Schedule.
class InstructionSelector final {
@@ -46,40 +59,44 @@ class InstructionSelector final {
// Visit code for the entire graph with the included schedule.
void SelectInstructions();
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
+ void AddInstruction(Instruction* instr);
+
// ===========================================================================
// ============= Architecture-independent code emission methods. =============
// ===========================================================================
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
InstructionOperand e, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
InstructionOperand e, InstructionOperand f,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(Instruction* instr);
// ===========================================================================
@@ -153,16 +170,34 @@ class InstructionSelector final {
// Inform the register allocation of the representation of the value produced
// by {node}.
- void MarkAsRepresentation(MachineType rep, Node* node);
- void MarkAsWord32(Node* node) { MarkAsRepresentation(kRepWord32, node); }
- void MarkAsWord64(Node* node) { MarkAsRepresentation(kRepWord64, node); }
- void MarkAsFloat32(Node* node) { MarkAsRepresentation(kRepFloat32, node); }
- void MarkAsFloat64(Node* node) { MarkAsRepresentation(kRepFloat64, node); }
- void MarkAsReference(Node* node) { MarkAsRepresentation(kRepTagged, node); }
+ void MarkAsRepresentation(MachineRepresentation rep, Node* node);
+ void MarkAsWord32(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kWord32, node);
+ }
+ void MarkAsWord64(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kWord64, node);
+ }
+ void MarkAsFloat32(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kFloat32, node);
+ }
+ void MarkAsFloat64(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kFloat64, node);
+ }
+ void MarkAsReference(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kTagged, node);
+ }
// Inform the register allocation of the representation of the unallocated
// operand {op}.
- void MarkAsRepresentation(MachineType rep, const InstructionOperand& op);
+ void MarkAsRepresentation(MachineRepresentation rep,
+ const InstructionOperand& op);
+
+ enum CallBufferFlag {
+ kCallCodeImmediate = 1u << 0,
+ kCallAddressImmediate = 1u << 1,
+ kCallTail = 1u << 2
+ };
+ typedef base::Flags<CallBufferFlag> CallBufferFlags;
// Initialize the call buffer with the InstructionOperands, nodes, etc,
// corresponding
@@ -170,18 +205,11 @@ class InstructionSelector final {
// {call_code_immediate} to generate immediate operands to calls of code.
// {call_address_immediate} to generate immediate operands to address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate);
+ CallBufferFlags flags, int stack_param_delta = 0);
+ bool IsTailCallAddressImmediate();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
- enum class FrameStateInputKind { kAny, kStackSlot };
- void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor,
- FrameStateInputKind kind);
- static InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
- FrameStateInputKind kind);
-
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
// ===========================================================================
@@ -200,7 +228,8 @@ class InstructionSelector final {
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
- void VisitFinish(Node* node);
+ void VisitFinishRegion(Node* node);
+ void VisitGuard(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
void VisitOsrValue(Node* node);
@@ -212,10 +241,13 @@ class InstructionSelector final {
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
- void VisitDeoptimize(Node* value);
- void VisitReturn(Node* value);
+ void VisitDeoptimize(DeoptimizeKind kind, Node* value);
+ void VisitReturn(Node* ret);
void VisitThrow(Node* value);
+ void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
+ const CallDescriptor* descriptor, Node* node);
+
// ===========================================================================
Schedule* schedule() const { return schedule_; }
@@ -238,6 +270,7 @@ class InstructionSelector final {
BoolVector defined_;
BoolVector used_;
IntVector virtual_registers_;
+ InstructionScheduler* scheduler_;
};
} // namespace compiler
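EmitPrepareArguments, declared above, is the per-architecture consumer of CallBuffer::pushed_nodes. A hedged sketch of what a backend implementation might look like; the push opcode is a placeholder, and a real backend would size or batch the pushes using input.type():

    // Illustrative only, not a real V8 backend; descriptor and node are
    // unused in this simplified form.
    void InstructionSelector::EmitPrepareArguments(
        ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
        Node* node) {
      OperandGenerator g(this);
      for (PushParameter input : *arguments) {
        if (input.node() == nullptr) continue;  // hole left by a fixed slot
        Emit(kArchNop /* placeholder push opcode */, g.NoOutput(),
             g.UseRegister(input.node()));
      }
    }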
diff --git a/chromium/v8/src/compiler/instruction.cc b/chromium/v8/src/compiler/instruction.cc
index 0fbb94979e4..383e27dac62 100644
--- a/chromium/v8/src/compiler/instruction.cc
+++ b/chromium/v8/src/compiler/instruction.cc
@@ -6,6 +6,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/instruction.h"
#include "src/compiler/schedule.h"
+#include "src/compiler/state-values-utils.h"
namespace v8 {
namespace internal {
@@ -59,6 +60,22 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
+void InstructionOperand::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.op_ = *this;
+ os << wrapper << std::endl;
+}
+
+
+void InstructionOperand::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
const InstructionOperand& op = printable.op_;
@@ -74,11 +91,15 @@ std::ostream& operator<<(std::ostream& os,
case UnallocatedOperand::NONE:
return os;
case UnallocatedOperand::FIXED_REGISTER:
- return os << "(=" << conf->general_register_name(
- unalloc->fixed_register_index()) << ")";
+ return os << "(="
+ << conf->GetGeneralRegisterName(
+ unalloc->fixed_register_index())
+ << ")";
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
- return os << "(=" << conf->double_register_name(
- unalloc->fixed_register_index()) << ")";
+ return os << "(="
+ << conf->GetDoubleRegisterName(
+ unalloc->fixed_register_index())
+ << ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)";
case UnallocatedOperand::MUST_HAVE_SLOT:
@@ -101,45 +122,51 @@ std::ostream& operator<<(std::ostream& os,
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
+ case InstructionOperand::EXPLICIT:
case InstructionOperand::ALLOCATED: {
- auto allocated = AllocatedOperand::cast(op);
- switch (allocated.allocated_kind()) {
- case AllocatedOperand::STACK_SLOT:
- os << "[stack:" << StackSlotOperand::cast(op).index();
+ auto allocated = LocationOperand::cast(op);
+ if (op.IsStackSlot()) {
+ os << "[stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsDoubleStackSlot()) {
+ os << "[double_stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsRegister()) {
+ os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
+ } else {
+ DCHECK(op.IsDoubleRegister());
+ os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
+ << "|R";
+ }
+ if (allocated.IsExplicit()) {
+ os << "|E";
+ }
+ switch (allocated.representation()) {
+ case MachineRepresentation::kNone:
+ os << "|-";
break;
- case AllocatedOperand::DOUBLE_STACK_SLOT:
- os << "[double_stack:" << DoubleStackSlotOperand::cast(op).index();
+ case MachineRepresentation::kBit:
+ os << "|b";
break;
- case AllocatedOperand::REGISTER:
- os << "["
- << conf->general_register_name(RegisterOperand::cast(op).index())
- << "|R";
+ case MachineRepresentation::kWord8:
+ os << "|w8";
break;
- case AllocatedOperand::DOUBLE_REGISTER:
- os << "["
- << conf->double_register_name(
- DoubleRegisterOperand::cast(op).index()) << "|R";
+ case MachineRepresentation::kWord16:
+ os << "|w16";
break;
- }
- switch (allocated.machine_type()) {
- case kRepWord32:
+ case MachineRepresentation::kWord32:
os << "|w32";
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
os << "|w64";
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
os << "|f32";
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
os << "|f64";
break;
- case kRepTagged:
+ case MachineRepresentation::kTagged:
os << "|t";
break;
- default:
- os << "|?";
- break;
}
return os << "]";
}
@@ -151,6 +178,24 @@ std::ostream& operator<<(std::ostream& os,
}
+void MoveOperands::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.op_ = destination();
+ os << wrapper << " = ";
+ wrapper.op_ = source();
+ os << wrapper << std::endl;
+}
+
+
+void MoveOperands::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableMoveOperands& printable) {
const MoveOperands& mo = *printable.move_operands_;
@@ -178,11 +223,11 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
MoveOperands* to_eliminate = nullptr;
for (auto curr : *this) {
if (curr->IsEliminated()) continue;
- if (curr->destination().EqualsModuloType(move->source())) {
+ if (curr->destination().EqualsCanonicalized(move->source())) {
DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
- } else if (curr->destination().EqualsModuloType(move->destination())) {
+ } else if (curr->destination().EqualsCanonicalized(move->destination())) {
DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
@@ -194,11 +239,21 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
}
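// Editorial sketch (std:: containers instead of V8's Zone-allocated ones) of
// the scan PrepareInsertAfter performs above: before inserting a new move,
// find an existing move whose destination feeds the new move's source (use
// that move's source instead) and mark any move writing the same destination
// as eliminated. EqualsCanonicalized is modeled as plain integer equality;
// names here are illustrative, not V8 API.
#include <cassert>
#include <vector>

struct Move {
  int src;
  int dst;
  bool eliminated = false;
};

void PrepareInsertAfter(std::vector<Move>& moves, Move& incoming) {
  Move* replacement = nullptr;
  Move* to_eliminate = nullptr;
  for (Move& m : moves) {
    if (m.eliminated) continue;
    if (m.dst == incoming.src) replacement = &m;        // forwards the source
    else if (m.dst == incoming.dst) to_eliminate = &m;  // now a dead write
  }
  if (replacement != nullptr) incoming.src = replacement->src;
  if (to_eliminate != nullptr) to_eliminate->eliminated = true;
}

int main() {
  std::vector<Move> moves = {{1, 2}, {3, 4}};
  Move incoming{2, 4};  // reads r2 (just written from r1), overwrites r4
  PrepareInsertAfter(moves, incoming);
  assert(incoming.src == 1);    // forwarded through the r1 -> r2 move
  assert(moves[1].eliminated);  // the old write to r4 is dead
}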
+ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
+ int index)
+ : LocationOperand(EXPLICIT, kind, rep, index) {
+ DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
+ Register::from_code(index).IsAllocatable());
+ DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(rep),
+ DoubleRegister::from_code(index).IsAllocatable());
+}
+
+
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
TempCountField::encode(0) | IsCallField::encode(false)),
- reference_map_(NULL) {
+ reference_map_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
}
@@ -213,7 +268,7 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
InputCountField::encode(input_count) |
TempCountField::encode(temp_count) |
IsCallField::encode(false)),
- reference_map_(NULL) {
+ reference_map_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
size_t offset = 0;
@@ -243,6 +298,22 @@ bool Instruction::AreMovesRedundant() const {
}
+void Instruction::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstruction wrapper;
+ wrapper.instr_ = this;
+ wrapper.register_configuration_ = config;
+ os << wrapper << std::endl;
+}
+
+
+void Instruction::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
@@ -260,7 +331,7 @@ std::ostream& operator<<(std::ostream& os,
void ReferenceMap::RecordReference(const AllocatedOperand& op) {
// Do not record arguments as pointers.
- if (op.IsStackSlot() && StackSlotOperand::cast(op).index() < 0) return;
+ if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
DCHECK(!op.IsDoubleRegister() && !op.IsDoubleStackSlot());
reference_operands_.push_back(op);
}
@@ -269,8 +340,9 @@ void ReferenceMap::RecordReference(const AllocatedOperand& op) {
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
bool first = true;
- PrintableInstructionOperand poi = {RegisterConfiguration::ArchDefault(),
- InstructionOperand()};
+ PrintableInstructionOperand poi = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ InstructionOperand()};
for (auto& op : pm.reference_operands_) {
if (!first) {
os << ";";
@@ -387,7 +459,7 @@ std::ostream& operator<<(std::ostream& os,
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
os << "(";
- if (instr.parallel_moves()[i] != NULL) {
+ if (instr.parallel_moves()[i] != nullptr) {
PrintableParallelMove ppm = {printable.register_configuration_,
instr.parallel_moves()[i]};
os << ppm;
@@ -496,7 +568,7 @@ size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
static RpoNumber GetRpo(const BasicBlock* block) {
- if (block == NULL) return RpoNumber::Invalid();
+ if (block == nullptr) return RpoNumber::Invalid();
return RpoNumber::FromInt(block->rpo_number());
}
@@ -531,7 +603,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
Zone* zone, const Schedule* schedule) {
InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
new (blocks) InstructionBlocks(
- static_cast<int>(schedule->rpo_order()->size()), NULL, zone);
+ static_cast<int>(schedule->rpo_order()->size()), nullptr, zone);
size_t rpo_number = 0;
for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
@@ -617,7 +689,7 @@ int InstructionSequence::AddInstruction(Instruction* instr) {
int index = static_cast<int>(instructions_.size());
instructions_.push_back(instr);
if (instr->NeedsReferenceMap()) {
- DCHECK(instr->reference_map() == NULL);
+ DCHECK(instr->reference_map() == nullptr);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
reference_map->set_instruction_position(index);
instr->set_reference_map(reference_map);
@@ -644,28 +716,28 @@ InstructionBlock* InstructionSequence::GetInstructionBlock(
}
-static MachineType FilterRepresentation(MachineType rep) {
- DCHECK_EQ(rep, RepresentationOf(rep));
+static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
switch (rep) {
- case kRepBit:
- case kRepWord8:
- case kRepWord16:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
return InstructionSequence::DefaultRepresentation();
- case kRepWord32:
- case kRepWord64:
- case kRepFloat32:
- case kRepFloat64:
- case kRepTagged:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kTagged:
return rep;
- default:
+ case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return kMachNone;
+ return MachineRepresentation::kNone;
}
-MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
+MachineRepresentation InstructionSequence::GetRepresentation(
+ int virtual_register) const {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
@@ -675,17 +747,17 @@ MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
}
-void InstructionSequence::MarkAsRepresentation(MachineType machine_type,
+void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int virtual_register) {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
}
- machine_type = FilterRepresentation(machine_type);
- DCHECK_IMPLIES(representations_[virtual_register] != machine_type,
+ rep = FilterRepresentation(rep);
+ DCHECK_IMPLIES(representations_[virtual_register] != rep,
representations_[virtual_register] == DefaultRepresentation());
- representations_[virtual_register] = machine_type;
+ representations_[virtual_register] = rep;
}
@@ -732,6 +804,22 @@ void InstructionSequence::SetSourcePosition(const Instruction* instr,
}
+void InstructionSequence::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionSequence wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.sequence_ = this;
+ os << wrapper << std::endl;
+}
+
+
+void InstructionSequence::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
FrameStateDescriptor::FrameStateDescriptor(
Zone* zone, FrameStateType type, BailoutId bailout_id,
OutputFrameStateCombine state_combine, size_t parameters_count,
@@ -744,11 +832,9 @@ FrameStateDescriptor::FrameStateDescriptor(
parameters_count_(parameters_count),
locals_count_(locals_count),
stack_count_(stack_count),
- types_(zone),
+ values_(zone),
shared_info_(shared_info),
- outer_state_(outer_state) {
- types_.resize(GetSize(), kMachNone);
-}
+ outer_state_(outer_state) {}
size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
@@ -767,7 +853,7 @@ size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
size_t FrameStateDescriptor::GetTotalSize() const {
size_t total_size = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
total_size += iter->GetSize();
}
@@ -777,7 +863,7 @@ size_t FrameStateDescriptor::GetTotalSize() const {
size_t FrameStateDescriptor::GetFrameCount() const {
size_t count = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
++count;
}
@@ -787,9 +873,9 @@ size_t FrameStateDescriptor::GetFrameCount() const {
size_t FrameStateDescriptor::GetJSFrameCount() const {
size_t count = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
- if (iter->type_ == FrameStateType::kJavaScriptFunction) {
+ if (FrameStateFunctionInfo::IsJSFunctionType(iter->type_)) {
++count;
}
}
@@ -797,17 +883,6 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
}
-MachineType FrameStateDescriptor::GetType(size_t index) const {
- return types_[index];
-}
-
-
-void FrameStateDescriptor::SetType(size_t index, MachineType type) {
- DCHECK(index < GetSize());
- types_[index] = type;
-}
-
-
std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
return os << rpo.ToSize();
}
diff --git a/chromium/v8/src/compiler/instruction.h b/chromium/v8/src/compiler/instruction.h
index a0718f3c215..8a6a0ae92a9 100644
--- a/chromium/v8/src/compiler/instruction.h
+++ b/chromium/v8/src/compiler/instruction.h
@@ -14,23 +14,26 @@
#include "src/compiler/frame.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/register-configuration.h"
#include "src/compiler/source-position.h"
+#include "src/macro-assembler.h"
+#include "src/register-configuration.h"
#include "src/zone-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
class Schedule;
+
class InstructionOperand {
public:
static const int kInvalidVirtualRegister = -1;
// TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
// kInvalidVirtualRegister and some DCHECKS.
- enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, ALLOCATED };
+ enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, EXPLICIT, ALLOCATED };
InstructionOperand() : InstructionOperand(INVALID) {}
@@ -39,12 +42,29 @@ class InstructionOperand {
#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
bool Is##name() const { return kind() == type; }
INSTRUCTION_OPERAND_PREDICATE(Invalid, INVALID)
+ // UnallocatedOperands are place-holder operands created before register
+  // allocation. They are later assigned registers and become AllocatedOperands.
INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
+ // Constant operands participate in register allocation. They are allocated to
+ // registers but have a special "spilling" behavior. When a ConstantOperand
+ // value must be rematerialized, it is loaded from an immediate constant
+  // rather than from a spill slot.
INSTRUCTION_OPERAND_PREDICATE(Constant, CONSTANT)
+ // ImmediateOperands do not participate in register allocation and are only
+  // embedded directly in instructions, e.g. small integers and, on some
+  // platforms, Objects.
INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
+ // ExplicitOperands do not participate in register allocation. They are
+ // created by the instruction selector for direct access to registers and
+ // stack slots, completely bypassing the register allocator. They are never
+  // associated with a virtual register.
+ INSTRUCTION_OPERAND_PREDICATE(Explicit, EXPLICIT)
+ // AllocatedOperands are registers or stack slots that are assigned by the
+ // register allocator and are always associated with a virtual register.
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef INSTRUCTION_OPERAND_PREDICATE
+ inline bool IsAnyRegister() const;
inline bool IsRegister() const;
inline bool IsDoubleRegister() const;
inline bool IsStackSlot() const;
@@ -69,18 +89,21 @@ class InstructionOperand {
return this->value_ < that.value_;
}
- bool EqualsModuloType(const InstructionOperand& that) const {
- return this->GetValueModuloType() == that.GetValueModuloType();
+ bool EqualsCanonicalized(const InstructionOperand& that) const {
+ return this->GetCanonicalizedValue() == that.GetCanonicalizedValue();
}
- bool CompareModuloType(const InstructionOperand& that) const {
- return this->GetValueModuloType() < that.GetValueModuloType();
+ bool CompareCanonicalized(const InstructionOperand& that) const {
+ return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
- inline uint64_t GetValueModuloType() const;
+ inline uint64_t GetCanonicalizedValue() const;
class KindField : public BitField64<Kind, 0, 3> {};
@@ -88,6 +111,9 @@ class InstructionOperand {
};
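// Editorial sketch of the predicate-macro pattern InstructionOperand uses
// above: one macro stamps out an Is<Kind>() accessor per enumerator, so
// adding a kind such as EXPLICIT needs only one more invocation. Standalone
// C++ with hypothetical names, not V8 code.
#include <cassert>

class Operand {
 public:
  enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, EXPLICIT, ALLOCATED };
  explicit Operand(Kind kind) : kind_(kind) {}

#define OPERAND_PREDICATE(name, type) \
  bool Is##name() const { return kind_ == type; }
  OPERAND_PREDICATE(Invalid, INVALID)
  OPERAND_PREDICATE(Unallocated, UNALLOCATED)
  OPERAND_PREDICATE(Constant, CONSTANT)
  OPERAND_PREDICATE(Immediate, IMMEDIATE)
  OPERAND_PREDICATE(Explicit, EXPLICIT)
  OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef OPERAND_PREDICATE

 private:
  Kind kind_;
};

int main() {
  Operand op(Operand::EXPLICIT);
  assert(op.IsExplicit() && !op.IsAllocated());
}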
+typedef ZoneVector<InstructionOperand> InstructionOperandVector;
+
+
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
InstructionOperand op_;
@@ -175,6 +201,12 @@ class UnallocatedOperand : public InstructionOperand {
value_ |= LifetimeField::encode(lifetime);
}
+ UnallocatedOperand(int reg_id, int slot_id, int virtual_register)
+ : UnallocatedOperand(FIXED_REGISTER, reg_id, virtual_register) {
+ value_ |= HasSecondaryStorageField::encode(true);
+ value_ |= SecondaryStorageField::encode(slot_id);
+ }
+
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
@@ -205,6 +237,15 @@ class UnallocatedOperand : public InstructionOperand {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_DOUBLE_REGISTER;
}
+ bool HasSecondaryStorage() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER &&
+ HasSecondaryStorageField::decode(value_);
+ }
+ int GetSecondaryStorage() const {
+ DCHECK(HasSecondaryStorage());
+ return SecondaryStorageField::decode(value_);
+ }
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
BasicPolicy basic_policy() const {
@@ -284,7 +325,9 @@ class UnallocatedOperand : public InstructionOperand {
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {};
class LifetimeField : public BitField64<Lifetime, 39, 1> {};
- class FixedRegisterField : public BitField64<int, 40, 6> {};
+ class HasSecondaryStorageField : public BitField64<bool, 40, 1> {};
+ class FixedRegisterField : public BitField64<int, 41, 6> {};
+ class SecondaryStorageField : public BitField64<int, 47, 3> {};
private:
explicit UnallocatedOperand(int virtual_register)
@@ -352,121 +395,158 @@ class ImmediateOperand : public InstructionOperand {
};
-class AllocatedOperand : public InstructionOperand {
+class LocationOperand : public InstructionOperand {
public:
- // TODO(dcarney): machine_type makes this now redundant. Just need to know is
- // the operand is a slot or a register.
- enum AllocatedKind {
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER
- };
-
- AllocatedOperand(AllocatedKind kind, MachineType machine_type, int index)
- : InstructionOperand(ALLOCATED) {
- DCHECK_IMPLIES(kind == REGISTER || kind == DOUBLE_REGISTER, index >= 0);
- DCHECK(IsSupportedMachineType(machine_type));
- value_ |= AllocatedKindField::encode(kind);
- value_ |= MachineTypeField::encode(machine_type);
+ enum LocationKind { REGISTER, STACK_SLOT };
+
+ LocationOperand(InstructionOperand::Kind operand_kind,
+ LocationOperand::LocationKind location_kind,
+ MachineRepresentation rep, int index)
+ : InstructionOperand(operand_kind) {
+ DCHECK_IMPLIES(location_kind == REGISTER, index >= 0);
+ DCHECK(IsSupportedRepresentation(rep));
+ value_ |= LocationKindField::encode(location_kind);
+ value_ |= RepresentationField::encode(rep);
value_ |= static_cast<int64_t>(index) << IndexField::kShift;
}
int index() const {
+ DCHECK(IsStackSlot() || IsDoubleStackSlot());
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
- AllocatedKind allocated_kind() const {
- return AllocatedKindField::decode(value_);
+ Register GetRegister() const {
+ DCHECK(IsRegister());
+ return Register::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
}
- MachineType machine_type() const { return MachineTypeField::decode(value_); }
+ DoubleRegister GetDoubleRegister() const {
+ DCHECK(IsDoubleRegister());
+ return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
+ }
- static AllocatedOperand* New(Zone* zone, AllocatedKind kind,
- MachineType machine_type, int index) {
- return InstructionOperand::New(zone,
- AllocatedOperand(kind, machine_type, index));
+ LocationKind location_kind() const {
+ return LocationKindField::decode(value_);
}
- static bool IsSupportedMachineType(MachineType machine_type) {
- if (RepresentationOf(machine_type) != machine_type) return false;
- switch (machine_type) {
- case kRepWord32:
- case kRepWord64:
- case kRepFloat32:
- case kRepFloat64:
- case kRepTagged:
+ MachineRepresentation representation() const {
+ return RepresentationField::decode(value_);
+ }
+
+ static bool IsSupportedRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kTagged:
return true;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kNone:
return false;
}
+ UNREACHABLE();
+ return false;
}
- INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
+ static LocationOperand* cast(InstructionOperand* op) {
+ DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ return static_cast<LocationOperand*>(op);
+ }
+
+ static const LocationOperand* cast(const InstructionOperand* op) {
+ DCHECK(ALLOCATED == op->kind() || EXPLICIT == op->kind());
+ return static_cast<const LocationOperand*>(op);
+ }
+
+ static LocationOperand cast(const InstructionOperand& op) {
+ DCHECK(ALLOCATED == op.kind() || EXPLICIT == op.kind());
+ return *static_cast<const LocationOperand*>(&op);
+ }
STATIC_ASSERT(KindField::kSize == 3);
- class AllocatedKindField : public BitField64<AllocatedKind, 3, 2> {};
- class MachineTypeField : public BitField64<MachineType, 5, 16> {};
+ class LocationKindField : public BitField64<LocationKind, 3, 2> {};
+ class RepresentationField : public BitField64<MachineRepresentation, 5, 8> {};
class IndexField : public BitField64<int32_t, 35, 29> {};
};
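// A minimal standalone sketch of the BitField64 packing these classes rely
// on, mirroring the layout declared above (kind in bits 0-2, location kind
// in bits 3-4, representation in bits 5-12, index in the high bits). Plain
// C++ with assumed field positions, not V8's actual BitField64 template.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField64 {
  static constexpr uint64_t kMask = ((uint64_t{1} << kSize) - 1) << kShift;
  static uint64_t encode(T value) {
    return static_cast<uint64_t>(value) << kShift;
  }
  static T decode(uint64_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
  static uint64_t update(uint64_t packed, T value) {
    return (packed & ~kMask) | encode(value);
  }
};

int main() {
  using KindField = BitField64<int, 0, 3>;
  using RepresentationField = BitField64<int, 5, 8>;
  uint64_t value = KindField::encode(5) | RepresentationField::encode(7);
  assert(KindField::decode(value) == 5);
  value = RepresentationField::update(value, 3);  // rewrite one field in place
  assert(RepresentationField::decode(value) == 3);
  assert(KindField::decode(value) == 5);          // neighbors untouched
}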
+class ExplicitOperand : public LocationOperand {
+ public:
+ ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
+
+ static ExplicitOperand* New(Zone* zone, LocationKind kind,
+ MachineRepresentation rep, int index) {
+ return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT);
+};
+
+
+class AllocatedOperand : public LocationOperand {
+ public:
+ AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
+ : LocationOperand(ALLOCATED, kind, rep, index) {}
+
+ static AllocatedOperand* New(Zone* zone, LocationKind kind,
+ MachineRepresentation rep, int index) {
+ return InstructionOperand::New(zone, AllocatedOperand(kind, rep, index));
+ }
+
+ INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
+};
+
+
#undef INSTRUCTION_OPERAND_CASTS
-#define ALLOCATED_OPERAND_LIST(V) \
- V(StackSlot, STACK_SLOT) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
- V(Register, REGISTER) \
- V(DoubleRegister, DOUBLE_REGISTER)
-
-
-#define ALLOCATED_OPERAND_IS(SubKind, kOperandKind) \
- bool InstructionOperand::Is##SubKind() const { \
- return IsAllocated() && \
- AllocatedOperand::cast(this)->allocated_kind() == \
- AllocatedOperand::kOperandKind; \
- }
-ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_IS)
-#undef ALLOCATED_OPERAND_IS
-
-
-// TODO(dcarney): these subkinds are now pretty useless, nuke.
-#define ALLOCATED_OPERAND_CLASS(SubKind, kOperandKind) \
- class SubKind##Operand final : public AllocatedOperand { \
- public: \
- explicit SubKind##Operand(MachineType machine_type, int index) \
- : AllocatedOperand(kOperandKind, machine_type, index) {} \
- \
- static SubKind##Operand* New(Zone* zone, MachineType machine_type, \
- int index) { \
- return InstructionOperand::New(zone, \
- SubKind##Operand(machine_type, index)); \
- } \
- \
- static SubKind##Operand* cast(InstructionOperand* op) { \
- DCHECK_EQ(kOperandKind, AllocatedOperand::cast(op)->allocated_kind()); \
- return reinterpret_cast<SubKind##Operand*>(op); \
- } \
- \
- static const SubKind##Operand* cast(const InstructionOperand* op) { \
- DCHECK_EQ(kOperandKind, AllocatedOperand::cast(op)->allocated_kind()); \
- return reinterpret_cast<const SubKind##Operand*>(op); \
- } \
- \
- static SubKind##Operand cast(const InstructionOperand& op) { \
- DCHECK_EQ(kOperandKind, AllocatedOperand::cast(op).allocated_kind()); \
- return *static_cast<const SubKind##Operand*>(&op); \
- } \
- };
-ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
-#undef ALLOCATED_OPERAND_CLASS
+bool InstructionOperand::IsAnyRegister() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::REGISTER;
+}
-uint64_t InstructionOperand::GetValueModuloType() const {
- if (IsAllocated()) {
+bool InstructionOperand::IsRegister() const {
+ return IsAnyRegister() &&
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsDoubleRegister() const {
+ return IsAnyRegister() &&
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+bool InstructionOperand::IsDoubleStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
+}
+
+uint64_t InstructionOperand::GetCanonicalizedValue() const {
+ if (IsAllocated() || IsExplicit()) {
// TODO(dcarney): put machine type last and mask.
- return AllocatedOperand::MachineTypeField::update(this->value_, kMachNone);
+ MachineRepresentation canonicalized_representation =
+ IsFloatingPoint(LocationOperand::cast(this)->representation())
+ ? MachineRepresentation::kFloat64
+ : MachineRepresentation::kNone;
+ return InstructionOperand::KindField::update(
+ LocationOperand::RepresentationField::update(
+ this->value_, canonicalized_representation),
+ LocationOperand::EXPLICIT);
}
return this->value_;
}
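// A toy illustration (not V8 API) of the canonicalization rule implemented
// by GetCanonicalizedValue above: two operands naming the same physical
// location compare equal even if one is ALLOCATED and the other EXPLICIT,
// or one is viewed as kFloat32 and the other as kFloat64. Enum values and
// names below are stand-ins.
#include <cassert>
#include <tuple>

enum class Rep { kNone, kWord32, kFloat32, kFloat64, kTagged };
enum class Kind { kExplicit, kAllocated };

bool IsFloatingPoint(Rep rep) {
  return rep == Rep::kFloat32 || rep == Rep::kFloat64;
}

std::tuple<Kind, Rep, int> Canonicalize(Kind kind, Rep rep, int index) {
  (void)kind;  // ALLOCATED and EXPLICIT deliberately collapse to one kind.
  Rep canonical = IsFloatingPoint(rep) ? Rep::kFloat64 : Rep::kNone;
  return {Kind::kExplicit, canonical, index};
}

int main() {
  // Register 7 as a float32 view vs. a float64 view: same canonical value.
  assert(Canonicalize(Kind::kAllocated, Rep::kFloat32, 7) ==
         Canonicalize(Kind::kExplicit, Rep::kFloat64, 7));
  // Different locations stay distinct.
  assert(Canonicalize(Kind::kAllocated, Rep::kTagged, 1) !=
         Canonicalize(Kind::kAllocated, Rep::kTagged, 2));
}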
@@ -476,7 +556,7 @@ uint64_t InstructionOperand::GetValueModuloType() const {
struct CompareOperandModuloType {
bool operator()(const InstructionOperand& a,
const InstructionOperand& b) const {
- return a.CompareModuloType(b);
+ return a.CompareCanonicalized(b);
}
};
@@ -508,14 +588,14 @@ class MoveOperands final : public ZoneObject {
// True if this move a move into the given destination operand.
bool Blocks(const InstructionOperand& operand) const {
- return !IsEliminated() && source().EqualsModuloType(operand);
+ return !IsEliminated() && source().EqualsCanonicalized(operand);
}
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
- return IsEliminated() || source_.EqualsModuloType(destination_);
+ return IsEliminated() || source_.EqualsCanonicalized(destination_);
}
// We clear both operands to indicate move that's been eliminated.
@@ -525,6 +605,9 @@ class MoveOperands final : public ZoneObject {
return source_.IsInvalid();
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
private:
InstructionOperand source_;
InstructionOperand destination_;
@@ -651,7 +734,7 @@ class Instruction final {
// TODO(titzer): make call into a flags.
static Instruction* New(Zone* zone, InstructionCode opcode) {
- return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+ return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
}
static Instruction* New(Zone* zone, InstructionCode opcode,
@@ -659,9 +742,9 @@ class Instruction final {
size_t input_count, InstructionOperand* inputs,
size_t temp_count, InstructionOperand* temps) {
DCHECK(opcode >= 0);
- DCHECK(output_count == 0 || outputs != NULL);
- DCHECK(input_count == 0 || inputs != NULL);
- DCHECK(temp_count == 0 || temps != NULL);
+ DCHECK(output_count == 0 || outputs != nullptr);
+ DCHECK(input_count == 0 || inputs != nullptr);
+ DCHECK(temp_count == 0 || temps != nullptr);
size_t total_extra_ops = output_count + input_count + temp_count;
if (total_extra_ops != 0) total_extra_ops--;
int size = static_cast<int>(
@@ -677,7 +760,7 @@ class Instruction final {
}
bool IsCall() const { return IsCallField::decode(bit_field_); }
bool NeedsReferenceMap() const { return IsCall(); }
- bool HasReferenceMap() const { return reference_map_ != NULL; }
+ bool HasReferenceMap() const { return reference_map_ != nullptr; }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersTemps() const { return IsCall(); }
@@ -693,7 +776,7 @@ class Instruction final {
void OverwriteWithNop() {
opcode_ = ArchOpcodeField::encode(kArchNop);
bit_field_ = 0;
- reference_map_ = NULL;
+ reference_map_ = nullptr;
}
bool IsNop() const {
@@ -728,6 +811,9 @@ class Instruction final {
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
private:
explicit Instruction(InstructionCode opcode);
@@ -864,6 +950,59 @@ class Constant final {
};
+std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+
+// Forward declarations.
+class FrameStateDescriptor;
+
+
+enum class StateValueKind { kPlain, kNested, kDuplicate };
+
+
+class StateValueDescriptor {
+ public:
+ explicit StateValueDescriptor(Zone* zone)
+ : kind_(StateValueKind::kPlain),
+ type_(MachineType::AnyTagged()),
+ id_(0),
+ fields_(zone) {}
+
+ static StateValueDescriptor Plain(Zone* zone, MachineType type) {
+ return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+ }
+ static StateValueDescriptor Recursive(Zone* zone, size_t id) {
+ return StateValueDescriptor(StateValueKind::kNested, zone,
+ MachineType::AnyTagged(), id);
+ }
+ static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
+ return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+ MachineType::AnyTagged(), id);
+ }
+
+ size_t size() { return fields_.size(); }
+ ZoneVector<StateValueDescriptor>& fields() { return fields_; }
+  bool IsPlain() { return kind_ == StateValueKind::kPlain; }
+  bool IsNested() { return kind_ == StateValueKind::kNested; }
+  bool IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
+ MachineType type() const { return type_; }
+ MachineType GetOperandType(size_t index) const {
+ return fields_[index].type_;
+ }
+ size_t id() const { return id_; }
+
+ private:
+ StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
+ size_t id)
+ : kind_(kind), type_(type), id_(id), fields_(zone) {}
+
+ StateValueKind kind_;
+ MachineType type_;
+ size_t id_;
+ ZoneVector<StateValueDescriptor> fields_;
+};
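// Sketch (hypothetical types, std::vector instead of a Zone allocator) of
// the recursive shape StateValueDescriptor gives a frame state above: plain
// leaves carry a value, nested nodes own children, and duplicates reference
// an earlier subtree by id instead of repeating it.
#include <cassert>
#include <cstddef>
#include <vector>

enum class ValueKind { kPlain, kNested, kDuplicate };

struct StateValue {
  ValueKind kind;
  size_t id;
  std::vector<StateValue> fields;
};

// Count the plain leaves reachable from a descriptor; duplicates contribute
// nothing because they alias values that were already counted.
size_t CountLeaves(const StateValue& v) {
  if (v.kind == ValueKind::kPlain) return 1;
  if (v.kind == ValueKind::kDuplicate) return 0;
  size_t n = 0;
  for (const StateValue& f : v.fields) n += CountLeaves(f);
  return n;
}

int main() {
  StateValue root{ValueKind::kNested, 0,
                  {{ValueKind::kPlain, 0, {}},
                   {ValueKind::kNested, 1, {{ValueKind::kPlain, 0, {}}}},
                   {ValueKind::kDuplicate, 1, {}}}};
  assert(CountLeaves(root) == 2);
}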
+
+
class FrameStateDescriptor : public ZoneObject {
public:
FrameStateDescriptor(Zone* zone, FrameStateType type, BailoutId bailout_id,
@@ -882,7 +1021,7 @@ class FrameStateDescriptor : public ZoneObject {
MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateDescriptor* outer_state() const { return outer_state_; }
bool HasContext() const {
- return type_ == FrameStateType::kJavaScriptFunction;
+ return FrameStateFunctionInfo::IsJSFunctionType(type_);
}
size_t GetSize(OutputFrameStateCombine combine =
@@ -891,8 +1030,10 @@ class FrameStateDescriptor : public ZoneObject {
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
- MachineType GetType(size_t index) const;
- void SetType(size_t index, MachineType type);
+ MachineType GetType(size_t index) const {
+ return values_.GetOperandType(index);
+ }
+ StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
private:
FrameStateType type_;
@@ -901,12 +1042,13 @@ class FrameStateDescriptor : public ZoneObject {
size_t parameters_count_;
size_t locals_count_;
size_t stack_count_;
- ZoneVector<MachineType> types_;
+ StateValueDescriptor values_;
MaybeHandle<SharedFunctionInfo> const shared_info_;
FrameStateDescriptor* outer_state_;
};
-std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
class PhiInstruction final : public ZoneObject {
@@ -1019,13 +1161,14 @@ class InstructionBlock final : public ZoneObject {
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
- zone_allocator<std::pair<int, Constant> > > ConstantMap;
+ zone_allocator<std::pair<const int, Constant> > > ConstantMap;
typedef ZoneDeque<Instruction*> InstructionDeque;
typedef ZoneDeque<ReferenceMap*> ReferenceMapDeque;
-typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
typedef ZoneVector<InstructionBlock*> InstructionBlocks;
+
+// Forward declarations.
struct PrintableInstructionSequence;
@@ -1067,23 +1210,18 @@ class InstructionSequence final : public ZoneObject {
InstructionBlock* GetInstructionBlock(int instruction_index) const;
- static MachineType DefaultRepresentation() {
- return kPointerSize == 8 ? kRepWord64 : kRepWord32;
+ static MachineRepresentation DefaultRepresentation() {
+ return MachineType::PointerRepresentation();
}
- MachineType GetRepresentation(int virtual_register) const;
- void MarkAsRepresentation(MachineType machine_type, int virtual_register);
+ MachineRepresentation GetRepresentation(int virtual_register) const;
+ void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
bool IsReference(int virtual_register) const {
- return GetRepresentation(virtual_register) == kRepTagged;
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kTagged;
}
bool IsFloat(int virtual_register) const {
- switch (GetRepresentation(virtual_register)) {
- case kRepFloat32:
- case kRepFloat64:
- return true;
- default:
- return false;
- }
+ return IsFloatingPoint(GetRepresentation(virtual_register));
}
Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1182,6 +1320,8 @@ class InstructionSequence final : public ZoneObject {
}
return false;
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
private:
friend std::ostream& operator<<(std::ostream& os,
@@ -1199,7 +1339,7 @@ class InstructionSequence final : public ZoneObject {
InstructionDeque instructions_;
int next_virtual_register_;
ReferenceMapDeque reference_maps_;
- ZoneVector<MachineType> representations_;
+ ZoneVector<MachineRepresentation> representations_;
DeoptimizationVector deoptimization_entries_;
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
diff --git a/chromium/v8/src/compiler/interpreter-assembler.cc b/chromium/v8/src/compiler/interpreter-assembler.cc
index 1f5c0a26a5c..7080d021209 100644
--- a/chromium/v8/src/compiler/interpreter-assembler.cc
+++ b/chromium/v8/src/compiler/interpreter-assembler.cc
@@ -10,13 +10,13 @@
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
@@ -30,11 +30,15 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
: bytecode_(bytecode),
raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone),
- Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
+ Linkage::GetInterpreterDispatchDescriptor(zone),
+ MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags())),
- end_nodes_(zone),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
+ bytecode_offset_(raw_assembler_->Parameter(
+ Linkage::kInterpreterBytecodeOffsetParameter)),
+ context_(
+ raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
code_generated_(false) {}
@@ -44,14 +48,14 @@ InterpreterAssembler::~InterpreterAssembler() {}
Handle<Code> InterpreterAssembler::GenerateCode() {
DCHECK(!code_generated_);
- End();
+ // Disallow empty handlers that never return.
+ DCHECK_NE(0, graph()->end()->InputCount());
const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
Schedule* schedule = raw_assembler_->Export();
- // TODO(rmcilroy): use a non-testing code generator.
- Handle<Code> code = Pipeline::GenerateCodeForInterpreter(
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
- bytecode_name);
+ Code::STUB, bytecode_name);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
@@ -66,19 +70,19 @@ Handle<Code> InterpreterAssembler::GenerateCode() {
}
-Node* InterpreterAssembler::GetAccumulator() {
- return accumulator_;
-}
+Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
-void InterpreterAssembler::SetAccumulator(Node* value) {
- accumulator_ = value;
-}
+void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
-Node* InterpreterAssembler::ContextTaggedPointer() {
- return raw_assembler_->Parameter(Linkage::kInterpreterContextParameter);
-}
+Node* InterpreterAssembler::GetContext() { return context_; }
+
+
+void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
+
+
+Node* InterpreterAssembler::BytecodeOffset() { return bytecode_offset_; }
Node* InterpreterAssembler::RegisterFileRawPointer() {
@@ -91,52 +95,158 @@ Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
}
-Node* InterpreterAssembler::BytecodeOffset() {
- return raw_assembler_->Parameter(
- Linkage::kInterpreterBytecodeOffsetParameter);
+Node* InterpreterAssembler::DispatchTableRawPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
}
-Node* InterpreterAssembler::DispatchTableRawPointer() {
- return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+ return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
}
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return WordShl(index, kPointerSizeLog2);
+Node* InterpreterAssembler::LoadRegister(int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(),
+ RegisterFileRawPointer(), Int32Constant(offset));
}
-Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
+ return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+}
+
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return WordShl(index, kPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return raw_assembler_->Load(kMachAnyTagged, RegisterFileRawPointer(),
+ return raw_assembler_->Load(MachineType::AnyTagged(),
+ RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
}
+Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
+ return raw_assembler_->Store(MachineRepresentation::kTagged,
+ RegisterFileRawPointer(), Int32Constant(offset),
+ value, kNoWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value,
+ interpreter::Register reg) {
+ return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+}
+
+
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- return raw_assembler_->Store(kMachAnyTagged, RegisterFileRawPointer(),
- RegisterFrameOffset(reg_index), value);
+ return raw_assembler_->Store(
+ MachineRepresentation::kTagged, RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+  // Register indexes are negative, so the next register's index is the
+  // current index minus one.
+ return IntPtrAdd(reg_index, Int32Constant(-1));
}
Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
return raw_assembler_->Load(
- kMachUint8, BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(1 + operand_index)));
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
}
Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
Node* load = raw_assembler_->Load(
- kMachInt8, BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(1 + operand_index)));
+ MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+  // Ensure that we sign-extend to full pointer size.
+ if (kPointerSize == 8) {
+ load = raw_assembler_->ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+
+Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ if (TargetSupportsUnalignedAccess()) {
+ return raw_assembler_->Load(
+ MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(),
+ Int32Constant(interpreter::Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+ } else {
+ int offset =
+ interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* first_byte = raw_assembler_->Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+ Node* second_byte = raw_assembler_->Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+ return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
+ first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+ return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
+ second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+ }
+}
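// Standalone sketch of the byte-assembly fallback above (plain C++, no
// raw_assembler_): on targets without unaligned loads, a 16-bit operand is
// rebuilt from two byte loads, shifting whichever byte is the high half on
// the target's endianness.
#include <cassert>
#include <cstdint>

uint16_t LoadU16(const uint8_t* bytecode, int offset, bool little_endian) {
  uint32_t first = bytecode[offset];
  uint32_t second = bytecode[offset + 1];
  return little_endian ? static_cast<uint16_t>((second << 8) | first)
                       : static_cast<uint16_t>((first << 8) | second);
}

int main() {
  const uint8_t stream[] = {0x34, 0x12};
  assert(LoadU16(stream, 0, /*little_endian=*/true) == 0x1234);
  assert(LoadU16(stream, 0, /*little_endian=*/false) == 0x3412);
}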
+
+
+Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
+ int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ int operand_offset =
+ interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* load;
+ if (TargetSupportsUnalignedAccess()) {
+ load = raw_assembler_->Load(
+ MachineType::Int16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset + 1);
+ Node* lo_byte_offset = Int32Constant(operand_offset);
+#elif V8_TARGET_BIG_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset);
+ Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+#else
+#error "Unknown Architecture"
+#endif
+ Node* hi_byte =
+ raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), hi_byte_offset));
+ Node* lo_byte =
+ raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), lo_byte_offset));
+ hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
+ load = raw_assembler_->Word32Or(hi_byte, lo_byte);
+ }
+
   // Ensure that we sign-extend to full pointer size.
if (kPointerSize == 8) {
load = raw_assembler_->ChangeInt32ToInt64(load);
@@ -146,13 +256,25 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kCount,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
+ switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case interpreter::OperandSize::kByte:
+ DCHECK_EQ(
+ interpreter::OperandType::kCount8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case interpreter::OperandSize::kShort:
+ DCHECK_EQ(
+ interpreter::OperandType::kCount16,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
}
-Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
DCHECK_EQ(interpreter::OperandType::kImm8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
return BytecodeOperandSignExtended(operand_index);
@@ -160,16 +282,42 @@ Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kIdx,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
+ switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case interpreter::OperandSize::kByte:
+ DCHECK_EQ(
+ interpreter::OperandType::kIdx8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case interpreter::OperandSize::kShort:
+ DCHECK_EQ(
+ interpreter::OperandType::kIdx16,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
}
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kReg,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandSignExtended(operand_index);
+ switch (interpreter::Bytecodes::GetOperandType(bytecode_, operand_index)) {
+ case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kRegPair8:
+ case interpreter::OperandType::kMaybeReg8:
+ DCHECK_EQ(
+ interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+ case interpreter::OperandType::kReg16:
+ DCHECK_EQ(
+ interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return BytecodeOperandShortSignExtended(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
}
@@ -234,30 +382,54 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(index, kPointerSizeLog2));
- return raw_assembler_->Load(kMachAnyTagged, constant_pool, entry_offset);
+ return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
+ entry_offset);
+}
+
+
+Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
+ int index) {
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(Int32Constant(index), kPointerSizeLog2));
+ return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
+ entry_offset);
}
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
- return raw_assembler_->Load(kMachAnyTagged, object,
+ return raw_assembler_->Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag));
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
- return raw_assembler_->Load(kMachAnyTagged, context,
+ return raw_assembler_->Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
}
-Node* InterpreterAssembler::LoadContextSlot(int slot_index) {
- return LoadContextSlot(ContextTaggedPointer(), slot_index);
+Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
+}
+
+
+Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
+ value, kFullWriteBarrier);
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
Node* function = raw_assembler_->Load(
- kMachAnyTagged, RegisterFileRawPointer(),
+ MachineType::AnyTagged(), RegisterFileRawPointer(),
IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
Node* shared_info =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
@@ -267,21 +439,78 @@ Node* InterpreterAssembler::LoadTypeFeedbackVector() {
}
+Node* InterpreterAssembler::Projection(int index, Node* node) {
+ return raw_assembler_->Projection(index, node);
+}
+
+
+Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
+ Node* first_arg, Node* arg_count) {
+ Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
+
+ Node* code_target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg_count;
+ args[1] = new_target;
+ args[2] = constructor;
+ args[3] = first_arg;
+ args[4] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
+
+
+void InterpreterAssembler::CallPrologue() {
+ StoreRegister(SmiTag(bytecode_offset_),
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+}
+
+
+void InterpreterAssembler::CallEpilogue() {
+ // Restore the bytecode offset from the stack frame.
+ bytecode_offset_ = SmiUntag(LoadRegister(
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+}
+
+
+Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ CallPrologue();
+
+ Node* stack_pointer_before_call = nullptr;
+ if (FLAG_debug_code) {
+ stack_pointer_before_call = raw_assembler_->LoadStackPointer();
+ }
+ Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
+ if (FLAG_debug_code) {
+ Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
+ AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
+ kUnexpectedStackPointer);
+ }
+
+ CallEpilogue();
+ return return_val;
+}
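// Sketch of the spill/reload discipline CallN enforces above, with plain C++
// stand-ins: the prologue spills machine state the callee may clobber (the
// bytecode offset) into the frame, and the epilogue reloads it, so handler
// code sees a valid offset after any call. Names are hypothetical.
#include <cassert>

int g_bytecode_offset = 42;  // stands in for bytecode_offset_

struct Frame {
  int saved_bytecode_offset = -1;
};

void CallPrologue(Frame* frame) {
  frame->saved_bytecode_offset = g_bytecode_offset;  // spill before the call
}

void CallEpilogue(Frame* frame) {
  g_bytecode_offset = frame->saved_bytecode_offset;  // reload after the call
}

int CallN(Frame* frame, int (*callee)()) {
  CallPrologue(frame);
  int result = callee();
  CallEpilogue(frame);
  return result;
}

int main() {
  Frame frame;
  int r = CallN(&frame, [] { g_bytecode_offset = 0; return 7; });
  assert(r == 7 && g_bytecode_offset == 42);
}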
+
+
Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
Node* arg_count) {
- Callable builtin = CodeFactory::PushArgsAndCall(isolate());
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), builtin.descriptor(), 0, CallDescriptor::kNoFlags);
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
- Node* code_target = HeapConstant(builtin.code());
+ Node* code_target = HeapConstant(callable.code());
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg_count;
args[1] = first_arg;
args[2] = function;
- args[3] = ContextTaggedPointer();
+ args[3] = GetContext();
- return raw_assembler_->CallN(descriptor, code_target, args);
+ return CallN(descriptor, code_target, args);
}
@@ -289,7 +518,19 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* target, Node** args) {
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
- return raw_assembler_->CallN(call_descriptor, target, args);
+ return CallN(call_descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3) {
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = GetContext();
+ return CallIC(descriptor, target, args);
}
@@ -301,7 +542,7 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
args[1] = arg2;
args[2] = arg3;
args[3] = arg4;
- args[4] = ContextTaggedPointer();
+ args[4] = GetContext();
return CallIC(descriptor, target, args);
}
@@ -315,22 +556,67 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
args[2] = arg3;
args[3] = arg4;
args[4] = arg5;
- args[5] = ContextTaggedPointer();
+ args[5] = GetContext();
return CallIC(descriptor, target, args);
}
+Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
+ Node* arg_count, int result_size) {
+ Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoProperties, MachineType::AnyTagged(), result_size);
+ Node* code_target = HeapConstant(callable.code());
+
+ // Get the function entry from the function id.
+ Node* function_table = raw_assembler_->ExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate()));
+ Node* function_offset = raw_assembler_->Int32Mul(
+ function_id, Int32Constant(sizeof(Runtime::Function)));
+ Node* function = IntPtrAdd(function_table, function_offset);
+ Node* function_entry =
+ raw_assembler_->Load(MachineType::Pointer(), function,
+ Int32Constant(offsetof(Runtime::Function, entry)));
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg_count;
+ args[1] = first_arg;
+ args[2] = function_entry;
+ args[3] = GetContext();
+
+ return CallN(descriptor, code_target, args);
+}
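// Standalone model of the runtime-table lookup above: the entry address is
// table_base + function_id * sizeof(Function) + offsetof(Function, entry),
// then one pointer load. The struct below is a stand-in for
// Runtime::Function, not the real type.
#include <cassert>
#include <cstddef>

struct RuntimeFunction {
  int id;
  void* entry;
};

void* LookupEntry(const RuntimeFunction* table, int function_id) {
  const char* base = reinterpret_cast<const char*>(table);
  const char* field = base + function_id * sizeof(RuntimeFunction) +
                      offsetof(RuntimeFunction, entry);
  return *reinterpret_cast<void* const*>(field);
}

int main() {
  int marker;
  RuntimeFunction table[] = {{0, nullptr}, {1, &marker}};
  assert(LookupEntry(table, 1) == &marker);
}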
+
+
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1) {
- return raw_assembler_->CallRuntime1(function_id, arg1,
- ContextTaggedPointer());
+ CallPrologue();
+ Node* return_val =
+ raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
+ CallEpilogue();
+ return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2) {
- return raw_assembler_->CallRuntime2(function_id, arg1, arg2,
- ContextTaggedPointer());
+ CallPrologue();
+ Node* return_val =
+ raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
+ CallEpilogue();
+ return return_val;
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ CallPrologue();
+ Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
+ arg4, GetContext());
+ CallEpilogue();
+ return return_val;
}
@@ -349,11 +635,9 @@ void InterpreterAssembler::Return() {
BytecodeOffset(),
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
- ContextTaggedPointer() };
- Node* tail_call = raw_assembler_->TailCallN(
- call_descriptor(), exit_trampoline_code_object, args);
- // This should always be the end node.
- AddEndInput(tail_call);
+ GetContext() };
+ raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
+ args);
}
@@ -371,7 +655,7 @@ void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
- RawMachineAssembler::Label match, no_match;
+ RawMachineLabel match, no_match;
Node* condition = raw_assembler_->WordEqual(lhs, rhs);
raw_assembler_->Branch(condition, &match, &no_match);
raw_assembler_->Bind(&match);
@@ -388,12 +672,12 @@ void InterpreterAssembler::Dispatch() {
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = raw_assembler_->Load(
- kMachUint8, BytecodeArrayTaggedPointer(), new_bytecode_offset);
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch.
Node* target_code_object = raw_assembler_->Load(
- kMachPtr, DispatchTableRawPointer(),
+ MachineType::Pointer(), DispatchTableRawPointer(),
raw_assembler_->Word32Shl(target_bytecode,
Int32Constant(kPointerSizeLog2)));
@@ -409,26 +693,41 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
new_bytecode_offset,
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
- ContextTaggedPointer() };
- Node* tail_call =
- raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
- // This should always be the end node.
- AddEndInput(tail_call);
+ GetContext() };
+ raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
}
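// Minimal sketch of the threaded dispatch DispatchTo performs above: load
// the next bytecode, index a table of handlers, and transfer control to the
// handler. The tail call is modeled as a loop here, since portable C++
// cannot guarantee tail calls; bytecodes and handlers are hypothetical.
#include <cassert>
#include <cstdint>

enum Bytecode : uint8_t { kAdd, kReturn, kBytecodeCount };

struct State {
  const uint8_t* bytecodes;
  int offset = 0;
  int accumulator = 0;
  bool done = false;
};

using Handler = void (*)(State*);

void HandleAdd(State* s) {
  s->accumulator += 1;
  s->offset += 1;  // advance, then the loop dispatches the next handler
}
void HandleReturn(State* s) { s->done = true; }

int main() {
  static const Handler dispatch_table[kBytecodeCount] = {HandleAdd,
                                                         HandleReturn};
  const uint8_t program[] = {kAdd, kAdd, kReturn};
  State s{program};
  while (!s.done) dispatch_table[s.bytecodes[s.offset]](&s);
  assert(s.accumulator == 2);
}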
-void InterpreterAssembler::AddEndInput(Node* input) {
- DCHECK_NOT_NULL(input);
- end_nodes_.push_back(input);
+void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+ Node* abort_id = SmiTag(Int32Constant(bailout_reason));
+ Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
+  // Unreachable, but keeps TurboFan happy.
+ raw_assembler_->Return(ret_value);
}
-void InterpreterAssembler::End() {
- DCHECK(!end_nodes_.empty());
- int end_count = static_cast<int>(end_nodes_.size());
- Node* end = graph()->NewNode(raw_assembler_->common()->End(end_count),
- end_count, &end_nodes_[0]);
- graph()->SetEnd(end);
+void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+ BailoutReason bailout_reason) {
+ RawMachineLabel match, no_match;
+ Node* condition = raw_assembler_->WordEqual(lhs, rhs);
+ raw_assembler_->Branch(condition, &match, &no_match);
+ raw_assembler_->Bind(&no_match);
+ Abort(bailout_reason);
+ raw_assembler_->Bind(&match);
+}
+
+
+// static
+bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ return false;
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+ return true;
+#else
+#error "Unknown Architecture"
+#endif
}
@@ -444,14 +743,9 @@ CallDescriptor* InterpreterAssembler::call_descriptor() const {
}
-Schedule* InterpreterAssembler::schedule() {
- return raw_assembler_->schedule();
-}
-
-
Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
-} // namespace interpreter
+} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/interpreter-assembler.h b/chromium/v8/src/compiler/interpreter-assembler.h
index 67ab9cc2a9d..fb79d3eaa2f 100644
--- a/chromium/v8/src/compiler/interpreter-assembler.h
+++ b/chromium/v8/src/compiler/interpreter-assembler.h
@@ -13,7 +13,6 @@
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -47,7 +46,7 @@ class InterpreterAssembler {
Node* BytecodeOperandIdx(int operand_index);
// Returns the immediate for bytecode operand |operand_index| in the
// current bytecode.
- Node* BytecodeOperandImm8(int operand_index);
+ Node* BytecodeOperandImm(int operand_index);
// Returns the register index for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandReg(int operand_index);
@@ -56,10 +55,21 @@ class InterpreterAssembler {
Node* GetAccumulator();
void SetAccumulator(Node* value);
+ // Context.
+ Node* GetContext();
+ void SetContext(Node* value);
+
// Loads from and stores to the interpreter register file.
+ Node* LoadRegister(int offset);
+ Node* LoadRegister(interpreter::Register reg);
Node* LoadRegister(Node* reg_index);
+ Node* StoreRegister(Node* value, int offset);
+ Node* StoreRegister(Node* value, interpreter::Register reg);
Node* StoreRegister(Node* value, Node* reg_index);
+ // Returns the next consecutive register.
+ Node* NextRegister(Node* reg_index);
+
// Returns the location in memory of the register |reg_index| in the
// interpreter register file.
Node* RegisterLocation(Node* reg_index);
@@ -83,31 +93,52 @@ class InterpreterAssembler {
// Load constant at |index| in the constant pool.
Node* LoadConstantPoolEntry(Node* index);
+ // Load an element from a fixed array on the heap.
+ Node* LoadFixedArrayElement(Node* fixed_array, int index);
+
// Load a field from an object on the heap.
Node* LoadObjectField(Node* object, int offset);
- // Load |slot_index| from a context.
+ // Load |slot_index| from |context|.
Node* LoadContextSlot(Node* context, int slot_index);
-
- // Load |slot_index| from the current context.
- Node* LoadContextSlot(int slot_index);
+ Node* LoadContextSlot(Node* context, Node* slot_index);
+ // Stores |value| into |slot_index| of |context|.
+ Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
// Load the TypeFeedbackVector for the current function.
Node* LoadTypeFeedbackVector();
- // Call JSFunction or Callable |function| with |arg_count| (not including
- // receiver) and the first argument located at |first_arg|.
+ // Projects the output value of |node| at index |index|.
+ Node* Projection(int index, Node* node);
+
+ // Call constructor |constructor| with |arg_count| arguments (not including
+ // receiver) and the first argument located at |first_arg|. The |new_target|
+ // is the same as the |constructor| for the new keyword, but differs for the
+ // super keyword.
+ Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
+ Node* arg_count);
+
+ // Call JSFunction or Callable |function| with |arg_count| arguments (not
+ // including receiver) and the first argument located at |first_arg|.
Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
// Call an IC code stub.
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3, Node* arg4);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
// Call runtime function.
+ Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
+ int return_size = 1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4);
// Jump relative to the current bytecode by |jump_offset|.
void Jump(Node* jump_offset);
@@ -122,9 +153,11 @@ class InterpreterAssembler {
// Dispatch to the bytecode.
void Dispatch();
+ // Abort with the given bailout reason.
+ void Abort(BailoutReason bailout_reason);
+
protected:
- // Close the graph.
- void End();
+ static bool TargetSupportsUnalignedAccess();
// Protected helpers (for testing) which delegate to RawMachineAssembler.
CallDescriptor* call_descriptor() const;
@@ -139,8 +172,11 @@ class InterpreterAssembler {
Node* BytecodeOffset();
// Returns a raw pointer to the first entry in the interpreter dispatch table.
Node* DispatchTableRawPointer();
- // Returns a tagged pointer to the current context.
- Node* ContextTaggedPointer();
+
+ // Saves and restores the interpreter bytecode offset to/from the interpreter
+ // stack frame when performing a call.
+ void CallPrologue();
+ void CallEpilogue();
// Returns the offset of register |index| relative to RegisterFilePointer().
Node* RegisterFrameOffset(Node* index);
@@ -148,10 +184,11 @@ class InterpreterAssembler {
Node* SmiShiftBitsConstant();
Node* BytecodeOperand(int operand_index);
Node* BytecodeOperandSignExtended(int operand_index);
+ Node* BytecodeOperandShort(int operand_index);
+ Node* BytecodeOperandShortSignExtended(int operand_index);
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
- Node* CallJSBuiltin(int context_index, Node* receiver, Node** js_args,
- int js_arg_count);
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
@@ -161,24 +198,26 @@ class InterpreterAssembler {
// Starts next instruction dispatch at |new_bytecode_offset|.
void DispatchTo(Node* new_bytecode_offset);
- // Adds an end node of the graph.
- void AddEndInput(Node* input);
+ // Abort operations for debug code.
+ void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
// Private helpers which delegate to RawMachineAssembler.
Isolate* isolate();
- Schedule* schedule();
Zone* zone();
interpreter::Bytecode bytecode_;
base::SmartPointer<RawMachineAssembler> raw_assembler_;
- ZoneVector<Node*> end_nodes_;
+
Node* accumulator_;
+ Node* bytecode_offset_;
+ Node* context_;
+
bool code_generated_;
DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
};
-} // namespace interpreter
+} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/js-builtin-reducer.cc b/chromium/v8/src/compiler/js-builtin-reducer.cc
index 49ed031182d..a7a7da57cd6 100644
--- a/chromium/v8/src/compiler/js-builtin-reducer.cc
+++ b/chromium/v8/src/compiler/js-builtin-reducer.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/diamond.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
#include "src/types.h"
@@ -87,9 +87,7 @@ class JSCallReduction {
JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- simplified_(jsgraph->zone()) {}
+ : AdvancedReducer(editor), jsgraph_(jsgraph) {}
// ECMA-262, section 15.8.2.11.
@@ -109,7 +107,7 @@ Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
for (int i = 1; i < r.GetJSCallArity(); i++) {
Node* const input = r.GetJSCallInput(i);
value = graph()->NewNode(
- common()->Select(kMachNone),
+ common()->Select(MachineRepresentation::kNone),
graph()->NewNode(simplified()->NumberLessThan(), input, value), value,
input);
}
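The loop folds the call's arguments through a chain of Select nodes, keeping the larger of the running value and each next input. Ignoring NaN corner cases (which the reducer's type checks are responsible for), the fold amounts to this plain C++ sketch, assuming at least one argument:

#include <initializer_list>

// value = Select(NumberLessThan(input, value), value, input), per argument.
double MathMaxFold(std::initializer_list<double> args) {
  auto it = args.begin();
  double value = *it++;  // first JS call input
  for (; it != args.end(); ++it) {
    double input = *it;
    value = (input < value) ? value : input;  // keep the larger operand
  }
  return value;
}
// MathMaxFold({1.0, 3.0, 2.0}) == 3.0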
@@ -175,6 +173,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
+Isolate* JSBuiltinReducer::isolate() const { return jsgraph()->isolate(); }
+
+
CommonOperatorBuilder* JSBuiltinReducer::common() const {
return jsgraph()->common();
}
@@ -184,6 +185,11 @@ MachineOperatorBuilder* JSBuiltinReducer::machine() const {
return jsgraph()->machine();
}
+
+SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/js-builtin-reducer.h b/chromium/v8/src/compiler/js-builtin-reducer.h
index 66b5723246c..cfacdc1e8c3 100644
--- a/chromium/v8/src/compiler/js-builtin-reducer.h
+++ b/chromium/v8/src/compiler/js-builtin-reducer.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -16,6 +15,7 @@ namespace compiler {
class CommonOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
class JSBuiltinReducer final : public AdvancedReducer {
@@ -26,18 +26,19 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceFunctionCall(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
- JSGraph* jsgraph() const { return jsgraph_; }
Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
};
} // namespace compiler
diff --git a/chromium/v8/src/compiler/js-call-reducer.cc b/chromium/v8/src/compiler/js-call-reducer.cc
new file mode 100644
index 00000000000..a15d6fd6fdd
--- /dev/null
+++ b/chromium/v8/src/compiler/js-call-reducer.cc
@@ -0,0 +1,557 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-call-reducer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
+#include "src/type-feedback-vector-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+VectorSlotPair CallCountFeedback(VectorSlotPair p) {
+ // Extract call count from {p}.
+ if (!p.IsValid()) return VectorSlotPair();
+ CallICNexus n(p.vector(), p.slot());
+ int const call_count = n.ExtractCallCount();
+ if (call_count <= 0) return VectorSlotPair();
+
+ // Create megamorphic CallIC feedback with the given {call_count}.
+ StaticFeedbackVectorSpec spec;
+ FeedbackVectorSlot slot = spec.AddCallICSlot();
+ Handle<TypeFeedbackMetadata> metadata =
+ TypeFeedbackMetadata::New(n.GetIsolate(), &spec);
+ Handle<TypeFeedbackVector> vector =
+ TypeFeedbackVector::New(n.GetIsolate(), metadata);
+ CallICNexus nexus(vector, slot);
+ nexus.ConfigureMegamorphic(call_count);
+ return VectorSlotPair(vector, slot);
+}
+
+} // namespace
+
+
+Reduction JSCallReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallConstruct:
+ return ReduceJSCallConstruct(node);
+ case IrOpcode::kJSCallFunction:
+ return ReduceJSCallFunction(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+// ES6 section 22.1.1 The Array Constructor
+Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+
+ // Check if we have an allocation site from the CallIC.
+ Handle<AllocationSite> site;
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsAllocationSite()) {
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ }
+
+ // Turn the {node} into a {JSCreateArray} call.
+ DCHECK_LE(2u, p.arity());
+ size_t const arity = p.arity() - 2;
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceValueInput(node, target, 1);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ // TODO(bmeurer): We might need to propagate the tail call mode to
+ // the JSCreateArray operator, because an Array call in tail call
+ // position must always properly consume the parent stack frame.
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+}
+
+
+// ES6 section 20.1.1 The Number Constructor
+Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+
+ // Turn the {node} into a {JSToNumber} call.
+ DCHECK_LE(2u, p.arity());
+ Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
+ return Changed(node);
+}
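The arity arithmetic follows the value-input convention used throughout these reducers: a JSCallFunction's inputs are [target, receiver, arg0, ...], so arity 2 means the call site passed no user arguments, which is why Number() above reduces to the zero constant. As a one-liner (an illustrative helper, not a V8 API):

#include <cstddef>

// Inputs are [target, receiver, arg0, ..., argN-1]; callers assert arity >= 2.
size_t UserArgumentCount(size_t call_arity) { return call_arity - 2; }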
+
+
+// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
+Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> apply =
+ Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
+ if (arity == 2) {
+ // Neither thisArg nor argArray was provided.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else if (arity == 3) {
+ // The argArray was not provided, just remove the {target}.
+ node->RemoveInput(0);
+ --arity;
+ } else if (arity == 4) {
+ // Check if argArray is an arguments object, and {node} is the only value
+ // user of argArray (except for value uses in frame states).
+ Node* arg_array = NodeProperties::GetValueInput(node, 3);
+ if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
+ for (Edge edge : arg_array->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (edge.from() == node) continue;
+ return NoChange();
+ }
+ // Get to the actual frame state from which to extract the arguments;
+ // we can only optimize this if the {node} was already inlined into
+ // some other function (and the same holds for the {arg_array}).
+ CreateArgumentsParameters const& p =
+ CreateArgumentsParametersOf(arg_array->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
+ FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+ // Need to take the parameters from the arguments adaptor.
+ frame_state = outer_state;
+ }
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ }
+ // Remove the argArray input from the {node}.
+ node->RemoveInput(static_cast<int>(--arity));
+ // Add the actual parameters to the {node}, skipping the receiver.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ for (int i = p.start_index() + 1; i < state_info.parameter_count(); ++i) {
+ node->InsertInput(graph()->zone(), static_cast<int>(arity),
+ parameters->InputAt(i));
+ ++arity;
+ }
+ // Drop the {target} from the {node}.
+ node->RemoveInput(0);
+ --arity;
+ } else {
+ return NoChange();
+ }
+ // Change {node} to the new {JSCallFunction} operator.
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Change context of {node} to the Function.prototype.apply context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+
+// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
+Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> call = Handle<JSFunction>::cast(
+ HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
+ // Change context of {node} to the Function.prototype.call context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(call->context(), isolate())));
+ // Remove the target from {node} and use the receiver as target instead;
+ // the thisArg becomes the new receiver. If thisArg was not provided, insert
+ // undefined instead.
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode;
+ if (arity == 2) {
+ // The thisArg was not provided, use undefined as receiver.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else {
+ // Just remove the target, which is the first value input.
+ convert_mode = ConvertReceiverMode::kAny;
+ node->RemoveInput(0);
+ --arity;
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
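In miniature, this input surgery turns f.call(o, a, b) into a direct call of f with receiver o: Function.prototype.call vanishes from the input list and everything shifts left by one. A toy version over a plain vector of value inputs (hypothetical names, no graph machinery):

#include <string>
#include <vector>

// Toy value-input list for a JSCallFunction node: [target, receiver, args...].
std::vector<std::string> ReduceCall(std::vector<std::string> inputs) {
  if (inputs.size() == 2) {
    // No thisArg: the old receiver becomes the target, undefined the receiver.
    inputs[0] = inputs[1];
    inputs[1] = "undefined";
  } else {
    // Drop Function.prototype.call itself; the old receiver is now the target.
    inputs.erase(inputs.begin());
  }
  return inputs;
}
// ReduceCall({"F.p.call", "f", "o", "a", "b"}) yields {"f", "o", "a", "b"}.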
+
+
+Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Try to specialize JSCallFunction {node}s with constant {target}s.
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ if (m.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Raise a TypeError if the {target} is a "classConstructor".
+ if (IsClassConstructor(shared->kind())) {
+ NodeProperties::RemoveFrameStateInput(node, 0);
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructorNonCallableError, 1));
+ return Changed(node);
+ }
+
+ // Check for known builtin functions.
+ if (shared->HasBuiltinFunctionId()) {
+ switch (shared->builtin_function_id()) {
+ case kFunctionApply:
+ return ReduceFunctionPrototypeApply(node);
+ case kFunctionCall:
+ return ReduceFunctionPrototypeCall(node);
+ default:
+ break;
+ }
+ }
+
+ // Check for the Array constructor.
+ if (*function == function->native_context()->array_function()) {
+ return ReduceArrayConstructor(node);
+ }
+
+ // Check for the Number constructor.
+ if (*function == function->native_context()->number_function()) {
+ return ReduceNumberConstructor(node);
+ }
+ } else if (m.Value()->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> function =
+ Handle<JSBoundFunction>::cast(m.Value());
+ Handle<JSReceiver> bound_target_function(
+ function->bound_target_function(), isolate());
+ Handle<Object> bound_this(function->bound_this(), isolate());
+ Handle<FixedArray> bound_arguments(function->bound_arguments(),
+ isolate());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ ConvertReceiverMode const convert_mode =
+ (bound_this->IsNull() || bound_this->IsUndefined())
+ ? ConvertReceiverMode::kNullOrUndefined
+ : ConvertReceiverMode::kNotNullOrUndefined;
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ // Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->Constant(bound_target_function), 0);
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
+ 1);
+ // Insert the [[BoundArguments]] for {node}.
+ for (int i = 0; i < bound_arguments->length(); ++i) {
+ node->InsertInput(
+ graph()->zone(), i + 2,
+ jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
+ arity++;
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
+ // Don't mess with other {node}s that have a constant {target}.
+ // TODO(bmeurer): Also support proxies here.
+ return NoChange();
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Extract feedback from the {node} using the CallICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsAllocationSite()) {
+ // Retrieve the Array function from the {node}.
+ Node* array_function;
+ Handle<Context> native_context;
+ if (GetNativeContext(node).ToHandle(&native_context)) {
+ array_function = jsgraph()->HeapConstant(
+ handle(native_context->array_function(), isolate()));
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ array_function = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
+ native_context, native_context, effect);
+ }
+
+ // Check that the {target} is still the {array_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, array_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::ReplaceValueInput(node, array_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ return ReduceArrayConstructor(node);
+ } else if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ Node* target_function =
+ jsgraph()->Constant(handle(cell->value(), isolate()));
+
+ // Check that the {target} is still the {target_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, target_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallFunction node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ return NoChange();
+}
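Both feedback-driven paths above share one control shape: assume the CallIC's recorded target, guard the assumption with a StrictEqual check, and send the failing branch to an eager Deoptimize merged into End. Stripped of the graph plumbing, the shape is (a hedged sketch; the callables stand in for the two control successors):

// Branch(BranchHint::kTrue) on the check; Deoptimize(kEager) on IfFalse;
// the IfTrue path continues with the target known to be a constant.
template <typename Deopt, typename Call>
int GuardedCall(const void* target, const void* recorded_target,
                Deopt deoptimize, Call specialized_call) {
  if (target != recorded_target) return deoptimize();
  return specialized_call();
}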
+
+
+Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to specialize JSCallConstruct {node}s with constant {target}s.
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ if (m.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+
+ // Raise a TypeError if the {target} is not a constructor.
+ if (!function->IsConstructor()) {
+ // Drop the lazy bailout location and use the eager bailout point for the
+ // runtime function (acting as the lazy bailout point). It does not matter
+ // which bailout location we use, since we never return after throwing the
+ // exception.
+ NodeProperties::RemoveFrameStateInput(node, 0);
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallRuntime(Runtime::kThrowCalledNonCallable, 1));
+ return Changed(node);
+ }
+
+ // Check for the ArrayConstructor.
+ if (*function == function->native_context()->array_function()) {
+ // Check if we have an allocation site.
+ Handle<AllocationSite> site;
+ if (p.feedback().IsValid()) {
+ Handle<Object> feedback(
+ p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ if (feedback->IsAllocationSite()) {
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ }
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+ }
+ }
+
+ // Don't mess with other {node}s that have a constant {target}.
+ // TODO(bmeurer): Also support optimizing bound functions and proxies here.
+ return NoChange();
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // TODO(mvstanton): Use ConstructICNexus here, once available.
+ Handle<Object> feedback;
+ if (!p.feedback().IsValid()) return NoChange();
+ feedback = handle(p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ if (feedback->IsAllocationSite()) {
+ // The feedback is an AllocationSite, which means we have called the
+ // Array function and collected transition (and pretenuring) feedback
+ // for the resulting arrays. This has to be kept in sync with the
+ // implementation of the CallConstructStub.
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
+
+ // Retrieve the Array function from the {node}.
+ Node* array_function;
+ Handle<Context> native_context;
+ if (GetNativeContext(node).ToHandle(&native_context)) {
+ array_function = jsgraph()->HeapConstant(
+ handle(native_context->array_function(), isolate()));
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ array_function = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
+ native_context, native_context, effect);
+ }
+
+ // Check that the {target} is still the {array_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, array_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+ } else if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ Node* target_function =
+ jsgraph()->Constant(handle(cell->value(), isolate()));
+
+ // Check that the {target} is still the {target_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, target_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallConstruct node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ if (target == new_target) {
+ NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
+ }
+
+ // Try to further reduce the JSCallConstruct {node}.
+ Reduction const reduction = ReduceJSCallConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
+
+MaybeHandle<Context> JSCallReducer::GetNativeContext(Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context());
+}
+
+
+Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
+
+
+Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
+
+
+CommonOperatorBuilder* JSCallReducer::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSCallReducer::javascript() const {
+ return jsgraph()->javascript();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/js-call-reducer.h b/chromium/v8/src/compiler/js-call-reducer.h
new file mode 100644
index 00000000000..9ffae152ac0
--- /dev/null
+++ b/chromium/v8/src/compiler/js-call-reducer.h
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CALL_REDUCER_H_
+#define V8_COMPILER_JS_CALL_REDUCER_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+
+
+// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
+// which might allow inlining or other optimizations to be performed afterwards.
+class JSCallReducer final : public Reducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSCallReducer(JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context)
+ : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceArrayConstructor(Node* node);
+ Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceFunctionPrototypeApply(Node* node);
+ Reduction ReduceFunctionPrototypeCall(Node* node);
+ Reduction ReduceJSCallConstruct(Node* node);
+ Reduction ReduceJSCallFunction(Node* node);
+
+ MaybeHandle<Context> GetNativeContext(Node* node);
+
+ Graph* graph() const;
+ Flags flags() const { return flags_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> const native_context_;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_CALL_REDUCER_H_
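DEFINE_OPERATORS_FOR_FLAGS is what gives the Flags typedef its bitwise operators, matching the flags() & kDeoptimizationEnabled tests in the .cc file. A construction sketch (the jsgraph and native_context values are assumed to come from the surrounding pipeline):

#include "src/compiler/js-call-reducer.h"

namespace v8 {
namespace internal {
namespace compiler {

JSCallReducer MakeCallReducer(JSGraph* jsgraph,
                              MaybeHandle<Context> native_context,
                              bool deopt_ok) {
  JSCallReducer::Flags flags = JSCallReducer::kNoFlags;
  if (deopt_ok) flags |= JSCallReducer::kDeoptimizationEnabled;
  return JSCallReducer(jsgraph, flags, native_context);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8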
diff --git a/chromium/v8/src/compiler/js-context-specialization.cc b/chromium/v8/src/compiler/js-context-specialization.cc
index 0ad25e179d7..4d9d1d95041 100644
--- a/chromium/v8/src/compiler/js-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-context-specialization.cc
@@ -34,25 +34,7 @@ MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
node->opcode() == IrOpcode::kJSStoreContext);
Node* const object = NodeProperties::GetValueInput(node, 0);
- switch (object->opcode()) {
- case IrOpcode::kHeapConstant:
- return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(object));
- case IrOpcode::kParameter: {
- Node* const start = NodeProperties::GetValueInput(object, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(object->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- if (index == start->op()->ValueOutputCount() - 2) {
- return context();
- }
- break;
- }
- default:
- break;
- }
- return MaybeHandle<Context>();
+ return NodeProperties::GetSpecializationContext(object, context());
}
diff --git a/chromium/v8/src/compiler/js-frame-specialization.cc b/chromium/v8/src/compiler/js-frame-specialization.cc
index 98b18274925..769d615e4a4 100644
--- a/chromium/v8/src/compiler/js-frame-specialization.cc
+++ b/chromium/v8/src/compiler/js-frame-specialization.cc
@@ -27,38 +27,44 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
- DisallowHeapAllocation no_gc;
- Object* object;
+ Handle<Object> value;
int const index = OpParameter<int>(node);
int const parameters_count = frame()->ComputeParametersCount() + 1;
if (index == Linkage::kOsrContextSpillSlotIndex) {
- object = frame()->context();
+ value = handle(frame()->context(), isolate());
} else if (index >= parameters_count) {
- object = frame()->GetExpression(index - parameters_count);
+ value = handle(frame()->GetExpression(index - parameters_count), isolate());
} else {
// The OsrValue index 0 is the receiver.
- object = index ? frame()->GetParameter(index - 1) : frame()->receiver();
+ value =
+ handle(index ? frame()->GetParameter(index - 1) : frame()->receiver(),
+ isolate());
}
- return Replace(jsgraph()->Constant(handle(object, isolate())));
+ return Replace(jsgraph()->Constant(value));
}
Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
- DisallowHeapAllocation no_gc;
- Object* object;
+ Handle<Object> value;
int const index = ParameterIndexOf(node->op());
int const parameters_count = frame()->ComputeParametersCount() + 1;
- if (index == Linkage::kJSFunctionCallClosureParamIndex) {
- object = frame()->function();
- } else if (index == parameters_count) {
- // The Parameter index (arity + 1) is the context.
- object = frame()->context();
+ if (index == Linkage::kJSCallClosureParamIndex) {
+ // The Parameter index references the closure.
+ value = handle(frame()->function(), isolate());
+ } else if (index == Linkage::GetJSCallArgCountParamIndex(parameters_count)) {
+ // The Parameter index references the parameter count.
+ value = handle(Smi::FromInt(parameters_count - 1), isolate());
+ } else if (index == Linkage::GetJSCallContextParamIndex(parameters_count)) {
+ // The Parameter index references the context.
+ value = handle(frame()->context(), isolate());
} else {
// The Parameter index 0 is the receiver.
- object = index ? frame()->GetParameter(index - 1) : frame()->receiver();
+ value =
+ handle(index ? frame()->GetParameter(index - 1) : frame()->receiver(),
+ isolate());
}
- return Replace(jsgraph()->Constant(handle(object, isolate())));
+ return Replace(jsgraph()->Constant(value));
}
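The new Linkage helpers make the JS call parameter layout explicit: the closure at index -1, the receiver at 0, the real parameters next, then the argument count and context trailing the list. A decoding sketch whose concrete positions for arg count and context are assumptions standing in for the GetJSCall*ParamIndex helpers:

#include <string>

std::string DescribeParameter(int index, int parameters_count) {
  if (index == -1) return "closure";                       // kJSCallClosureParamIndex
  if (index == 0) return "receiver";
  if (index == parameters_count) return "argument count";  // assumed position
  if (index == parameters_count + 1) return "context";     // assumed position
  return "parameter " + std::to_string(index - 1);
}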
diff --git a/chromium/v8/src/compiler/js-generic-lowering.cc b/chromium/v8/src/compiler/js-generic-lowering.cc
index eac05657861..15ce908a1ce 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.cc
+++ b/chromium/v8/src/compiler/js-generic-lowering.cc
@@ -63,12 +63,14 @@ Reduction JSGenericLowering::Reduce(Node* node) {
}
-#define REPLACE_BINARY_OP_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
+#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
+ void JSGenericLowering::Lower##Op(Node* node) { \
+ BinaryOperationParameters const& p = \
+ BinaryOperationParametersOf(node->op()); \
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
- ReplaceWithStubCall(node, CodeFactory::BinaryOpIC( \
- isolate(), token, \
- strength(OpParameter<LanguageMode>(node))), \
+ ReplaceWithStubCall(node, \
+ CodeFactory::BinaryOpIC(isolate(), token, \
+ strength(p.language_mode())), \
CallDescriptor::kPatchableCallSiteWithNop | flags); \
}
REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
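For reference, the updated macro body expands for this first instantiation to (modulo whitespace):

void JSGenericLowering::LowerJSBitwiseOr(Node* node) {
  BinaryOperationParameters const& p = BinaryOperationParametersOf(node->op());
  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
  ReplaceWithStubCall(node,
                      CodeFactory::BinaryOpIC(isolate(), Token::BIT_OR,
                                              strength(p.language_mode())),
                      CallDescriptor::kPatchableCallSiteWithNop | flags);
}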
@@ -117,6 +119,7 @@ REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
+REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
#undef REPLACE_RUNTIME
@@ -156,7 +159,7 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0,
CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
- Operator::kNoProperties, kMachIntPtr);
+ Operator::kNoProperties, MachineType::IntPtr());
Node* compare =
graph()->NewNode(common()->Call(desc_compare),
static_cast<int>(inputs.size()), &inputs.front());
@@ -201,7 +204,8 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
node->ReplaceInput(0, booleanize);
node->ReplaceInput(1, true_value);
node->ReplaceInput(2, false_value);
- NodeProperties::ChangeOp(node, common()->Select(kMachAnyTagged));
+ NodeProperties::ChangeOp(node,
+ common()->Select(MachineRepresentation::kTagged));
}
@@ -222,8 +226,8 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
- CallDescriptor* desc =
- Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), f, nargs, properties, CallDescriptor::kNeedsFrameState);
Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
Node* arity = jsgraph()->Int32Constant(nargs);
node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
@@ -233,15 +237,6 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
}
-void JSGenericLowering::LowerJSUnaryNot(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToBoolean(
- isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
-}
-
-
void JSGenericLowering::LowerJSTypeOf(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::Typeof(isolate());
@@ -251,8 +246,7 @@ void JSGenericLowering::LowerJSTypeOf(Node* node) {
void JSGenericLowering::LowerJSToBoolean(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable =
- CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+ Callable callable = CodeFactory::ToBoolean(isolate());
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
@@ -286,7 +280,7 @@ void JSGenericLowering::LowerJSToObject(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
+ const PropertyAccess& p = PropertyAccessOf(node->op());
Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -296,7 +290,7 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
+ NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -306,44 +300,37 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
- if (p.slot_index() >= 0) {
- Callable callable = CodeFactory::LoadGlobalViaContext(isolate(), 0);
- Node* script_context = node->InputAt(0);
- node->ReplaceInput(0, jsgraph()->Int32Constant(p.slot_index()));
- node->ReplaceInput(1, script_context); // Set new context...
- node->RemoveInput(2);
- node->RemoveInput(2); // ...instead of old one.
- ReplaceWithStubCall(node, callable, flags);
-
- } else {
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
- node->RemoveInput(0); // script context
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithStubCall(node, callable, flags);
- }
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+ // Load global object from the context.
+ Node* native_context =
+ graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
+ effect, graph()->start());
+ Node* global = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), native_context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
+ effect, graph()->start());
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable, flags);
}
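LowerJSLoadGlobal (and LowerJSStoreGlobal below) now walk two tagged loads by slot offset: context -> native context, then the native context's extension slot -> the global object. As plain pointer chasing (an illustrative layout; real contexts are tagged, FixedArray-like objects):

// Models the two Load(MachineType::AnyTagged()) steps above.
struct Context {
  void* slots[64];  // hypothetical slot array, indexed like Context::SlotOffset
};

void* LoadGlobalObject(Context* context, int native_context_index,
                       int extension_index) {
  Context* native_context =
      static_cast<Context*>(context->slots[native_context_index]);
  return native_context->slots[extension_index];  // the JSGlobalObject
}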
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const StorePropertyParameters& p = StorePropertyParametersOf(node->op());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
- // We have a special case where we do keyed stores but don't have a type
- // feedback vector slot allocated to support it. In this case, install
- // the megamorphic keyed store stub which needs neither vector nor slot.
- bool use_vector_slot = FLAG_vector_stores && p.feedback().index() != -1;
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), language_mode,
- (use_vector_slot || !FLAG_vector_stores) ? UNINITIALIZED : MEGAMORPHIC);
- if (use_vector_slot) {
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
+ isolate(), language_mode, UNINITIALIZED);
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
@@ -351,51 +338,40 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
+ NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
- if (p.slot_index() >= 0) {
- Callable callable =
- CodeFactory::StoreGlobalViaContext(isolate(), 0, p.language_mode());
- Node* script_context = node->InputAt(0);
- Node* value = node->InputAt(2);
- node->ReplaceInput(0, jsgraph()->Int32Constant(p.slot_index()));
- node->ReplaceInput(1, value);
- node->ReplaceInput(2, script_context); // Set new context...
- node->RemoveInput(3);
- node->RemoveInput(3); // ...instead of old one.
- ReplaceWithStubCall(node, callable, flags);
-
- } else {
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
- node->RemoveInput(0); // script context
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3,
- jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
- }
+ Callable callable = CodeFactory::StoreICInOptimizedCode(
+ isolate(), p.language_mode(), UNINITIALIZED);
+ // Load global object from the context.
+ Node* native_context =
+ graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
+ effect, graph()->start());
+ Node* global = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), native_context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
+ effect, graph()->start());
+ node->InsertInput(zone(), 0, global);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithStubCall(node, callable,
+ CallDescriptor::kPatchableCallSite | flags);
}
@@ -423,7 +399,7 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+ 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
NodeProperties::GetValueInput(node, 0),
jsgraph()->Int32Constant(
Context::SlotOffset(Context::PREVIOUS_INDEX)),
@@ -433,7 +409,7 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
node->AppendInput(zone(), graph()->start());
- NodeProperties::ChangeOp(node, machine()->Load(kMachAnyTagged));
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
}
@@ -441,7 +417,7 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+ 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
NodeProperties::GetValueInput(node, 0),
jsgraph()->Int32Constant(
Context::SlotOffset(Context::PREVIOUS_INDEX)),
@@ -451,20 +427,20 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
- NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
- kMachAnyTagged, kFullWriteBarrier)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier)));
}
-void JSGenericLowering::LowerJSLoadDynamicGlobal(Node* node) {
- const DynamicGlobalAccess& access = DynamicGlobalAccessOf(node->op());
+void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
+ const DynamicAccess& access = DynamicAccessOf(node->op());
Runtime::FunctionId function_id =
(access.typeof_mode() == NOT_INSIDE_TYPEOF)
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
Node* projection = graph()->NewNode(common()->Projection(0), node);
NodeProperties::ReplaceUses(node, projection, node, node, node);
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
node->RemoveInput(NodeProperties::FirstValueIndex(node));
node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
ReplaceWithRuntimeCall(node, function_id);
@@ -472,19 +448,11 @@ void JSGenericLowering::LowerJSLoadDynamicGlobal(Node* node) {
}
-void JSGenericLowering::LowerJSLoadDynamicContext(Node* node) {
- const DynamicContextAccess& access = DynamicContextAccessOf(node->op());
- Node* projection = graph()->NewNode(common()->Projection(0), node);
- NodeProperties::ReplaceUses(node, projection, node, node, node);
- node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
- ReplaceWithRuntimeCall(node, Runtime::kLoadLookupSlot);
- projection->ReplaceInput(0, node);
+void JSGenericLowering::LowerJSCreate(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kNewObject);
}
-void JSGenericLowering::LowerJSCreate(Node* node) { UNIMPLEMENTED(); }
-
-
void JSGenericLowering::LowerJSCreateArguments(Node* node) {
const CreateArgumentsParameters& p = CreateArgumentsParametersOf(node->op());
switch (p.type()) {
@@ -495,12 +463,33 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
break;
case CreateArgumentsParameters::kRestArray:
- UNIMPLEMENTED();
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.start_index()));
+ ReplaceWithRuntimeCall(node, Runtime::kNewRestArguments_Generic);
break;
}
}
+void JSGenericLowering::LowerJSCreateArray(Node* node) {
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity());
+ Node* new_target = node->InputAt(1);
+ // TODO(turbofan): We embed the AllocationSite from the Operator at this
+ // point, which we should not do once we want to both consume the feedback
+ // and share the optimized code across native contexts, as the AllocationSite
+ // is associated with a single native context (it is stored in the type
+ // feedback vector, after all). Once we go for cross-context code generation,
+ // we should find a way to get at the allocation site for the actual native
+ // context at runtime.
+ Node* type_info = p.site().is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(p.site());
+ node->RemoveInput(1);
+ node->InsertInput(zone(), 1 + arity, new_target);
+ node->InsertInput(zone(), 2 + arity, type_info);
+ ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+}
+
+
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters p = CreateClosureParametersOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
@@ -510,20 +499,43 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
}
+void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
+}
+
+
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
- int literal_flags = OpParameter<int>(node->op());
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(literal_flags));
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
}
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
- int literal_flags = OpParameter<int>(node->op());
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(literal_flags));
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
}
+void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* literal_flags = jsgraph()->SmiConstant(p.flags());
+ Node* pattern = jsgraph()->HeapConstant(p.constant());
+ node->InsertInput(graph()->zone(), 1, literal_index);
+ node->InsertInput(graph()->zone(), 2, pattern);
+ node->InsertInput(graph()->zone(), 3, literal_flags);
+ ReplaceWithStubCall(node, callable, flags);
+}
+
+
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
Handle<String> name = OpParameter<Handle<String>>(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
@@ -546,44 +558,47 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
- int arity = OpParameter<int>(node);
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- CallDescriptor* desc =
- Linkage::GetStubCallDescriptor(isolate(), zone(), d, arity - 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- Node* actual_construct = NodeProperties::GetValueInput(node, 0);
- Node* original_construct = NodeProperties::GetValueInput(node, arity - 1);
- node->RemoveInput(arity - 1); // Drop original constructor.
+ Callable callable = CodeFactory::Construct(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 1, jsgraph()->Int32Constant(arity - 2));
- node->InsertInput(zone(), 2, actual_construct);
- node->InsertInput(zone(), 3, original_construct);
- node->InsertInput(zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
void JSGenericLowering::LowerJSCallFunction(Node* node) {
- const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- int arg_count = static_cast<int>(p.arity() - 2);
- CallFunctionStub stub(isolate(), arg_count, p.flags());
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ ConvertReceiverMode const mode = p.convert_mode();
+ Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- if (p.AllowTailCalls()) {
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
flags |= CallDescriptor::kSupportsTailCalls;
}
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), d, static_cast<int>(p.arity() - 1), flags);
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, stub_arity);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
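
In the rewritten LowerJSCallFunction, p.arity() includes the target and the receiver, so the actual argument count is arity - 2, while the stub descriptor is sized for the arguments plus the receiver. A hedged sketch of just that arithmetic (plain functions, not V8 API):

#include <cassert>

// p.arity() counts the target and the receiver in addition to the actual
// arguments, hence the subtraction of two below.
int ArgCountFromArity(int arity) { return arity - 2; }
// The stub descriptor is sized for the arguments plus the receiver.
int StubParameterCount(int arg_count) { return arg_count + 1; }

int main() {
  int arity = 4;  // e.g. a call f(a, b): value inputs are [f, receiver, a, b]
  assert(ArgCountFromArity(arity) == 2);
  assert(StubParameterCount(ArgCountFromArity(arity)) == 3);
  return 0;
}
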
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
+ AdjustFrameStatesForCall(node);
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
@@ -609,7 +624,8 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
Runtime::Function const* function =
Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function->function_id, 1, Operator::kNoProperties);
+ zone(), function->function_id, 1, Operator::kNoProperties,
+ CallDescriptor::kNeedsFrameState);
Node* cache_type = effect = graph()->NewNode(
common()->Call(descriptor),
jsgraph()->CEntryStubConstant(function->result_size), object,
@@ -618,11 +634,11 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
control = graph()->NewNode(common()->IfSuccess(), cache_type);
Node* object_map = effect = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object,
+ machine()->Load(MachineType::AnyTagged()), object,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
effect, control);
Node* cache_type_map = effect = graph()->NewNode(
- machine()->Load(kMachAnyTagged), cache_type,
+ machine()->Load(MachineType::AnyTagged()), cache_type,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
effect, control);
Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
@@ -643,7 +659,7 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
{
// Enum cache case.
Node* cache_type_enum_length = etrue0 = graph()->NewNode(
- machine()->Load(kMachUint32), cache_type,
+ machine()->Load(MachineType::Uint32()), cache_type,
jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
effect, if_true0);
cache_type_enum_length =
@@ -672,16 +688,16 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
{
// Load the enumeration cache from the instance descriptors of {object}.
Node* object_map_descriptors = efalse1 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object_map,
+ machine()->Load(MachineType::AnyTagged()), object_map,
jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
etrue0, if_false1);
Node* object_map_enum_cache = efalse1 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object_map_descriptors,
+ machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
kHeapObjectTag),
efalse1, if_false1);
cache_array_false1 = efalse1 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object_map_enum_cache,
+ machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
jsgraph()->IntPtrConstant(
DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
efalse1, if_false1);
@@ -691,8 +707,8 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
etrue0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
cache_array_true0 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true1,
- cache_array_false1, if_true0);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true1, cache_array_false1, if_true0);
cache_length_true0 = graph()->NewNode(
machine()->WordShl(),
@@ -711,46 +727,25 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
Node* efalse0;
{
// FixedArray case.
- Node* object_instance_type = efalse0 = graph()->NewNode(
- machine()->Load(kMachUint8), object_map,
- jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
- effect, if_false0);
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- Node* check1 = graph()->NewNode(
- machine()->Uint32LessThanOrEqual(), object_instance_type,
- jsgraph()->Uint32Constant(LAST_JS_PROXY_TYPE));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* cache_type_true1 = jsgraph()->ZeroConstant(); // Zero indicates proxy
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* cache_type_false1 = jsgraph()->OneConstant(); // One means slow check
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- cache_type_false0 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_type_true1,
- cache_type_false1, if_false0);
-
+ cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), cache_array_false0,
+ machine()->Load(MachineType::AnyTagged()), cache_array_false0,
jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
- efalse0, if_false0);
+ effect, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
Node* cache_array =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true0,
- cache_array_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true0, cache_array_false0, control);
Node* cache_length =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_length_true0,
- cache_length_false0, control);
- cache_type = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
- cache_type_true0, cache_type_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true0, cache_length_false0, control);
+ cache_type =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_type_true0, cache_type_false0, control);
for (auto edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
@@ -794,6 +789,28 @@ void JSGenericLowering::LowerJSForInStep(Node* node) {
}
+void JSGenericLowering::LowerJSLoadMessage(Node* node) {
+ ExternalReference message_address =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+ node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
+ node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+}
+
+
+void JSGenericLowering::LowerJSStoreMessage(Node* node) {
+ ExternalReference message_address =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+ node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
+ node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
+ StoreRepresentation representation(MachineRepresentation::kTagged,
+ kNoWriteBarrier);
+ NodeProperties::ChangeOp(node, machine()->Store(representation));
+}
+
+
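
Both lowerings above turn a JS-level operator into a raw machine load or store at an external reference with a zero offset. A self-contained model of that addressing pattern, with a static variable standing in for the isolate's pending-message slot (assumed layout, not V8 internals):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-in for the slot behind the external reference.
static intptr_t pending_message_slot = 0;

intptr_t LoadAtAddress(intptr_t* base, ptrdiff_t offset) {
  return *reinterpret_cast<intptr_t*>(reinterpret_cast<char*>(base) + offset);
}

void StoreAtAddress(intptr_t* base, ptrdiff_t offset, intptr_t value) {
  *reinterpret_cast<intptr_t*>(reinterpret_cast<char*>(base) + offset) = value;
}

int main() {
  StoreAtAddress(&pending_message_slot, 0, 42);           // JSStoreMessage
  assert(LoadAtAddress(&pending_message_slot, 0) == 42);  // JSLoadMessage
  return 0;
}
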
void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
@@ -802,7 +819,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* limit = graph()->NewNode(
- machine()->Load(kMachPtr),
+ machine()->Load(MachineType::Pointer()),
jsgraph()->ExternalConstant(
ExternalReference::address_of_stack_limit(isolate())),
jsgraph()->IntPtrConstant(0), effect, control);
diff --git a/chromium/v8/src/compiler/js-global-object-specialization.cc b/chromium/v8/src/compiler/js-global-object-specialization.cc
new file mode 100644
index 00000000000..e6f01b3efb3
--- /dev/null
+++ b/chromium/v8/src/compiler/js-global-object-specialization.cc
@@ -0,0 +1,320 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-global-object-specialization.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
+ Handle<Context> context;
+ bool immutable;
+ int index;
+};
+
+
+JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
+ Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context),
+ dependencies_(dependencies),
+ type_cache_(TypeCache::Get()) {}
+
+
+Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadGlobal:
+ return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSStoreGlobal:
+ return ReduceJSStoreGlobal(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
+ Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Retrieve the global object from the given {node}.
+ Handle<JSGlobalObject> global_object;
+ if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
+
+  // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(global_object, name, &result)) {
+ if (result.context->is_the_hole(result.index)) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ Node* value = effect = graph()->NewNode(
+ javascript()->LoadContext(0, result.index, result.immutable), context,
+ context, effect);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+  // Look up the name on the global object instead. We only deal with own data
+  // properties of the global object here (represented as PropertyCell).
+ LookupIterator it(global_object, name, LookupIterator::OWN);
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+
+  // A load from a non-configurable, read-only data property on the global
+  // object can be constant-folded, even without deoptimization support.
+ if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+  // A load from a non-configurable data property on the global can be lowered
+  // to a field load, even without deoptimization, because the property cannot
+  // be deleted or reconfigured to an accessor/interceptor property. If
+  // deoptimization support is available, we can additionally constant-fold
+  // certain global properties or at least lower them to field loads annotated
+  // with more precise type feedback.
+ Type* property_cell_value_type = Type::Tagged();
+ if (flags() & kDeoptimizationEnabled) {
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or the global property is configurable (i.e.
+ // can be deleted or reconfigured to an accessor property).
+ if (property_details.cell_type() != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+
+    // A load from a constant/undefined global property can be constant-folded.
+ if ((property_details.cell_type() == PropertyCellType::kConstant ||
+ property_details.cell_type() == PropertyCellType::kUndefined)) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+    // A load from a constant-type cell can benefit from type feedback.
+ if (property_details.cell_type() == PropertyCellType::kConstantType) {
+ // Compute proper type based on the current value in the cell.
+ if (property_cell_value->IsSmi()) {
+ property_cell_value_type = type_cache_.kSmi;
+ } else if (property_cell_value->IsNumber()) {
+ property_cell_value_type = type_cache_.kHeapNumber;
+ } else {
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ property_cell_value_type =
+ Type::Class(property_cell_value_map, graph()->zone());
+ }
+ }
+ } else if (property_details.IsConfigurable()) {
+ // Access to configurable global properties requires deoptimization support.
+ return NoChange();
+ }
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
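
ReduceJSLoadGlobal above is essentially a decision tree over the property cell's details. A standalone sketch of that decision under assumed names (plain enums stand in for V8's PropertyDetails and PropertyCellType):

#include <cassert>

enum class CellType { kUndefined, kConstant, kConstantType, kMutable };
enum class Lowering { kConstantFold, kTypedFieldLoad, kFieldLoad, kNoChange };

Lowering LowerGlobalLoad(bool configurable, bool read_only, CellType type,
                         bool deopt_enabled) {
  // Read-only, non-configurable properties fold unconditionally.
  if (!configurable && read_only) return Lowering::kConstantFold;
  if (deopt_enabled) {
    if (type == CellType::kConstant || type == CellType::kUndefined)
      return Lowering::kConstantFold;          // guarded by a cell dependency
    if (type == CellType::kConstantType)
      return Lowering::kTypedFieldLoad;        // field load with a sharper type
    return Lowering::kFieldLoad;
  }
  // Without deoptimization, configurable properties cannot be touched.
  return configurable ? Lowering::kNoChange : Lowering::kFieldLoad;
}

int main() {
  // e.g. `undefined` on the global object: non-configurable and read-only.
  assert(LowerGlobalLoad(false, true, CellType::kConstant, false) ==
         Lowering::kConstantFold);
  assert(LowerGlobalLoad(true, false, CellType::kMutable, false) ==
         Lowering::kNoChange);
  return 0;
}
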
+
+
+Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
+ Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Retrieve the global object from the given {node}.
+ Handle<JSGlobalObject> global_object;
+ if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
+
+  // Try to look up the name in the script context table first (lexical scoping).
+ ScriptContextTableLookupResult result;
+ if (LookupInScriptContextTable(global_object, name, &result)) {
+ if (result.context->is_the_hole(result.index)) return NoChange();
+ if (result.immutable) return NoChange();
+ Node* context = jsgraph()->HeapConstant(result.context);
+ effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+ context, value, context, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+  // Look up the name on the global object instead. We only deal with own data
+  // properties of the global object here (represented as PropertyCell).
+ LookupIterator it(global_object, name, LookupIterator::OWN);
+ if (it.state() != LookupIterator::DATA) return NoChange();
+ Handle<PropertyCell> property_cell = it.GetPropertyCell();
+ PropertyDetails property_details = property_cell->property_details();
+ Handle<Object> property_cell_value(property_cell->value(), isolate());
+
+ // Don't even bother trying to lower stores to read-only data properties.
+ if (property_details.IsReadOnly()) return NoChange();
+ switch (property_details.cell_type()) {
+ case PropertyCellType::kUndefined: {
+ return NoChange();
+ }
+ case PropertyCellType::kConstant: {
+      // A store to a constant property cell requires deoptimization support,
+      // because we might even need to deoptimize eagerly on a mismatch.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
+ jsgraph()->Constant(property_cell_value));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ break;
+ }
+ case PropertyCellType::kConstantType: {
+      // A store to a constant-type property cell requires deoptimization
+      // support, because we might even need to deoptimize eagerly on a
+      // mismatch.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Type* property_cell_value_type = Type::TaggedSigned();
+ if (property_cell_value->IsHeapObject()) {
+ // Deoptimize if the {value} is a Smi.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_true);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+
+        // Load the {value} map and check it against the {property_cell} value map.
+ Node* value_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, control);
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), value_map,
+ jsgraph()->HeapConstant(property_cell_value_map));
+ property_cell_value_type = Type::TaggedPointer();
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ case PropertyCellType::kMutable: {
+      // A store to a non-configurable data property on the global can be
+      // lowered to a field store, even without deoptimization, because the
+      // property cannot be deleted or reconfigured to an accessor/interceptor
+      // property.
+ if (property_details.IsConfigurable()) {
+ // With deoptimization support, we can lower stores even to configurable
+ // data properties on the global object, by adding a code dependency on
+ // the cell.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ dependencies()->AssumePropertyCell(property_cell);
+ }
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForPropertyCellValue()),
+ jsgraph()->HeapConstant(property_cell), value, effect, control);
+ break;
+ }
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
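
The kConstant case above guards the store with a compare-and-deoptimize: if the incoming value matches the cached constant the store can be elided, otherwise execution must leave optimized code. A toy model of that guard (real deoptimization rewires graph control flow rather than returning a value; all names are illustrative):

#include <cassert>

enum class Outcome { kStoreElided, kDeoptimize };

Outcome GuardedConstantStore(int value, int cell_constant) {
  if (value != cell_constant) return Outcome::kDeoptimize;  // IfFalse -> Deoptimize
  return Outcome::kStoreElided;                             // IfTrue continues
}

int main() {
  assert(GuardedConstantStore(7, 7) == Outcome::kStoreElided);
  assert(GuardedConstantStore(8, 7) == Outcome::kDeoptimize);
  return 0;
}
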
+
+
+MaybeHandle<JSGlobalObject> JSGlobalObjectSpecialization::GetGlobalObject(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationGlobalObject(context,
+ native_context());
+}
+
+
+bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
+ Handle<JSGlobalObject> global_object, Handle<Name> name,
+ ScriptContextTableLookupResult* result) {
+ if (!name->IsString()) return false;
+ Handle<ScriptContextTable> script_context_table(
+ global_object->native_context()->script_context_table(), isolate());
+ ScriptContextTable::LookupResult lookup_result;
+ if (!ScriptContextTable::Lookup(script_context_table,
+ Handle<String>::cast(name), &lookup_result)) {
+ return false;
+ }
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_context_table, lookup_result.context_index);
+ result->context = script_context;
+ result->immutable = IsImmutableVariableMode(lookup_result.mode);
+ result->index = lookup_result.slot_index;
+ return true;
+}
+
+
+Graph* JSGlobalObjectSpecialization::graph() const {
+ return jsgraph()->graph();
+}
+
+
+Isolate* JSGlobalObjectSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
+
+SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/js-global-object-specialization.h b/chromium/v8/src/compiler/js-global-object-specialization.h
new file mode 100644
index 00000000000..83d890c938d
--- /dev/null
+++ b/chromium/v8/src/compiler/js-global-object-specialization.h
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Specializes a given JSGraph to a given global object, potentially
+// constant-folding some {JSLoadGlobal} nodes or strength-reducing some
+// {JSStoreGlobal} nodes.
+class JSGlobalObjectSpecialization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context,
+ CompilationDependencies* dependencies);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSLoadGlobal(Node* node);
+ Reduction ReduceJSStoreGlobal(Node* node);
+
+ // Retrieve the global object from the given {node} if known.
+ MaybeHandle<JSGlobalObject> GetGlobalObject(Node* node);
+
+ struct ScriptContextTableLookupResult;
+ bool LookupInScriptContextTable(Handle<JSGlobalObject> global_object,
+ Handle<Name> name,
+ ScriptContextTableLookupResult* result);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ Flags flags() const { return flags_; }
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> native_context_;
+ CompilationDependencies* const dependencies_;
+ TypeCache const& type_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSGlobalObjectSpecialization::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
diff --git a/chromium/v8/src/compiler/js-graph.cc b/chromium/v8/src/compiler/js-graph.cc
index 9b6b187c6aa..e9387982878 100644
--- a/chromium/v8/src/compiler/js-graph.cc
+++ b/chromium/v8/src/compiler/js-graph.cc
@@ -11,11 +11,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
- return graph()->NewNode(common()->HeapConstant(object));
-}
-
-
#define CACHED(name, expr) \
cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
@@ -23,37 +18,40 @@ Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
return CACHED(kCEntryStubConstant,
- ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+ HeapConstant(CEntryStub(isolate(), 1).GetCode()));
}
- return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
+ return HeapConstant(CEntryStub(isolate(), result_size).GetCode());
+}
+
+
+Node* JSGraph::EmptyFixedArrayConstant() {
+ return CACHED(kEmptyFixedArrayConstant,
+ HeapConstant(factory()->empty_fixed_array()));
}
Node* JSGraph::UndefinedConstant() {
- return CACHED(kUndefinedConstant,
- ImmovableHeapConstant(factory()->undefined_value()));
+ return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
}
Node* JSGraph::TheHoleConstant() {
- return CACHED(kTheHoleConstant,
- ImmovableHeapConstant(factory()->the_hole_value()));
+ return CACHED(kTheHoleConstant, HeapConstant(factory()->the_hole_value()));
}
Node* JSGraph::TrueConstant() {
- return CACHED(kTrueConstant, ImmovableHeapConstant(factory()->true_value()));
+ return CACHED(kTrueConstant, HeapConstant(factory()->true_value()));
}
Node* JSGraph::FalseConstant() {
- return CACHED(kFalseConstant,
- ImmovableHeapConstant(factory()->false_value()));
+ return CACHED(kFalseConstant, HeapConstant(factory()->false_value()));
}
Node* JSGraph::NullConstant() {
- return CACHED(kNullConstant, ImmovableHeapConstant(factory()->null_value()));
+ return CACHED(kNullConstant, HeapConstant(factory()->null_value()));
}
@@ -74,11 +72,14 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
- // TODO(turbofan): canonicalize heap constants using <magic approach>.
- // TODO(titzer): We could also match against the addresses of immortable
- // immovables here, even without access to the heap, thus always
- // canonicalizing references to them.
- return graph()->NewNode(common()->HeapConstant(value));
+ if (value->IsConsString()) {
+ value = String::Flatten(Handle<String>::cast(value), TENURED);
+ }
+ Node** loc = cache_.FindHeapConstant(value);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->HeapConstant(value));
+ }
+ return *loc;
}
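
HeapConstant now uses the same find-or-create caching as the numeric constant builders below it, so equal values share one canonical node. A standalone sketch of the pattern with a plain map in place of V8's CommonNodeCache (illustrative types):

#include <cassert>
#include <string>
#include <unordered_map>

struct ToyNode { std::string payload; };

std::unordered_map<std::string, ToyNode*>& Cache() {
  static std::unordered_map<std::string, ToyNode*> cache;
  return cache;
}

ToyNode* CanonicalConstant(const std::string& value) {
  ToyNode*& loc = Cache()[value];              // like cache_.FindHeapConstant()
  if (loc == nullptr) loc = new ToyNode{value};  // create on first use
  return loc;                                  // same node for equal values
}

int main() {
  assert(CanonicalConstant("undefined") == CanonicalConstant("undefined"));
  return 0;
}
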
@@ -119,7 +120,7 @@ Node* JSGraph::Constant(int32_t value) {
Node* JSGraph::Int32Constant(int32_t value) {
Node** loc = cache_.FindInt32Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Int32Constant(value));
}
return *loc;
@@ -128,7 +129,7 @@ Node* JSGraph::Int32Constant(int32_t value) {
Node* JSGraph::Int64Constant(int64_t value) {
Node** loc = cache_.FindInt64Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Int64Constant(value));
}
return *loc;
@@ -137,7 +138,7 @@ Node* JSGraph::Int64Constant(int64_t value) {
Node* JSGraph::NumberConstant(double value) {
Node** loc = cache_.FindNumberConstant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->NumberConstant(value));
}
return *loc;
@@ -146,7 +147,7 @@ Node* JSGraph::NumberConstant(double value) {
Node* JSGraph::Float32Constant(float value) {
Node** loc = cache_.FindFloat32Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Float32Constant(value));
}
return *loc;
@@ -155,7 +156,7 @@ Node* JSGraph::Float32Constant(float value) {
Node* JSGraph::Float64Constant(double value) {
Node** loc = cache_.FindFloat64Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Float64Constant(value));
}
return *loc;
@@ -164,7 +165,7 @@ Node* JSGraph::Float64Constant(double value) {
Node* JSGraph::ExternalConstant(ExternalReference reference) {
Node** loc = cache_.FindExternalConstant(reference);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->ExternalConstant(reference));
}
return *loc;
diff --git a/chromium/v8/src/compiler/js-graph.h b/chromium/v8/src/compiler/js-graph.h
index 4f23773259d..5a25ed0697c 100644
--- a/chromium/v8/src/compiler/js-graph.h
+++ b/chromium/v8/src/compiler/js-graph.h
@@ -17,19 +17,22 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SimplifiedOperatorBuilder;
class Typer;
// Implements a facade on a Graph, enhancing the graph with JS-specific
-// notions, including a builder for for JS* operators, canonicalized global
+// notions, including various builders for operators, canonicalized global
// constants, and various helper methods.
class JSGraph : public ZoneObject {
public:
JSGraph(Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
- JSOperatorBuilder* javascript, MachineOperatorBuilder* machine)
+ JSOperatorBuilder* javascript, SimplifiedOperatorBuilder* simplified,
+ MachineOperatorBuilder* machine)
: isolate_(isolate),
graph_(graph),
common_(common),
javascript_(javascript),
+ simplified_(simplified),
machine_(machine),
cache_(zone()) {
for (int i = 0; i < kNumCachedNodes; i++) cached_nodes_[i] = nullptr;
@@ -37,6 +40,7 @@ class JSGraph : public ZoneObject {
// Canonicalized global constants.
Node* CEntryStubConstant(int result_size);
+ Node* EmptyFixedArrayConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -117,8 +121,9 @@ class JSGraph : public ZoneObject {
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
- JSOperatorBuilder* javascript() const { return javascript_; }
CommonOperatorBuilder* common() const { return common_; }
+ JSOperatorBuilder* javascript() const { return javascript_; }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
MachineOperatorBuilder* machine() const { return machine_; }
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
@@ -130,6 +135,7 @@ class JSGraph : public ZoneObject {
private:
enum CachedNode {
kCEntryStubConstant,
+ kEmptyFixedArrayConstant,
kUndefinedConstant,
kTheHoleConstant,
kTrueConstant,
@@ -147,11 +153,11 @@ class JSGraph : public ZoneObject {
Graph* graph_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
+ SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
CommonNodeCache cache_;
Node* cached_nodes_[kNumCachedNodes];
- Node* ImmovableHeapConstant(Handle<HeapObject> value);
Node* NumberConstant(double value);
DISALLOW_COPY_AND_ASSIGN(JSGraph);
diff --git a/chromium/v8/src/compiler/js-inlining-heuristic.cc b/chromium/v8/src/compiler/js-inlining-heuristic.cc
new file mode 100644
index 00000000000..cd5637b0c42
--- /dev/null
+++ b/chromium/v8/src/compiler/js-inlining-heuristic.cc
@@ -0,0 +1,141 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-inlining-heuristic.h"
+
+#include "src/compiler.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction JSInliningHeuristic::Reduce(Node* node) {
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+
+ // Check if we already saw that {node} before, and if so, just skip it.
+ if (seen_.find(node->id()) != seen_.end()) return NoChange();
+ seen_.insert(node->id());
+
+ Node* callee = node->InputAt(0);
+ HeapObjectMatcher match(callee);
+ if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+ // Functions marked with %SetForceInlineFlag are immediately inlined.
+ if (function->shared()->force_inline()) {
+ return inliner_.ReduceJSCall(node, function);
+ }
+
+  // Handle special inlining modes right away:
+  //  - For restricted inlining: stop all handling at this point.
+  //  - For stress inlining: immediately handle all functions.
+ switch (mode_) {
+ case kRestrictedInlining:
+ return NoChange();
+ case kStressInlining:
+ return inliner_.ReduceJSCall(node, function);
+ case kGeneralInlining:
+ break;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Everything below this line is part of the inlining heuristic.
+ // ---------------------------------------------------------------------------
+
+ // Built-in functions are handled by the JSBuiltinReducer.
+ if (function->shared()->HasBuiltinFunctionId()) return NoChange();
+
+ // Don't inline builtins.
+ if (function->shared()->IsBuiltin()) return NoChange();
+
+  // Quick check on the source code length to avoid parsing large candidates.
+ if (function->shared()->SourceSize() > FLAG_max_inlined_source_size) {
+ return NoChange();
+ }
+
+  // Quick check on the size of the AST to avoid parsing large candidates.
+ if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+ return NoChange();
+ }
+
+ // Avoid inlining within or across the boundary of asm.js code.
+ if (info_->shared_info()->asm_function()) return NoChange();
+ if (function->shared()->asm_function()) return NoChange();
+
+  // Stop inlining once the maximum allowed level is reached.
+ int level = 0;
+ for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ frame_state->opcode() == IrOpcode::kFrameState;
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0)) {
+ if (++level > FLAG_max_inlining_levels) return NoChange();
+ }
+
+ // Gather feedback on how often this call site has been hit before.
+ int calls = -1; // Same default as CallICNexus::ExtractCallCount.
+ // TODO(turbofan): We also want call counts for constructor calls.
+ if (node->opcode() == IrOpcode::kJSCallFunction) {
+ CallFunctionParameters p = CallFunctionParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ calls = nexus.ExtractCallCount();
+ }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Everything above this line is part of the inlining heuristic.
+ // ---------------------------------------------------------------------------
+
+ // In the general case we remember the candidate for later.
+ candidates_.insert({function, node, calls});
+ return NoChange();
+}
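
The depth limit above is computed by walking the chain of outer frame states attached to the call. A self-contained sketch of that walk (a toy linked list stands in for FrameState nodes; the level cap is an assumed value):

#include <cassert>

struct ToyFrameState { ToyFrameState* outer; };

bool ExceedsInliningLevels(ToyFrameState* frame_state, int max_levels) {
  int level = 0;
  for (ToyFrameState* fs = frame_state; fs != nullptr; fs = fs->outer) {
    if (++level > max_levels) return true;  // already nested too deeply
  }
  return false;
}

int main() {
  ToyFrameState outer{nullptr}, inner{&outer};
  assert(!ExceedsInliningLevels(&inner, 5));
  assert(ExceedsInliningLevels(&inner, 1));
  return 0;
}
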
+
+
+void JSInliningHeuristic::Finalize() {
+ if (candidates_.empty()) return; // Nothing to do without candidates.
+ if (FLAG_trace_turbo_inlining) PrintCandidates();
+
+ // We inline at most one candidate in every iteration of the fixpoint.
+ // This is to ensure that we don't consume the full inlining budget
+ // on things that aren't called very often.
+ // TODO(bmeurer): Use std::priority_queue instead of std::set here.
+ while (!candidates_.empty()) {
+ if (cumulative_count_ > FLAG_max_inlined_nodes_cumulative) return;
+ auto i = candidates_.begin();
+ Candidate candidate = *i;
+ candidates_.erase(i);
+ // Make sure we don't try to inline dead candidate nodes.
+ if (!candidate.node->IsDead()) {
+ Reduction r = inliner_.ReduceJSCall(candidate.node, candidate.function);
+ if (r.Changed()) {
+ cumulative_count_ += candidate.function->shared()->ast_node_count();
+ return;
+ }
+ }
+ }
+}
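
A toy version of Finalize's policy, assuming made-up sizes and call counts: candidates sort hottest-first, only one is inlined per round, and its AST size is charged against a cumulative budget:

#include <cassert>
#include <set>

struct Candidate {
  int id;
  int calls;      // hotter call sites sort first
  int ast_nodes;  // size charged against the cumulative budget
  bool operator<(const Candidate& other) const {
    if (calls != other.calls) return calls > other.calls;
    return id < other.id;
  }
};

int main() {
  std::set<Candidate> candidates = {{1, 10, 40}, {2, 50, 30}, {3, 5, 50}};
  int cumulative = 0;
  const int budget = 60;
  // One "Finalize" round: inline only the hottest live candidate, then yield
  // back to the fixpoint so a single round cannot drain the whole budget.
  if (!candidates.empty() && cumulative <= budget) {
    Candidate best = *candidates.begin();
    candidates.erase(candidates.begin());
    cumulative += best.ast_nodes;
    assert(best.id == 2);  // the most-called site wins this round
  }
  assert(cumulative == 30 && candidates.size() == 2);
  return 0;
}
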
+
+
+bool JSInliningHeuristic::CandidateCompare::operator()(
+ const Candidate& left, const Candidate& right) const {
+ return left.node != right.node && left.calls >= right.calls;
+}
+
+
+void JSInliningHeuristic::PrintCandidates() {
+ PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
+ for (const Candidate& candidate : candidates_) {
+ PrintF(" id:%d, calls:%d, size[source]:%d, size[ast]:%d / %s\n",
+ candidate.node->id(), candidate.calls,
+ candidate.function->shared()->SourceSize(),
+ candidate.function->shared()->ast_node_count(),
+ candidate.function->shared()->DebugName()->ToCString().get());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/js-inlining-heuristic.h b/chromium/v8/src/compiler/js-inlining-heuristic.h
new file mode 100644
index 00000000000..7f577475bf4
--- /dev/null
+++ b/chromium/v8/src/compiler/js-inlining-heuristic.h
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INLINING_HEURISTIC_H_
+#define V8_COMPILER_JS_INLINING_HEURISTIC_H_
+
+#include "src/compiler/js-inlining.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSInliningHeuristic final : public AdvancedReducer {
+ public:
+ enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
+ JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
+ CompilationInfo* info, JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ mode_(mode),
+ inliner_(editor, local_zone, info, jsgraph),
+ candidates_(local_zone),
+ seen_(local_zone),
+ info_(info) {}
+
+ Reduction Reduce(Node* node) final;
+
+ // Processes the list of candidates gathered while the reducer was running,
+ // and inlines call sites that the heuristic determines to be important.
+ void Finalize() final;
+
+ private:
+ struct Candidate {
+ Handle<JSFunction> function; // The call target being inlined.
+ Node* node; // The call site at which to inline.
+ int calls; // Number of times the call site was hit.
+ };
+
+ // Comparator for candidates.
+ struct CandidateCompare {
+ bool operator()(const Candidate& left, const Candidate& right) const;
+ };
+
+ // Candidates are kept in a sorted set of unique candidates.
+ typedef ZoneSet<Candidate, CandidateCompare> Candidates;
+
+ // Dumps candidates to console.
+ void PrintCandidates();
+
+ Mode const mode_;
+ JSInliner inliner_;
+ Candidates candidates_;
+ ZoneSet<NodeId> seen_;
+ CompilationInfo* info_;
+ int cumulative_count_ = 0;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_INLINING_HEURISTIC_H_
diff --git a/chromium/v8/src/compiler/js-inlining.cc b/chromium/v8/src/compiler/js-inlining.cc
index 0b7c78979c8..99a1547b9ac 100644
--- a/chromium/v8/src/compiler/js-inlining.cc
+++ b/chromium/v8/src/compiler/js-inlining.cc
@@ -4,21 +4,21 @@
#include "src/compiler/js-inlining.h"
-#include "src/ast.h"
-#include "src/ast-numbering.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/full-codegen/full-codegen.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
namespace v8 {
namespace internal {
@@ -30,30 +30,45 @@ namespace compiler {
} while (false)
-// Provides convenience accessors for calls to JS functions.
-class JSCallFunctionAccessor {
+// Provides convenience accessors for the common layout of nodes having either
+// the {JSCallFunction} or the {JSCallConstruct} operator.
+class JSCallAccessor {
public:
- explicit JSCallFunctionAccessor(Node* call) : call_(call) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ explicit JSCallAccessor(Node* call) : call_(call) {
+ DCHECK(call->opcode() == IrOpcode::kJSCallFunction ||
+ call->opcode() == IrOpcode::kJSCallConstruct);
}
- Node* jsfunction() { return call_->InputAt(0); }
+ Node* target() {
+    // Both {JSCallFunction} and {JSCallConstruct} have the same layout here.
+ return call_->InputAt(0);
+ }
+
+ Node* receiver() {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, call_->opcode());
+ return call_->InputAt(1);
+ }
- Node* receiver() { return call_->InputAt(1); }
+ Node* new_target() {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, call_->opcode());
+ return call_->InputAt(formal_arguments() + 1);
+ }
- Node* formal_argument(size_t index) {
- DCHECK(index < formal_arguments());
- return call_->InputAt(static_cast<int>(2 + index));
+ Node* frame_state_before() {
+ return NodeProperties::GetFrameStateInput(call_, 1);
}
- size_t formal_arguments() {
- // {value_inputs} includes jsfunction and receiver.
- size_t value_inputs = call_->op()->ValueInputCount();
- DCHECK_GE(call_->InputCount(), 2);
- return value_inputs - 2;
+ Node* frame_state_after() {
+    // Both {JSCallFunction} and {JSCallConstruct} have a frame state after.
+ return NodeProperties::GetFrameStateInput(call_, 0);
}
- Node* frame_state() { return NodeProperties::GetFrameStateInput(call_, 0); }
+ int formal_arguments() {
+    // Both {JSCallFunction} and {JSCallConstruct} have two extra value inputs:
+    //  - JSCallConstruct: the target function and the new target.
+    //  - JSCallFunction: the target function and the receiver.
+ return call_->op()->ValueInputCount() - 2;
+ }
private:
Node* call_;
@@ -117,20 +132,23 @@ class CopyVisitor {
};
-Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
- Node* start, Node* end) {
+Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, Node* start, Node* end) {
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee, and {effect} becomes
// the effect input of the start of the inlinee.
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
- // Context is last argument.
+ int const inlinee_new_target_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 3;
+ int const inlinee_arity_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 2;
int const inlinee_context_index =
static_cast<int>(start->op()->ValueOutputCount()) - 1;
- // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
- // context, effect, control.
+ // {inliner_inputs} counts JSFunction, receiver, arguments, but not
+ // new target value, argument count, context, effect or control.
int inliner_inputs = call->op()->ValueInputCount();
// Iterate over all uses of the start node.
for (Edge edge : start->use_edges()) {
@@ -139,10 +157,16 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
case IrOpcode::kParameter: {
int index = 1 + ParameterIndexOf(use->op());
DCHECK_LE(index, inlinee_context_index);
- if (index < inliner_inputs && index < inlinee_context_index) {
+ if (index < inliner_inputs && index < inlinee_new_target_index) {
// There is an input from the call, and the index is a value
// projection but not the context, so rewire the input.
Replace(use, call->InputAt(index));
+ } else if (index == inlinee_new_target_index) {
+ // The projection is requesting the new target value.
+ Replace(use, new_target);
+ } else if (index == inlinee_arity_index) {
+ // The projection is requesting the number of arguments.
+ Replace(use, jsgraph_->Int32Constant(inliner_inputs - 2));
} else if (index == inlinee_context_index) {
// The projection is requesting the inlinee function context.
Replace(use, context);
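
The rewiring above maps each Parameter projection of the inlinee's start node either to a call input or to one of the trailing outputs (new target, argument count, context). A sketch of that index arithmetic under the layout described in this hunk (toy enum; under-applied parameters are handled elsewhere in the real code):

#include <cassert>

enum class Replacement {
  kCallInput, kNewTarget, kArgumentCount, kContext, kOther
};

Replacement ClassifyParameter(int index, int inliner_inputs,
                              int value_output_count) {
  const int new_target_index = value_output_count - 3;
  const int arity_index = value_output_count - 2;
  const int context_index = value_output_count - 1;
  if (index < inliner_inputs && index < new_target_index)
    return Replacement::kCallInput;  // value comes straight from the call
  if (index == new_target_index) return Replacement::kNewTarget;
  if (index == arity_index) return Replacement::kArgumentCount;
  if (index == context_index) return Replacement::kContext;
  return Replacement::kOther;  // e.g. unsupplied parameters
}

int main() {
  // A one-parameter callee: 6 value outputs, a call with 3 value inputs.
  assert(ClassifyParameter(1, 3, 6) == Replacement::kCallInput);  // receiver
  assert(ClassifyParameter(3, 3, 6) == Replacement::kNewTarget);
  assert(ClassifyParameter(5, 3, 6) == Replacement::kContext);
  return 0;
}
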
@@ -199,7 +223,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
values.push_back(control_output);
effects.push_back(control_output);
Node* value_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Phi(kMachAnyTagged, input_count),
+ jsgraph_->common()->Phi(MachineRepresentation::kTagged, input_count),
static_cast<int>(values.size()), &values.front());
Node* effect_output = jsgraph_->graph()->NewNode(
jsgraph_->common()->EffectPhi(input_count),
@@ -213,23 +237,22 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
}
-Node* JSInliner::CreateArgumentsAdaptorFrameState(
- JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info,
- Zone* temp_zone) {
+Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared) {
const FrameStateFunctionInfo* state_info =
jsgraph_->common()->CreateFrameStateFunctionInfo(
- FrameStateType::kArgumentsAdaptor,
- static_cast<int>(call->formal_arguments()) + 1, 0, shared_info,
+ frame_state_type, parameter_count + 1, 0, shared,
CALL_MAINTAINS_NATIVE_CONTEXT);
const Operator* op = jsgraph_->common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
const Operator* op0 = jsgraph_->common()->StateValues(0);
Node* node0 = jsgraph_->graph()->NewNode(op0);
- NodeVector params(temp_zone);
- params.push_back(call->receiver());
- for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
- params.push_back(call->formal_argument(argument));
+ NodeVector params(local_zone_);
+ for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
+ params.push_back(node->InputAt(1 + parameter));
}
const Operator* op_param =
jsgraph_->common()->StateValues(static_cast<int>(params.size()));
@@ -237,25 +260,70 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(
op_param, static_cast<int>(params.size()), &params.front());
return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
jsgraph_->UndefinedConstant(),
- call->jsfunction(), call->frame_state());
+ node->InputAt(0), outer_frame_state);
}
-Reduction JSInliner::Reduce(Node* node) {
- if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
+namespace {
+
+// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
+bool NeedsImplicitReceiver(Handle<JSFunction> function, Isolate* isolate) {
+ Code* construct_stub = function->shared()->construct_stub();
+ return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
+ construct_stub != *isolate->builtins()->ConstructedNonConstructable();
+}
- JSCallFunctionAccessor call(node);
- HeapObjectMatcher match(call.jsfunction());
- if (!match.HasValue()) return NoChange();
+} // namespace
- if (!match.Value()->IsJSFunction()) return NoChange();
+
+Reduction JSInliner::Reduce(Node* node) {
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+
+  // This reducer can handle both normal function calls and constructor calls
+  // whenever the target is a constant function object, as follows:
+ // - JSCallFunction(target:constant, receiver, args...)
+ // - JSCallConstruct(target:constant, args..., new.target)
+ HeapObjectMatcher match(node->InputAt(0));
+ if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- if (mode_ == kRestrictedInlining && !function->shared()->force_inline()) {
+
+ return ReduceJSCall(node, function);
+}
+
+
+Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
+ DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ JSCallAccessor call(node);
+
+ // Function must be inlineable.
+ if (!function->shared()->IsInlineable()) {
+ TRACE("Not inlining %s into %s because callee is not inlineable\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
+ // Constructor must be constructable.
+ if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ !function->IsConstructor()) {
+ TRACE("Not inlining %s into %s because constructor is not constructable.\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+ // Class constructors are callable, but [[Call]] will raise an exception.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+ if (node->opcode() == IrOpcode::kJSCallFunction &&
+ IsClassConstructor(function->shared()->kind())) {
+ TRACE("Not inlining %s into %s because callee is a class constructor.\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
+ // Function contains break points.
if (function->shared()->HasDebugInfo()) {
- // Function contains break points.
TRACE("Not inlining %s into %s because callee may contain break points\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -281,7 +349,7 @@ Reduction JSInliner::Reduce(Node* node) {
// TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
// not inlining recursive functions. We might want to relax that at some
// point.
- for (Node* frame_state = call.frame_state();
+ for (Node* frame_state = call.frame_state_after();
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
@@ -295,10 +363,20 @@ Reduction JSInliner::Reduce(Node* node) {
}
}
+ // TODO(turbofan): Inlining into a try-block is not yet supported.
+ if (NodeProperties::IsExceptionalCall(node)) {
+ TRACE("Not inlining %s into %s because of surrounding try-block\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
- if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
+ if (info_->is_deoptimization_enabled()) {
+ info.MarkAsDeoptimizationEnabled();
+ }
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
@@ -310,53 +388,131 @@ Reduction JSInliner::Reduce(Node* node) {
return NoChange();
}
+  // In strong mode, in the case of too few arguments we need to throw a
+  // TypeError, so we must not inline this call.
+ int parameter_count = info.literal()->parameter_count();
+ if (is_strong(info.language_mode()) &&
+ call.formal_arguments() < parameter_count) {
+ TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
if (!Compiler::EnsureDeoptimizationSupport(&info)) {
TRACE("Not inlining %s into %s because deoptimization support failed\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+ // Remember that we inlined this function. This needs to be called right
+ // after we ensure deoptimization support so that the code flusher
+ // does not remove the code with the deoptimization support.
+ info_->AddInlinedFunction(info.shared_info());
+
+ // ----------------------------------------------------------------
+ // After this point, we've made a decision to inline this function.
+  // We shall not bail out of inlining if we got here.
TRACE("Inlining %s into %s\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- Graph graph(info.zone());
+ // TODO(mstarzinger): We could use the temporary zone for the graph because
+ // nodes are copied. This however leads to Zone-Types being allocated in the
+ // wrong zone and makes the engine explode at high speeds. Explosion bad!
+ Graph graph(jsgraph_->zone());
JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
- jsgraph_->javascript(), jsgraph_->machine());
+ jsgraph_->javascript(), jsgraph_->simplified(),
+ jsgraph_->machine());
AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
graph_builder.CreateGraph(false);
+ CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
+ visitor.CopyGraph();
+
+ Node* start = visitor.GetCopy(graph.start());
+ Node* end = visitor.GetCopy(graph.end());
+ Node* frame_state = call.frame_state_after();
+ Node* new_target = jsgraph_->UndefinedConstant();
+
+ // Insert nodes around the call that model the behavior required for a
+ // constructor dispatch (allocate implicit receiver and check return value).
+ // This models the behavior usually accomplished by our {JSConstructStub}.
+ // Note that the context has to be the callers context (input to call node).
+ Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
+ if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ NeedsImplicitReceiver(function, info_->isolate())) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* create = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->Create(), call.target(), call.new_target(),
+ context, call.frame_state_before(), effect);
+ NodeProperties::ReplaceEffectInput(node, create);
+ // Insert a check of the return value to determine whether the return value
+ // or the implicit receiver should be selected as a result of the call.
+ Node* check = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
+ node, context, node, start);
+ Node* select = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Select(MachineRepresentation::kTagged), check, node,
+ create);
+ NodeProperties::ReplaceUses(node, select, check, node, node);
+ NodeProperties::ReplaceValueInput(select, node, 1);
+ NodeProperties::ReplaceValueInput(check, node, 0);
+ NodeProperties::ReplaceEffectInput(check, node);
+ receiver = create; // The implicit receiver.
+ }
+
+ // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
+ // normal {JSCallFunction} node so that the rest of the inlining machinery
+ // behaves as if we were dealing with a regular function invocation.
+ if (node->opcode() == IrOpcode::kJSCallConstruct) {
+ new_target = call.new_target(); // Retrieve new target value input.
+ node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
+ node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+ // Insert a construct stub frame into the chain of frame states. This will
+ // reconstruct the proper frame when deoptimizing within the constructor.
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ FrameStateType::kConstructStub, info.shared_info());
+ }
+
// The inlinee specializes to the context from the JSFunction object.
// TODO(turbofan): We might want to load the context from the JSFunction at
// runtime in case we only know the SharedFunctionInfo once we have dynamic
// type feedback in the compiler.
Node* context = jsgraph_->Constant(handle(function->context()));
- CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
- visitor.CopyGraph();
-
- Node* start = visitor.GetCopy(graph.start());
- Node* end = visitor.GetCopy(graph.end());
-
- Node* frame_state = call.frame_state();
- size_t const inlinee_formal_parameters = start->op()->ValueOutputCount() - 3;
- // Insert argument adaptor frame if required.
- if (call.formal_arguments() != inlinee_formal_parameters) {
- // In strong mode, in case of too few arguments we need to throw a
- // TypeError so we must not inline this call.
- if (is_strong(info.language_mode()) &&
- call.formal_arguments() < inlinee_formal_parameters) {
- return NoChange();
- }
- frame_state = CreateArgumentsAdaptorFrameState(&call, info.shared_info(),
- info.zone());
+  // Insert a JSConvertReceiver node for sloppy callees. Note that the context
+  // passed into this node has to be the callee's context (loaded above). Note
+  // that the frame state passed to the JSConvertReceiver must be the frame
+  // state _before_ the call; there is no need to fiddle with the receiver in
+  // that frame state, though, since the conversion of the receiver can be
+  // repeated any number of times without being observable.
+ if (node->opcode() == IrOpcode::kJSCallFunction &&
+ is_sloppy(info.language_mode()) && !function->shared()->native()) {
+ const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* convert = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
+ call.receiver(), context, call.frame_state_before(), effect, start);
+ NodeProperties::ReplaceValueInput(node, convert, 1);
+ NodeProperties::ReplaceEffectInput(node, convert);
}
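
JSConvertReceiver models the receiver coercion a sloppy-mode callee performs on entry: undefined and null are replaced by the global proxy, other primitives are wrapped via ToObject, and receivers pass through unchanged, which is also why repeating the conversion is unobservable. A hedged stand-alone model (the Kind enum and helpers are illustrative assumptions):

#include <cassert>

enum class Kind { kUndefined, kNull, kPrimitive, kObject };
struct Receiver { Kind kind; };

// Illustrative placeholders for the real global proxy and ToObject wrapper.
Receiver GlobalProxy() { return {Kind::kObject}; }
Receiver ToObject(Receiver) { return {Kind::kObject}; }

// Sloppy-mode receiver conversion; idempotent, hence safe to repeat.
Receiver ConvertReceiver(Receiver receiver) {
  switch (receiver.kind) {
    case Kind::kUndefined:
    case Kind::kNull:
      return GlobalProxy();
    case Kind::kPrimitive:
      return ToObject(receiver);
    case Kind::kObject:
      return receiver;
  }
  return receiver;  // Unreachable; silences compiler warnings.
}

int main() {
  assert(ConvertReceiver({Kind::kNull}).kind == Kind::kObject);
  assert(ConvertReceiver({Kind::kPrimitive}).kind == Kind::kObject);
  return 0;
}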
- // Remember that we inlined this function.
- info_->AddInlinedFunction(info.shared_info());
+  // Insert an argument adaptor frame if required. The callee's formal
+  // parameter count (i.e. value outputs of the start node minus target,
+  // receiver, new target, arguments count and context) has to match the
+  // number of arguments passed to the call.
+ DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
+ if (call.formal_arguments() != parameter_count) {
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ FrameStateType::kArgumentsAdaptor, info.shared_info());
+ }
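
The adaptor frame exists because JavaScript tolerates arity mismatches: parameters without a matching argument read as undefined, and surplus arguments are simply not bound to any parameter. A minimal sketch of the adaptation the frame models, using std::string stand-ins for tagged values:

#include <cassert>
#include <string>
#include <vector>

// What the callee's formal parameters observe under an arity mismatch.
std::vector<std::string> AdaptArguments(std::vector<std::string> actual,
                                        size_t formal_parameter_count) {
  actual.resize(formal_parameter_count, "undefined");  // Pad or truncate.
  return actual;
}

int main() {
  assert(AdaptArguments({"a"}, 3).back() == "undefined");  // Padded.
  assert(AdaptArguments({"a", "b", "c"}, 1).size() == 1);  // Surplus dropped.
  return 0;
}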
- return InlineCall(node, context, frame_state, start, end);
+ return InlineCall(node, new_target, context, frame_state, start, end);
}
} // namespace compiler
diff --git a/chromium/v8/src/compiler/js-inlining.h b/chromium/v8/src/compiler/js-inlining.h
index 21057e61e40..99eff96c4c9 100644
--- a/chromium/v8/src/compiler/js-inlining.h
+++ b/chromium/v8/src/compiler/js-inlining.h
@@ -16,35 +16,37 @@ class CompilationInfo;
namespace compiler {
-// Forward declarations.
-class JSCallFunctionAccessor;
-
+// The JSInliner provides the core graph inlining machinery. Note that this
+// class only deals with the mechanics of how to inline one graph into
+// another; heuristics that decide what and how much to inline are beyond its
+// scope.
class JSInliner final : public AdvancedReducer {
public:
- enum Mode { kRestrictedInlining, kGeneralInlining };
-
- JSInliner(Editor* editor, Mode mode, Zone* local_zone, CompilationInfo* info,
+ JSInliner(Editor* editor, Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph)
: AdvancedReducer(editor),
- mode_(mode),
local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph) {}
+  // Reducer interface; eagerly inlines everything.
Reduction Reduce(Node* node) final;
+ // Can be used by inlining heuristics or by testing code directly, without
+ // using the above generic reducer interface of the inlining machinery.
+ Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
+
private:
- Mode const mode_;
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
- Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
- Handle<SharedFunctionInfo> shared_info,
- Zone* temp_zone);
+ Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared);
- Reduction InlineCall(Node* call, Node* context, Node* frame_state,
- Node* start, Node* end);
+ Reduction InlineCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, Node* start, Node* end);
};
} // namespace compiler
diff --git a/chromium/v8/src/compiler/js-intrinsic-lowering.cc b/chromium/v8/src/compiler/js-intrinsic-lowering.cc
index 219a452a7d6..ca5cb932b4b 100644
--- a/chromium/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/chromium/v8/src/compiler/js-intrinsic-lowering.cc
@@ -6,13 +6,16 @@
#include <stack>
+#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -23,7 +26,7 @@ JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
: AdvancedReducer(editor),
jsgraph_(jsgraph),
mode_(mode),
- simplified_(jsgraph->zone()) {}
+ type_cache_(TypeCache::Get()) {}
Reduction JSIntrinsicLowering::Reduce(Node* node) {
@@ -34,16 +37,14 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineConstructDouble:
return ReduceConstructDouble(node);
- case Runtime::kInlineDateField:
- return ReduceDateField(node);
+ case Runtime::kInlineCreateIterResultObject:
+ return ReduceCreateIterResultObject(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
case Runtime::kInlineDoubleHi:
return ReduceDoubleHi(node);
case Runtime::kInlineDoubleLo:
return ReduceDoubleLo(node);
- case Runtime::kInlineHeapObjectGetMap:
- return ReduceHeapObjectGetMap(node);
case Runtime::kInlineIncrementStatsCounter:
return ReduceIncrementStatsCounter(node);
case Runtime::kInlineIsArray:
@@ -53,35 +54,21 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsFunction:
- return ReduceIsInstanceType(node, JS_FUNCTION_TYPE);
+ return ReduceIsFunction(node);
case Runtime::kInlineIsRegExp:
return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineIsJSReceiver:
+ return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
case Runtime::kInlineJSValueGetValue:
return ReduceJSValueGetValue(node);
- case Runtime::kInlineLikely:
- return ReduceUnLikely(node, BranchHint::kTrue);
- case Runtime::kInlineMapGetInstanceType:
- return ReduceMapGetInstanceType(node);
case Runtime::kInlineMathClz32:
return ReduceMathClz32(node);
case Runtime::kInlineMathFloor:
return ReduceMathFloor(node);
case Runtime::kInlineMathSqrt:
return ReduceMathSqrt(node);
- case Runtime::kInlineOneByteSeqStringGetChar:
- return ReduceSeqStringGetChar(node, String::ONE_BYTE_ENCODING);
- case Runtime::kInlineOneByteSeqStringSetChar:
- return ReduceSeqStringSetChar(node, String::ONE_BYTE_ENCODING);
- case Runtime::kInlineStringGetLength:
- return ReduceStringGetLength(node);
- case Runtime::kInlineTwoByteSeqStringGetChar:
- return ReduceSeqStringGetChar(node, String::TWO_BYTE_ENCODING);
- case Runtime::kInlineTwoByteSeqStringSetChar:
- return ReduceSeqStringSetChar(node, String::TWO_BYTE_ENCODING);
- case Runtime::kInlineUnlikely:
- return ReduceUnLikely(node, BranchHint::kFalse);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
case Runtime::kInlineIsMinusZero:
@@ -90,16 +77,36 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceFixedArrayGet(node);
case Runtime::kInlineFixedArraySet:
return ReduceFixedArraySet(node);
- case Runtime::kInlineGetTypeFeedbackVector:
- return ReduceGetTypeFeedbackVector(node);
- case Runtime::kInlineGetCallerJSFunction:
- return ReduceGetCallerJSFunction(node);
+ case Runtime::kInlineRegExpConstructResult:
+ return ReduceRegExpConstructResult(node);
+ case Runtime::kInlineRegExpExec:
+ return ReduceRegExpExec(node);
+ case Runtime::kInlineRegExpFlags:
+ return ReduceRegExpFlags(node);
+ case Runtime::kInlineRegExpSource:
+ return ReduceRegExpSource(node);
+ case Runtime::kInlineSubString:
+ return ReduceSubString(node);
+ case Runtime::kInlineToInteger:
+ return ReduceToInteger(node);
+ case Runtime::kInlineToLength:
+ return ReduceToLength(node);
+ case Runtime::kInlineToName:
+ return ReduceToName(node);
+ case Runtime::kInlineToNumber:
+ return ReduceToNumber(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
- case Runtime::kInlineThrowNotDateError:
- return ReduceThrowNotDateError(node);
- case Runtime::kInlineCallFunction:
- return ReduceCallFunction(node);
+ case Runtime::kInlineToPrimitive:
+ return ReduceToPrimitive(node);
+ case Runtime::kInlineToString:
+ return ReduceToString(node);
+ case Runtime::kInlineCall:
+ return ReduceCall(node);
+ case Runtime::kInlineTailCall:
+ return ReduceTailCall(node);
+ case Runtime::kInlineGetSuperConstructor:
+ return ReduceGetSuperConstructor(node);
default:
break;
}
@@ -107,6 +114,16 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
}
+Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
+ Node* const value = NodeProperties::GetValueInput(node, 0);
+ Node* const done = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ return Change(node, javascript()->CreateIterResultObject(), value, done,
+ context, effect);
+}
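
%_CreateIterResultObject produces the plain { value, done } pair used by the iteration protocol, so the reduction only needs to swap in the dedicated JS operator. A stand-alone model of the resulting shape (the struct is purely illustrative):

#include <cassert>
#include <string>

// The shape the intrinsic produces: { value, done }.
template <typename T>
struct IterResult {
  T value;
  bool done;
};

template <typename T>
IterResult<T> CreateIterResultObject(T value, bool done) {
  return {value, done};
}

int main() {
  IterResult<std::string> step = CreateIterResultObject(std::string("x"), false);
  assert(!step.done && step.value == "x");
  return 0;
}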
+
+
Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
Node* high = NodeProperties::GetValueInput(node, 0);
Node* low = NodeProperties::GetValueInput(node, 1);
@@ -120,24 +137,6 @@ Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceDateField(Node* node) {
- Node* const value = NodeProperties::GetValueInput(node, 0);
- Node* const index = NodeProperties::GetValueInput(node, 1);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- NumberMatcher mindex(index);
- if (mindex.Is(JSDate::kDateValue)) {
- return Change(
- node,
- simplified()->LoadField(AccessBuilder::ForJSDateField(
- static_cast<JSDate::FieldIndex>(static_cast<int>(mindex.Value())))),
- value, effect, control);
- }
- // TODO(turbofan): Optimize more patterns.
- return NoChange();
-}
-
-
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
if (mode() != kDeoptimizationEnabled) return NoChange();
Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
@@ -146,7 +145,8 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
node->TrimInputCount(0);
@@ -165,15 +165,6 @@ Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceHeapObjectGetMap(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(AccessBuilder::ForMap()), value,
- effect, control);
-}
-
-
Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
if (!FLAG_native_code_counters) return ChangeToUndefined(node);
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
@@ -206,8 +197,6 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
// } else {
// return %_GetInstanceType(%_GetMap(value)) == instance_type;
// }
- MachineType const type = static_cast<MachineType>(kTypeBool | kRepTagged);
-
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -235,31 +224,108 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
ReplaceWithValue(node, node, ephi);
// Turn the {node} into a Phi.
- return Change(node, common()->Phi(type, 2), vtrue, vfalse, merge);
+ return Change(node, common()->Phi(MachineRepresentation::kTagged, 2), vtrue,
+ vfalse, merge);
}
-Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
- return Change(node, simplified()->ObjectIsSmi());
+Reduction JSIntrinsicLowering::ReduceIsFunction(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (value_type->Is(Type::Function())) {
+ value = jsgraph()->TrueConstant();
+ } else {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return FIRST_FUNCTION_TYPE <= %_GetInstanceType(%_GetMap(value))
+ // }
+ STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false),
+ effect, if_false);
+ Node* vfalse =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(FIRST_FUNCTION_TYPE), efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+ ReplaceWithValue(node, node, effect, control);
+ return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
+Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
- effect, control);
+ if (value_type->Is(Type::Receiver())) {
+ value = jsgraph()->TrueConstant();
+ } else if (!value_type->Maybe(Type::Receiver())) {
+ value = jsgraph()->FalseConstant();
+ } else {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return FIRST_JS_RECEIVER_TYPE <= %_GetInstanceType(%_GetMap(value))
+ // }
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false),
+ effect, if_false);
+ Node* vfalse = graph()->NewNode(
+ machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(FIRST_JS_RECEIVER_TYPE), efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+ ReplaceWithValue(node, node, effect, control);
+ return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceMapGetInstanceType(Node* node) {
+Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- return Change(node,
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- value, effect, control);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
+ effect, control);
}
@@ -279,77 +345,6 @@ Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceSeqStringGetChar(
- Node* node, String::Encoding encoding) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- RelaxControls(node);
- node->ReplaceInput(2, effect);
- node->ReplaceInput(3, control);
- node->TrimInputCount(4);
- NodeProperties::ChangeOp(
- node,
- simplified()->LoadElement(AccessBuilder::ForSeqStringChar(encoding)));
- return Changed(node);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceSeqStringSetChar(
- Node* node, String::Encoding encoding) {
- // Note: The intrinsic has a strange argument order, so we need to reshuffle.
- Node* index = NodeProperties::GetValueInput(node, 0);
- Node* chr = NodeProperties::GetValueInput(node, 1);
- Node* string = NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- ReplaceWithValue(node, string, node);
- NodeProperties::RemoveType(node);
- node->ReplaceInput(0, string);
- node->ReplaceInput(1, index);
- node->ReplaceInput(2, chr);
- node->ReplaceInput(3, effect);
- node->ReplaceInput(4, control);
- node->TrimInputCount(5);
- NodeProperties::ChangeOp(
- node,
- simplified()->StoreElement(AccessBuilder::ForSeqStringChar(encoding)));
- return Changed(node);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceStringGetLength(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(
- AccessBuilder::ForStringLength(graph()->zone())),
- value, effect, control);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceUnLikely(Node* node, BranchHint hint) {
- std::stack<Node*> nodes_to_visit;
- nodes_to_visit.push(node);
- while (!nodes_to_visit.empty()) {
- Node* current = nodes_to_visit.top();
- nodes_to_visit.pop();
- for (Node* use : current->uses()) {
- if (use->opcode() == IrOpcode::kJSToBoolean) {
- // We have to "look through" ToBoolean calls.
- nodes_to_visit.push(use);
- } else if (use->opcode() == IrOpcode::kBranch) {
- // Actually set the hint on any branch using the intrinsic node.
- NodeProperties::ChangeOp(use, common()->Branch(hint));
- }
- }
- }
- // Apart from adding hints to branchs nodes, this is the identity function.
- Node* value = NodeProperties::GetValueInput(node, 0);
- ReplaceWithValue(node, value);
- return Changed(value);
-}
-
-
Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;
@@ -360,7 +355,8 @@ Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// }
const Operator* const merge_op = common()->Merge(2);
const Operator* const ephi_op = common()->EffectPhi(2);
- const Operator* const phi_op = common()->Phi(kMachAnyTagged, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kTagged, 2);
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -473,83 +469,156 @@ Reduction JSIntrinsicLowering::ReduceFixedArraySet(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceGetTypeFeedbackVector(Node* node) {
- Node* func = node->InputAt(0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- FieldAccess access = AccessBuilder::ForJSFunctionSharedFunctionInfo();
- Node* load =
- graph()->NewNode(simplified()->LoadField(access), func, effect, control);
- access = AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector();
- return Change(node, simplified()->LoadField(access), load, load, control);
+Reduction JSIntrinsicLowering::ReduceRegExpConstructResult(Node* node) {
+ // TODO(bmeurer): Introduce JSCreateRegExpResult?
+ return Change(node, CodeFactory::RegExpConstructResult(isolate()), 0);
}
-Reduction JSIntrinsicLowering::ReduceGetCallerJSFunction(Node* node) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
+ return Change(node, CodeFactory::RegExpExec(isolate()), 4);
+}
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* outer_frame = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_frame->opcode() == IrOpcode::kFrameState) {
- // Use the runtime implementation to throw the appropriate error if the
- // containing function is inlined.
- return NoChange();
- }
- // TODO(danno): This implementation forces intrinsic lowering to happen after
- // inlining, which is fine for now, but eventually the frame-querying logic
- // probably should go later, e.g. in instruction selection, so that there is
- // no phase-ordering dependency.
- FieldAccess access = AccessBuilder::ForFrameCallerFramePtr();
- Node* fp = graph()->NewNode(machine()->LoadFramePointer());
- Node* next_fp =
- graph()->NewNode(simplified()->LoadField(access), fp, effect, control);
- return Change(node, simplified()->LoadField(AccessBuilder::ForFrameMarker()),
- next_fp, effect, control);
+Reduction JSIntrinsicLowering::ReduceRegExpFlags(Node* node) {
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSRegExpFlags());
+ return Change(node, op, receiver, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceThrowNotDateError(Node* node) {
- if (mode() != kDeoptimizationEnabled) return NoChange();
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 1);
+Reduction JSIntrinsicLowering::ReduceRegExpSource(Node* node) {
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSRegExpSource());
+ return Change(node, op, receiver, effect, control);
+}
- // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
+Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
+ return Change(node, CodeFactory::SubString(isolate()), 3);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToName(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToName());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
+ if (value_type->Max() <= 0.0) {
+ value = jsgraph()->ZeroConstant();
+ } else if (value_type->Min() >= kMaxSafeInteger) {
+ value = jsgraph()->Constant(kMaxSafeInteger);
+ } else {
+ if (value_type->Min() <= 0.0) {
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
+ jsgraph()->ZeroConstant()),
+ jsgraph()->ZeroConstant(), value);
+ value_type = Type::Range(0.0, value_type->Max(), graph()->zone());
+ NodeProperties::SetType(value, value_type);
+ }
+ if (value_type->Max() > kMaxSafeInteger) {
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->Constant(kMaxSafeInteger), value),
+ jsgraph()->Constant(kMaxSafeInteger), value);
+ value_type =
+ Type::Range(value_type->Min(), kMaxSafeInteger, graph()->zone());
+ NodeProperties::SetType(value, value_type);
+ }
+ }
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return Change(node, CodeFactory::ToLength(isolate()), 0);
+}
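
When the input is already typed as an integer, the graph built above clamps it inline to the valid length range [0, 2^53 - 1] rather than calling the ToLength stub. The arithmetic is ECMAScript's ToLength restricted to integral inputs; a direct sketch:

#include <cassert>
#include <cmath>

// ES ToLength for an integral double: clamp into [0, 2^53 - 1].
double ToLength(double value) {
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1.
  if (value <= 0.0) return 0.0;
  if (value >= kMaxSafeInteger) return kMaxSafeInteger;
  return value;
}

int main() {
  assert(ToLength(-5.0) == 0.0);
  assert(ToLength(42.0) == 42.0);
  assert(ToLength(std::pow(2.0, 60)) == 9007199254740991.0);
  return 0;
}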
+
+
Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToObject());
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceCallFunction(Node* node) {
- CallRuntimeParameters params = OpParameter<CallRuntimeParameters>(node->op());
- size_t arity = params.arity();
- Node* function = node->InputAt(static_cast<int>(arity - 1));
- while (--arity != 0) {
- node->ReplaceInput(static_cast<int>(arity),
- node->InputAt(static_cast<int>(arity - 1)));
+Reduction JSIntrinsicLowering::ReduceToPrimitive(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ if (value_type->Is(Type::Primitive())) {
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
- node->ReplaceInput(0, function);
+ return NoChange();
+}
+
+
+Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
+ size_t const arity = CallRuntimeParametersOf(node->op()).arity();
NodeProperties::ChangeOp(
- node,
- javascript()->CallFunction(params.arity(), NO_CALL_FUNCTION_FLAGS, STRICT,
- VectorSlotPair(), ALLOW_TAIL_CALLS));
+ node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow));
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
+ size_t const arity = CallRuntimeParametersOf(node->op()).arity();
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kAllow));
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
+ Node* active_function = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* active_function_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ active_function, effect, control);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ active_function_map, effect, control);
+}
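
%_GetSuperConstructor exploits the class-hierarchy layout: the superclass constructor is the prototype of the active function, so two dependent field loads (the function's map, then that map's prototype) answer the question without a runtime call. A tiny pointer-chasing model (the structs are illustrative, not V8's object layout):

#include <cassert>

struct Map;
struct HeapObject {
  Map* map;
};
struct Map {
  HeapObject* prototype;
};

// Mirrors the two LoadField operations in the reduction above.
HeapObject* GetSuperConstructor(HeapObject* active_function) {
  return active_function->map->prototype;
}

int main() {
  HeapObject super_constructor{nullptr};
  Map derived_map{&super_constructor};
  HeapObject derived_constructor{&derived_map};
  assert(GetSuperConstructor(&derived_constructor) == &super_constructor);
  return 0;
}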
+
+
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
@@ -592,9 +661,24 @@ Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
}
+Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
+ int stack_parameter_count) {
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), stack_parameter_count,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+}
+
+
Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
+Isolate* JSIntrinsicLowering::isolate() const { return jsgraph()->isolate(); }
+
+
CommonOperatorBuilder* JSIntrinsicLowering::common() const {
return jsgraph()->common();
}
@@ -608,6 +692,11 @@ MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
return jsgraph()->machine();
}
+
+SimplifiedOperatorBuilder* JSIntrinsicLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/js-intrinsic-lowering.h b/chromium/v8/src/compiler/js-intrinsic-lowering.h
index 15e9b4053e7..1977a5847d8 100644
--- a/chromium/v8/src/compiler/js-intrinsic-lowering.h
+++ b/chromium/v8/src/compiler/js-intrinsic-lowering.h
@@ -7,10 +7,15 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class Callable;
+class TypeCache;
+
+
namespace compiler {
// Forward declarations.
@@ -18,6 +23,7 @@ class CommonOperatorBuilder;
class JSOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
// Lowers certain JS-level runtime calls.
@@ -33,32 +39,38 @@ class JSIntrinsicLowering final : public AdvancedReducer {
private:
Reduction ReduceConstructDouble(Node* node);
- Reduction ReduceDateField(Node* node);
+ Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceDoubleHi(Node* node);
Reduction ReduceDoubleLo(Node* node);
- Reduction ReduceHeapObjectGetMap(Node* node);
Reduction ReduceIncrementStatsCounter(Node* node);
Reduction ReduceIsMinusZero(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
+ Reduction ReduceIsFunction(Node* node);
+ Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
Reduction ReduceJSValueGetValue(Node* node);
- Reduction ReduceMapGetInstanceType(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathSqrt(Node* node);
- Reduction ReduceSeqStringGetChar(Node* node, String::Encoding encoding);
- Reduction ReduceSeqStringSetChar(Node* node, String::Encoding encoding);
- Reduction ReduceStringGetLength(Node* node);
- Reduction ReduceUnLikely(Node* node, BranchHint hint);
Reduction ReduceValueOf(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
- Reduction ReduceGetTypeFeedbackVector(Node* node);
- Reduction ReduceGetCallerJSFunction(Node* node);
- Reduction ReduceThrowNotDateError(Node* node);
+ Reduction ReduceRegExpConstructResult(Node* node);
+ Reduction ReduceRegExpExec(Node* node);
+ Reduction ReduceRegExpFlags(Node* node);
+ Reduction ReduceRegExpSource(Node* node);
+ Reduction ReduceSubString(Node* node);
+ Reduction ReduceToInteger(Node* node);
+ Reduction ReduceToLength(Node* node);
+ Reduction ReduceToName(Node* node);
+ Reduction ReduceToNumber(Node* node);
Reduction ReduceToObject(Node* node);
- Reduction ReduceCallFunction(Node* node);
+ Reduction ReduceToPrimitive(Node* node);
+ Reduction ReduceToString(Node* node);
+ Reduction ReduceCall(Node* node);
+ Reduction ReduceTailCall(Node* node);
+ Reduction ReduceGetSuperConstructor(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
@@ -66,18 +78,22 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
Node* d);
Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
+ Reduction Change(Node* node, Callable const& callable,
+ int stack_parameter_count);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
MachineOperatorBuilder* machine() const;
+ SimplifiedOperatorBuilder* simplified() const;
DeoptimizationMode mode() const { return mode_; }
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ TypeCache const& type_cache() const { return type_cache_; }
JSGraph* const jsgraph_;
DeoptimizationMode const mode_;
- SimplifiedOperatorBuilder simplified_;
+ TypeCache const& type_cache_;
};
} // namespace compiler
diff --git a/chromium/v8/src/compiler/js-native-context-specialization.cc b/chromium/v8/src/compiler/js-native-context-specialization.cc
new file mode 100644
index 00000000000..06cf770f332
--- /dev/null
+++ b/chromium/v8/src/compiler/js-native-context-specialization.cc
@@ -0,0 +1,1033 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-native-context-specialization.h"
+
+#include "src/accessors.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/field-index-inl.h"
+#include "src/isolate-inl.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-cache.h"
+#include "src/type-feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSNativeContextSpecialization::JSNativeContextSpecialization(
+ Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context),
+ dependencies_(dependencies),
+ zone_(zone),
+ type_cache_(TypeCache::Get()) {}
+
+
+Reduction JSNativeContextSpecialization::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSStoreNamed:
+ return ReduceJSStoreNamed(node);
+ case IrOpcode::kJSLoadProperty:
+ return ReduceJSLoadProperty(node);
+ case IrOpcode::kJSStoreProperty:
+ return ReduceJSStoreProperty(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+ Node* node, Node* value, MapHandleList const& receiver_maps,
+ Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
+ Node* index) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamed ||
+ node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+ // Compute property access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputePropertyAccessInfos(
+ receiver_maps, name, access_mode, &access_infos)) {
+ return NoChange();
+ }
+
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) return NoChange();
+
+ // The final states for every polymorphic branch. We join them with
+  // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // The list of "exiting" controls, which currently go to a single deoptimize.
+ // TODO(bmeurer): Consider using an IC as fallback.
+ Node* const exit_effect = effect;
+ ZoneVector<Node*> exit_controls(zone());
+
+ // Ensure that {index} matches the specified {name} (if {index} is given).
+ if (index != nullptr) {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
+ index, jsgraph()->HeapConstant(name));
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+
+ // Ensure that {receiver} is a heap object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ Node* receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* receiverissmi_effect = effect;
+
+ // Load the {receiver} map. The resulting effect is the dominating effect for
+ // all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different property access patterns.
+ Node* fallthrough_control = control;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ Node* this_value = value;
+ Node* this_receiver = receiver;
+ Node* this_effect = effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ Type* receiver_type = access_info.receiver_type();
+ if (receiver_type->Is(Type::String())) {
+ // Emit an instance type check for strings.
+ Node* receiver_instance_type = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, this_effect, fallthrough_control);
+ Node* check =
+ graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ } else {
+ // Emit a (sequence of) map checks for other {receiver}s.
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ for (auto i = access_info.receiver_type()->Classes(); !i.Done();
+ i.Advance()) {
+ Handle<Map> map = i.Current();
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
+ receiver_map, jsgraph()->Constant(map));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(this_effect);
+ }
+
+ // The Number case requires special treatment to also deal with Smis.
+ if (receiver_type->Is(Type::Number())) {
+ // Join this check with the "receiver is smi" check above, and mark the
+ // "receiver is smi" check as "consumed" so that we don't deoptimize if
+ // the {receiver} is actually a Smi.
+ if (receiverissmi_control != nullptr) {
+ this_controls.push_back(receiverissmi_control);
+ this_effects.push_back(receiverissmi_effect);
+ receiverissmi_control = receiverissmi_effect = nullptr;
+ }
+ }
+
+ // Create dominating Merge+EffectPhi for this {receiver} type.
+ int const this_control_count = static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ int const this_effect_count = static_cast<int>(this_effects.size());
+ this_effect =
+ (this_control_count == 1)
+ ? this_effects.front()
+ : graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_effect_count, &this_effects.front());
+ }
+
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(receiver_type, native_context, holder);
+ }
+
+ // Generate the actual property access.
+ if (access_info.IsNotFound()) {
+ DCHECK_EQ(AccessMode::kLoad, access_mode);
+ if (is_strong(language_mode)) {
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try
+ // blocks rewiring the IfException edge to a runtime call/throw.
+ exit_controls.push_back(this_control);
+ continue;
+ } else {
+ this_value = jsgraph()->UndefinedConstant();
+ }
+ } else if (access_info.IsDataConstant()) {
+ this_value = jsgraph()->Constant(access_info.constant());
+ if (access_mode == AccessMode::kStore) {
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+ } else {
+ DCHECK(access_info.IsDataField());
+ FieldIndex const field_index = access_info.field_index();
+ FieldCheck const field_check = access_info.field_check();
+ Type* const field_type = access_info.field_type();
+ switch (field_check) {
+ case FieldCheck::kNone:
+ break;
+ case FieldCheck::kJSArrayBufferViewBufferNotNeutered: {
+ Node* this_buffer = this_effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ this_receiver, this_effect, this_control);
+ Node* this_buffer_bit_field = this_effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferBitField()),
+ this_buffer, this_effect, this_control);
+ Node* check = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), this_buffer_bit_field,
+ jsgraph()->Int32Constant(
+ 1 << JSArrayBuffer::WasNeutered::kShift)),
+ jsgraph()->Int32Constant(0));
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ break;
+ }
+ }
+ if (access_mode == AccessMode::kLoad &&
+ access_info.holder().ToHandle(&holder)) {
+ this_receiver = jsgraph()->Constant(holder);
+ }
+ Node* this_storage = this_receiver;
+ if (!field_index.is_inobject()) {
+ this_storage = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+ this_storage, this_effect, this_control);
+ }
+ FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
+ field_type, MachineType::AnyTagged()};
+ if (access_mode == AccessMode::kLoad) {
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ this_storage = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ }
+ field_access.machine_type = MachineType::Float64();
+ }
+ this_value = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value, this_control);
+
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ Callable callable =
+ CodeFactory::AllocateMutableHeapNumber(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ Node* this_box = this_effect = graph()->NewNode(
+ common()->Call(desc),
+ jsgraph()->HeapConstant(callable.code()),
+ jsgraph()->NoContextConstant(), this_effect, this_control);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ this_box, this_value, this_effect, this_control);
+ this_value = this_box;
+
+ field_access.type = Type::TaggedPointer();
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ this_storage = this_effect =
+ graph()->NewNode(simplified()->LoadField(field_access),
+ this_storage, this_effect, this_control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
+ }
+ } else {
+ // Unboxed double field, we store directly to the field.
+ field_access.machine_type = MachineType::Float64();
+ }
+ } else if (field_type->Is(Type::TaggedSigned())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value, this_control);
+ } else if (field_type->Is(Type::TaggedPointer())) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ if (field_type->NumClasses() > 0) {
+ // Emit a (sequence of) map checks for the value.
+ ZoneVector<Node*> this_controls(zone());
+ Node* this_value_map = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), this_value,
+ this_effect, this_control);
+ for (auto i = field_type->Classes(); !i.Done(); i.Advance()) {
+ Handle<Map> field_map(i.Current());
+ check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Internal()),
+ this_value_map, jsgraph()->Constant(field_map));
+ branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(
+ graph()->NewNode(common()->IfTrue(), branch));
+ }
+ exit_controls.push_back(this_control);
+ int const this_control_count =
+ static_cast<int>(this_controls.size());
+ this_control =
+ (this_control_count == 1)
+ ? this_controls.front()
+ : graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count,
+ &this_controls.front());
+ }
+ } else {
+ DCHECK(field_type->Is(Type::Tagged()));
+ }
+ Handle<Map> transition_map;
+ if (access_info.transition_map().ToHandle(&transition_map)) {
+ this_effect = graph()->NewNode(common()->BeginRegion(), this_effect);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
+ jsgraph()->Constant(transition_map), this_effect, this_control);
+ }
+ this_effect = graph()->NewNode(simplified()->StoreField(field_access),
+ this_storage, this_value, this_effect,
+ this_control);
+ if (access_info.HasTransitionMap()) {
+ this_effect =
+ graph()->NewNode(common()->FinishRegion(),
+ jsgraph()->UndefinedConstant(), this_effect);
+ }
+ }
+ }
+
+ // Remember the final state for this property access.
+ values.push_back(this_value);
+ effects.push_back(this_effect);
+ controls.push_back(this_control);
+ }
+
+ // Collect the fallthrough control as final "exit" control.
+ if (fallthrough_control != control) {
+ // Mark the last fallthrough branch as deferred.
+ MarkAsDeferred(fallthrough_control);
+ }
+ exit_controls.push_back(fallthrough_control);
+
+ // Also collect the "receiver is smi" control if we didn't handle the case of
+ // Number primitives in the polymorphic branches above.
+ if (receiverissmi_control != nullptr) {
+ // Mark the "receiver is smi" case as deferred.
+ MarkAsDeferred(receiverissmi_control);
+ DCHECK_EQ(exit_effect, receiverissmi_effect);
+ exit_controls.push_back(receiverissmi_control);
+ }
+
+  // Generate the single "exit" point, where we end up if either all
+  // map/instance type checks failed, or one of the assumptions inside one of
+  // the cases fails (i.e. a failing prototype chain check).
+ // TODO(bmeurer): Consider falling back to IC here if deoptimization is
+ // disabled.
+ int const exit_control_count = static_cast<int>(exit_controls.size());
+ Node* exit_control =
+ (exit_control_count == 1)
+ ? exit_controls.front()
+ : graph()->NewNode(common()->Merge(exit_control_count),
+ exit_control_count, &exit_controls.front());
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, exit_effect, exit_control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
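
Stripped of effect/control bookkeeping, ReduceNamedAccess builds a chain of guarded map checks whose surviving paths merge at a single Phi, while every failed check funnels into one shared Deoptimize exit. A schematic of that dispatch with maps and handlers reduced to plain values and functions (all names are illustrative):

#include <cassert>
#include <functional>
#include <stdexcept>
#include <vector>

struct PolymorphicCase {
  int map;                          // Stand-in for an expected receiver map.
  std::function<int(int)> handler;  // Stand-in for the specialized access.
};

// One map check per case; any mismatch falls through to the shared "deopt".
int DispatchNamedAccess(int receiver_map, int receiver,
                        const std::vector<PolymorphicCase>& cases) {
  for (const PolymorphicCase& c : cases) {
    if (receiver_map == c.map) return c.handler(receiver);  // Merged value.
  }
  throw std::runtime_error("deoptimize");  // Single shared exit.
}

int main() {
  std::vector<PolymorphicCase> cases = {
      {1, [](int receiver) { return receiver + 10; }},
      {2, [](int receiver) { return receiver + 20; }},
  };
  assert(DispatchNamedAccess(2, 5, cases) == 25);
  bool deopted = false;
  try {
    DispatchNamedAccess(3, 5, cases);
  } catch (const std::runtime_error&) {
    deopted = true;
  }
  assert(deopted);
  return 0;
}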
+
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const value = jsgraph()->Dead();
+
+ // Extract receiver maps from the LOAD_IC using the LoadICNexus.
+ MapHandleList receiver_maps;
+ if (!p.feedback().IsValid()) return NoChange();
+ LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, p.name(),
+ AccessMode::kLoad, p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
+ NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const value = NodeProperties::GetValueInput(node, 1);
+
+ // Extract receiver maps from the STORE_IC using the StoreICNexus.
+ MapHandleList receiver_maps;
+ if (!p.feedback().IsValid()) return NoChange();
+ StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, p.name(),
+ AccessMode::kStore, p.language_mode());
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceElementAccess(
+ Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
+ AccessMode access_mode, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // TODO(bmeurer): Add support for non-standard stores.
+ if (store_mode != STANDARD_STORE) return NoChange();
+
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+ // Compute element access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
+ ZoneVector<ElementAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputeElementAccessInfos(receiver_maps, access_mode,
+ &access_infos)) {
+ return NoChange();
+ }
+
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) return NoChange();
+
+ // The final states for every polymorphic branch. We join them with
+ // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // The list of "exiting" controls, which currently go to a single deoptimize.
+ // TODO(bmeurer): Consider using an IC as fallback.
+ Node* const exit_effect = effect;
+ ZoneVector<Node*> exit_controls(zone());
+
+ // Ensure that {receiver} is a heap object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ control = graph()->NewNode(common()->IfFalse(), branch);
+
+ // Load the {receiver} map. The resulting effect is the dominating effect for
+ // all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different element access patterns.
+ Node* fallthrough_control = control;
+ for (ElementAccessInfo const& access_info : access_infos) {
+ Node* this_receiver = receiver;
+ Node* this_value = value;
+ Node* this_index = index;
+ Node* this_effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ Type* receiver_type = access_info.receiver_type();
+ bool receiver_is_jsarray = true;
+ {
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ for (auto i = access_info.receiver_type()->Classes(); !i.Done();
+ i.Advance()) {
+ Handle<Map> map = i.Current();
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
+ receiver_map, jsgraph()->Constant(map));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+ this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(effect);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
+ }
+
+ // Generate possible elements kind transitions.
+ for (auto transition : access_info.transitions()) {
+ Handle<Map> transition_source = transition.first;
+ Handle<Map> transition_target = transition.second;
+
+ // Check if {receiver} has the specified {transition_source} map.
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), receiver_map,
+ jsgraph()->HeapConstant(transition_source));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+
+ // Migrate {receiver} from {transition_source} to {transition_target}.
+ Node* transition_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* transition_effect = effect;
+ if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
+ transition_target->elements_kind())) {
+ // In-place migration, just store the {transition_target} map.
+ transition_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+ jsgraph()->HeapConstant(transition_target), transition_effect,
+ transition_control);
+ } else {
+ // Instance migration, let the stub deal with the {receiver}.
+ TransitionElementsKindStub stub(isolate(),
+ transition_source->elements_kind(),
+ transition_target->elements_kind(),
+ transition_source->IsJSArrayMap());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ transition_effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(stub.GetCode()),
+ receiver, jsgraph()->HeapConstant(transition_target), context,
+ frame_state, transition_effect, transition_control);
+ }
+ this_controls.push_back(transition_control);
+ this_effects.push_back(transition_effect);
+
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ }
+
+ // Create single chokepoint for the control.
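+      // Note that an EffectPhi takes its Merge as an additional control
+      // input, hence the this_control_count + 1 inputs below.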
+ int const this_control_count = static_cast<int>(this_controls.size());
+ if (this_control_count == 1) {
+ this_control = this_controls.front();
+ this_effect = this_effects.front();
+ } else {
+ this_control =
+ graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ this_effect =
+ graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_control_count + 1, &this_effects.front());
+ }
+ }
+
+ // Certain stores need a prototype chain check because shape changes
+ // could allow callbacks on elements in the prototype chain that are
+ // not compatible with (monomorphic) keyed stores.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(receiver_type, native_context, holder);
+ }
+
+ // Check that the {index} is actually a Number.
+ if (!NumberMatcher(this_index).HasValue()) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
+ this_control);
+ }
+
+ // Convert the {index} to an unsigned32 value and check if the result is
+ // equal to the original {index}.
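+    // This rules out negative, fractional and out-of-range indices (e.g. -1
+    // or 1.5), which do not round-trip through NumberToUint32.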
+ if (!NumberMatcher(this_index).IsInRange(0.0, kMaxUInt32)) {
+ Node* this_index32 =
+ graph()->NewNode(simplified()->NumberToUint32(), this_index);
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
+ this_index);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_index = this_index32;
+ }
+
+ // TODO(bmeurer): We currently specialize based on elements kind. We should
+ // also be able to properly support strings and other JSObjects here.
+ ElementsKind elements_kind = access_info.elements_kind();
+
+ // Load the elements for the {receiver}.
+ Node* this_elements = this_effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ this_receiver, this_effect, this_control);
+
+ // Don't try to store to a copy-on-write backing store.
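+    // COW backing stores use the fixed_cow_array_map, so the check against
+    // the plain fixed_array_map below sends them to the deoptimization exit.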
+ if (access_mode == AccessMode::kStore &&
+ IsFastSmiOrObjectElementsKind(elements_kind)) {
+ Node* this_elements_map = this_effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ this_elements, this_effect, this_control);
+      Node* check = graph()->NewNode(
+          simplified()->ReferenceEqual(Type::Any()), this_elements_map,
+          jsgraph()->HeapConstant(factory()->fixed_array_map()));
+      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                      check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+
+ // Load the length of the {receiver}.
+ Node* this_length = this_effect =
+ receiver_is_jsarray
+ ? graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ this_receiver, this_effect, this_control)
+ : graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ this_elements, this_effect, this_control);
+
+ // Check that the {index} is in the valid range for the {receiver}.
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
+ this_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
+ this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Compute the element access.
+ Type* element_type = Type::Any();
+ MachineType element_machine_type = MachineType::AnyTagged();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ element_type = type_cache_.kFloat64;
+ element_machine_type = MachineType::Float64();
+ } else if (IsFastSmiElementsKind(elements_kind)) {
+ element_type = type_cache_.kSmi;
+ }
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type};
+
+ // Access the actual element.
+ // TODO(bmeurer): Refactor this into separate methods or even a separate
+ // class that deals with the elements access.
+ if (access_mode == AccessMode::kLoad) {
+ // Compute the real element access type, which includes the hole in case
+ // of holey backing stores.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ element_access.type = Type::Union(
+ element_type,
+ Type::Constant(factory()->the_hole_value(), graph()->zone()),
+ graph()->zone());
+ }
+ // Perform the actual backing store access.
+ this_value = this_effect = graph()->NewNode(
+ simplified()->LoadElement(element_access), this_elements, this_index,
+ this_effect, this_control);
+ // Handle loading from holey backing stores correctly, by either mapping
+ // the hole to undefined if possible, or deoptimizing otherwise.
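+      // For example, loading index 1 from the holey array literal [0, , 2]
+      // reads the_hole from the backing store.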
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ // Perform the hole check on the result.
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
+ this_value, jsgraph()->TheHoleConstant());
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ // Check if we are allowed to turn the hole into undefined.
+ Type* initial_holey_array_type = Type::Class(
+ handle(isolate()->get_initial_js_array_map(elements_kind)),
+ graph()->zone());
+ if (receiver_type->NowIs(initial_holey_array_type) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ // Add a code dependency on the array protector cell.
+ AssumePrototypesStable(receiver_type, native_context,
+ isolate()->initial_object_prototype());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ // Turn the hole into undefined.
+ this_control =
+ graph()->NewNode(common()->Merge(2), if_true, if_false);
+ this_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->UndefinedConstant(), this_value, this_control);
+ element_type =
+ Type::Union(element_type, Type::Undefined(), graph()->zone());
+ } else {
+ // Deoptimize in case of the hole.
+ exit_controls.push_back(if_true);
+ this_control = if_false;
+ }
+ // Rename the result to represent the actual type (not polluted by the
+ // hole).
+ this_value = graph()->NewNode(common()->Guard(element_type), this_value,
+ this_control);
+ } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ Node* check =
+ graph()->NewNode(simplified()->NumberIsHoleNaN(), this_value);
+ // Check if we are allowed to return the hole directly.
+ Type* initial_holey_array_type = Type::Class(
+ handle(isolate()->get_initial_js_array_map(elements_kind)),
+ graph()->zone());
+ if (receiver_type->NowIs(initial_holey_array_type) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ // Add a code dependency on the array protector cell.
+ AssumePrototypesStable(receiver_type, native_context,
+ isolate()->initial_object_prototype());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ // Turn the hole into undefined.
+ this_value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged,
+ BranchHint::kFalse),
+ check, jsgraph()->UndefinedConstant(), this_value);
+ } else {
+ // Deoptimize in case of the hole.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ }
+ }
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (IsFastSmiElementsKind(elements_kind)) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value, this_control);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ Node* check =
+ graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
+ this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value, this_control);
+ }
+ this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
+ this_elements, this_index, this_value,
+ this_effect, this_control);
+ }
+
+ // Remember the final state for this element access.
+ values.push_back(this_value);
+ effects.push_back(this_effect);
+ controls.push_back(this_control);
+ }
+
+ // Collect the fallthrough control as final "exit" control.
+ if (fallthrough_control != control) {
+ // Mark the last fallthrough branch as deferred.
+ MarkAsDeferred(fallthrough_control);
+ }
+ exit_controls.push_back(fallthrough_control);
+
+ // Generate the single "exit" point, where we get if either all map/instance
+ // type checks failed, or one of the assumptions inside one of the cases
+ // failes (i.e. failing prototype chain check).
+ // TODO(bmeurer): Consider falling back to IC here if deoptimization is
+ // disabled.
+ int const exit_control_count = static_cast<int>(exit_controls.size());
+ Node* exit_control =
+ (exit_control_count == 1)
+ ? exit_controls.front()
+ : graph()->NewNode(common()->Merge(exit_control_count),
+ exit_control_count, &exit_controls.front());
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, exit_effect, exit_control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
+ Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
+ AccessMode access_mode, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty);
+
+ // Extract receiver maps from the {nexus}.
+ MapHandleList receiver_maps;
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Optimize access for constant {index}.
+ HeapObjectMatcher mindex(index);
+ if (mindex.HasValue() && mindex.Value()->IsPrimitive()) {
+    // Keyed access requires a ToPropertyKey on the {index} before looking up
+    // the property on the object (see ES6 section 12.3.2.1). We can only do
+    // this for non-observable ToPropertyKey invocations, so we limit the
+    // constant indices to primitives at this point.
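+    // For example, o[1] and o["1"] denote the same property, so a constant
+    // string index "1" is canonicalized to the array index 1 below.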
+ Handle<Name> name;
+ if (Object::ToName(isolate(), mindex.Value()).ToHandle(&name)) {
+ uint32_t array_index;
+ if (name->AsArrayIndex(&array_index)) {
+ // Use the constant array index.
+ index = jsgraph()->Constant(static_cast<double>(array_index));
+ } else {
+ name = factory()->InternalizeName(name);
+ return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
+ language_mode);
+ }
+ }
+ }
+
+ // Check if we have feedback for a named access.
+ if (Name* name = nexus.FindFirstName()) {
+ return ReduceNamedAccess(node, value, receiver_maps,
+ handle(name, isolate()), access_mode,
+ language_mode, index);
+ }
+
+ // Try to lower the element access based on the {receiver_maps}.
+ return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
+ language_mode, store_mode);
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = jsgraph()->Dead();
+
+ // Extract receiver maps from the KEYED_LOAD_IC using the KeyedLoadICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ KeyedLoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kLoad,
+ p.language_mode(), STANDARD_STORE);
+}
+
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
+ PropertyAccess const& p = PropertyAccessOf(node->op());
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = NodeProperties::GetValueInput(node, 2);
+
+ // Extract receiver maps from the KEYED_STORE_IC using the KeyedStoreICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Extract the keyed access store mode from the KEYED_STORE_IC.
+ KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
+
+ // Try to lower the keyed access based on the {nexus}.
+ return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kStore,
+ p.language_mode(), store_mode);
+}
+
+
+void JSNativeContextSpecialization::AssumePrototypesStable(
+ Type* receiver_type, Handle<Context> native_context,
+ Handle<JSObject> holder) {
+ // Determine actual holder and perform prototype chain checks.
+ for (auto i = receiver_type->Classes(); !i.Done(); i.Advance()) {
+ Handle<Map> map = i.Current();
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context)
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), isolate());
+ }
+ dependencies()->AssumePrototypeMapsStable(map, holder);
+ }
+}
+
+
+void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
+ Node* branch = NodeProperties::GetControlInput(if_projection);
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
+ if (if_projection->opcode() == IrOpcode::kIfTrue) {
+ NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kFalse));
+ } else {
+ DCHECK_EQ(IrOpcode::kIfFalse, if_projection->opcode());
+ NodeProperties::ChangeOp(branch, common()->Branch(BranchHint::kTrue));
+ }
+}
+
+
+MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context());
+}
+
+
+Graph* JSNativeContextSpecialization::graph() const {
+ return jsgraph()->graph();
+}
+
+
+Isolate* JSNativeContextSpecialization::isolate() const {
+ return jsgraph()->isolate();
+}
+
+
+Factory* JSNativeContextSpecialization::factory() const {
+ return isolate()->factory();
+}
+
+
+MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
+ return jsgraph()->machine();
+}
+
+
+CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSNativeContextSpecialization::javascript() const {
+ return jsgraph()->javascript();
+}
+
+
+SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
+ return jsgraph()->simplified();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/js-native-context-specialization.h b/chromium/v8/src/compiler/js-native-context-specialization.h
new file mode 100644
index 00000000000..45ff87f6197
--- /dev/null
+++ b/chromium/v8/src/compiler/js-native-context-specialization.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+class FeedbackNexus;
+class TypeCache;
+
+
+namespace compiler {
+
+// Forward declarations.
+enum class AccessMode;
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Specializes a given JSGraph to a given native context, potentially constant
+// folding some {LoadGlobal} nodes or strength reducing some {StoreGlobal}
+// nodes. It also specializes {LoadNamed} and {StoreNamed} nodes according
+// to type feedback (if available).
+class JSNativeContextSpecialization final : public AdvancedReducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context,
+ CompilationDependencies* dependencies,
+ Zone* zone);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSStoreNamed(Node* node);
+ Reduction ReduceJSLoadProperty(Node* node);
+ Reduction ReduceJSStoreProperty(Node* node);
+
+ Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
+ MapHandleList const& receiver_maps,
+ AccessMode access_mode,
+ LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
+ Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
+ FeedbackNexus const& nexus,
+ AccessMode access_mode,
+ LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
+ Reduction ReduceNamedAccess(Node* node, Node* value,
+ MapHandleList const& receiver_maps,
+ Handle<Name> name, AccessMode access_mode,
+ LanguageMode language_mode,
+ Node* index = nullptr);
+
+ // Adds stability dependencies on all prototypes of every class in
+ // {receiver_type} up to (and including) the {holder}.
+ void AssumePrototypesStable(Type* receiver_type,
+ Handle<Context> native_context,
+ Handle<JSObject> holder);
+
+ // Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
+ // the dominating Branch that {if_projection} is the unlikely (deferred) case.
+ void MarkAsDeferred(Node* if_projection);
+
+ // Retrieve the native context from the given {node} if known.
+ MaybeHandle<Context> GetNativeContext(Node* node);
+
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ Factory* factory() const;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ MachineOperatorBuilder* machine() const;
+ Flags flags() const { return flags_; }
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> native_context_;
+ CompilationDependencies* const dependencies_;
+ Zone* const zone_;
+ TypeCache const& type_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSNativeContextSpecialization::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
diff --git a/chromium/v8/src/compiler/js-operator.cc b/chromium/v8/src/compiler/js-operator.cc
index 37369f69707..1455f0a9a98 100644
--- a/chromium/v8/src/compiler/js-operator.cc
+++ b/chromium/v8/src/compiler/js-operator.cc
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-VectorSlotPair::VectorSlotPair() : slot_(FeedbackVectorICSlot::Invalid()) {}
+VectorSlotPair::VectorSlotPair() {}
int VectorSlotPair::index() const {
@@ -40,11 +40,106 @@ size_t hash_value(VectorSlotPair const& p) {
}
-std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
- os << p.arity() << ", " << p.flags() << ", " << p.language_mode();
- if (p.AllowTailCalls()) {
- os << ", ALLOW_TAIL_CALLS";
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSConvertReceiver, op->opcode());
+ return OpParameter<ConvertReceiverMode>(op);
+}
+
+
+ToBooleanHints ToBooleanHintsOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSToBoolean, op->opcode());
+ return OpParameter<ToBooleanHints>(op);
+}
+
+
+size_t hash_value(TailCallMode mode) {
+ return base::hash_value(static_cast<unsigned>(mode));
+}
+
+
+std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
+ switch (mode) {
+ case TailCallMode::kAllow:
+ return os << "ALLOW_TAIL_CALLS";
+ case TailCallMode::kDisallow:
+ return os << "DISALLOW_TAIL_CALLS";
}
+ UNREACHABLE();
+ return os;
+}
+
+
+bool operator==(BinaryOperationParameters const& lhs,
+ BinaryOperationParameters const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.hints() == rhs.hints();
+}
+
+
+bool operator!=(BinaryOperationParameters const& lhs,
+ BinaryOperationParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(BinaryOperationParameters const& p) {
+ return base::hash_combine(p.language_mode(), p.hints());
+}
+
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationParameters const& p) {
+ return os << p.language_mode() << ", " << p.hints();
+}
+
+
+BinaryOperationParameters const& BinaryOperationParametersOf(
+ Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
+ op->opcode() == IrOpcode::kJSBitwiseXor ||
+ op->opcode() == IrOpcode::kJSBitwiseAnd ||
+ op->opcode() == IrOpcode::kJSShiftLeft ||
+ op->opcode() == IrOpcode::kJSShiftRight ||
+ op->opcode() == IrOpcode::kJSShiftRightLogical ||
+ op->opcode() == IrOpcode::kJSAdd ||
+ op->opcode() == IrOpcode::kJSSubtract ||
+ op->opcode() == IrOpcode::kJSMultiply ||
+ op->opcode() == IrOpcode::kJSDivide ||
+ op->opcode() == IrOpcode::kJSModulus);
+ return OpParameter<BinaryOperationParameters>(op);
+}
+
+
+bool operator==(CallConstructParameters const& lhs,
+ CallConstructParameters const& rhs) {
+ return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(CallConstructParameters const& lhs,
+ CallConstructParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CallConstructParameters const& p) {
+ return base::hash_combine(p.arity(), p.feedback());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
+ return os << p.arity();
+}
+
+
+CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, op->opcode());
+ return OpParameter<CallConstructParameters>(op);
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
+ os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
+ << ", " << p.tail_call_mode();
return os;
}
@@ -121,156 +216,101 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
}
-DynamicGlobalAccess::DynamicGlobalAccess(const Handle<String>& name,
- uint32_t check_bitset,
- const VectorSlotPair& feedback,
- TypeofMode typeof_mode)
- : name_(name),
- check_bitset_(check_bitset),
- feedback_(feedback),
- typeof_mode_(typeof_mode) {
- DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
-}
-
-
-bool operator==(DynamicGlobalAccess const& lhs,
- DynamicGlobalAccess const& rhs) {
- UNIMPLEMENTED();
- return true;
-}
-
-
-bool operator!=(DynamicGlobalAccess const& lhs,
- DynamicGlobalAccess const& rhs) {
- return !(lhs == rhs);
-}
+DynamicAccess::DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode)
+ : name_(name), typeof_mode_(typeof_mode) {}
-size_t hash_value(DynamicGlobalAccess const& access) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-std::ostream& operator<<(std::ostream& os, DynamicGlobalAccess const& access) {
- return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
- << access.typeof_mode();
-}
-
-
-DynamicGlobalAccess const& DynamicGlobalAccessOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicGlobal, op->opcode());
- return OpParameter<DynamicGlobalAccess>(op);
-}
-
-
-DynamicContextAccess::DynamicContextAccess(const Handle<String>& name,
- uint32_t check_bitset,
- const ContextAccess& context_access)
- : name_(name),
- check_bitset_(check_bitset),
- context_access_(context_access) {
- DCHECK(check_bitset == kFullCheckRequired || check_bitset < 0x80000000U);
-}
-
-
-bool operator==(DynamicContextAccess const& lhs,
- DynamicContextAccess const& rhs) {
+bool operator==(DynamicAccess const& lhs, DynamicAccess const& rhs) {
UNIMPLEMENTED();
return true;
}
-bool operator!=(DynamicContextAccess const& lhs,
- DynamicContextAccess const& rhs) {
+bool operator!=(DynamicAccess const& lhs, DynamicAccess const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(DynamicContextAccess const& access) {
+size_t hash_value(DynamicAccess const& access) {
UNIMPLEMENTED();
return 0;
}
-std::ostream& operator<<(std::ostream& os, DynamicContextAccess const& access) {
- return os << Brief(*access.name()) << ", " << access.check_bitset() << ", "
- << access.context_access();
+std::ostream& operator<<(std::ostream& os, DynamicAccess const& access) {
+ return os << Brief(*access.name()) << ", " << access.typeof_mode();
}
-DynamicContextAccess const& DynamicContextAccessOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicContext, op->opcode());
- return OpParameter<DynamicContextAccess>(op);
+DynamicAccess const& DynamicAccessOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSLoadDynamic, op->opcode());
+ return OpParameter<DynamicAccess>(op);
}
-bool operator==(LoadNamedParameters const& lhs,
- LoadNamedParameters const& rhs) {
+bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
-bool operator!=(LoadNamedParameters const& lhs,
- LoadNamedParameters const& rhs) {
+bool operator!=(NamedAccess const& lhs, NamedAccess const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(LoadNamedParameters const& p) {
+size_t hash_value(NamedAccess const& p) {
return base::hash_combine(p.name().location(), p.language_mode(),
p.feedback());
}
-std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
+std::ostream& operator<<(std::ostream& os, NamedAccess const& p) {
return os << Brief(*p.name()) << ", " << p.language_mode();
}
-std::ostream& operator<<(std::ostream& os, LoadPropertyParameters const& p) {
+NamedAccess const& NamedAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+ op->opcode() == IrOpcode::kJSStoreNamed);
+ return OpParameter<NamedAccess>(op);
+}
+
+
+std::ostream& operator<<(std::ostream& os, PropertyAccess const& p) {
return os << p.language_mode();
}
-bool operator==(LoadPropertyParameters const& lhs,
- LoadPropertyParameters const& rhs) {
+bool operator==(PropertyAccess const& lhs, PropertyAccess const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
-bool operator!=(LoadPropertyParameters const& lhs,
- LoadPropertyParameters const& rhs) {
+bool operator!=(PropertyAccess const& lhs, PropertyAccess const& rhs) {
return !(lhs == rhs);
}
-const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSLoadProperty, op->opcode());
- return OpParameter<LoadPropertyParameters>(op);
+PropertyAccess const& PropertyAccessOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSLoadProperty ||
+ op->opcode() == IrOpcode::kJSStoreProperty);
+ return OpParameter<PropertyAccess>(op);
}
-size_t hash_value(LoadPropertyParameters const& p) {
+size_t hash_value(PropertyAccess const& p) {
return base::hash_combine(p.language_mode(), p.feedback());
}
-const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, op->opcode());
- return OpParameter<LoadNamedParameters>(op);
-}
-
-
bool operator==(LoadGlobalParameters const& lhs,
LoadGlobalParameters const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.feedback() == rhs.feedback() &&
- lhs.typeof_mode() == rhs.typeof_mode() &&
- lhs.slot_index() == rhs.slot_index();
+ lhs.typeof_mode() == rhs.typeof_mode();
}
@@ -281,14 +321,12 @@ bool operator!=(LoadGlobalParameters const& lhs,
size_t hash_value(LoadGlobalParameters const& p) {
- return base::hash_combine(p.name().location(), p.typeof_mode(),
- p.slot_index());
+ return base::hash_combine(p.name().location(), p.typeof_mode());
}
std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
- return os << Brief(*p.name()) << ", " << p.typeof_mode()
- << ", slot: " << p.slot_index();
+ return os << Brief(*p.name()) << ", " << p.typeof_mode();
}
@@ -302,8 +340,7 @@ bool operator==(StoreGlobalParameters const& lhs,
StoreGlobalParameters const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
lhs.name().location() == rhs.name().location() &&
- lhs.feedback() == rhs.feedback() &&
- lhs.slot_index() == rhs.slot_index();
+ lhs.feedback() == rhs.feedback();
}
@@ -315,13 +352,12 @@ bool operator!=(StoreGlobalParameters const& lhs,
size_t hash_value(StoreGlobalParameters const& p) {
return base::hash_combine(p.language_mode(), p.name().location(),
- p.feedback(), p.slot_index());
+ p.feedback());
}
std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name())
- << ", slot: " << p.slot_index();
+ return os << p.language_mode() << ", " << Brief(*p.name());
}
@@ -331,168 +367,157 @@ const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op) {
}
-bool operator==(StoreNamedParameters const& lhs,
- StoreNamedParameters const& rhs) {
- return lhs.language_mode() == rhs.language_mode() &&
- lhs.name().location() == rhs.name().location() &&
- lhs.feedback() == rhs.feedback();
+bool operator==(CreateArgumentsParameters const& lhs,
+ CreateArgumentsParameters const& rhs) {
+ return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
}
-bool operator!=(StoreNamedParameters const& lhs,
- StoreNamedParameters const& rhs) {
+bool operator!=(CreateArgumentsParameters const& lhs,
+ CreateArgumentsParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(StoreNamedParameters const& p) {
- return base::hash_combine(p.language_mode(), p.name().location(),
- p.feedback());
+size_t hash_value(CreateArgumentsParameters const& p) {
+ return base::hash_combine(p.type(), p.start_index());
}
-std::ostream& operator<<(std::ostream& os, StoreNamedParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name());
+std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
+ return os << p.type() << ", " << p.start_index();
}
-const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreNamed, op->opcode());
- return OpParameter<StoreNamedParameters>(op);
+const CreateArgumentsParameters& CreateArgumentsParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
+ return OpParameter<CreateArgumentsParameters>(op);
}
-bool operator==(StorePropertyParameters const& lhs,
- StorePropertyParameters const& rhs) {
- return lhs.language_mode() == rhs.language_mode() &&
- lhs.feedback() == rhs.feedback();
+bool operator==(CreateArrayParameters const& lhs,
+ CreateArrayParameters const& rhs) {
+ return lhs.arity() == rhs.arity() &&
+ lhs.site().location() == rhs.site().location();
}
-bool operator!=(StorePropertyParameters const& lhs,
- StorePropertyParameters const& rhs) {
+bool operator!=(CreateArrayParameters const& lhs,
+ CreateArrayParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(StorePropertyParameters const& p) {
- return base::hash_combine(p.language_mode(), p.feedback());
+size_t hash_value(CreateArrayParameters const& p) {
+ return base::hash_combine(p.arity(), p.site().location());
}
-std::ostream& operator<<(std::ostream& os, StorePropertyParameters const& p) {
- return os << p.language_mode();
+std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
+ os << p.arity();
+ if (!p.site().is_null()) os << ", " << Brief(*p.site());
+ return os;
}
-const StorePropertyParameters& StorePropertyParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
- return OpParameter<StorePropertyParameters>(op);
+const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, op->opcode());
+ return OpParameter<CreateArrayParameters>(op);
}
-bool operator==(CreateArgumentsParameters const& lhs,
- CreateArgumentsParameters const& rhs) {
- return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
+bool operator==(CreateClosureParameters const& lhs,
+ CreateClosureParameters const& rhs) {
+ return lhs.pretenure() == rhs.pretenure() &&
+ lhs.shared_info().location() == rhs.shared_info().location();
}
-bool operator!=(CreateArgumentsParameters const& lhs,
- CreateArgumentsParameters const& rhs) {
+bool operator!=(CreateClosureParameters const& lhs,
+ CreateClosureParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(CreateArgumentsParameters const& p) {
- return base::hash_combine(p.type(), p.start_index());
+size_t hash_value(CreateClosureParameters const& p) {
+ return base::hash_combine(p.pretenure(), p.shared_info().location());
}
-std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
- return os << p.type() << ", " << p.start_index();
+std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) {
+ return os << p.pretenure() << ", " << Brief(*p.shared_info());
}
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
- const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
- return OpParameter<CreateArgumentsParameters>(op);
+const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, op->opcode());
+ return OpParameter<CreateClosureParameters>(op);
}
-bool operator==(CreateClosureParameters const& lhs,
- CreateClosureParameters const& rhs) {
- return lhs.pretenure() == rhs.pretenure() &&
- lhs.shared_info().is_identical_to(rhs.shared_info());
+bool operator==(CreateLiteralParameters const& lhs,
+ CreateLiteralParameters const& rhs) {
+ return lhs.constant().location() == rhs.constant().location() &&
+ lhs.flags() == rhs.flags() && lhs.index() == rhs.index();
}
-bool operator!=(CreateClosureParameters const& lhs,
- CreateClosureParameters const& rhs) {
+bool operator!=(CreateLiteralParameters const& lhs,
+ CreateLiteralParameters const& rhs) {
return !(lhs == rhs);
}
-size_t hash_value(CreateClosureParameters const& p) {
- // TODO(mstarzinger): Include hash of the SharedFunctionInfo here.
- base::hash<PretenureFlag> h;
- return h(p.pretenure());
+size_t hash_value(CreateLiteralParameters const& p) {
+ return base::hash_combine(p.constant().location(), p.flags(), p.index());
}
-std::ostream& operator<<(std::ostream& os, CreateClosureParameters const& p) {
- return os << p.pretenure() << ", " << Brief(*p.shared_info());
+std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
+ return os << Brief(*p.constant()) << ", " << p.flags() << ", " << p.index();
}
-const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kJSCreateClosure, op->opcode());
- return OpParameter<CreateClosureParameters>(op);
+const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCreateLiteralArray ||
+ op->opcode() == IrOpcode::kJSCreateLiteralObject ||
+ op->opcode() == IrOpcode::kJSCreateLiteralRegExp);
+ return OpParameter<CreateLiteralParameters>(op);
}
-#define CACHED_OP_LIST(V) \
- V(Equal, Operator::kNoProperties, 2, 1) \
- V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kNoThrow, 2, 1) \
- V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
- V(UnaryNot, Operator::kEliminatable, 1, 1) \
- V(ToBoolean, Operator::kEliminatable, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kNoProperties, 1, 1) \
- V(Yield, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 0, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kEliminatable, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(ForInDone, Operator::kPure, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(ForInStep, Operator::kPure, 1, 1) \
- V(StackCheck, Operator::kNoProperties, 0, 0) \
- V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1) \
+#define CACHED_OP_LIST(V) \
+ V(Equal, Operator::kNoProperties, 2, 1) \
+ V(NotEqual, Operator::kNoProperties, 2, 1) \
+ V(StrictEqual, Operator::kNoThrow, 2, 1) \
+ V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kNoProperties, 1, 1) \
+ V(Yield, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kEliminatable, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow, 0, 1) \
+ V(StoreMessage, Operator::kNoThrow, 1, 0) \
+ V(StackCheck, Operator::kNoProperties, 0, 0) \
+ V(CreateWithContext, Operator::kNoProperties, 2, 1) \
V(CreateModuleContext, Operator::kNoProperties, 2, 1)
-#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
- V(LessThan, Operator::kNoProperties, 2, 1) \
- V(GreaterThan, Operator::kNoProperties, 2, 1) \
- V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Add, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1)
+#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
+ V(LessThan, Operator::kNoProperties, 2, 1) \
+ V(GreaterThan, Operator::kNoProperties, 2, 1) \
+ V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)
struct JSOperatorGlobalCache final {
@@ -566,13 +591,153 @@ CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
#undef CACHED_WITH_LANGUAGE_MODE
-const Operator* JSOperatorBuilder::CallFunction(size_t arity,
- CallFunctionFlags flags,
- LanguageMode language_mode,
- VectorSlotPair const& feedback,
- TailCallMode tail_call_mode) {
- CallFunctionParameters parameters(arity, flags, language_mode, feedback,
- tail_call_mode);
+const Operator* JSOperatorBuilder::BitwiseOr(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseOr, Operator::kNoProperties, // opcode
+ "JSBitwiseOr", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::BitwiseXor(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseXor, Operator::kNoProperties, // opcode
+ "JSBitwiseXor", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::BitwiseAnd(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseAnd, Operator::kNoProperties, // opcode
+ "JSBitwiseAnd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftLeft(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftLeft, Operator::kNoProperties, // opcode
+ "JSShiftLeft", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftRight(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftRight, Operator::kNoProperties, // opcode
+ "JSShiftRight", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftRightLogical(
+ LanguageMode language_mode, BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftRightLogical, Operator::kNoProperties, // opcode
+ "JSShiftRightLogical", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Add(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSAdd, Operator::kNoProperties, // opcode
+ "JSAdd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Subtract(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSSubtract, Operator::kNoProperties, // opcode
+ "JSSubtract", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Multiply(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSMultiply, Operator::kNoProperties, // opcode
+ "JSMultiply", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Divide(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSDivide, Operator::kNoProperties, // opcode
+ "JSDivide", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Modulus(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSModulus, Operator::kNoProperties, // opcode
+ "JSModulus", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<ToBooleanHints>( //--
+ IrOpcode::kJSToBoolean, Operator::kEliminatable, // opcode
+ "JSToBoolean", // name
+ 1, 1, 0, 1, 1, 0, // inputs/outputs
+ hints); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CallFunction(
+ size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
+ ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
+ CallFunctionParameters parameters(arity, language_mode, feedback,
+ tail_call_mode, convert_mode);
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
"JSCallFunction", // name
@@ -594,58 +759,70 @@ const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
}
-const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CallConstruct(
+ size_t arity, VectorSlotPair const& feedback) {
+ CallConstructParameters parameters(arity, feedback);
+ return new (zone()) Operator1<CallConstructParameters>( // --
IrOpcode::kJSCallConstruct, Operator::kNoProperties, // opcode
"JSCallConstruct", // name
- arguments, 1, 1, 1, 1, 2, // counts
- arguments); // parameter
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
-const Operator* JSOperatorBuilder::LoadNamed(const Handle<Name>& name,
- const VectorSlotPair& feedback,
- LanguageMode language_mode) {
- LoadNamedParameters parameters(name, feedback, language_mode);
- return new (zone()) Operator1<LoadNamedParameters>( // --
+const Operator* JSOperatorBuilder::ConvertReceiver(
+ ConvertReceiverMode convert_mode) {
+ return new (zone()) Operator1<ConvertReceiverMode>( // --
+ IrOpcode::kJSConvertReceiver, Operator::kNoThrow, // opcode
+ "JSConvertReceiver", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ convert_mode); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
+ Handle<Name> name,
+ const VectorSlotPair& feedback) {
+ NamedAccess access(language_mode, name, feedback);
+ return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
2, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
-const Operator* JSOperatorBuilder::LoadProperty(const VectorSlotPair& feedback,
- LanguageMode language_mode) {
- LoadPropertyParameters parameters(feedback, language_mode);
- return new (zone()) Operator1<LoadPropertyParameters>( // --
+const Operator* JSOperatorBuilder::LoadProperty(
+ LanguageMode language_mode, VectorSlotPair const& feedback) {
+ PropertyAccess access(language_mode, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
3, 1, 1, 1, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
- const Handle<Name>& name,
- const VectorSlotPair& feedback) {
- StoreNamedParameters parameters(language_mode, feedback, name);
- return new (zone()) Operator1<StoreNamedParameters>( // --
+ Handle<Name> name,
+ VectorSlotPair const& feedback) {
+ NamedAccess access(language_mode, name, feedback);
+ return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
3, 1, 1, 0, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
const Operator* JSOperatorBuilder::StoreProperty(
- LanguageMode language_mode, const VectorSlotPair& feedback) {
- StorePropertyParameters parameters(language_mode, feedback);
- return new (zone()) Operator1<StorePropertyParameters>( // --
+ LanguageMode language_mode, VectorSlotPair const& feedback) {
+ PropertyAccess access(language_mode, feedback);
+ return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
"JSStoreProperty", // name
4, 1, 1, 0, 1, 2, // counts
- parameters); // parameter
+ access); // parameter
}
@@ -660,26 +837,24 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
- TypeofMode typeof_mode,
- int slot_index) {
- LoadGlobalParameters parameters(name, feedback, typeof_mode, slot_index);
+ TypeofMode typeof_mode) {
+ LoadGlobalParameters parameters(name, feedback, typeof_mode);
return new (zone()) Operator1<LoadGlobalParameters>( // --
IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
"JSLoadGlobal", // name
- 3, 1, 1, 1, 1, 2, // counts
+ 1, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
const Handle<Name>& name,
- const VectorSlotPair& feedback,
- int slot_index) {
- StoreGlobalParameters parameters(language_mode, feedback, name, slot_index);
+ const VectorSlotPair& feedback) {
+ StoreGlobalParameters parameters(language_mode, feedback, name);
return new (zone()) Operator1<StoreGlobalParameters>( // --
IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
"JSStoreGlobal", // name
- 4, 1, 1, 0, 1, 2, // counts
+ 2, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
@@ -707,28 +882,14 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
}
-const Operator* JSOperatorBuilder::LoadDynamicGlobal(
- const Handle<String>& name, uint32_t check_bitset,
- const VectorSlotPair& feedback, TypeofMode typeof_mode) {
- DynamicGlobalAccess access(name, check_bitset, feedback, typeof_mode);
- return new (zone()) Operator1<DynamicGlobalAccess>( // --
- IrOpcode::kJSLoadDynamicGlobal, Operator::kNoProperties, // opcode
- "JSLoadDynamicGlobal", // name
- 2, 1, 1, 1, 1, 2, // counts
- access); // parameter
-}
-
-
-const Operator* JSOperatorBuilder::LoadDynamicContext(
- const Handle<String>& name, uint32_t check_bitset, size_t depth,
- size_t index) {
- ContextAccess context_access(depth, index, false);
- DynamicContextAccess access(name, check_bitset, context_access);
- return new (zone()) Operator1<DynamicContextAccess>( // --
- IrOpcode::kJSLoadDynamicContext, Operator::kNoProperties, // opcode
- "JSLoadDynamicContext", // name
- 1, 1, 1, 1, 1, 2, // counts
- access); // parameter
+const Operator* JSOperatorBuilder::LoadDynamic(const Handle<String>& name,
+ TypeofMode typeof_mode) {
+ DynamicAccess access(name, typeof_mode);
+ return new (zone()) Operator1<DynamicAccess>( // --
+ IrOpcode::kJSLoadDynamic, Operator::kNoProperties, // opcode
+ "JSLoadDynamic", // name
+ 2, 1, 1, 1, 1, 2, // counts
+ access); // parameter
}
@@ -744,6 +905,19 @@ const Operator* JSOperatorBuilder::CreateArguments(
}
+const Operator* JSOperatorBuilder::CreateArray(size_t arity,
+ Handle<AllocationSite> site) {
+ // constructor, new_target, arg1, ..., argN
+ int const value_input_count = static_cast<int>(arity) + 2;
+ CreateArrayParameters parameters(arity, site);
+ return new (zone()) Operator1<CreateArrayParameters>( // --
+ IrOpcode::kJSCreateArray, Operator::kNoProperties, // opcode
+ "JSCreateArray", // name
+ value_input_count, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
CreateClosureParameters parameters(shared_info, pretenure);
@@ -755,28 +929,56 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
-const Operator* JSOperatorBuilder::CreateLiteralArray(int literal_flags) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CreateLiteralArray(
+ Handle<FixedArray> constant_elements, int literal_flags,
+ int literal_index) {
+ CreateLiteralParameters parameters(constant_elements, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties, // opcode
"JSCreateLiteralArray", // name
- 3, 1, 1, 1, 1, 2, // counts
- literal_flags); // parameter
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
-const Operator* JSOperatorBuilder::CreateLiteralObject(int literal_flags) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CreateLiteralObject(
+ Handle<FixedArray> constant_properties, int literal_flags,
+ int literal_index) {
+ CreateLiteralParameters parameters(constant_properties, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties, // opcode
"JSCreateLiteralObject", // name
- 3, 1, 1, 1, 1, 2, // counts
- literal_flags); // parameter
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateLiteralRegExp(
+ Handle<String> constant_pattern, int literal_flags, int literal_index) {
+ CreateLiteralParameters parameters(constant_pattern, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties, // opcode
+ "JSCreateLiteralRegExp", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties, // opcode
+ "JSCreateFunctionContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ slot_count); // parameter
}
const Operator* JSOperatorBuilder::CreateCatchContext(
const Handle<String>& name) {
- return new (zone()) Operator1<Handle<String>, Handle<String>::equal_to,
- Handle<String>::hash>( // --
+ return new (zone()) Operator1<Handle<String>>( // --
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
2, 1, 1, 1, 1, 2, // counts
@@ -786,8 +988,7 @@ const Operator* JSOperatorBuilder::CreateCatchContext(
const Operator* JSOperatorBuilder::CreateBlockContext(
const Handle<ScopeInfo>& scpope_info) {
- return new (zone()) Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
- Handle<ScopeInfo>::hash>( // --
+ return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
"JSCreateBlockContext", // name
1, 1, 1, 1, 1, 2, // counts
@@ -797,8 +998,7 @@ const Operator* JSOperatorBuilder::CreateBlockContext(
const Operator* JSOperatorBuilder::CreateScriptContext(
const Handle<ScopeInfo>& scpope_info) {
- return new (zone()) Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
- Handle<ScopeInfo>::hash>( // --
+ return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
"JSCreateScriptContext", // name
1, 1, 1, 1, 1, 2, // counts
diff --git a/chromium/v8/src/compiler/js-operator.h b/chromium/v8/src/compiler/js-operator.h
index 88b2dd304e5..ca7c7ea6579 100644
--- a/chromium/v8/src/compiler/js-operator.h
+++ b/chromium/v8/src/compiler/js-operator.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_JS_OPERATOR_H_
#define V8_COMPILER_JS_OPERATOR_H_
+#include "src/compiler/type-hints.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -16,24 +17,24 @@ class Operator;
struct JSOperatorGlobalCache;
-// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorICSlot}, which
+// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorSlot}, which
// is used to access the type feedback for a certain {Node}.
class VectorSlotPair {
public:
VectorSlotPair();
- VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: vector_(vector), slot_(slot) {}
- bool IsValid() const { return !vector_.is_null(); }
+ bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
Handle<TypeFeedbackVector> vector() const { return vector_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
int index() const;
private:
const Handle<TypeFeedbackVector> vector_;
- const FeedbackVectorICSlot slot_;
+ const FeedbackVectorSlot slot_;
};
bool operator==(VectorSlotPair const&, VectorSlotPair const&);
@@ -41,26 +42,100 @@ bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
size_t hash_value(VectorSlotPair const&);
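
The tightened IsValid() tracks the switch from FeedbackVectorICSlot to the generic FeedbackVectorSlot: a pair is now only usable when both the vector handle and the slot are present. A minimal standalone sketch of that invariant (stand-in types; kInvalid is an assumed sentinel, not V8's spelling):

#include <cassert>

// Simplified stand-ins for Handle<TypeFeedbackVector> and FeedbackVectorSlot.
struct Slot {
  static constexpr int kInvalid = -1;  // assumed sentinel, for illustration
  int index = kInvalid;
  bool IsInvalid() const { return index == kInvalid; }
};

struct VectorSlotPairSketch {
  const void* vector = nullptr;  // null stands in for vector_.is_null()
  Slot slot;
  // Both halves must be present before feedback can be consulted.
  bool IsValid() const { return vector != nullptr && !slot.IsInvalid(); }
};

int main() {
  VectorSlotPairSketch p;
  assert(!p.IsValid());  // default-constructed pair is invalid
  int dummy = 0;
  p.vector = &dummy;
  assert(!p.IsValid());  // a vector alone is no longer enough
  p.slot.index = 3;
  assert(p.IsValid());
}
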
-enum TailCallMode { NO_TAIL_CALLS, ALLOW_TAIL_CALLS };
+
+// ConvertReceiverMode is used as a parameter by JSConvertReceiver operators.
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+
+
+// The ToBooleanHints are used as a parameter by JSToBoolean operators.
+ToBooleanHints ToBooleanHintsOf(Operator const* op);
+
+
+// Defines whether tail call optimization is allowed.
+enum class TailCallMode : unsigned { kAllow, kDisallow };
+
+size_t hash_value(TailCallMode);
+
+std::ostream& operator<<(std::ostream&, TailCallMode);
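
TailCallMode becomes an enum class that also satisfies the parameter contract: hash_value for operator caching and operator<< for readable graph dumps. A standalone sketch of that contract (the printed spellings are invented, not V8's):

#include <cstddef>
#include <functional>
#include <iostream>

enum class TailCallMode : unsigned { kAllow, kDisallow };

// Hashing lets operators parameterized by the mode be deduplicated.
size_t hash_value(TailCallMode mode) {
  return std::hash<unsigned>()(static_cast<unsigned>(mode));
}

// Printing gives readable graph dumps; the strings are placeholders.
std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
  return os << (mode == TailCallMode::kAllow ? "ALLOW_TAIL_CALLS"
                                             : "DISALLOW_TAIL_CALLS");
}

int main() {
  std::cout << TailCallMode::kAllow << " hashes to "
            << hash_value(TailCallMode::kAllow) << "\n";
}
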
+
+
+// Defines the language mode and hints for JavaScript binary operations.
+// This is used as a parameter by JSAdd, JSSubtract, etc. operators.
+class BinaryOperationParameters final {
+ public:
+ BinaryOperationParameters(LanguageMode language_mode,
+ BinaryOperationHints hints)
+ : language_mode_(language_mode), hints_(hints) {}
+
+ LanguageMode language_mode() const { return language_mode_; }
+ BinaryOperationHints hints() const { return hints_; }
+
+ private:
+ LanguageMode const language_mode_;
+ BinaryOperationHints const hints_;
+};
+
+bool operator==(BinaryOperationParameters const&,
+ BinaryOperationParameters const&);
+bool operator!=(BinaryOperationParameters const&,
+ BinaryOperationParameters const&);
+
+size_t hash_value(BinaryOperationParameters const&);
+
+std::ostream& operator<<(std::ostream&, BinaryOperationParameters const&);
+
+BinaryOperationParameters const& BinaryOperationParametersOf(Operator const*);
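
Each of these parameter classes ends with a ...Of(Operator const*) accessor; in V8 such accessors are typically a DCHECK on the opcode plus OpParameter<T>(op), which casts the generic operator back to its parameterized form. A simplified, compilable sketch of that retrieval pattern (stand-in types and an invented opcode number; not V8's real classes):

#include <cassert>
#include <iostream>

// Simplified stand-ins: V8's Operator1<T> stores the parameter inside the
// operator object and OpParameter<T>() casts it back out. Aggregate
// base-class initialization below needs C++17.
struct Operator {
  int opcode;
};

template <typename T>
struct Operator1 : Operator {
  T parameter;
};

template <typename T>
const T& OpParameter(const Operator* op) {
  return static_cast<const Operator1<T>*>(op)->parameter;
}

struct BinaryOperationParametersSketch {
  int language_mode;  // stands in for LanguageMode
  unsigned hints;     // stands in for BinaryOperationHints
};

constexpr int kJSAdd = 1;  // invented opcode number, for illustration only

const BinaryOperationParametersSketch& BinaryOperationParametersOf(
    const Operator* op) {
  assert(op->opcode == kJSAdd);  // V8 checks all binary opcodes here
  return OpParameter<BinaryOperationParametersSketch>(op);
}

int main() {
  Operator1<BinaryOperationParametersSketch> add{{kJSAdd}, {1, 7u}};
  std::cout << "hints: " << BinaryOperationParametersOf(&add).hints << "\n";
}
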
+
+
+// Defines the arity and the feedback for a JavaScript constructor call. This is
+// used as a parameter by JSCallConstruct operators.
+class CallConstructParameters final {
+ public:
+ CallConstructParameters(size_t arity, VectorSlotPair const& feedback)
+ : arity_(arity), feedback_(feedback) {}
+
+ size_t arity() const { return arity_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ size_t const arity_;
+ VectorSlotPair const feedback_;
+};
+
+bool operator==(CallConstructParameters const&, CallConstructParameters const&);
+bool operator!=(CallConstructParameters const&, CallConstructParameters const&);
+
+size_t hash_value(CallConstructParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
+
+CallConstructParameters const& CallConstructParametersOf(Operator const*);
+
// Defines the arity and the call modes for a JavaScript function call. This is
// used as a parameter by JSCallFunction operators.
class CallFunctionParameters final {
public:
- CallFunctionParameters(size_t arity, CallFunctionFlags flags,
- LanguageMode language_mode,
+ CallFunctionParameters(size_t arity, LanguageMode language_mode,
VectorSlotPair const& feedback,
- TailCallMode tail_call_mode)
- : bit_field_(ArityField::encode(arity) | FlagsField::encode(flags) |
- LanguageModeField::encode(language_mode)),
- feedback_(feedback),
- tail_call_mode_(tail_call_mode) {}
+ TailCallMode tail_call_mode,
+ ConvertReceiverMode convert_mode)
+ : bit_field_(ArityField::encode(arity) |
+ ConvertReceiverModeField::encode(convert_mode) |
+ LanguageModeField::encode(language_mode) |
+ TailCallModeField::encode(tail_call_mode)),
+ feedback_(feedback) {}
size_t arity() const { return ArityField::decode(bit_field_); }
- CallFunctionFlags flags() const { return FlagsField::decode(bit_field_); }
LanguageMode language_mode() const {
return LanguageModeField::decode(bit_field_);
}
+ ConvertReceiverMode convert_mode() const {
+ return ConvertReceiverModeField::decode(bit_field_);
+ }
+ TailCallMode tail_call_mode() const {
+ return TailCallModeField::decode(bit_field_);
+ }
VectorSlotPair const& feedback() const { return feedback_; }
bool operator==(CallFunctionParameters const& that) const {
@@ -71,20 +146,18 @@ class CallFunctionParameters final {
return !(*this == that);
}
- bool AllowTailCalls() const { return tail_call_mode_ == ALLOW_TAIL_CALLS; }
-
private:
friend size_t hash_value(CallFunctionParameters const& p) {
return base::hash_combine(p.bit_field_, p.feedback_);
}
- typedef BitField<size_t, 0, 28> ArityField;
- typedef BitField<CallFunctionFlags, 28, 2> FlagsField;
- typedef BitField<LanguageMode, 30, 2> LanguageModeField;
+ typedef BitField<size_t, 0, 27> ArityField;
+ typedef BitField<ConvertReceiverMode, 27, 2> ConvertReceiverModeField;
+ typedef BitField<LanguageMode, 29, 2> LanguageModeField;
+ typedef BitField<TailCallMode, 31, 1> TailCallModeField;
const uint32_t bit_field_;
const VectorSlotPair feedback_;
- bool tail_call_mode_;
};
size_t hash_value(CallFunctionParameters const&);
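
The repacked bit field spends 27 bits on arity and folds receiver conversion, language mode and the new tail-call bit into the remaining five, exactly filling 32 bits. A minimal reimplementation of the BitField encode/decode arithmetic to check that layout (V8's real template is not reproduced here; the enum values are stand-in numbers):

#include <cassert>
#include <cstdint>

// Minimal reimplementation of the BitField idiom, for illustration only.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> kShift);
  }
};

using ArityField = BitField<uint32_t, 0, 27>;
using ConvertReceiverModeField = BitField<uint32_t, 27, 2>;
using LanguageModeField = BitField<uint32_t, 29, 2>;
using TailCallModeField = BitField<uint32_t, 31, 1>;

int main() {
  uint32_t bits = ArityField::encode(5) |
                  ConvertReceiverModeField::encode(2) |  // assumed kAny
                  LanguageModeField::encode(1) |         // assumed STRICT
                  TailCallModeField::encode(1);          // assumed kDisallow
  assert(ArityField::decode(bits) == 5);
  assert(TailCallModeField::decode(bits) == 1);
}
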
@@ -148,113 +221,56 @@ std::ostream& operator<<(std::ostream&, ContextAccess const&);
ContextAccess const& ContextAccessOf(Operator const*);
-// Defines the name for a dynamic variable lookup. The {check_bitset} allows to
-// inline checks whether the lookup yields in a global variable. This is used as
-// a parameter by JSLoadDynamicGlobal and JSStoreDynamicGlobal operators.
-class DynamicGlobalAccess final {
+// Defines the name for a dynamic variable lookup. This is used as a parameter
+// by JSLoadDynamic and JSStoreDynamic operators.
+class DynamicAccess final {
public:
- DynamicGlobalAccess(const Handle<String>& name, uint32_t check_bitset,
- const VectorSlotPair& feedback, TypeofMode typeof_mode);
+ DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode);
const Handle<String>& name() const { return name_; }
- uint32_t check_bitset() const { return check_bitset_; }
- const VectorSlotPair& feedback() const { return feedback_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
- // Indicates that an inline check is disabled.
- bool RequiresFullCheck() const {
- return check_bitset() == kFullCheckRequired;
- }
-
- // Limit of context chain length to which inline check is possible.
- static const int kMaxCheckDepth = 30;
-
- // Sentinel for {check_bitset} disabling inline checks.
- static const uint32_t kFullCheckRequired = -1;
-
private:
const Handle<String> name_;
- const uint32_t check_bitset_;
- const VectorSlotPair feedback_;
const TypeofMode typeof_mode_;
};
-size_t hash_value(DynamicGlobalAccess const&);
+size_t hash_value(DynamicAccess const&);
-bool operator==(DynamicGlobalAccess const&, DynamicGlobalAccess const&);
-bool operator!=(DynamicGlobalAccess const&, DynamicGlobalAccess const&);
+bool operator==(DynamicAccess const&, DynamicAccess const&);
+bool operator!=(DynamicAccess const&, DynamicAccess const&);
-std::ostream& operator<<(std::ostream&, DynamicGlobalAccess const&);
+std::ostream& operator<<(std::ostream&, DynamicAccess const&);
-DynamicGlobalAccess const& DynamicGlobalAccessOf(Operator const*);
+DynamicAccess const& DynamicAccessOf(Operator const*);
-// Defines the name for a dynamic variable lookup. The {check_bitset} allows to
-// inline checks whether the lookup yields in a context variable. This is used
-// as a parameter by JSLoadDynamicContext and JSStoreDynamicContext operators.
-class DynamicContextAccess final {
+// Defines the property of an object for a named access. This is
+// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
+class NamedAccess final {
public:
- DynamicContextAccess(const Handle<String>& name, uint32_t check_bitset,
- const ContextAccess& context_access);
-
- const Handle<String>& name() const { return name_; }
- uint32_t check_bitset() const { return check_bitset_; }
- const ContextAccess& context_access() const { return context_access_; }
-
- // Indicates that an inline check is disabled.
- bool RequiresFullCheck() const {
- return check_bitset() == kFullCheckRequired;
- }
-
- // Limit of context chain length to which inline check is possible.
- static const int kMaxCheckDepth = 30;
-
- // Sentinel for {check_bitset} disabling inline checks.
- static const uint32_t kFullCheckRequired = -1;
-
- private:
- const Handle<String> name_;
- const uint32_t check_bitset_;
- const ContextAccess context_access_;
-};
-
-size_t hash_value(DynamicContextAccess const&);
-
-bool operator==(DynamicContextAccess const&, DynamicContextAccess const&);
-bool operator!=(DynamicContextAccess const&, DynamicContextAccess const&);
-
-std::ostream& operator<<(std::ostream&, DynamicContextAccess const&);
-
-DynamicContextAccess const& DynamicContextAccessOf(Operator const*);
-
-
-// Defines the property being loaded from an object by a named load. This is
-// used as a parameter by JSLoadNamed operators.
-class LoadNamedParameters final {
- public:
- LoadNamedParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
- LanguageMode language_mode)
+ NamedAccess(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback)
: name_(name), feedback_(feedback), language_mode_(language_mode) {}
- const Handle<Name>& name() const { return name_; }
+ Handle<Name> name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
-
- const VectorSlotPair& feedback() const { return feedback_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
- const Handle<Name> name_;
- const VectorSlotPair feedback_;
- const LanguageMode language_mode_;
+ Handle<Name> const name_;
+ VectorSlotPair const feedback_;
+ LanguageMode const language_mode_;
};
-bool operator==(LoadNamedParameters const&, LoadNamedParameters const&);
-bool operator!=(LoadNamedParameters const&, LoadNamedParameters const&);
+bool operator==(NamedAccess const&, NamedAccess const&);
+bool operator!=(NamedAccess const&, NamedAccess const&);
-size_t hash_value(LoadNamedParameters const&);
+size_t hash_value(NamedAccess const&);
-std::ostream& operator<<(std::ostream&, LoadNamedParameters const&);
+std::ostream& operator<<(std::ostream&, NamedAccess const&);
-const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
+const NamedAccess& NamedAccessOf(const Operator* op);
// Defines the property being loaded from an object by a named load. This is
@@ -262,24 +278,18 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
class LoadGlobalParameters final {
public:
LoadGlobalParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
- TypeofMode typeof_mode, int slot_index)
- : name_(name),
- feedback_(feedback),
- typeof_mode_(typeof_mode),
- slot_index_(slot_index) {}
+ TypeofMode typeof_mode)
+ : name_(name), feedback_(feedback), typeof_mode_(typeof_mode) {}
const Handle<Name>& name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
- int slot_index() const { return slot_index_; }
-
private:
const Handle<Name> name_;
const VectorSlotPair feedback_;
const TypeofMode typeof_mode_;
- const int slot_index_;
};
bool operator==(LoadGlobalParameters const&, LoadGlobalParameters const&);
@@ -298,22 +308,17 @@ class StoreGlobalParameters final {
public:
StoreGlobalParameters(LanguageMode language_mode,
const VectorSlotPair& feedback,
- const Handle<Name>& name, int slot_index)
- : language_mode_(language_mode),
- name_(name),
- feedback_(feedback),
- slot_index_(slot_index) {}
+ const Handle<Name>& name)
+ : language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
const Handle<Name>& name() const { return name_; }
- int slot_index() const { return slot_index_; }
private:
const LanguageMode language_mode_;
const Handle<Name> name_;
const VectorSlotPair feedback_;
- int slot_index_;
};
bool operator==(StoreGlobalParameters const&, StoreGlobalParameters const&);
@@ -326,85 +331,29 @@ std::ostream& operator<<(std::ostream&, StoreGlobalParameters const&);
const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op);
-// Defines the property being loaded from an object. This is
-// used as a parameter by JSLoadProperty operators.
-class LoadPropertyParameters final {
+// Defines the property of an object for a keyed access. This is used
+// as a parameter by the JSLoadProperty and JSStoreProperty operators.
+class PropertyAccess final {
public:
- explicit LoadPropertyParameters(const VectorSlotPair& feedback,
- LanguageMode language_mode)
+ PropertyAccess(LanguageMode language_mode, VectorSlotPair const& feedback)
: feedback_(feedback), language_mode_(language_mode) {}
- const VectorSlotPair& feedback() const { return feedback_; }
-
- LanguageMode language_mode() const { return language_mode_; }
-
- private:
- const VectorSlotPair feedback_;
- const LanguageMode language_mode_;
-};
-
-bool operator==(LoadPropertyParameters const&, LoadPropertyParameters const&);
-bool operator!=(LoadPropertyParameters const&, LoadPropertyParameters const&);
-
-size_t hash_value(LoadPropertyParameters const&);
-
-std::ostream& operator<<(std::ostream&, LoadPropertyParameters const&);
-
-const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
-
-
-// Defines the property being stored to an object by a named store. This is
-// used as a parameter by JSStoreNamed operator.
-class StoreNamedParameters final {
- public:
- StoreNamedParameters(LanguageMode language_mode,
- const VectorSlotPair& feedback, const Handle<Name>& name)
- : language_mode_(language_mode), name_(name), feedback_(feedback) {}
-
- LanguageMode language_mode() const { return language_mode_; }
- const VectorSlotPair& feedback() const { return feedback_; }
- const Handle<Name>& name() const { return name_; }
-
- private:
- const LanguageMode language_mode_;
- const Handle<Name> name_;
- const VectorSlotPair feedback_;
-};
-
-bool operator==(StoreNamedParameters const&, StoreNamedParameters const&);
-bool operator!=(StoreNamedParameters const&, StoreNamedParameters const&);
-
-size_t hash_value(StoreNamedParameters const&);
-
-std::ostream& operator<<(std::ostream&, StoreNamedParameters const&);
-
-const StoreNamedParameters& StoreNamedParametersOf(const Operator* op);
-
-
-// Defines the property being stored to an object. This is used as a parameter
-// by JSStoreProperty operators.
-class StorePropertyParameters final {
- public:
- StorePropertyParameters(LanguageMode language_mode,
- const VectorSlotPair& feedback)
- : language_mode_(language_mode), feedback_(feedback) {}
-
LanguageMode language_mode() const { return language_mode_; }
- const VectorSlotPair& feedback() const { return feedback_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
- const LanguageMode language_mode_;
- const VectorSlotPair feedback_;
+ VectorSlotPair const feedback_;
+ LanguageMode const language_mode_;
};
-bool operator==(StorePropertyParameters const&, StorePropertyParameters const&);
-bool operator!=(StorePropertyParameters const&, StorePropertyParameters const&);
+bool operator==(PropertyAccess const&, PropertyAccess const&);
+bool operator!=(PropertyAccess const&, PropertyAccess const&);
-size_t hash_value(StorePropertyParameters const&);
+size_t hash_value(PropertyAccess const&);
-std::ostream& operator<<(std::ostream&, StorePropertyParameters const&);
+std::ostream& operator<<(std::ostream&, PropertyAccess const&);
-const StorePropertyParameters& StorePropertyParametersOf(const Operator* op);
+PropertyAccess const& PropertyAccessOf(const Operator* op);
// Defines specifics about arguments object or rest parameter creation. This is
@@ -436,6 +385,31 @@ const CreateArgumentsParameters& CreateArgumentsParametersOf(
const Operator* op);
+// Defines shared information for the array that should be created. This is
+// used as a parameter by JSCreateArray operators.
+class CreateArrayParameters final {
+ public:
+ explicit CreateArrayParameters(size_t arity, Handle<AllocationSite> site)
+ : arity_(arity), site_(site) {}
+
+ size_t arity() const { return arity_; }
+ Handle<AllocationSite> site() const { return site_; }
+
+ private:
+ size_t const arity_;
+ Handle<AllocationSite> const site_;
+};
+
+bool operator==(CreateArrayParameters const&, CreateArrayParameters const&);
+bool operator!=(CreateArrayParameters const&, CreateArrayParameters const&);
+
+size_t hash_value(CreateArrayParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
+
+const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
+
+
// Defines shared information for the closure that should be created. This is
// used as a parameter by JSCreateClosure operators.
class CreateClosureParameters final {
@@ -462,6 +436,34 @@ std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
+// Defines shared information for the literal that should be created. This is
+// used as a parameter by JSCreateLiteralArray, JSCreateLiteralObject and
+// JSCreateLiteralRegExp operators.
+class CreateLiteralParameters final {
+ public:
+ CreateLiteralParameters(Handle<HeapObject> constant, int flags, int index)
+ : constant_(constant), flags_(flags), index_(index) {}
+
+ Handle<HeapObject> constant() const { return constant_; }
+ int flags() const { return flags_; }
+ int index() const { return index_; }
+
+ private:
+ Handle<HeapObject> const constant_;
+ int const flags_;
+ int const index_;
+};
+
+bool operator==(CreateLiteralParameters const&, CreateLiteralParameters const&);
+bool operator!=(CreateLiteralParameters const&, CreateLiteralParameters const&);
+
+size_t hash_value(CreateLiteralParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
+
+const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
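
One parameter class now serves all three literal opcodes because Handle<HeapObject> is a common supertype of both FixedArray (array elements and object properties) and String (the regexp pattern). A sketch of that upcast with stand-in types (C++17 aggregate initialization; not V8's heap object hierarchy):

#include <iostream>
#include <string>

// Stand-ins for V8's HeapObject hierarchy, for illustration only.
struct HeapObject {};
struct FixedArray : HeapObject {};
struct String : HeapObject { std::string pattern; };

// The shared parameter stores the constant as the common supertype.
struct CreateLiteralParametersSketch {
  const HeapObject* constant;
  int flags;
  int index;
};

int main() {
  FixedArray elements;
  String pattern{{}, "a+b"};
  // Array/object literals pass a FixedArray, regexp literals a String;
  // both fit the single Handle<HeapObject>-typed field.
  CreateLiteralParametersSketch array_params{&elements, /*flags=*/0, 1};
  CreateLiteralParametersSketch regexp_params{&pattern, /*flags=*/0, 2};
  std::cout << (array_params.constant != regexp_params.constant) << "\n";
}
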
+
+
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
@@ -477,20 +479,29 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* GreaterThan(LanguageMode language_mode);
const Operator* LessThanOrEqual(LanguageMode language_mode);
const Operator* GreaterThanOrEqual(LanguageMode language_mode);
- const Operator* BitwiseOr(LanguageMode language_mode);
- const Operator* BitwiseXor(LanguageMode language_mode);
- const Operator* BitwiseAnd(LanguageMode language_mode);
- const Operator* ShiftLeft(LanguageMode language_mode);
- const Operator* ShiftRight(LanguageMode language_mode);
- const Operator* ShiftRightLogical(LanguageMode language_mode);
- const Operator* Add(LanguageMode language_mode);
- const Operator* Subtract(LanguageMode language_mode);
- const Operator* Multiply(LanguageMode language_mode);
- const Operator* Divide(LanguageMode language_mode);
- const Operator* Modulus(LanguageMode language_mode);
-
- const Operator* UnaryNot();
- const Operator* ToBoolean();
+ const Operator* BitwiseOr(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* BitwiseXor(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* BitwiseAnd(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftLeft(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftRight(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftRightLogical(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Add(LanguageMode language_mode, BinaryOperationHints hints);
+ const Operator* Subtract(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Multiply(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Divide(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Modulus(LanguageMode language_mode,
+ BinaryOperationHints hints);
+
+ const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToNumber();
const Operator* ToString();
const Operator* ToName();
@@ -500,30 +511,36 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsParameters::Type type,
int start_index);
+ const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
PretenureFlag pretenure);
- const Operator* CreateLiteralArray(int literal_flags);
- const Operator* CreateLiteralObject(int literal_flags);
+ const Operator* CreateIterResultObject();
+ const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+ int literal_flags, int literal_index);
+ const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
+ int literal_flags, int literal_index);
+ const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
+ int literal_flags, int literal_index);
const Operator* CallFunction(
- size_t arity, CallFunctionFlags flags, LanguageMode language_mode,
+ size_t arity, LanguageMode language_mode,
VectorSlotPair const& feedback = VectorSlotPair(),
- TailCallMode tail_call_mode = NO_TAIL_CALLS);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
+ const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
- const Operator* CallConstruct(int arguments);
+ const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
- const Operator* LoadProperty(const VectorSlotPair& feedback,
- LanguageMode language_mode);
- const Operator* LoadNamed(const Handle<Name>& name,
- const VectorSlotPair& feedback,
- LanguageMode language_mode);
+ const Operator* LoadProperty(LanguageMode language_mode,
+ VectorSlotPair const& feedback);
+ const Operator* LoadNamed(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback);
const Operator* StoreProperty(LanguageMode language_mode,
- const VectorSlotPair& feedback);
- const Operator* StoreNamed(LanguageMode language_mode,
- const Handle<Name>& name,
- const VectorSlotPair& feedback);
+ VectorSlotPair const& feedback);
+ const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
+ VectorSlotPair const& feedback);
const Operator* DeleteProperty(LanguageMode language_mode);
@@ -531,23 +548,16 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF,
- int slot_index = -1);
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
const Operator* StoreGlobal(LanguageMode language_mode,
const Handle<Name>& name,
- const VectorSlotPair& feedback,
- int slot_index = -1);
+ const VectorSlotPair& feedback);
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
- const Operator* LoadDynamicGlobal(const Handle<String>& name,
- uint32_t check_bitset,
- const VectorSlotPair& feedback,
- TypeofMode typeof_mode);
- const Operator* LoadDynamicContext(const Handle<String>& name,
- uint32_t check_bitset, size_t depth,
- size_t index);
+ const Operator* LoadDynamic(const Handle<String>& name,
+ TypeofMode typeof_mode);
const Operator* TypeOf();
const Operator* InstanceOf();
@@ -557,9 +567,12 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* ForInPrepare();
const Operator* ForInStep();
+ const Operator* LoadMessage();
+ const Operator* StoreMessage();
+
const Operator* StackCheck();
- const Operator* CreateFunctionContext();
+ const Operator* CreateFunctionContext(int slot_count);
const Operator* CreateCatchContext(const Handle<String>& name);
const Operator* CreateWithContext();
  const Operator* CreateBlockContext(const Handle<ScopeInfo>& scope_info);
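
With the new defaults on CallFunction, most call sites only spell out arity and language mode; feedback, receiver conversion and tail-call permission fall back to neutral values. A compilable sketch of that default-argument shape (stand-in types mirroring the header above; the return value is illustrative only):

#include <cstddef>
#include <iostream>

// Stand-ins mirroring the declarations above; values are assumptions.
enum class ConvertReceiverMode { kAny, kNullOrUndefined, kNotNullOrUndefined };
enum class TailCallMode : unsigned { kAllow, kDisallow };
enum LanguageMode { SLOPPY, STRICT };
struct VectorSlotPair {};  // default-constructed pair means "no feedback"

// Shape of the builder entry point, defaults as in the diff above.
const char* CallFunction(
    size_t arity, LanguageMode language_mode,
    VectorSlotPair const& feedback = VectorSlotPair(),
    ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
    TailCallMode tail_call_mode = TailCallMode::kDisallow) {
  (void)arity; (void)language_mode; (void)feedback; (void)convert_mode;
  return tail_call_mode == TailCallMode::kAllow ? "tail-callable"
                                                : "regular call";
}

int main() {
  // Typical call site: only arity and language mode are spelled out.
  std::cout << CallFunction(2, SLOPPY) << "\n";
  // Opting in to tail calls means spelling out every default before it.
  std::cout << CallFunction(2, STRICT, VectorSlotPair(),
                            ConvertReceiverMode::kAny, TailCallMode::kAllow)
            << "\n";
}
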
diff --git a/chromium/v8/src/compiler/js-type-feedback-lowering.cc b/chromium/v8/src/compiler/js-type-feedback-lowering.cc
deleted file mode 100644
index d97a305d08c..00000000000
--- a/chromium/v8/src/compiler/js-type-feedback-lowering.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-type-feedback-lowering.h"
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
-#include "src/type-feedback-vector.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-JSTypeFeedbackLowering::JSTypeFeedbackLowering(Editor* editor, Flags flags,
- JSGraph* jsgraph)
- : AdvancedReducer(editor),
- flags_(flags),
- jsgraph_(jsgraph),
- simplified_(graph()->zone()) {}
-
-
-Reduction JSTypeFeedbackLowering::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSLoadNamed:
- return ReduceJSLoadNamed(node);
- default:
- break;
- }
- return NoChange();
-}
-
-
-Reduction JSTypeFeedbackLowering::ReduceJSLoadNamed(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Type* receiver_type = NodeProperties::GetType(receiver);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // We need to make optimistic assumptions to continue.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- LoadNamedParameters const& p = LoadNamedParametersOf(node->op());
- if (p.feedback().vector().is_null()) return NoChange();
- if (p.name().is_identical_to(factory()->length_string())) {
- LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
- MapHandleList maps;
- if (nexus.ExtractMaps(&maps) > 0) {
- for (Handle<Map> map : maps) {
- if (map->instance_type() >= FIRST_NONSTRING_TYPE) return NoChange();
- }
- // Optimistic optimization for "length" property of strings.
- if (receiver_type->Maybe(Type::TaggedSigned())) {
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_true);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- control = graph()->NewNode(common()->IfFalse(), branch);
- }
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* receiver_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, effect, control);
- Node* check =
- graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- control = graph()->NewNode(common()->IfTrue(), branch);
- Node* value = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForStringLength(graph()->zone())),
- receiver, effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- return NoChange();
-}
-
-
-Factory* JSTypeFeedbackLowering::factory() const {
- return isolate()->factory();
-}
-
-
-CommonOperatorBuilder* JSTypeFeedbackLowering::common() const {
- return jsgraph()->common();
-}
-
-
-Graph* JSTypeFeedbackLowering::graph() const { return jsgraph()->graph(); }
-
-
-Isolate* JSTypeFeedbackLowering::isolate() const {
- return jsgraph()->isolate();
-}
-
-
-MachineOperatorBuilder* JSTypeFeedbackLowering::machine() const {
- return jsgraph()->machine();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/compiler/js-type-feedback-lowering.h b/chromium/v8/src/compiler/js-type-feedback-lowering.h
deleted file mode 100644
index db0fbdd626a..00000000000
--- a/chromium/v8/src/compiler/js-type-feedback-lowering.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
-#define V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
-
-#include "src/base/flags.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class Factory;
-
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class JSGraph;
-class MachineOperatorBuilder;
-
-
-// Lowers JS-level operators to simplified operators based on type feedback.
-class JSTypeFeedbackLowering final : public AdvancedReducer {
- public:
- // Various configuration flags to control the operation of this lowering.
- enum Flag {
- kNoFlags = 0,
- kDeoptimizationEnabled = 1 << 0,
- };
- typedef base::Flags<Flag> Flags;
-
- JSTypeFeedbackLowering(Editor* editor, Flags flags, JSGraph* jsgraph);
- ~JSTypeFeedbackLowering() final {}
-
- Reduction Reduce(Node* node) final;
-
- private:
- Reduction ReduceJSLoadNamed(Node* node);
-
- Factory* factory() const;
- Flags flags() const { return flags_; }
- Graph* graph() const;
- Isolate* isolate() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- Flags const flags_;
- JSGraph* const jsgraph_;
- SimplifiedOperatorBuilder simplified_;
-
- DISALLOW_COPY_AND_ASSIGN(JSTypeFeedbackLowering);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(JSTypeFeedbackLowering::Flags)
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_JS_TYPE_FEEDBACK_LOWERING_H_
diff --git a/chromium/v8/src/compiler/js-type-feedback.cc b/chromium/v8/src/compiler/js-type-feedback.cc
deleted file mode 100644
index 395a7dccca7..00000000000
--- a/chromium/v8/src/compiler/js-type-feedback.cc
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-type-feedback.h"
-
-#include "src/property-details.h"
-
-#include "src/accessors.h"
-#include "src/ast.h"
-#include "src/compiler.h"
-#include "src/type-info.h"
-
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/frame-states.h"
-#include "src/compiler/node-aux-data.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-enum LoadOrStore { LOAD, STORE };
-
-// TODO(turbofan): fix deoptimization problems
-#define ENABLE_FAST_PROPERTY_LOADS false
-#define ENABLE_FAST_PROPERTY_STORES false
-
-JSTypeFeedbackTable::JSTypeFeedbackTable(Zone* zone)
- : type_feedback_id_map_(TypeFeedbackIdMap::key_compare(),
- TypeFeedbackIdMap::allocator_type(zone)),
- feedback_vector_ic_slot_map_(TypeFeedbackIdMap::key_compare(),
- TypeFeedbackIdMap::allocator_type(zone)) {}
-
-
-void JSTypeFeedbackTable::Record(Node* node, TypeFeedbackId id) {
- type_feedback_id_map_.insert(std::make_pair(node->id(), id));
-}
-
-
-void JSTypeFeedbackTable::Record(Node* node, FeedbackVectorICSlot slot) {
- feedback_vector_ic_slot_map_.insert(std::make_pair(node->id(), slot));
-}
-
-
-Reduction JSTypeFeedbackSpecializer::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSLoadProperty:
- return ReduceJSLoadProperty(node);
- case IrOpcode::kJSLoadNamed:
- return ReduceJSLoadNamed(node);
- case IrOpcode::kJSLoadGlobal:
- return ReduceJSLoadGlobal(node);
- case IrOpcode::kJSStoreNamed:
- return ReduceJSStoreNamed(node);
- case IrOpcode::kJSStoreProperty:
- return ReduceJSStoreProperty(node);
- default:
- break;
- }
- return NoChange();
-}
-
-
-static void AddFieldAccessTypes(FieldAccess* access,
- PropertyDetails property_details) {
- if (property_details.representation().IsSmi()) {
- access->type = Type::SignedSmall();
- access->machine_type = static_cast<MachineType>(kTypeInt32 | kRepTagged);
- } else if (property_details.representation().IsDouble()) {
- access->type = Type::Number();
- access->machine_type = kMachFloat64;
- }
-}
-
-
-static bool GetInObjectFieldAccess(LoadOrStore mode, Handle<Map> map,
- Handle<Name> name, FieldAccess* access) {
- access->base_is_tagged = kTaggedBase;
- access->offset = -1;
- access->name = name;
- access->type = Type::Any();
- access->machine_type = kMachAnyTagged;
-
- // Check for properties that have accessors but are JSObject fields.
- if (Accessors::IsJSObjectFieldAccessor(map, name, &access->offset)) {
- // TODO(turbofan): fill in types for special JSObject field accesses.
- return true;
- }
-
- // Check if the map is a dictionary.
- if (map->is_dictionary_map()) return false;
-
- // Search the descriptor array.
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(*name, *map);
- if (number == DescriptorArray::kNotFound) return false;
- PropertyDetails property_details = descriptors->GetDetails(number);
-
- bool is_smi = property_details.representation().IsSmi();
- bool is_double = property_details.representation().IsDouble();
-
- if (property_details.type() != DATA) {
- // TODO(turbofan): constant loads and stores.
- return false;
- }
-
- // Transfer known types from property details.
- AddFieldAccessTypes(access, property_details);
-
- if (mode == STORE) {
- if (property_details.IsReadOnly()) {
- // TODO(turbofan): deopt, ignore or throw on readonly stores.
- return false;
- }
- if (is_smi || is_double) {
- // TODO(turbofan): check type and deopt for SMI/double stores.
- return false;
- }
- }
-
- int index = map->instance_descriptors()->GetFieldIndex(number);
- FieldIndex field_index = FieldIndex::ForPropertyIndex(*map, index, is_double);
-
- if (field_index.is_inobject()) {
- if (is_double && !map->IsUnboxedDoubleField(field_index)) {
- // TODO(turbofan): support for out-of-line (MutableHeapNumber) loads.
- return false;
- }
- access->offset = field_index.offset();
- return true;
- }
-
- // TODO(turbofan): handle out of object properties.
- return false;
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadNamed);
- if (mode() != kDeoptimizationEnabled) return NoChange();
- Node* frame_state_before = GetFrameStateBefore(node);
- if (frame_state_before == nullptr) return NoChange();
-
- const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- SmallMapList maps;
-
- FeedbackVectorICSlot slot = js_type_feedback_->FindFeedbackVectorICSlot(node);
- if (slot.IsInvalid() ||
- oracle()->LoadInlineCacheState(slot) == UNINITIALIZED) {
- // No type feedback ids or the load is uninitialized.
- return NoChange();
- }
- oracle()->PropertyReceiverTypes(slot, p.name(), &maps);
-
- Node* receiver = node->InputAt(0);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
- if (!ENABLE_FAST_PROPERTY_LOADS) return NoChange();
-
- Handle<Map> map = maps.first();
- FieldAccess field_access;
- if (!GetInObjectFieldAccess(LOAD, map, p.name(), &field_access)) {
- return NoChange();
- }
-
- Node* control = NodeProperties::GetControlInput(node);
- Node* check_success;
- Node* check_failed;
- BuildMapCheck(receiver, map, true, effect, control, &check_success,
- &check_failed);
-
- // Build the actual load.
- Node* load = graph()->NewNode(simplified()->LoadField(field_access), receiver,
- effect, check_success);
-
- // TODO(turbofan): handle slow case instead of deoptimizing.
- Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state_before,
- effect, check_failed);
- NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- ReplaceWithValue(node, load, load, check_success);
- return Replace(load);
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadGlobal(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadGlobal);
- Handle<String> name =
- Handle<String>::cast(LoadGlobalParametersOf(node->op()).name());
- // Try to optimize loads from the global object.
- Handle<Object> constant_value =
- jsgraph()->isolate()->factory()->GlobalConstantFor(name);
- if (!constant_value.is_null()) {
- // Always optimize global constants.
- Node* constant = jsgraph()->Constant(constant_value);
- ReplaceWithValue(node, constant);
- return Replace(constant);
- }
-
- if (global_object_.is_null()) {
- // Nothing else can be done if we don't have a global object.
- return NoChange();
- }
-
- if (mode() == kDeoptimizationEnabled) {
- // Handle lookups in the script context.
- {
- Handle<ScriptContextTable> script_contexts(
- global_object_->native_context()->script_context_table());
- ScriptContextTable::LookupResult lookup;
- if (ScriptContextTable::Lookup(script_contexts, name, &lookup)) {
- // TODO(turbofan): introduce a LoadContext here.
- return NoChange();
- }
- }
-
- // Constant promotion or cell access requires lazy deoptimization support.
- LookupIterator it(global_object_, name, LookupIterator::OWN);
-
- if (it.state() == LookupIterator::DATA) {
- Handle<PropertyCell> cell = it.GetPropertyCell();
- dependencies_->AssumePropertyCell(cell);
-
- if (it.property_details().cell_type() == PropertyCellType::kConstant) {
- // Constant promote the global's current value.
- Handle<Object> constant_value(cell->value(), jsgraph()->isolate());
- if (constant_value->IsConsString()) {
- constant_value =
- String::Flatten(Handle<String>::cast(constant_value));
- }
- Node* constant = jsgraph()->Constant(constant_value);
- ReplaceWithValue(node, constant);
- return Replace(constant);
- } else {
- // Load directly from the property cell.
- FieldAccess access = AccessBuilder::ForPropertyCellValue();
- Node* control = NodeProperties::GetControlInput(node);
- Node* load_field = graph()->NewNode(
- simplified()->LoadField(access), jsgraph()->Constant(cell),
- NodeProperties::GetEffectInput(node), control);
- ReplaceWithValue(node, load_field, load_field, control);
- return Replace(load_field);
- }
- }
- } else {
- // TODO(turbofan): non-configurable properties on the global object
- // should be loadable through a cell without deoptimization support.
- }
-
- return NoChange();
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSLoadProperty(Node* node) {
- return NoChange();
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSStoreNamed);
- Node* frame_state_before = GetFrameStateBefore(node);
- if (frame_state_before == nullptr) return NoChange();
-
- const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- SmallMapList maps;
- TypeFeedbackId id = js_type_feedback_->FindTypeFeedbackId(node);
- if (id.IsNone() || oracle()->StoreIsUninitialized(id) == UNINITIALIZED) {
- // No type feedback ids or the store is uninitialized.
- // TODO(titzer): no feedback from vector ICs from stores.
- return NoChange();
- } else {
- oracle()->AssignmentReceiverTypes(id, p.name(), &maps);
- }
-
- Node* receiver = node->InputAt(0);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
-
- if (!ENABLE_FAST_PROPERTY_STORES) return NoChange();
-
- Handle<Map> map = maps.first();
- FieldAccess field_access;
- if (!GetInObjectFieldAccess(STORE, map, p.name(), &field_access)) {
- return NoChange();
- }
-
- Node* control = NodeProperties::GetControlInput(node);
- Node* check_success;
- Node* check_failed;
- BuildMapCheck(receiver, map, true, effect, control, &check_success,
- &check_failed);
-
- // Build the actual load.
- Node* value = node->InputAt(1);
- Node* store = graph()->NewNode(simplified()->StoreField(field_access),
- receiver, value, effect, check_success);
-
- // TODO(turbofan): handle slow case instead of deoptimizing.
- Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state_before,
- effect, check_failed);
- NodeProperties::MergeControlToEnd(graph(), common(), deopt);
- ReplaceWithValue(node, store, store, check_success);
- return Replace(store);
-}
-
-
-Reduction JSTypeFeedbackSpecializer::ReduceJSStoreProperty(Node* node) {
- return NoChange();
-}
-
-
-void JSTypeFeedbackSpecializer::BuildMapCheck(Node* receiver, Handle<Map> map,
- bool smi_check, Node* effect,
- Node* control, Node** success,
- Node** fail) {
- Node* if_smi = nullptr;
- if (smi_check) {
- Node* branch_smi = graph()->NewNode(
- common()->Branch(BranchHint::kFalse),
- graph()->NewNode(simplified()->ObjectIsSmi(), receiver), control);
- if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
- control = graph()->NewNode(common()->IfFalse(), branch_smi);
- }
-
- FieldAccess map_access = AccessBuilder::ForMap();
- Node* receiver_map = graph()->NewNode(simplified()->LoadField(map_access),
- receiver, effect, control);
- Node* map_const = jsgraph_->Constant(map);
- Node* cmp = graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
- receiver_map, map_const);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), cmp, control);
- *success = graph()->NewNode(common()->IfTrue(), branch);
- *fail = graph()->NewNode(common()->IfFalse(), branch);
-
- if (if_smi) {
- *fail = graph()->NewNode(common()->Merge(2), *fail, if_smi);
- }
-}
-
-
-// Get the frame state before an operation if it exists and has a valid
-// bailout id.
-Node* JSTypeFeedbackSpecializer::GetFrameStateBefore(Node* node) {
- int count = OperatorProperties::GetFrameStateInputCount(node->op());
- DCHECK_LE(count, 2);
- if (count == 2) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- if (frame_state->opcode() == IrOpcode::kFrameState) {
- BailoutId id = OpParameter<FrameStateInfo>(node).bailout_id();
- if (id != BailoutId::None()) return frame_state;
- }
- }
- return nullptr;
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/compiler/js-type-feedback.h b/chromium/v8/src/compiler/js-type-feedback.h
deleted file mode 100644
index 84060f80964..00000000000
--- a/chromium/v8/src/compiler/js-type-feedback.h
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_TYPE_FEEDBACK_H_
-#define V8_COMPILER_JS_TYPE_FEEDBACK_H_
-
-#include "src/utils.h"
-
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-aux-data.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-
-class TypeFeedbackOracle;
-class SmallMapList;
-class CompilationDependencies;
-
-namespace compiler {
-
-// Stores type feedback information for nodes in the graph in a separate
-// data structure.
-class JSTypeFeedbackTable : public ZoneObject {
- public:
- explicit JSTypeFeedbackTable(Zone* zone);
-
- void Record(Node* node, TypeFeedbackId id);
- void Record(Node* node, FeedbackVectorICSlot slot);
-
- private:
- friend class JSTypeFeedbackSpecializer;
- typedef std::map<NodeId, TypeFeedbackId, std::less<NodeId>,
- zone_allocator<TypeFeedbackId> > TypeFeedbackIdMap;
- typedef std::map<NodeId, FeedbackVectorICSlot, std::less<NodeId>,
- zone_allocator<FeedbackVectorICSlot> >
- FeedbackVectorICSlotMap;
-
- TypeFeedbackIdMap type_feedback_id_map_;
- FeedbackVectorICSlotMap feedback_vector_ic_slot_map_;
-
- TypeFeedbackId FindTypeFeedbackId(Node* node) {
- TypeFeedbackIdMap::const_iterator it =
- type_feedback_id_map_.find(node->id());
- return it == type_feedback_id_map_.end() ? TypeFeedbackId::None()
- : it->second;
- }
-
- FeedbackVectorICSlot FindFeedbackVectorICSlot(Node* node) {
- FeedbackVectorICSlotMap::const_iterator it =
- feedback_vector_ic_slot_map_.find(node->id());
- return it == feedback_vector_ic_slot_map_.end()
- ? FeedbackVectorICSlot::Invalid()
- : it->second;
- }
-};
-
-
-// Specializes a graph to the type feedback recorded in the
-// {js_type_feedback} provided to the constructor.
-class JSTypeFeedbackSpecializer : public AdvancedReducer {
- public:
- enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
-
- JSTypeFeedbackSpecializer(Editor* editor, JSGraph* jsgraph,
- JSTypeFeedbackTable* js_type_feedback,
- TypeFeedbackOracle* oracle,
- Handle<GlobalObject> global_object,
- DeoptimizationMode mode,
- CompilationDependencies* dependencies)
- : AdvancedReducer(editor),
- jsgraph_(jsgraph),
- simplified_(jsgraph->graph()->zone()),
- js_type_feedback_(js_type_feedback),
- oracle_(oracle),
- global_object_(global_object),
- mode_(mode),
- dependencies_(dependencies) {
- CHECK_NOT_NULL(js_type_feedback);
- }
-
- Reduction Reduce(Node* node) override;
-
- // Visible for unit testing.
- Reduction ReduceJSLoadGlobal(Node* node);
- Reduction ReduceJSLoadNamed(Node* node);
- Reduction ReduceJSLoadProperty(Node* node);
- Reduction ReduceJSStoreNamed(Node* node);
- Reduction ReduceJSStoreProperty(Node* node);
-
- private:
- JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
- JSTypeFeedbackTable* js_type_feedback_;
- TypeFeedbackOracle* oracle_;
- Handle<GlobalObject> global_object_;
- DeoptimizationMode const mode_;
- CompilationDependencies* dependencies_;
-
- TypeFeedbackOracle* oracle() { return oracle_; }
- Graph* graph() { return jsgraph_->graph(); }
- JSGraph* jsgraph() { return jsgraph_; }
- CommonOperatorBuilder* common() { return jsgraph_->common(); }
- DeoptimizationMode mode() const { return mode_; }
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- void BuildMapCheck(Node* receiver, Handle<Map> map, bool smi_check,
- Node* effect, Node* control, Node** success, Node** fail);
-
- Node* GetFrameStateBefore(Node* node);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif
diff --git a/chromium/v8/src/compiler/js-typed-lowering.cc b/chromium/v8/src/compiler/js-typed-lowering.cc
index 7c25afcfafa..a31aa0070c6 100644
--- a/chromium/v8/src/compiler/js-typed-lowering.cc
+++ b/chromium/v8/src/compiler/js-typed-lowering.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
@@ -10,44 +11,33 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/state-values-utils.h"
+#include "src/type-cache.h"
#include "src/types.h"
namespace v8 {
namespace internal {
namespace compiler {
-// TODO(turbofan): js-typed-lowering improvements possible
-// - immediately put in type bounds for all new nodes
-// - relax effects from generic but not-side-effecting operations
-
-
-JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone)
- : AdvancedReducer(editor), jsgraph_(jsgraph), simplified_(graph()->zone()) {
- for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
- double min = kMinInt / (1 << k);
- double max = kMaxInt / (1 << k);
- shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
- }
-}
-
+namespace {
// A helper class to construct inline allocations on the simplified operator
// level. This keeps track of the effect chain for initial stores on a newly
// allocated object and also provides helpers for commonly allocated objects.
class AllocationBuilder final {
public:
- AllocationBuilder(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
- Node* effect, Node* control)
+ AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
: jsgraph_(jsgraph),
- simplified_(simplified),
allocation_(nullptr),
effect_(effect),
control_(control) {}
// Primitive allocation of static size.
- void Allocate(int size) {
- allocation_ = graph()->NewNode(
- simplified()->Allocate(), jsgraph()->Constant(size), effect_, control_);
+ void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+ effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+ allocation_ =
+ graph()->NewNode(simplified()->Allocate(pretenure),
+ jsgraph()->Constant(size), effect_, control_);
effect_ = allocation_;
}
@@ -57,12 +47,23 @@ class AllocationBuilder final {
value, effect_, control_);
}
+ // Primitive store into an element.
+ void Store(ElementAccess const& access, Node* index, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+ index, value, effect_, control_);
+ }
+
// Compound allocation of a FixedArray.
- void AllocateArray(int length, Handle<Map> map) {
- Allocate(FixedArray::SizeFor(length));
+ void AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+ map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ Allocate(size, pretenure);
Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(graph()->zone()),
- jsgraph()->Constant(length));
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
// Compound store of a constant into a field.
@@ -70,22 +71,33 @@ class AllocationBuilder final {
Store(access, jsgraph()->Constant(value));
}
- Node* allocation() const { return allocation_; }
- Node* effect() const { return effect_; }
+ void FinishAndChange(Node* node) {
+ NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+ node->ReplaceInput(0, allocation_);
+ node->ReplaceInput(1, effect_);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->FinishRegion());
+ }
+
+ Node* Finish() {
+ return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+ }
protected:
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
- SimplifiedOperatorBuilder* simplified() { return simplified_; }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
private:
JSGraph* const jsgraph_;
- SimplifiedOperatorBuilder* simplified_;
Node* allocation_;
Node* effect_;
Node* control_;
};
+} // namespace
+
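
The rewritten AllocationBuilder brackets each inline allocation in a BeginRegion/FinishRegion pair on the effect chain, so the initializing stores stay atomic with the allocation itself. A toy model of the effect bracketing that AllocateArray produces (names mirror the diff; the mechanics are deliberately simplified and not V8's graph API):

#include <iostream>
#include <string>
#include <vector>

// Toy model: every allocation opens a region, initializing stores are
// threaded through it, and FinishRegion closes it so later passes treat
// the whole group as one atomic effect.
class AllocationBuilderSketch {
 public:
  void Allocate(int size) {
    chain_.push_back("BeginRegion");
    chain_.push_back("Allocate(" + std::to_string(size) + ")");
  }
  void Store(const std::string& field, const std::string& value) {
    chain_.push_back("StoreField(" + field + ", " + value + ")");
  }
  void Finish() { chain_.push_back("FinishRegion"); }
  const std::vector<std::string>& chain() const { return chain_; }

 private:
  std::vector<std::string> chain_;
};

int main() {
  // Mirrors AllocateArray(length, map): allocate, then initialize the map
  // and length fields inside the same region.
  AllocationBuilderSketch a;
  a.Allocate(/*size=*/32);
  a.Store("Map", "fixed_array_map");
  a.Store("FixedArrayLength", "4");
  a.Finish();
  for (const auto& op : a.chain()) std::cout << op << "\n";
}
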
// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
@@ -135,11 +147,6 @@ class JSBinopReduction final {
node_->ReplaceInput(1, ConvertToUI32(right(), right_signedness));
}
- void ConvertInputsToString() {
- node_->ReplaceInput(0, ConvertToString(left()));
- node_->ReplaceInput(1, ConvertToString(right()));
- }
-
void SwapInputs() {
Node* l = left();
Node* r = right();
@@ -211,13 +218,24 @@ class JSBinopReduction final {
return ChangeToPureOperator(op, false, type);
}
- bool IsStrong() { return is_strong(OpParameter<LanguageMode>(node_)); }
+ // TODO(turbofan): Strong mode should be killed soonish!
+ bool IsStrong() const {
+ if (node_->opcode() == IrOpcode::kJSLessThan ||
+ node_->opcode() == IrOpcode::kJSLessThanOrEqual ||
+ node_->opcode() == IrOpcode::kJSGreaterThan ||
+ node_->opcode() == IrOpcode::kJSGreaterThanOrEqual) {
+ return is_strong(OpParameter<LanguageMode>(node_));
+ }
+ return is_strong(BinaryOperationParametersOf(node_->op()).language_mode());
+ }
- bool OneInputIs(Type* t) { return left_type()->Is(t) || right_type()->Is(t); }
+ bool LeftInputIs(Type* t) { return left_type()->Is(t); }
- bool BothInputsAre(Type* t) {
- return left_type()->Is(t) && right_type()->Is(t);
- }
+ bool RightInputIs(Type* t) { return right_type()->Is(t); }
+
+ bool OneInputIs(Type* t) { return LeftInputIs(t) || RightInputIs(t); }
+
+ bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
bool OneInputCannotBe(Type* t) {
return !left_type()->Maybe(t) || !right_type()->Maybe(t);
@@ -247,16 +265,6 @@ class JSBinopReduction final {
JSTypedLowering* lowering_; // The containing lowering instance.
Node* node_; // The original node.
- Node* ConvertToString(Node* node) {
- // Avoid introducing too many eager ToString() operations.
- Reduction reduced = lowering_->ReduceJSToStringInput(node);
- if (reduced.Changed()) return reduced.replacement();
- Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
- effect(), control());
- update_effect(n);
- return n;
- }
-
Node* CreateFrameStateForLeftInput(Node* frame_state) {
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
@@ -376,8 +384,8 @@ class JSBinopReduction final {
// Wire conversions to existing {IfException} continuation.
Node* exception_merge = if_exception;
Node* exception_value =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), left_exception,
- right_exception, exception_merge);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ left_exception, right_exception, exception_merge);
Node* exception_effect =
graph()->NewNode(common()->EffectPhi(2), left_exception,
right_exception, exception_merge);
@@ -416,7 +424,34 @@ class JSBinopReduction final {
};
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+
+
+JSTypedLowering::JSTypedLowering(Editor* editor,
+ CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ dependencies_(dependencies),
+ flags_(flags),
+ jsgraph_(jsgraph),
+ true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
+ false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+ the_hole_type_(
+ Type::Constant(factory()->the_hole_value(), graph()->zone())),
+ type_cache_(TypeCache::Get()) {
+ for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
+ double min = kMinInt / (1 << k);
+ double max = kMaxInt / (1 << k);
+ shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+ }
+}
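
The constructor loop precomputes the value range of an int32 after an arithmetic right shift by k, by dividing kMinInt and kMaxInt by 2^k. A quick standalone check of that arithmetic (the array size of 4 is an assumption; it is not visible in this hunk):

#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  // Reproduces the loop above: the type of x >> k for int32 x is the
  // range [kMinInt / 2^k, kMaxInt / 2^k].
  const double kMinInt = std::numeric_limits<int32_t>::min();
  const double kMaxInt = std::numeric_limits<int32_t>::max();
  for (int k = 0; k < 4; ++k) {
    std::cout << "k=" << k << ": [" << kMinInt / (1 << k) << ", "
              << kMaxInt / (1 << k) << "]\n";
  }
  // k=1 prints [-1073741824, 1073741823.5]; the fractional bound is fine
  // because Type::Range works over doubles.
}
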
+
+
Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
@@ -447,6 +482,8 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
    // JSModulus(x:number, y:number) => NumberModulus(x, y)
@@ -459,6 +496,8 @@ Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
const Operator* numberOp) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
if (r.BothInputsAre(Type::Number())) {
@@ -473,6 +512,8 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.IsStrong()) {
if (r.BothInputsAre(Type::Number())) {
@@ -491,6 +532,8 @@ Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
Signedness left_signedness,
const Operator* shift_op) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.IsStrong()) {
if (r.BothInputsAre(Type::Number())) {
@@ -507,6 +550,8 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
// If both inputs are definitely strings, perform a string comparison.
@@ -578,6 +623,8 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
@@ -587,18 +634,40 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
invert);
}
+ if (r.BothInputsAre(Type::Boolean())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+ invert);
+ }
if (r.BothInputsAre(Type::Receiver())) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Receiver()), invert);
}
- // TODO(turbofan): js-typed-lowering of Equal(undefined)
- // TODO(turbofan): js-typed-lowering of Equal(null)
- // TODO(turbofan): js-typed-lowering of Equal(boolean)
+ if (r.OneInputIs(Type::NullOrUndefined())) {
+ Callable const callable = CodeFactory::CompareNilIC(isolate(), kNullValue);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->RemoveInput(r.LeftInputIs(Type::NullOrUndefined()) ? 0 : 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ if (invert) {
+ // Insert a BooleanNot to invert the value.
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
+ node->ReplaceUses(value);
+ // Note: ReplaceUses() rewires every use, including the input of the new
+ // BooleanNot, so point that input back at {node} here.
+ value->ReplaceInput(0, node);
+ return Replace(value);
+ }
+ return Changed(node);
+ }
return NoChange();
}
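
// The inversion idiom above, in isolation: wrap the call in BooleanNot,
// repoint all uses at the wrapper, then restore the wrapper's own input,
// because ReplaceUses() rewrote that one as well. A minimal sketch reusing
// only the node API already visible in this file (illustrative, not taken
// from the V8 sources):
Node* InvertValue(Graph* graph, SimplifiedOperatorBuilder* simplified,
                  Node* node) {
  Node* inverted = graph->NewNode(simplified->BooleanNot(), node);
  node->ReplaceUses(inverted);      // every former use of {node} sees !node
  inverted->ReplaceInput(0, node);  // undo the self-use introduced above
  return inverted;
}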
Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.left() == r.right()) {
// x === x is always true if x != NaN
@@ -617,6 +686,10 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
return Replace(replacement);
}
}
+ if (r.OneInputIs(the_hole_type_)) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
+ invert);
+ }
if (r.OneInputIs(Type::Undefined())) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Undefined()), invert);
@@ -653,46 +726,13 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
}
-Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
- Node* const input = node->InputAt(0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(Type::Boolean())) {
- // JSUnaryNot(x:boolean) => BooleanNot(x)
- RelaxEffectsAndControls(node);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- } else if (input_type->Is(Type::OrderedNumber())) {
- // JSUnaryNot(x:number) => NumberEqual(x,#0)
- RelaxEffectsAndControls(node);
- node->ReplaceInput(1, jsgraph()->ZeroConstant());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, simplified()->NumberEqual());
- return Changed(node);
- } else if (input_type->Is(Type::String())) {
- // JSUnaryNot(x:string) => NumberEqual(x.length,#0)
- FieldAccess const access = AccessBuilder::ForStringLength(graph()->zone());
- // It is safe for the load to be effect-free (i.e. not linked into effect
- // chain) because we assume String::length to be immutable.
- Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
- ReplaceWithValue(node, node, length);
- node->ReplaceInput(0, length);
- node->ReplaceInput(1, jsgraph()->ZeroConstant());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, simplified()->NumberEqual());
- return Changed(node);
- }
- return NoChange();
-}
-
-
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
Node* const input = node->InputAt(0);
Type* const input_type = NodeProperties::GetType(input);
+ Node* const effect = NodeProperties::GetEffectInput(node);
if (input_type->Is(Type::Boolean())) {
// JSToBoolean(x:boolean) => x
- ReplaceWithValue(node, input);
+ ReplaceWithValue(node, input, effect);
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
@@ -704,11 +744,9 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return Changed(node);
} else if (input_type->Is(Type::String())) {
// JSToBoolean(x:string) => NumberLessThan(#0,x.length)
- FieldAccess const access = AccessBuilder::ForStringLength(graph()->zone());
- // It is safe for the load to be effect-free (i.e. not linked into effect
- // chain) because we assume String::length to be immutable.
+ FieldAccess const access = AccessBuilder::ForStringLength();
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
+ effect, graph()->start());
ReplaceWithValue(node, node, length);
node->ReplaceInput(0, jsgraph()->ZeroConstant());
node->ReplaceInput(1, length);
@@ -727,6 +765,21 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (result.Changed()) return result;
return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
}
+ // Check for a Select that maps the hole NaN (signaling NaN) to undefined;
+ // ToNumber of that pattern simply yields the number input again.
+ if (input->opcode() == IrOpcode::kSelect) {
+ Node* check = NodeProperties::GetValueInput(input, 0);
+ Node* vtrue = NodeProperties::GetValueInput(input, 1);
+ Type* vtrue_type = NodeProperties::GetType(vtrue);
+ Node* vfalse = NodeProperties::GetValueInput(input, 2);
+ Type* vfalse_type = NodeProperties::GetType(vfalse);
+ if (vtrue_type->Is(Type::Undefined()) && vfalse_type->Is(Type::Number())) {
+ if (check->opcode() == IrOpcode::kNumberIsHoleNaN &&
+ check->InputAt(0) == vfalse) {
+ // JSToNumber(Select(NumberIsHoleNaN(x), y:undefined, x:number)) => x
+ return Replace(vfalse);
+ }
+ }
+ }
// Check if we have a cached conversion.
Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::Number())) {
@@ -791,13 +844,18 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type->Is(Type::String())) {
return Changed(input); // JSToString(x:string) => x
}
+ if (input_type->Is(Type::Boolean())) {
+ return Replace(graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged), input,
+ jsgraph()->HeapConstant(factory()->true_string()),
+ jsgraph()->HeapConstant(factory()->false_string())));
+ }
if (input_type->Is(Type::Undefined())) {
return Replace(jsgraph()->HeapConstant(factory()->undefined_string()));
}
if (input_type->Is(Type::Null())) {
return Replace(jsgraph()->HeapConstant(factory()->null_string()));
}
- // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
// TODO(turbofan): js-typed-lowering of ToString(x:number)
return NoChange();
}
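
// The Select inserted for the boolean case above is just the graph form of a
// conditional move between two constants; in JavaScript terms (illustrative):
//   String(b)  ===  (b ? "true" : "false")   // for b of type boolean
// Both alternatives are constants, which is why the Select needs no effect
// or control inputs.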
@@ -815,16 +873,83 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSLoadGlobal(Node* node) {
- // Optimize global constants like "undefined", "Infinity", and "NaN".
- Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
- Handle<Object> constant_value = factory()->GlobalConstantFor(name);
- if (!constant_value.is_null()) {
- Node* constant = jsgraph()->Constant(constant_value);
- ReplaceWithValue(node, constant);
- return Replace(constant);
+Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!receiver_type->Is(Type::Receiver())) {
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
+ if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+ NodeProperties::IsExceptionalCall(node)) {
+ // ToObject throws for null or undefined inputs.
+ return NoChange();
+ }
+
+ // Check whether {receiver} is a Smi.
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+
+ // Determine the instance type of {receiver}.
+ Node* receiver_map = efalse0 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, efalse0, if_false0);
+ Node* receiver_instance_type = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ receiver_map, efalse0, if_false0);
+
+ // Check whether {receiver} is a spec object.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Node* check1 =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ receiver_instance_type);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+
+ // Convert {receiver} using the ToObjectStub.
+ Node* if_convert =
+ graph()->NewNode(common()->Merge(2), if_true0, if_false1);
+ Node* econvert =
+ graph()->NewNode(common()->EffectPhi(2), etrue0, efalse1, if_convert);
+ Node* rconvert;
+ {
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ rconvert = econvert = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, frame_state, econvert, if_convert);
+ }
+
+ // The {receiver} is already a spec object.
+ Node* if_done = if_true1;
+ Node* edone = etrue1;
+ Node* rdone = receiver;
+
+ control = graph()->NewNode(common()->Merge(2), if_convert, if_done);
+ effect = graph()->NewNode(common()->EffectPhi(2), econvert, edone, control);
+ receiver =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ rconvert, rdone, control);
}
- return NoChange();
+ ReplaceWithValue(node, receiver, effect, control);
+ return Changed(receiver);
}
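
// The diamond built above encodes, in graph form, roughly this check (a
// sketch of the intended semantics; ToObjectStub stands for the
// CodeFactory::ToObject call and is not a real helper):
//
//   if (!IsSmi(receiver) &&
//       instance_type(receiver) >= FIRST_JS_RECEIVER_TYPE) {
//     result = receiver;               // already a spec object ("done" path)
//   } else {
//     result = ToObjectStub(receiver); // Smi or other primitive ("convert")
//   }
//
// with the two paths joined by the final Merge/EffectPhi/Phi triple.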
@@ -834,17 +959,37 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
Type* receiver_type = NodeProperties::GetType(receiver);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<Name> name = LoadNamedParametersOf(node->op()).name();
+ Handle<Name> name = NamedAccessOf(node->op()).name();
// Optimize "length" property of strings.
if (name.is_identical_to(factory()->length_string()) &&
receiver_type->Is(Type::String())) {
- Node* value = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForStringLength(graph()->zone())),
- receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
+ // Optimize "prototype" property of functions.
+ if (name.is_identical_to(factory()->prototype_string()) &&
+ receiver_type->IsConstant() &&
+ receiver_type->AsConstant()->Value()->IsJSFunction()) {
+ // TODO(turbofan): This lowering might not kick in if we ever lower
+ // the C++ accessor for "prototype" in an earlier optimization pass.
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(receiver_type->AsConstant()->Value());
+ if (function->has_initial_map()) {
+ // We need to add a code dependency on the initial map of the {function}
+ // in order to be notified about changes to the "prototype" of {function},
+ // so it doesn't make sense to continue unless deoptimization is enabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* value =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
return NoChange();
}
@@ -860,7 +1005,8 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
if (!array->GetBuffer()->was_neutered()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
- size_t const k = ElementSizeLog2Of(access.machine_type());
+ size_t const k =
+ ElementSizeLog2Of(access.machine_type().representation());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
if (key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
@@ -906,7 +1052,8 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (!array->GetBuffer()->was_neutered()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
- size_t const k = ElementSizeLog2Of(access.machine_type());
+ size_t const k =
+ ElementSizeLog2Of(access.machine_type().representation());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
if (access.external_array_type() != kExternalUint8ClampedArray &&
@@ -932,14 +1079,6 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
frame_state_for_to_number, effect, control);
}
}
- // For integer-typed arrays, convert to the integer type.
- if (TypeOf(access.machine_type()) == kTypeInt32 &&
- !value_type->Is(Type::Signed32())) {
- value = graph()->NewNode(simplified()->NumberToInt32(), value);
- } else if (TypeOf(access.machine_type()) == kTypeUint32 &&
- !value_type->Is(Type::Unsigned32())) {
- value = graph()->NewNode(simplified()->NumberToUint32(), value);
- }
// Check if we can avoid the bounds check.
if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
RelaxControls(node);
@@ -975,17 +1114,187 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled) ||
+ (flags() & kDisableBinaryOpReduction)) {
+ return NoChange();
+ }
+
+ // If we are in a try block, don't optimize since the runtime call
+ // in the proxy case can throw.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+ JSBinopReduction r(this, node);
+ Node* effect = r.effect();
+ Node* control = r.control();
+
+ if (!r.right_type()->IsConstant() ||
+ !r.right_type()->AsConstant()->Value()->IsJSFunction()) {
+ return NoChange();
+ }
+
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ if (!function->IsConstructor() ||
+ function->map()->has_non_instance_prototype()) {
+ return NoChange();
+ }
+
+ JSFunction::EnsureHasInitialMap(function);
+ DCHECK(function->has_initial_map());
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ this->dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* prototype =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+
+ Node* if_is_smi = nullptr;
+ Node* e_is_smi = nullptr;
+ // The Smi check is only needed if the left hand side may be a Smi.
+ if (r.left_type()->Maybe(Type::TaggedSigned())) {
+ Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+ Node* branch_is_smi =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
+ if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+ e_is_smi = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
+ }
+
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ r.left(), effect, control);
+
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+
+ Node* loop_effect = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+
+ Node* loop_object_map =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ object_map, r.left(), loop);
+
+ // Check if the lhs needs access checks.
+ Node* map_bit_field = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMapBitField()),
+ loop_object_map, loop_effect, control);
+ int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
+ Node* is_access_check_needed_num =
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
+ jsgraph()->Uint32Constant(is_access_check_needed_bit));
+ Node* is_access_check_needed =
+ graph()->NewNode(machine()->Word32Equal(), is_access_check_needed_num,
+ jsgraph()->Uint32Constant(is_access_check_needed_bit));
+
+ Node* branch_is_access_check_needed = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
+ Node* if_is_access_check_needed =
+ graph()->NewNode(common()->IfTrue(), branch_is_access_check_needed);
+ Node* e_is_access_check_needed = effect;
+
+ control =
+ graph()->NewNode(common()->IfFalse(), branch_is_access_check_needed);
+
+ // Check if the lhs is a proxy.
+ Node* map_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ loop_object_map, loop_effect, control);
+ Node* is_proxy = graph()->NewNode(machine()->Word32Equal(), map_instance_type,
+ jsgraph()->Uint32Constant(JS_PROXY_TYPE));
+ Node* branch_is_proxy =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
+ Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
+ Node* e_is_proxy = effect;
+
+
+ Node* runtime_has_in_proto_chain = control = graph()->NewNode(
+ common()->Merge(2), if_is_access_check_needed, if_is_proxy);
+ effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
+ e_is_proxy, control);
+
+ // If we need an access check or the object is a Proxy, make a runtime call
+ // to finish the lowering.
+ Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2), r.left(),
+ prototype, context, frame_state, effect, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
+
+ Node* object_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ loop_object_map, loop_effect, control);
+
+ // Check if object prototype is equal to function prototype.
+ Node* eq_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, prototype);
+ Node* branch_eq_proto =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
+ Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
+ Node* e_eq_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
+
+ // If not, check whether the object prototype is null (end of the chain).
+ Node* null_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, jsgraph()->NullConstant());
+ Node* branch_null_proto = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), null_proto, control);
+ Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
+ Node* e_null_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+ Node* load_object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ object_prototype, effect, control);
+ // Close the loop.
+ loop_effect->ReplaceInput(1, effect);
+ loop_object_map->ReplaceInput(1, load_object_map);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(3), runtime_has_in_proto_chain,
+ if_eq_proto, if_null_proto);
+ effect = graph()->NewNode(common()->EffectPhi(3),
+ bool_result_runtime_has_in_proto_chain_case,
+ e_eq_proto, e_null_proto, control);
+
+ Node* result = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 3),
+ bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
+ jsgraph()->FalseConstant(), control);
+
+ if (if_is_smi != nullptr) {
+ DCHECK_NOT_NULL(e_is_smi);
+ control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+ result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), result, control);
+ }
+
+ ReplaceWithValue(node, result, effect, control);
+ return Changed(result);
+}
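
// What the loop above computes, written as straight-line pseudocode (an
// illustrative sketch; HasInPrototypeChain stands for the
// Runtime::kHasInPrototypeChain fallback taken for access-checked maps and
// for proxies):
//
//   if (IsSmi(lhs)) return false;
//   for (map = lhs->map();;) {
//     if (map->is_access_check_needed() || map->instance_type() == JS_PROXY_TYPE)
//       return HasInPrototypeChain(lhs, prototype);  // runtime fallback
//     proto = map->prototype();
//     if (proto == prototype) return true;   // found it
//     if (proto == null) return false;       // end of chain
//     map = proto->map();
//   }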
+
+
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = graph()->start();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control));
+ Node* previous = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetValueInput(node, 0), effect, control);
+ node->ReplaceInput(0, previous);
}
node->ReplaceInput(1, effect);
node->ReplaceInput(2, control);
@@ -999,16 +1308,17 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = graph()->start();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control));
+ Node* previous = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetValueInput(node, 0), effect, control);
+ node->ReplaceInput(0, previous);
}
node->RemoveInput(2);
+ node->ReplaceInput(2, effect);
NodeProperties::ChangeOp(
node,
simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
@@ -1016,124 +1326,184 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSLoadDynamicGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicGlobal, node->opcode());
- DynamicGlobalAccess const& access = DynamicGlobalAccessOf(node->op());
- Node* const vector = NodeProperties::GetValueInput(node, 0);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const state1 = NodeProperties::GetFrameStateInput(node, 0);
- Node* const state2 = NodeProperties::GetFrameStateInput(node, 1);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- if (access.RequiresFullCheck()) return NoChange();
-
- // Perform checks whether the fast mode applies, by looking for any extension
- // object which might shadow the optimistic declaration.
- uint32_t bitset = access.check_bitset();
- Node* check_true = control;
- Node* check_false = graph()->NewNode(common()->Merge(0));
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = graph()->NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- context, context, effect);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()),
- load, jsgraph()->ZeroConstant());
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
- check_true);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- check_false->AppendInput(graph()->zone(), if_false);
- NodeProperties::ChangeOp(check_false,
- common()->Merge(check_false->InputCount()));
- check_true = if_true;
- }
-
- // Fast case, because variable is not shadowed. Perform global object load.
- Node* global = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true), context,
- context, effect);
- Node* fast = graph()->NewNode(
- javascript()->LoadGlobal(access.name(), access.feedback(),
- access.typeof_mode()),
- context, global, vector, context, state1, state2, global, check_true);
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- uint32_t check_bitset = DynamicGlobalAccess::kFullCheckRequired;
- Node* slow = graph()->NewNode(
- javascript()->LoadDynamicGlobal(access.name(), check_bitset,
- access.feedback(), access.typeof_mode()),
- vector, context, context, state1, state2, effect, check_false);
-
- // Replace value, effect and control uses accordingly.
- Node* new_control =
- graph()->NewNode(common()->Merge(2), check_true, check_false);
- Node* new_effect =
- graph()->NewNode(common()->EffectPhi(2), fast, slow, new_control);
- Node* new_value = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fast,
- slow, new_control);
- ReplaceWithValue(node, new_value, new_effect, new_control);
- return Changed(new_value);
+Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
+ ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* context = NodeProperties::GetContextInput(node);
+ Type* context_type = NodeProperties::GetType(context);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (!receiver_type->Is(Type::Receiver())) {
+ if (receiver_type->Is(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNullOrUndefined) {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ receiver = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ receiver = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, effect);
+ }
+ } else if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNotNullOrUndefined) {
+ receiver = effect =
+ graph()->NewNode(javascript()->ToObject(), receiver, context,
+ frame_state, effect, control);
+ } else {
+ // Check {receiver} for undefined.
+ Node* check0 =
+ graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
+ receiver, jsgraph()->UndefinedConstant());
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Check {receiver} for null.
+ Node* check1 =
+ graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
+ receiver, jsgraph()->NullConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+
+ // Convert {receiver} using ToObject.
+ Node* if_convert = if_false1;
+ Node* econvert = effect;
+ Node* rconvert;
+ {
+ rconvert = econvert =
+ graph()->NewNode(javascript()->ToObject(), receiver, context,
+ frame_state, econvert, if_convert);
+ }
+
+ // Replace {receiver} with global proxy of {context}.
+ Node* if_global =
+ graph()->NewNode(common()->Merge(2), if_true0, if_true1);
+ Node* eglobal = effect;
+ Node* rglobal;
+ {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ rglobal = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* native_context = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, eglobal);
+ rglobal = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, eglobal);
+ }
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_convert, if_global);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), econvert, eglobal, control);
+ receiver =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ rconvert, rglobal, control);
+ }
+ }
+ ReplaceWithValue(node, receiver, effect, control);
+ return Changed(receiver);
}
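
// Receiver conversion in a nutshell (a sketch of the sloppy-mode semantics
// the graph above encodes; GlobalProxyOf is illustrative, not a V8 helper):
//
//   if (receiver == null || receiver == undefined)
//     receiver = GlobalProxyOf(context);   // sloppy-mode default receiver
//   else if (!IsJSReceiver(receiver))
//     receiver = ToObject(receiver);       // box primitives
//   // otherwise: already a receiver, passed through unchanged
//
// The three-way branch in the code handles the case where the type system
// cannot rule out any of these possibilities statically.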
-Reduction JSTypedLowering::ReduceJSLoadDynamicContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamicContext, node->opcode());
- DynamicContextAccess const& access = DynamicContextAccessOf(node->op());
- ContextAccess const& context_access = access.context_access();
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const state = NodeProperties::GetFrameStateInput(node, 0);
+namespace {
+
+// Maximum instance size for which allocations will be inlined.
+const int kMaxInlineInstanceSize = 64 * kPointerSize;
+
+
+// Checks whether allocation using the given constructor can be inlined.
+bool IsAllocationInlineable(Handle<JSFunction> constructor) {
+ // TODO(bmeurer): Further relax restrictions on inlining, i.e.
+ // instance type and maybe instance size (inobject properties
+ // are limited anyway by the runtime).
+ return constructor->has_initial_map() &&
+ constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
+ constructor->initial_map()->instance_size() < kMaxInlineInstanceSize;
+}
+
+} // namespace
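
// For scale: on a 64-bit target (kPointerSize == 8) the cutoff above is
// 64 * 8 = 512 bytes, i.e. the three header words (map, properties,
// elements) plus up to roughly 60 in-object fields; larger instances take
// the runtime path below.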
+
+
+Reduction JSTypedLowering::ReduceJSCreate(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
+ Node* const target = NodeProperties::GetValueInput(node, 0);
+ Type* const target_type = NodeProperties::GetType(target);
+ Node* const new_target = NodeProperties::GetValueInput(node, 1);
Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- if (access.RequiresFullCheck()) return NoChange();
-
- // Perform checks whether the fast mode applies, by looking for any extension
- // object which might shadow the optimistic declaration.
- uint32_t bitset = access.check_bitset();
- Node* check_true = control;
- Node* check_false = graph()->NewNode(common()->Merge(0));
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = graph()->NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- context, context, effect);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()),
- load, jsgraph()->ZeroConstant());
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check,
- check_true);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- check_false->AppendInput(graph()->zone(), if_false);
- NodeProperties::ChangeOp(check_false,
- common()->Merge(check_false->InputCount()));
- check_true = if_true;
- }
-
- // Fast case, because variable is not shadowed. Perform context slot load.
- Node* fast =
- graph()->NewNode(javascript()->LoadContext(context_access.depth(),
- context_access.index(), false),
- context, context, effect);
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- uint32_t check_bitset = DynamicContextAccess::kFullCheckRequired;
- Node* slow =
- graph()->NewNode(javascript()->LoadDynamicContext(
- access.name(), check_bitset, context_access.depth(),
- context_access.index()),
- context, context, state, effect, check_false);
-
- // Replace value, effect and control uses accordingly.
- Node* new_control =
- graph()->NewNode(common()->Merge(2), check_true, check_false);
- Node* new_effect =
- graph()->NewNode(common()->EffectPhi(2), fast, slow, new_control);
- Node* new_value = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fast,
- slow, new_control);
- ReplaceWithValue(node, new_value, new_effect, new_control);
- return Changed(new_value);
+ // TODO(turbofan): Add support for NewTarget passed to JSCreate.
+ if (target != new_target) return NoChange();
+ // Extract constructor function.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ DCHECK(constructor->IsConstructor());
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ constructor->CompleteInobjectSlackTrackingIfActive();
+
+ // TODO(bmeurer): We fall back to the runtime in case we cannot inline
+ // the allocation here, which is sort of expensive. We should think about
+ // a soft fallback to some NewObjectCodeStub.
+ if (IsAllocationInlineable(constructor)) {
+ // Compute instance size from initial map of {constructor}.
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ int const instance_size = initial_map->instance_size();
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} of the {constructor} changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ // Emit code to allocate the JSObject instance for the {constructor}.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(instance_size);
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
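
// The inline allocation above produces a fully initialized object with the
// following layout (sketch, for an initial map with N in-object properties):
//
//   +0 * kPointerSize   map          <- initial_map (guarded by dependency)
//   +1 * kPointerSize   properties   <- empty_fixed_array
//   +2 * kPointerSize   elements     <- empty_fixed_array
//   +3 * kPointerSize   N in-object fields, all initialized to undefined
//
// so no further stores are needed after the Allocate/FinishAndChange pair.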
+
+
+namespace {
+
+// Retrieves the frame state holding actual argument values.
+Node* GetArgumentsFrameState(Node* frame_state) {
+ Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+ return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+ ? outer_state
+ : frame_state;
}
+} // namespace
+
Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
@@ -1144,36 +1514,305 @@ Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
// arguments object, but only for non-inlined (i.e. outermost) frames.
- if (p.type() != CreateArgumentsParameters::kRestArray &&
- outer_state->opcode() != IrOpcode::kFrameState) {
- Handle<SharedFunctionInfo> shared;
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
Isolate* isolate = jsgraph()->isolate();
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
- Callable callable = CodeFactory::ArgumentsAccess(
- isolate, unmapped, shared->has_duplicate_parameters());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
- const Operator* new_op = common()->Call(desc);
int parameter_count = state_info.parameter_count() - 1;
int parameter_offset = parameter_count * kPointerSize;
int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* parameter_pointer = graph()->NewNode(
machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
jsgraph()->IntPtrConstant(offset));
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(parameter_count));
- node->InsertInput(graph()->zone(), 3, parameter_pointer);
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
+
+ if (p.type() != CreateArgumentsParameters::kRestArray) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
+ Callable callable = CodeFactory::ArgumentsAccess(
+ isolate, unmapped, shared->has_duplicate_parameters());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 3, parameter_pointer);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ } else {
+ Callable callable = CodeFactory::RestArgumentsAccess(isolate);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->ReplaceInput(1, jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 2, parameter_pointer);
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(p.start_index()));
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+ } else if (outer_state->opcode() == IrOpcode::kFrameState) {
+ // Use inline allocation for all mapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared->has_duplicate_parameters()) return NoChange();
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ bool has_aliased_arguments = false;
+ Node* const elements = AllocateAliasedArguments(
+ effect, control, args_state, context, shared, &has_aliased_arguments);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+ : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
+ a.Allocate(Heap::kSloppyArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (p.type() == CreateArgumentsParameters::kUnmappedArguments) {
+ // Use inline allocation for all unmapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ Node* const elements = AllocateArguments(effect, control, args_state);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::STRICT_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
+ a.Allocate(Heap::kStrictArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (p.type() == CreateArgumentsParameters::kRestArray) {
+ // Use inline allocation for all rest arrays within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by the rest array.
+ Node* const elements =
+ AllocateRestArguments(effect, control, args_state, p.start_index());
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the JSArray object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_jsarray_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the jsarray.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Subtract one for the receiver.
+ int argument_count = args_state_info.parameter_count() - 1;
+ int length = std::max(0, argument_count - p.start_index());
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+ jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
}
return NoChange();
}
+Reduction JSTypedLowering::ReduceNewArray(Node* node, Node* length,
+ int capacity,
+ Handle<AllocationSite> site) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Extract transition and tenuring feedback from the {site} and add
+ // appropriate code dependencies on the {site} if deoptimization is
+ // enabled.
+ PretenureFlag pretenure = site->GetPretenureMode();
+ ElementsKind elements_kind = site->GetElementsKind();
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeTenuringDecision(site);
+ dependencies()->AssumeTransitionStable(site);
+ }
+
+ // Retrieve the initial map for the array from the appropriate native context.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* js_array_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
+ native_context, native_context, effect);
+
+ // Set up elements and properties.
+ Node* elements;
+ if (capacity == 0) {
+ elements = jsgraph()->EmptyFixedArrayConstant();
+ } else {
+ elements = effect =
+ AllocateElements(effect, control, elements_kind, capacity, pretenure);
+ }
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArray::kSize, pretenure);
+ a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateArray(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+
+ // TODO(bmeurer): Optimize the subclassing case.
+ if (target != new_target) return NoChange();
+
+ // Check if we have a feedback {site} on the {node}.
+ Handle<AllocationSite> site = p.site();
+ if (p.site().is_null()) return NoChange();
+
+ // Attempt to inline calls to the Array constructor for the relevant cases
+ // where either no arguments are provided, or exactly one unsigned number
+ // argument is given.
+ if (site->CanInlineCall()) {
+ if (p.arity() == 0) {
+ Node* length = jsgraph()->ZeroConstant();
+ int capacity = JSArray::kPreallocatedArrayElements;
+ return ReduceNewArray(node, length, capacity, site);
+ } else if (p.arity() == 1) {
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Type* length_type = NodeProperties::GetType(length);
+ if (length_type->Is(type_cache_.kElementLoopUnrollType)) {
+ int capacity = static_cast<int>(length_type->Max());
+ return ReduceNewArray(node, length, capacity, site);
+ }
+ }
+ }
+
+ // Reduce {node} to the appropriate ArrayConstructorStub backend.
+ // Note that these stubs "behave" like JSFunctions, which means they
+ // expect a receiver on the stack, which they remove. We just push
+ // undefined for the receiver.
+ ElementsKind elements_kind = site->GetElementsKind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ if (p.arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else if (p.arity() == 1) {
+ // TODO(bmeurer): Optimize for the 0 length non-holey case?
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else {
+ int const arity = static_cast<int>(p.arity());
+ ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ }
+}
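
// Examples of the dispatch above (illustrative):
//   new Array()            -> inlined via ReduceNewArray (default capacity)
//   new Array(4)           -> inlined via ReduceNewArray if the length type
//                             is a small constant range
//   new Array(n)           -> ArraySingleArgumentConstructorStub, holey kind,
//                             since n is not statically known
//   new Array(a, b, c)     -> ArrayNArgumentsConstructorStub with arity 3
// The no-argument stub is still reached when the site disallows inlining.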
+
+
Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
@@ -1200,18 +1839,50 @@ Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSCreateIterResultObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* done = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Load the JSIteratorResult map for the {context}.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* iterator_result_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+ native_context, native_context, effect);
+
+ // Emit code to allocate the JSIteratorResult instance.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(JSIteratorResult::kSize);
+ a.Store(AccessBuilder::ForMap(), iterator_result_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
+ a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
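
// The allocation above materializes the spec's CreateIterResultObject, i.e.
// the equivalent of the JS literal (illustrative):
//   { value: value, done: done }
// but with the native context's JSIteratorResult map, so the resulting
// object's shape is statically known.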
+
+
Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
- HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
- int length = Handle<FixedArray>::cast(mconst.Value())->length();
- int flags = OpParameter<int>(node->op());
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
+ int const length = constants->length();
+ int const flags = p.flags();
// Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
// initial length limit for arrays with "fast" elements kind.
// TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
if ((flags & ArrayLiteral::kShallowElements) != 0 &&
(flags & ArrayLiteral::kIsStrong) == 0 &&
- length < JSObject::kInitialMaxFastElementArray) {
+ length < JSArray::kInitialMaxFastElementArray) {
Isolate* isolate = jsgraph()->isolate();
Callable callable = CodeFactory::FastCloneShallowArray(isolate);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1221,7 +1892,11 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
: CallDescriptor::kNoFlags);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* constant_elements = jsgraph()->HeapConstant(constants);
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, literal_index);
+ node->InsertInput(graph()->zone(), 3, constant_elements);
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1232,10 +1907,11 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
- HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
// Constants are pairs, see ObjectLiteral::properties_count().
- int length = Handle<FixedArray>::cast(mconst.Value())->length() / 2;
- int flags = OpParameter<int>(node->op());
+ int const length = constants->length() / 2;
+ int const flags = p.flags();
// Use the FastCloneShallowObjectStub only for shallow boilerplates without
// elements up to the number of properties that the stubs can handle.
@@ -1250,8 +1926,13 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
: CallDescriptor::kNoFlags);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(flags));
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* literal_flags = jsgraph()->SmiConstant(flags);
+ Node* constant_elements = jsgraph()->HeapConstant(constants);
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, literal_index);
+ node->InsertInput(graph()->zone(), 3, constant_elements);
+ node->InsertInput(graph()->zone(), 4, literal_flags);
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1260,74 +1941,200 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* input_type = NodeProperties::GetType(input);
- if (FLAG_turbo_allocate && input_type->Is(Type::Receiver())) {
- // JSCreateWithContext(o:receiver, f)
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const closure = NodeProperties::GetValueInput(node, 1);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const load = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
- AllocationBuilder a(jsgraph(), simplified(), effect, control);
+Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
+ int slot_count = OpParameter<int>(node->op());
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // Use inline allocation for function contexts up to a size limit.
+ if (slot_count < kFunctionContextAllocationLimit) {
+    // JSCreateFunctionContext[slot_count < limit](fun)
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->TheHoleConstant();
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+ int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
+ a.AllocateArray(context_length, factory()->function_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), input);
- a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
- // TODO(mstarzinger): We could mutate {node} into the allocation instead.
- NodeProperties::SetType(a.allocation(), NodeProperties::GetType(node));
- ReplaceWithValue(node, node, a.effect());
- node->ReplaceInput(0, a.allocation());
- node->ReplaceInput(1, a.effect());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->Finish(1));
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+  // Use the FastNewContextStub only for function contexts up to the maximum
+  // size.
+ if (slot_count <= FastNewContextStub::kMaximumSlots) {
+ Isolate* isolate = jsgraph()->isolate();
+ Callable callable = CodeFactory::FastNewContext(isolate, slot_count);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
+
return NoChange();
}
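Every inlined context allocation in this file (the function, with, catch, and block context reductions) assumes the same fixed header. A comment-only sketch of the slot ordering, derived from the Context::*_INDEX constants stored above and the MIN_CONTEXT_SLOTS == 4 assertion; the concrete index values are an assumption about this V8 revision:

    // Sketch of the assumed context layout:
    //   slot 0  CLOSURE_INDEX         the owning JSFunction
    //   slot 1  PREVIOUS_INDEX        the syntactically enclosing context
    //   slot 2  EXTENSION_INDEX       hole / with-object / name / ScopeInfo
    //   slot 3  NATIVE_CONTEXT_INDEX  loaded once via JSLoadContext above
    //   slot 4+                       user slots, pre-filled with undefined
    //                                 (function contexts) or the hole
    //                                 (block contexts)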
+Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateCatchContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
+ Handle<String> name = OpParameter<Handle<String>>(node);
+ Node* exception = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
+ factory()->catch_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
+ exception);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
int context_length = scope_info->ContextLength();
- if (FLAG_turbo_allocate && context_length < kBlockContextAllocationLimit) {
- // JSCreateBlockContext(s:scope[length < limit], f)
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const closure = NodeProperties::GetValueInput(node, 1);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const extension = jsgraph()->Constant(scope_info);
- Node* const load = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
- AllocationBuilder a(jsgraph(), simplified(), effect, control);
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // Use inline allocation for block contexts up to a size limit.
+ if (context_length < kBlockContextAllocationLimit) {
+ // JSCreateBlockContext[scope[length < limit]](fun)
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->Constant(scope_info);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(context_length, factory()->block_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
- a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->TheHoleConstant());
}
- // TODO(mstarzinger): We could mutate {node} into the allocation instead.
- NodeProperties::SetType(a.allocation(), NodeProperties::GetType(node));
- ReplaceWithValue(node, node, a.effect());
- node->ReplaceInput(0, a.allocation());
- node->ReplaceInput(1, a.effect());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->Finish(1));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ // Check if {target} is a known JSFunction.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+    // Patch {node} to an indirect call via the {function}'s construct stub.
+ Callable callable(handle(shared->construct_stub(), isolate()),
+ ConstructStubDescriptor(isolate()));
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Patch {node} to an indirect call via the ConstructFunction builtin.
+ Callable callable = CodeFactory::ConstructFunction(isolate());
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ CallDescriptor::kNeedsFrameState)));
return Changed(node);
}
+
return NoChange();
}
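As a worked example of the input shuffling in the known-function case above, assume a two-argument construct call new F(a, b), so arity == 2. After the RemoveInput/InsertInput sequence, the value inputs of {node} line up for the construct stub; what slots 4 and 5 mean is up to the ConstructStubDescriptor, and this sketch only records the order built here:

    // Sketch: inputs of {node} after patching, arity == 2.
    //   0: HeapConstant(construct stub code)
    //   1: target (F)
    //   2: new_target
    //   3: Int32Constant(2)       // actual argument count
    //   4: UndefinedConstant()
    //   5: UndefinedConstant()
    //   6: a
    //   7: b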
@@ -1336,32 +2143,121 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity() - 2);
- Node* const function = NodeProperties::GetValueInput(node, 0);
- Type* const function_type = NodeProperties::GetType(function);
- Node* const receiver = NodeProperties::GetValueInput(node, 1);
- Type* const receiver_type = NodeProperties::GetType(receiver);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
-
- // Check that {function} is actually a JSFunction with the correct arity.
- if (function_type->IsFunction() &&
- function_type->AsFunction()->Arity() == arity) {
- // Check that the {receiver} doesn't need to be wrapped.
- if (receiver_type->Is(Type::ReceiverOrUndefined())) {
- Node* const context = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
- function, effect, control);
- NodeProperties::ReplaceContextInput(node, context);
- CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (is_strict(p.language_mode())) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
+ ConvertReceiverMode convert_mode = p.convert_mode();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // Try to infer the {convert_mode} from the {receiver} type.
+ if (receiver_type->Is(Type::NullOrUndefined())) {
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ } else if (!receiver_type->Maybe(Type::NullOrUndefined())) {
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
+ }
+
+ // Check if {target} is a known JSFunction.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Class constructors are callable, but [[Call]] will raise an exception.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
+ if (IsClassConstructor(shared->kind())) return NoChange();
+
+ // Load the context from the {target}.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
+ NodeProperties::ReplaceContextInput(node, context);
+
+ // Check if we need to convert the {receiver}.
+ if (is_sloppy(shared->language_mode()) && !shared->native() &&
+ !receiver_type->Is(Type::Receiver())) {
+ receiver = effect =
+ graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
+ receiver, context, frame_state, effect, control);
+ NodeProperties::ReplaceValueInput(node, receiver, 1);
+ }
+
+ // Update the effect dependency for the {node}.
+ NodeProperties::ReplaceEffectInput(node, effect);
+
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ Node* new_target = jsgraph()->UndefinedConstant();
+ Node* argument_count = jsgraph()->Int32Constant(arity);
+ if (shared->internal_formal_parameter_count() == arity ||
+ shared->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Patch {node} to a direct call.
+ node->InsertInput(graph()->zone(), arity + 2, new_target);
+ node->InsertInput(graph()->zone(), arity + 3, argument_count);
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity, flags)));
- return Changed(node);
+ } else {
+ // Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, argument_count);
+ node->InsertInput(
+ graph()->zone(), 4,
+ jsgraph()->Int32Constant(shared->internal_formal_parameter_count()));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(),
+ 1 + arity, flags)));
}
+ return Changed(node);
}
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Compute flags for the call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ if (p.tail_call_mode() == TailCallMode::kAllow) {
+ flags |= CallDescriptor::kSupportsTailCalls;
+ }
+
+ // Patch {node} to an indirect call via the CallFunction builtin.
+ Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ flags)));
+ return Changed(node);
+ }
+
+ // Maybe we did at least learn something about the {receiver}.
+ if (p.convert_mode() != convert_mode) {
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallFunction(p.arity(), p.language_mode(), p.feedback(),
+ convert_mode, p.tail_call_mode()));
+ return Changed(node);
+ }
+
return NoChange();
}
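The direct-versus-adapted split above hinges on a single predicate. A minimal sketch, using the names from this patch and making no assumption about the sentinel's actual value:

    // Sketch: when a known callee can be invoked without going through the
    // ArgumentsAdaptorTrampoline.
    bool CanCallDirectly(Handle<SharedFunctionInfo> shared, int arity) {
      // No adaptation is needed if the actual argument count matches the
      // declared formal parameter count, or if the function opted out of
      // argument adaptation altogether.
      return shared->internal_formal_parameter_count() == arity ||
             shared->internal_formal_parameter_count() ==
                 SharedFunctionInfo::kDontAdaptArgumentsSentinel;
    }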
@@ -1414,9 +2310,9 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
Node* cache_type_enum_length = etrue0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
effect, if_true0);
- cache_length_true0 =
- graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
- jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
+ cache_length_true0 = graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), cache_type_enum_length,
+ jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
Node* check1 =
graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
@@ -1455,8 +2351,8 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
etrue0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
cache_array_true0 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true1,
- cache_array_false1, if_true0);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true1, cache_array_false1, if_true0);
cache_type_true0 = cache_type;
}
@@ -1468,36 +2364,24 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
Node* efalse0;
{
// FixedArray case.
- Node* receiver_instance_type = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, effect, if_false0);
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- cache_type_false0 = graph()->NewNode(
- common()->Select(kMachAnyTagged, BranchHint::kFalse),
- graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- receiver_instance_type,
- jsgraph()->Uint32Constant(LAST_JS_PROXY_TYPE)),
-        jsgraph()->ZeroConstant(),  // Zero indicates proxy.
- jsgraph()->OneConstant()); // One means slow check.
-
+      cache_type_false0 = jsgraph()->OneConstant();  // Smi means slow check.
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedArrayLength(graph()->zone())),
- cache_array_false0, efalse0, if_false0);
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ cache_array_false0, effect, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
Node* cache_array =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true0,
- cache_array_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true0, cache_array_false0, control);
Node* cache_length =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_length_true0,
- cache_length_false0, control);
- cache_type = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
- cache_type_true0, cache_type_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true0, cache_length_false0, control);
+ cache_type =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_type_true0, cache_type_false0, control);
for (auto edge : node->use_edges()) {
Node* const use = edge.from();
@@ -1610,8 +2494,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
efalse0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue1,
- vfalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@@ -1621,7 +2505,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
node->ReplaceInput(1, vfalse0);
node->ReplaceInput(2, control);
node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, common()->Phi(kMachAnyTagged, 2));
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 2));
return Changed(node);
}
@@ -1634,6 +2519,36 @@ Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
}
+Reduction JSTypedLowering::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* const condition = NodeProperties::GetValueInput(node, 0);
+ Type* const condition_type = NodeProperties::GetType(condition);
+ Node* const vtrue = NodeProperties::GetValueInput(node, 1);
+ Type* const vtrue_type = NodeProperties::GetType(vtrue);
+ Node* const vfalse = NodeProperties::GetValueInput(node, 2);
+ Type* const vfalse_type = NodeProperties::GetType(vfalse);
+ if (condition_type->Is(true_type_)) {
+ // Select(condition:true, vtrue, vfalse) => vtrue
+ return Replace(vtrue);
+ }
+ if (condition_type->Is(false_type_)) {
+ // Select(condition:false, vtrue, vfalse) => vfalse
+ return Replace(vfalse);
+ }
+ if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
+ // Select(condition, vtrue:true, vfalse:false) => condition
+ return Replace(condition);
+ }
+ if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
+ // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
Reduction JSTypedLowering::Reduce(Node* node) {
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
@@ -1681,11 +2596,11 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSGreaterThanOrEqual:
return ReduceJSComparison(node);
case IrOpcode::kJSBitwiseOr:
- return ReduceInt32Binop(node, machine()->Word32Or());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseOr());
case IrOpcode::kJSBitwiseXor:
- return ReduceInt32Binop(node, machine()->Word32Xor());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseXor());
case IrOpcode::kJSBitwiseAnd:
- return ReduceInt32Binop(node, machine()->Word32And());
+ return ReduceInt32Binop(node, simplified()->NumberBitwiseAnd());
case IrOpcode::kJSShiftLeft:
return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftLeft());
case IrOpcode::kJSShiftRight:
@@ -1703,42 +2618,52 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceNumberBinop(node, simplified()->NumberDivide());
case IrOpcode::kJSModulus:
return ReduceJSModulus(node);
- case IrOpcode::kJSUnaryNot:
- return ReduceJSUnaryNot(node);
case IrOpcode::kJSToBoolean:
return ReduceJSToBoolean(node);
case IrOpcode::kJSToNumber:
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
- case IrOpcode::kJSLoadGlobal:
- return ReduceJSLoadGlobal(node);
+ case IrOpcode::kJSToObject:
+ return ReduceJSToObject(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadProperty:
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSInstanceOf:
+ return ReduceJSInstanceOf(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
return ReduceJSStoreContext(node);
- case IrOpcode::kJSLoadDynamicGlobal:
- return ReduceJSLoadDynamicGlobal(node);
- case IrOpcode::kJSLoadDynamicContext:
- return ReduceJSLoadDynamicContext(node);
+ case IrOpcode::kJSConvertReceiver:
+ return ReduceJSConvertReceiver(node);
+ case IrOpcode::kJSCreate:
+ return ReduceJSCreate(node);
case IrOpcode::kJSCreateArguments:
return ReduceJSCreateArguments(node);
+ case IrOpcode::kJSCreateArray:
+ return ReduceJSCreateArray(node);
case IrOpcode::kJSCreateClosure:
return ReduceJSCreateClosure(node);
+ case IrOpcode::kJSCreateIterResultObject:
+ return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateLiteralArray:
return ReduceJSCreateLiteralArray(node);
case IrOpcode::kJSCreateLiteralObject:
return ReduceJSCreateLiteralObject(node);
+ case IrOpcode::kJSCreateFunctionContext:
+ return ReduceJSCreateFunctionContext(node);
case IrOpcode::kJSCreateWithContext:
return ReduceJSCreateWithContext(node);
+ case IrOpcode::kJSCreateCatchContext:
+ return ReduceJSCreateCatchContext(node);
case IrOpcode::kJSCreateBlockContext:
return ReduceJSCreateBlockContext(node);
+ case IrOpcode::kJSCallConstruct:
+ return ReduceJSCallConstruct(node);
case IrOpcode::kJSCallFunction:
return ReduceJSCallFunction(node);
case IrOpcode::kJSForInDone:
@@ -1749,6 +2674,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSForInPrepare(node);
case IrOpcode::kJSForInStep:
return ReduceJSForInStep(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
default:
break;
}
@@ -1763,6 +2690,139 @@ Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
}
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
+ Node* frame_state) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(argument_count, factory()->fixed_array_map());
+ for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+
+// Helper that allocates a FixedArray holding the tail of the argument values
+// recorded in the given {frame_state}, starting at {start_index}. Serves as
+// backing store for rest parameters of JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateRestArguments(Node* effect, Node* control,
+ Node* frame_state,
+ int start_index) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ int num_elements = std::max(0, argument_count - start_index);
+ if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // Skip unused arguments.
+ for (int i = 0; i < start_index; i++) {
+ ++parameters_it;
+ }
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(num_elements, factory()->fixed_array_map());
+ for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+
+// Helper that allocates a FixedArray serving as a parameter map for values
+// recorded in the given {frame_state}. Some elements map to slots within the
+// given {context}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateAliasedArguments(
+ Node* effect, Node* control, Node* frame_state, Node* context,
+ Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // If there is no aliasing, the arguments object elements are not special
+  // in any way, so we can just return an unmapped backing store instead.
+ int parameter_count = shared->internal_formal_parameter_count();
+ if (parameter_count == 0) {
+ return AllocateArguments(effect, control, frame_state);
+ }
+
+ // Calculate number of argument values being aliased/mapped.
+ int mapped_count = Min(argument_count, parameter_count);
+ *has_aliased_arguments = true;
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+ // The unmapped argument values recorded in the frame state are stored yet
+ // another indirection away and then linked into the parameter map below,
+ // whereas mapped argument values are replaced with a hole instead.
+ AllocationBuilder aa(jsgraph(), effect, control);
+ aa.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+ }
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ Node* arguments = aa.Finish();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), arguments, control);
+ a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+ a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+ a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+ for (int i = 0; i < mapped_count; ++i) {
+ int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+ a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+ }
+ return a.Finish();
+}
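A layout sketch for the two arrays built above, following the sloppy-arguments convention that the stores encode (outer parameter map first, then the plain backing store it points at):

    // parameter map: mapped_count + 2 slots
    //   [0]      context
    //   [1]      the unmapped "arguments" backing store
    //   [2 + i]  context slot index of parameter i, counted backwards:
    //            MIN_CONTEXT_SLOTS + parameter_count - 1 - i
    //
    // arguments backing store: argument_count slots
    //   [0 .. mapped_count)    the_hole (the real value lives in the context)
    //   [mapped_count .. end)  argument values from the frame state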
+
+
+Node* JSTypedLowering::AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(1, capacity);
+ DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+ Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ ? factory()->fixed_double_array_map()
+ : factory()->fixed_array_map();
+ ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
+ Node* value =
+ IsFastDoubleElementsKind(elements_kind)
+ ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
+ : jsgraph()->TheHoleConstant();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(capacity, elements_map, pretenure);
+ for (int i = 0; i < capacity; ++i) {
+ Node* index = jsgraph()->Constant(i);
+ a.Store(access, index, value);
+ }
+ return a.Finish();
+}
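The Float64Constant(bit_cast<double>(kHoleNanInt64)) above exists because a double-typed backing store cannot hold the tagged hole; the hole is encoded as a dedicated NaN bit pattern instead. A standalone sketch of that bit cast — the actual kHoleNanInt64 value is defined elsewhere in V8 and not assumed here:

    #include <cstdint>
    #include <cstring>

    // Sketch: reinterpret the hole's NaN bit pattern as a double, the
    // portable equivalent of bit_cast<double>(kHoleNanInt64).
    double HoleNaN(uint64_t hole_nan_int64) {
      double result;
      std::memcpy(&result, &hole_nan_int64, sizeof result);
      return result;
    }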
+
+
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
@@ -1782,10 +2842,20 @@ CommonOperatorBuilder* JSTypedLowering::common() const {
}
+SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
+
MachineOperatorBuilder* JSTypedLowering::machine() const {
return jsgraph()->machine();
}
+
+CompilationDependencies* JSTypedLowering::dependencies() const {
+ return dependencies_;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/js-typed-lowering.h b/chromium/v8/src/compiler/js-typed-lowering.h
index c11f068e5b2..68ce74e6245 100644
--- a/chromium/v8/src/compiler/js-typed-lowering.h
+++ b/chromium/v8/src/compiler/js-typed-lowering.h
@@ -5,15 +5,17 @@
#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
#define V8_COMPILER_JS_TYPED_LOWERING_H_
+#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
// Forward declarations.
+class CompilationDependencies;
class Factory;
+class TypeCache;
namespace compiler {
@@ -23,12 +25,22 @@ class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
// Lowers JS-level operators to simplified operators based on types.
class JSTypedLowering final : public AdvancedReducer {
public:
- JSTypedLowering(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ kDisableBinaryOpReduction = 1u << 1,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSTypedLowering(Editor* editor, CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone);
~JSTypedLowering() final {}
Reduction Reduce(Node* node) final;
@@ -41,39 +53,56 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSBitwiseOr(Node* node);
Reduction ReduceJSMultiply(Node* node);
Reduction ReduceJSComparison(Node* node);
- Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
- Reduction ReduceJSLoadDynamicGlobal(Node* node);
- Reduction ReduceJSLoadDynamicContext(Node* node);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
- Reduction ReduceJSUnaryNot(Node* node);
Reduction ReduceJSToBoolean(Node* node);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSToObject(Node* node);
+ Reduction ReduceJSConvertReceiver(Node* node);
+ Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
+ Reduction ReduceJSCreateArray(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
+ Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateLiteralArray(Node* node);
Reduction ReduceJSCreateLiteralObject(Node* node);
+ Reduction ReduceJSCreateFunctionContext(Node* node);
Reduction ReduceJSCreateWithContext(Node* node);
+ Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node);
Reduction ReduceJSForInPrepare(Node* node);
Reduction ReduceJSForInStep(Node* node);
+ Reduction ReduceSelect(Node* node);
Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
+ Reduction ReduceNewArray(Node* node, Node* length, int capacity,
+ Handle<AllocationSite> site);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
+ Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+ Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
+ int start_index);
+ Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
+ Node* context, Handle<SharedFunctionInfo>,
+ bool* has_aliased_arguments);
+ Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind, int capacity,
+ PretenureFlag pretenure);
Factory* factory() const;
Graph* graph() const;
@@ -81,17 +110,27 @@ class JSTypedLowering final : public AdvancedReducer {
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
+ CompilationDependencies* dependencies() const;
+ Flags flags() const { return flags_; }
// Limits up to which context allocations are inlined.
+ static const int kFunctionContextAllocationLimit = 16;
static const int kBlockContextAllocationLimit = 16;
+ CompilationDependencies* dependencies_;
+ Flags flags_;
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder simplified_;
Type* shifted_int32_ranges_[4];
+ Type* const true_type_;
+ Type* const false_type_;
+ Type* const the_hole_type_;
+ TypeCache const& type_cache_;
};
+DEFINE_OPERATORS_FOR_FLAGS(JSTypedLowering::Flags)
+
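DEFINE_OPERATORS_FOR_FLAGS gives the new Flags type the usual bitwise operators. A hypothetical construction site for the reducer, assuming an is_deoptimization_enabled() accessor that this patch does not show:

    // Sketch: how a pipeline might assemble the new constructor arguments.
    JSTypedLowering::Flags flags = JSTypedLowering::kNoFlags;
    if (info->is_deoptimization_enabled()) {
      flags |= JSTypedLowering::kDeoptimizationEnabled;
    }
    JSTypedLowering lowering(editor, dependencies, flags, jsgraph, zone);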
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/linkage.cc b/chromium/v8/src/compiler/linkage.cc
index af3decc5b5a..2eef9291e98 100644
--- a/chromium/v8/src/compiler/linkage.cc
+++ b/chromium/v8/src/compiler/linkage.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/compiler/common-operator.h"
@@ -10,7 +11,6 @@
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -18,36 +18,36 @@ namespace compiler {
namespace {
LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(Register::ToAllocationIndex(reg));
+ return LinkageLocation::ForRegister(reg.code());
}
MachineType reptyp(Representation representation) {
switch (representation.kind()) {
case Representation::kInteger8:
- return kMachInt8;
+ return MachineType::Int8();
case Representation::kUInteger8:
- return kMachUint8;
+ return MachineType::Uint8();
case Representation::kInteger16:
- return kMachInt16;
+ return MachineType::Int16();
case Representation::kUInteger16:
- return kMachUint16;
+ return MachineType::Uint16();
case Representation::kInteger32:
- return kMachInt32;
+ return MachineType::Int32();
case Representation::kSmi:
case Representation::kTagged:
case Representation::kHeapObject:
- return kMachAnyTagged;
+ return MachineType::AnyTagged();
case Representation::kDouble:
- return kMachFloat64;
+ return MachineType::Float64();
case Representation::kExternal:
- return kMachPtr;
+ return MachineType::Pointer();
case Representation::kNone:
case Representation::kNumRepresentations:
break;
}
UNREACHABLE();
- return kMachNone;
+ return MachineType::None();
}
} // namespace
@@ -63,6 +63,9 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallAddress:
os << "Addr";
break;
+ case CallDescriptor::kLazyBailout:
+ os << "LazyBail";
+ break;
}
return os;
}
@@ -86,84 +89,38 @@ bool CallDescriptor::HasSameReturnLocationsAs(
}
-bool CallDescriptor::CanTailCall(const Node* node) const {
- // Determine the number of stack parameters passed in
- size_t stack_params = 0;
- for (size_t i = 0; i < InputCount(); ++i) {
- if (!GetInputLocation(i).IsRegister()) {
- ++stack_params;
- }
- }
- // Ensure the input linkage contains the stack parameters in the right order
- size_t current_stack_param = 0;
- for (size_t i = 0; i < InputCount(); ++i) {
- if (!GetInputLocation(i).IsRegister()) {
- if (GetInputLocation(i) != LinkageLocation::ForCallerFrameSlot(
- static_cast<int>(current_stack_param) -
- static_cast<int>(stack_params))) {
- return false;
- }
- ++current_stack_param;
- }
- }
- // Tail calling is currently allowed if return locations match and all
- // parameters are either in registers or on the stack but match exactly in
- // number and content.
+bool CallDescriptor::CanTailCall(const Node* node,
+ int* stack_param_delta) const {
CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
- if (!HasSameReturnLocationsAs(other)) return false;
size_t current_input = 0;
size_t other_input = 0;
- while (true) {
- if (other_input >= other->InputCount()) {
- while (current_input < InputCount()) {
- if (!GetInputLocation(current_input).IsRegister()) {
- return false;
- }
- ++current_input;
+ *stack_param_delta = 0;
+ bool more_other = true;
+ bool more_this = true;
+ while (more_other || more_this) {
+ if (other_input < other->InputCount()) {
+ if (!other->GetInputLocation(other_input).IsRegister()) {
+ (*stack_param_delta)--;
}
- return true;
+ } else {
+ more_other = false;
}
- if (current_input >= InputCount()) {
- while (other_input < other->InputCount()) {
- if (!other->GetInputLocation(other_input).IsRegister()) {
- return false;
- }
- ++other_input;
+ if (current_input < InputCount()) {
+ if (!GetInputLocation(current_input).IsRegister()) {
+ (*stack_param_delta)++;
}
- return true;
- }
- if (GetInputLocation(current_input).IsRegister()) {
- ++current_input;
- continue;
- }
- if (other->GetInputLocation(other_input).IsRegister()) {
- ++other_input;
- continue;
- }
- if (GetInputLocation(current_input) !=
- other->GetInputLocation(other_input)) {
- return false;
- }
- Node* input = node->InputAt(static_cast<int>(other_input));
- if (input->opcode() != IrOpcode::kParameter) {
- return false;
- }
- // Make sure that the parameter input passed through to the tail call
- // corresponds to the correct stack slot.
- size_t param_index = ParameterIndexOf(input->op());
- if (param_index != current_input - 1) {
- return false;
+ } else {
+ more_this = false;
}
++current_input;
++other_input;
}
- UNREACHABLE();
- return false;
+ return HasSameReturnLocationsAs(OpParameter<CallDescriptor const*>(node));
}
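A worked example for the delta computed above, with invented counts: if this descriptor passes three of its inputs on the stack and the tail-callee's descriptor passes one, the loop leaves

    //   *stack_param_delta == (+1 per own stack input)
    //                       + (-1 per callee stack input) == 3 - 1 == +2

i.e. the current frame holds two more stack parameters than the tail call consumes, which is the shift that ConvertToTailCallerLocation() in linkage.h applies to non-register callee locations.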
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
- if (info->code_stub() != NULL) {
+ if (info->code_stub() != nullptr) {
// Use the code stub interface descriptor.
CodeStub* stub = info->code_stub();
CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
@@ -186,27 +143,7 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
1 + shared->internal_formal_parameter_count(),
CallDescriptor::kNoFlags);
}
- return NULL; // TODO(titzer): ?
-}
-
-
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame) const {
- bool has_frame = frame->GetSpillSlotCount() > 0 ||
- incoming_->IsJSFunctionCall() ||
- incoming_->kind() == CallDescriptor::kCallAddress;
- const int offset =
- (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
- kPointerSize;
- if (has_frame) {
- return FrameOffset::FromFramePointer(offset);
- } else {
- // No frame. Retrieve all parameters relative to stack pointer.
- DCHECK(spill_slot < 0); // Must be a parameter.
- int offsetSpToFp =
- kPointerSize * (StandardFrameConstants::kFixedSlotCountAboveFp -
- frame->GetTotalFrameSlotCount());
- return FrameOffset::FromStackPointer(offset - offsetSpToFp);
- }
+ return nullptr; // TODO(titzer): ?
}
@@ -217,14 +154,14 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
// are blacklisted here and can be called without a FrameState.
switch (function) {
case Runtime::kAllocateInTargetSpace:
- case Runtime::kDateField:
+ case Runtime::kCreateIterResultObject:
case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
- case Runtime::kGetOriginalConstructor:
+ case Runtime::kGetSuperConstructor:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
case Runtime::kNewFunctionContext:
@@ -239,11 +176,8 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
return 0;
case Runtime::kInlineArguments:
case Runtime::kInlineArgumentsLength:
- case Runtime::kInlineCall:
- case Runtime::kInlineCallFunction:
- case Runtime::kInlineDefaultConstructorCallSuper:
- case Runtime::kInlineGetCallerJSFunction:
case Runtime::kInlineGetPrototype:
+ case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
case Runtime::kInlineSubString:
case Runtime::kInlineToInteger:
@@ -256,6 +190,8 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kInlineToPrimitive:
case Runtime::kInlineToString:
return 1;
+ case Runtime::kInlineCall:
+ case Runtime::kInlineTailCall:
case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineThrowNotDateError:
return 2;
@@ -285,7 +221,7 @@ bool CallDescriptor::UsesOnlyRegisters() const {
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
- Operator::Properties properties, bool needs_frame_state) {
+ Operator::Properties properties, CallDescriptor::Flags flags) {
const size_t function_count = 1;
const size_t num_args_count = 1;
const size_t context_count = 1;
@@ -307,34 +243,34 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
locations.AddReturn(regloc(kReturnRegister1));
}
for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(kMachAnyTagged);
+ types.AddReturn(MachineType::AnyTagged());
}
// All parameters to the runtime call go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
locations.AddParam(
LinkageLocation::ForCallerFrameSlot(i - js_parameter_count));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
}
// Add runtime function itself.
locations.AddParam(regloc(kRuntimeCallFunctionRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
// Add runtime call argument count.
locations.AddParam(regloc(kRuntimeCallArgCountRegister));
- types.AddParam(kMachPtr);
+ types.AddParam(MachineType::Pointer());
// Add context.
locations.AddParam(regloc(kContextRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
- CallDescriptor::Flags flags =
- needs_frame_state && (Linkage::FrameStateInputCount(function_id) > 0)
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ if (Linkage::FrameStateInputCount(function_id) == 0) {
+ flags = static_cast<CallDescriptor::Flags>(
+ flags & ~CallDescriptor::kNeedsFrameState);
+ }
// The target for runtime calls is a code object.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
@@ -351,32 +287,69 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
}
+CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
+ const size_t return_count = 0;
+ const size_t parameter_count = 0;
+
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+ MachineSignature::Builder types(zone, return_count, parameter_count);
+
+ // The target is ignored, but we need to give some values here.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = regloc(kJSFunctionRegister);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kLazyBailout, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoThrow, // properties
+ kNoCalleeSaved, // callee-saved
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kNeedsFrameState, // flags
+ "lazy-bailout");
+}
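A short comment-style note on the all-zero counts above, as read from this patch rather than any spec:

    // A lazy bailout "call" emits no real call and transfers no values; the
    // descriptor exists only so the instruction can carry a frame state.
    // Hence kNeedsFrameState is its sole flag, and the kLazyBailout kind
    // added to CallDescriptor below prints as "LazyBail".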
+
+
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
int js_parameter_count,
CallDescriptor::Flags flags) {
const size_t return_count = 1;
const size_t context_count = 1;
- const size_t parameter_count = js_parameter_count + context_count;
+ const size_t new_target_count = 1;
+ const size_t num_args_count = 1;
+ const size_t parameter_count =
+ js_parameter_count + new_target_count + num_args_count + context_count;
LocationSignature::Builder locations(zone, return_count, parameter_count);
MachineSignature::Builder types(zone, return_count, parameter_count);
// All JS calls have exactly one return value.
locations.AddReturn(regloc(kReturnRegister0));
- types.AddReturn(kMachAnyTagged);
+ types.AddReturn(MachineType::AnyTagged());
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
int spill_slot_index = i - js_parameter_count;
locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
}
+
+ // Add JavaScript call new target value.
+ locations.AddParam(regloc(kJavaScriptCallNewTargetRegister));
+ types.AddParam(MachineType::AnyTagged());
+
+ // Add JavaScript call argument count.
+ locations.AddParam(regloc(kJavaScriptCallArgCountRegister));
+ types.AddParam(MachineType::Int32());
+
// Add context.
locations.AddParam(regloc(kContextRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
// The target for JS function calls is the JSFunction object.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
// TODO(titzer): When entering into an OSR function from unoptimized code,
// the JSFunction is not in a register, but it is on the stack in an
// unaddressable spill slot. We hack this in the OSR prologue. Fix.
@@ -403,38 +376,40 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
// Add registers for fixed parameters passed via interpreter dispatch.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kInterpreterAccumulatorRegister));
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
- types.AddParam(kMachPtr);
+ types.AddParam(MachineType::Pointer());
locations.AddParam(regloc(kInterpreterRegisterFileRegister));
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
- types.AddParam(kMachIntPtr);
+ types.AddParam(MachineType::IntPtr());
locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- types.AddParam(kMachPtr);
- locations.AddParam(regloc(kInterpreterDispatchTableRegister));
-
- STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::Pointer());
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
+ // TODO(rmcilroy): Make the context param the one spilled to the stack once
+ // Turbofan supports modified stack arguments in tail calls.
locations.AddParam(
- LinkageLocation::ForCallerFrameSlot(kInterpreterContextSpillSlot));
+ LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
#else
- locations.AddParam(regloc(kContextRegister));
+ locations.AddParam(regloc(kInterpreterDispatchTableRegister));
#endif
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(regloc(kContextRegister));
+
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
- kMachNone, // target MachineType
+ MachineType::None(), // target MachineType
target_loc, // target location
types.Build(), // machine_sig
locations.Build(), // location_sig
@@ -454,21 +429,28 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
+ Operator::Properties properties, MachineType return_type,
+ size_t return_count) {
const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int js_parameter_count =
register_parameter_count + stack_parameter_count;
const int context_count = 1;
- const size_t return_count = 1;
const size_t parameter_count =
static_cast<size_t>(js_parameter_count + context_count);
LocationSignature::Builder locations(zone, return_count, parameter_count);
MachineSignature::Builder types(zone, return_count, parameter_count);
- // Add return location.
- locations.AddReturn(regloc(kReturnRegister0));
- types.AddReturn(return_type);
+ // Add returns.
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(return_type);
+ }
// Add parameters in registers and on the stack.
for (int i = 0; i < js_parameter_count; i++) {
@@ -483,15 +465,15 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
}
}
// Add context.
locations.AddParam(regloc(kContextRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
// The target for stub calls is a code object.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
@@ -515,8 +497,9 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
if (index == kOsrContextSpillSlotIndex) {
// Context. Use the parameter location of the context spill slot.
- // Parameter (arity + 1) is special for the context of the function frame.
- int context_index = 1 + 1 + parameter_count; // target + receiver + params
+ // Parameter (arity + 2) is special for the context of the function frame.
+ // >> context_index = target + receiver + params + new_target + #args
+ int context_index = 1 + 1 + parameter_count + 1 + 1;
return incoming_->GetInputLocation(context_index);
} else if (index >= first_stack_slot) {
// Local variable stored in this (callee) stack.
@@ -529,6 +512,28 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
return incoming_->GetInputLocation(parameter_index);
}
}
+
+
+bool Linkage::ParameterHasSecondaryLocation(int index) const {
+ if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+ LinkageLocation loc = GetParameterLocation(index);
+ return (loc == regloc(kJSFunctionRegister) ||
+ loc == regloc(kContextRegister));
+}
+
+LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
+ DCHECK(ParameterHasSecondaryLocation(index));
+ LinkageLocation loc = GetParameterLocation(index);
+
+ if (loc == regloc(kJSFunctionRegister)) {
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot);
+ } else {
+ DCHECK(loc == regloc(kContextRegister));
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot);
+ }
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/linkage.h b/chromium/v8/src/compiler/linkage.h
index b25fe413c96..252f0443213 100644
--- a/chromium/v8/src/compiler/linkage.h
+++ b/chromium/v8/src/compiler/linkage.h
@@ -7,9 +7,9 @@
#include "src/base/flags.h"
#include "src/compiler/frame.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
#include "src/frames.h"
+#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/zone.h"
@@ -57,6 +57,34 @@ class LinkageLocation {
return LinkageLocation(STACK_SLOT, slot);
}
+ static LinkageLocation ForSavedCallerReturnAddress() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kCallerPCOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ForSavedCallerFramePtr() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kCallerFPOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ForSavedCallerConstantPool() {
+ DCHECK(V8_EMBEDDED_CONSTANT_POOL);
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kConstantPoolOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ConvertToTailCallerLocation(
+ LinkageLocation caller_location, int stack_param_delta) {
+ if (!caller_location.IsRegister()) {
+ return LinkageLocation(STACK_SLOT,
+ caller_location.GetLocation() - stack_param_delta);
+ }
+ return caller_location;
+ }
+
private:
friend class CallDescriptor;
friend class OperandGenerator;
@@ -110,9 +138,10 @@ class CallDescriptor final : public ZoneObject {
public:
// Describes the kind of this call, which determines the target.
enum Kind {
- kCallCodeObject, // target is a Code object
- kCallJSFunction, // target is a JSFunction object
- kCallAddress, // target is a machine pointer
+ kCallCodeObject, // target is a Code object
+ kCallJSFunction, // target is a JSFunction object
+ kCallAddress, // target is a machine pointer
+ kLazyBailout // the call is no-op, only used for lazy bailout
};
enum Flag {
@@ -124,6 +153,9 @@ class CallDescriptor final : public ZoneObject {
kHasLocalCatchHandler = 1u << 4,
kSupportsTailCalls = 1u << 5,
kCanUseRoots = 1u << 6,
+ // Indicates that the native stack should be used for a code object. This
+ // information is important for native calls on arm64.
+ kUseNativeStack = 1u << 7,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
@@ -159,6 +191,10 @@ class CallDescriptor final : public ZoneObject {
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+ bool RequiresFrameAsIncoming() const {
+ return IsCFunctionCall() || IsJSFunctionCall();
+ }
+
// The number of return values from this call.
size_t ReturnCount() const { return machine_sig_->return_count(); }
@@ -185,6 +221,7 @@ class CallDescriptor final : public ZoneObject {
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
+ bool UseNativeStack() const { return flags() & kUseNativeStack; }
LinkageLocation GetReturnLocation(size_t index) const {
return location_sig_->GetReturn(index);
@@ -221,7 +258,7 @@ class CallDescriptor final : public ZoneObject {
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
- bool CanTailCall(const Node* call) const;
+ bool CanTailCall(const Node* call, int* stack_param_delta) const;
private:
friend class Linkage;
@@ -253,11 +290,11 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
// Can be used to translate {arg_index} (i.e. index of the call node input) as
// well as {param_index} (i.e. as stored in parameter nodes) into an operator
// representing the architecture-specific location. The following call node
-// layouts are supported (where {n} is the number value inputs):
+// layouts are supported (where {n} is the number of value inputs):
//
// #0 #1 #2 #3 [...] #n
// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
-// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], context
+// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], new, #arg, context
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
public:
@@ -271,15 +308,19 @@ class Linkage : public ZoneObject {
static CallDescriptor* GetJSCallDescriptor(Zone* zone, bool is_osr,
int parameter_count,
CallDescriptor::Flags flags);
+
static CallDescriptor* GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, bool needs_frame_state = true);
+ Operator::Properties properties, CallDescriptor::Flags flags);
+
+ static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties = Operator::kNoProperties,
- MachineType return_type = kMachAnyTagged);
+ MachineType return_type = MachineType::AnyTagged(),
+ size_t return_count = 1);
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
@@ -304,26 +345,40 @@ class Linkage : public ZoneObject {
}
// Get the location where this function should place its return value.
- LinkageLocation GetReturnLocation() const {
- return incoming_->GetReturnLocation(0);
+ LinkageLocation GetReturnLocation(size_t index = 0) const {
+ return incoming_->GetReturnLocation(index);
}
// Get the machine type of this function's return value.
- MachineType GetReturnType() const { return incoming_->GetReturnType(0); }
+ MachineType GetReturnType(size_t index = 0) const {
+ return incoming_->GetReturnType(index);
+ }
- // Get the frame offset for a given spill slot. The location depends on the
- // calling convention and the specific frame layout, and may thus be
- // architecture-specific. Negative spill slots indicate arguments on the
- // caller's frame.
- FrameOffset GetFrameOffset(int spill_slot, Frame* frame) const;
+ bool ParameterHasSecondaryLocation(int index) const;
+ LinkageLocation GetParameterSecondaryLocation(int index) const;
static int FrameStateInputCount(Runtime::FunctionId function);
// Get the location where an incoming OSR value is stored.
LinkageLocation GetOsrValueLocation(int index) const;
- // A special parameter index for JSCalls that represents the closure.
- static const int kJSFunctionCallClosureParamIndex = -1;
+ // A special {Parameter} index for JSCalls that represents the new target.
+ static int GetJSCallNewTargetParamIndex(int parameter_count) {
+ return parameter_count + 0; // Parameter (arity + 0) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the argument count.
+ static int GetJSCallArgCountParamIndex(int parameter_count) {
+ return parameter_count + 1; // Parameter (arity + 1) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the context.
+ static int GetJSCallContextParamIndex(int parameter_count) {
+ return parameter_count + 2; // Parameter (arity + 2) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the closure.
+ static const int kJSCallClosureParamIndex = -1;
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
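
The index helpers above encode the extended JSFunction call layout noted in the comment earlier in this hunk (function, rcvr, args..., new_target, #args, context). A standalone check of the arithmetic, assuming parameter_count counts the receiver plus the explicit arguments (not V8 code):

#include <cassert>

static int GetJSCallNewTargetParamIndex(int parameter_count) {
  return parameter_count + 0;  // Parameter (arity + 0) is special.
}
static int GetJSCallArgCountParamIndex(int parameter_count) {
  return parameter_count + 1;  // Parameter (arity + 1) is special.
}
static int GetJSCallContextParamIndex(int parameter_count) {
  return parameter_count + 2;  // Parameter (arity + 2) is special.
}

int main() {
  const int params = 3;  // receiver plus two explicit arguments
  assert(GetJSCallNewTargetParamIndex(params) == 3);
  assert(GetJSCallArgCountParamIndex(params) == 4);
  assert(GetJSCallContextParamIndex(params) == 5);
}
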
diff --git a/chromium/v8/src/compiler/live-range-separator.cc b/chromium/v8/src/compiler/live-range-separator.cc
index f29e4b4a202..980c9442bcc 100644
--- a/chromium/v8/src/compiler/live-range-separator.cc
+++ b/chromium/v8/src/compiler/live-range-separator.cc
@@ -18,40 +18,6 @@ namespace compiler {
namespace {
-// Starting from a deferred block, find the last consecutive deferred block.
-RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
- const InstructionSequence *code) {
- DCHECK(block->IsDeferred());
- RpoNumber first = block->rpo_number();
-
- RpoNumber last = first;
- for (int i = first.ToInt(); i < code->InstructionBlockCount(); ++i) {
- RpoNumber at_i = RpoNumber::FromInt(i);
- const InstructionBlock *block_at_i = code->InstructionBlockAt(at_i);
- if (!block_at_i->IsDeferred()) break;
- last = at_i;
- }
-
- return last;
-}
-
-
-// Delimits consecutive deferred block sequences.
-void AssociateDeferredBlockSequences(InstructionSequence *code) {
- for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {
- InstructionBlock *block =
- code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
- if (!block->IsDeferred()) continue;
- RpoNumber last = GetLastDeferredBlock(block, code);
- block->set_last_deferred(last);
-    // We know last is still deferred, and that last + 1 is not (or is an
-    // invalid index). So skip over last + 1 and continue from last + 2. This
-    // way, we visit each block exactly once, and the total complexity of this
-    // function is O(n), n being the number of blocks.
- blk_id = last.ToInt() + 1;
- }
-}
-
void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
LifetimePosition first_cut, LifetimePosition last_cut) {
@@ -78,80 +44,101 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
if (range->MayRequireSpillRange()) {
data->CreateSpillRangeForLiveRange(range);
}
- TopLevelLiveRange *result = data->NextLiveRange(range->machine_type());
- DCHECK_NULL(data->live_ranges()[result->vreg()]);
- data->live_ranges()[result->vreg()] = result;
-
+ if (range->splinter() == nullptr) {
+ TopLevelLiveRange *splinter =
+ data->NextLiveRange(range->representation());
+ DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
+ data->live_ranges()[splinter->vreg()] = splinter;
+ range->SetSplinter(splinter);
+ }
Zone *zone = data->allocation_zone();
- range->Splinter(start, end, result, zone);
+ TRACE("creating splinter for range %d between %d and %d\n", range->vreg(),
+ start.ToInstructionIndex(), end.ToInstructionIndex());
+ range->Splinter(start, end, zone);
}
}
-// Splinter all ranges live inside successive deferred blocks.
-// No control flow analysis is performed. After the register allocation, we will
-// merge the splinters back into the original ranges, and then rely on the
-// range connector to properly connect them.
-void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
- InstructionSequence *code = data->code();
- int code_block_count = code->InstructionBlockCount();
- Zone *zone = data->allocation_zone();
- ZoneVector<BitVector *> &in_sets = data->live_in_sets();
-
- for (int i = 0; i < code_block_count; ++i) {
- InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
- if (!block->IsDeferred()) continue;
-
- RpoNumber last_deferred = block->last_deferred();
- // last_deferred + 1 is not deferred, so no point in visiting it.
- i = last_deferred.ToInt() + 1;
-
- LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
- block->first_instruction_index());
-
- LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
- static_cast<int>(code->instructions().size()));
-
- const BitVector *in_set = in_sets[block->rpo_number().ToInt()];
- BitVector ranges_to_splinter(*in_set, zone);
- InstructionBlock *last = code->InstructionBlockAt(last_deferred);
- for (int deferred_id = block->rpo_number().ToInt();
- deferred_id <= last->rpo_number().ToInt(); ++deferred_id) {
- const BitVector *ins = in_sets[deferred_id];
- ranges_to_splinter.Union(*ins);
- const BitVector *outs = LiveRangeBuilder::ComputeLiveOut(
- code->InstructionBlockAt(RpoNumber::FromInt(deferred_id)), data);
- ranges_to_splinter.Union(*outs);
+void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
+ const InstructionSequence *code = data->code();
+ UseInterval *interval = range->first_interval();
+
+ LifetimePosition first_cut = LifetimePosition::Invalid();
+ LifetimePosition last_cut = LifetimePosition::Invalid();
+
+ while (interval != nullptr) {
+ UseInterval *next_interval = interval->next();
+ const InstructionBlock *first_block =
+ code->GetInstructionBlock(interval->FirstGapIndex());
+ const InstructionBlock *last_block =
+ code->GetInstructionBlock(interval->LastGapIndex());
+ int first_block_nr = first_block->rpo_number().ToInt();
+ int last_block_nr = last_block->rpo_number().ToInt();
+ for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
+ const InstructionBlock *current_block =
+ code->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ if (current_block->IsDeferred()) {
+ if (!first_cut.IsValid()) {
+ first_cut = LifetimePosition::GapFromInstructionIndex(
+ current_block->first_instruction_index());
+ }
+ last_cut = LifetimePosition::GapFromInstructionIndex(
+ current_block->last_instruction_index());
+ } else {
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, last_cut);
+ first_cut = LifetimePosition::Invalid();
+ last_cut = LifetimePosition::Invalid();
+ }
+ }
}
+ interval = next_interval;
+ }
+ // When the range ends in deferred blocks, first_cut will be valid here.
+ // Splinter from there to the last instruction that was in a deferred block.
+ if (first_cut.IsValid()) {
+ CreateSplinter(range, data, first_cut, last_cut);
+ }
+}
+} // namespace
- int last_index = last->last_instruction_index();
- if (code->InstructionAt(last_index)->opcode() ==
- ArchOpcode::kArchDeoptimize) {
- ++last_index;
- }
- last_cut = LifetimePosition::GapFromInstructionIndex(last_index);
-
- BitVector::Iterator iterator(&ranges_to_splinter);
-
- while (!iterator.Done()) {
- int range_id = iterator.Current();
- iterator.Advance();
- TopLevelLiveRange *range = data->live_ranges()[range_id];
- CreateSplinter(range, data, first_cut, last_cut);
+void LiveRangeSeparator::Splinter() {
+ size_t virt_reg_count = data()->live_ranges().size();
+ for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
+ TopLevelLiveRange *range = data()->live_ranges()[vreg];
+ if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
+ continue;
+ }
+ int first_instr = range->first_interval()->FirstGapIndex();
+ if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
+ SplinterLiveRange(range, data());
}
}
}
-} // namespace
-void LiveRangeSeparator::Splinter() {
- AssociateDeferredBlockSequences(data()->code());
- SplinterRangesInDeferredBlocks(data());
+void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
+ for (TopLevelLiveRange *top : data()->live_ranges()) {
+ if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
+ continue;
+ }
+
+ LiveRange *child = top;
+ for (; child != nullptr; child = child->next()) {
+ if (child->spilled() ||
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ break;
+ }
+ }
+ if (child == nullptr) top->MarkSpilledInDeferredBlock();
+ }
}
void LiveRangeMerger::Merge() {
+ MarkRangesSpilledInDeferredBlocks();
+
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
TopLevelLiveRange *range = data()->live_ranges()[i];
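
SplinterLiveRange above walks a range's use intervals block by block and carves out each maximal run of consecutive deferred blocks between first_cut and last_cut, closing the final run after the loop when the range ends inside deferred code. A toy standalone version of that span detection (not V8 code; plain flags stand in for InstructionBlock::IsDeferred):

#include <cstdio>
#include <vector>

int main() {
  // One flag per block covered by the live range; true = deferred.
  const std::vector<bool> deferred = {false, true, true, false, true};
  int first_cut = -1;  // -1 plays the role of LifetimePosition::Invalid()
  const int n = static_cast<int>(deferred.size());
  for (int block = 0; block < n; ++block) {
    if (deferred[block]) {
      if (first_cut < 0) first_cut = block;  // open a splinter span
    } else if (first_cut >= 0) {
      std::printf("splinter blocks [%d, %d]\n", first_cut, block - 1);
      first_cut = -1;  // close it
    }
  }
  // A range ending inside deferred code still has an open span.
  if (first_cut >= 0) {
    std::printf("splinter blocks [%d, %d]\n", first_cut, n - 1);
  }
}
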
diff --git a/chromium/v8/src/compiler/live-range-separator.h b/chromium/v8/src/compiler/live-range-separator.h
index c8e6edc20b3..57bc98235dd 100644
--- a/chromium/v8/src/compiler/live-range-separator.h
+++ b/chromium/v8/src/compiler/live-range-separator.h
@@ -47,6 +47,11 @@ class LiveRangeMerger final : public ZoneObject {
RegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
+  // Mark ranges spilled in deferred blocks that also cover non-deferred code.
+ // We do nothing special for ranges fully contained in deferred blocks,
+ // because they would "spill in deferred blocks" anyway.
+ void MarkRangesSpilledInDeferredBlocks();
+
RegisterAllocationData* const data_;
Zone* const zone_;
diff --git a/chromium/v8/src/compiler/load-elimination.cc b/chromium/v8/src/compiler/load-elimination.cc
index c78a283ca09..97f1ab0ec54 100644
--- a/chromium/v8/src/compiler/load-elimination.cc
+++ b/chromium/v8/src/compiler/load-elimination.cc
@@ -28,7 +28,7 @@ Reduction LoadElimination::Reduce(Node* node) {
Reduction LoadElimination::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const access = FieldAccessOf(node->op());
- Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* object = NodeProperties::GetValueInput(node, 0);
for (Node* effect = NodeProperties::GetEffectInput(node);;
effect = NodeProperties::GetEffectInput(effect)) {
switch (effect->opcode()) {
@@ -53,11 +53,24 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
}
break;
}
+ case IrOpcode::kBeginRegion:
case IrOpcode::kStoreBuffer:
case IrOpcode::kStoreElement: {
// These can never interfere with field loads.
break;
}
+ case IrOpcode::kFinishRegion: {
+ // "Look through" FinishRegion nodes to make LoadElimination capable
+ // of looking into atomic regions.
+ if (object == effect) object = NodeProperties::GetValueInput(effect, 0);
+ break;
+ }
+ case IrOpcode::kAllocate: {
+        // Allocations don't interfere with field loads. If we see the actual
+        // allocation for the {object}, we can abort the walk.
+ if (object == effect) return NoChange();
+ break;
+ }
default: {
if (!effect->op()->HasProperty(Operator::kNoWrite) ||
effect->op()->EffectInputCount() != 1) {
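
ReduceLoadField above walks the effect chain backwards, skipping effects that provably cannot clobber the loaded field and stopping at anything unknown. A toy model of that walk (not V8 code; a simplified aliasing rule replaces the real FieldAccess comparison):

#include <cstdio>

enum Kind { kLoadField, kStoreField, kStoreElement, kAllocate, kOther };

struct Node {
  Kind kind;
  int object;       // which object the node touches
  int field;        // which field (for field ops)
  Node* effect_in;  // previous node on the effect chain
};

// Toy aliasing model: stores to a different (object, field) pair never
// interfere; any unknown effect conservatively ends the walk.
Node* FindDominatingStore(Node* load) {
  for (Node* e = load->effect_in; e != nullptr; e = e->effect_in) {
    switch (e->kind) {
      case kStoreField:
        if (e->object == load->object && e->field == load->field) return e;
        break;
      case kLoadField:
      case kStoreElement:  // element stores never clobber field loads
        break;
      case kAllocate:      // reached the object's allocation: nothing to reuse
        if (e->object == load->object) return nullptr;
        break;
      default:
        return nullptr;
    }
  }
  return nullptr;
}

int main() {
  Node store{kStoreField, /*object=*/1, /*field=*/0, nullptr};
  Node other{kStoreElement, 2, 0, &store};
  Node load{kLoadField, 1, 0, &other};
  std::printf("reusable store found: %s\n",
              FindDominatingStore(&load) == &store ? "yes" : "no");
}
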
diff --git a/chromium/v8/src/compiler/loop-analysis.h b/chromium/v8/src/compiler/loop-analysis.h
index 1a06b666dd2..2ed5bc22800 100644
--- a/chromium/v8/src/compiler/loop-analysis.h
+++ b/chromium/v8/src/compiler/loop-analysis.h
@@ -113,7 +113,7 @@ class LoopTree : public ZoneObject {
if (node->opcode() == IrOpcode::kLoop) return node;
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
private:
diff --git a/chromium/v8/src/compiler/loop-peeling.cc b/chromium/v8/src/compiler/loop-peeling.cc
index 8c980aa125e..b553a9ff580 100644
--- a/chromium/v8/src/compiler/loop-peeling.cc
+++ b/chromium/v8/src/compiler/loop-peeling.cc
@@ -311,8 +311,9 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
// Update all the value and effect edges at once.
if (!value_edges.empty()) {
// TODO(titzer): machine type is wrong here.
- Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), node,
- peeling.map(node), merge);
+ Node* phi =
+ graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), node,
+ peeling.map(node), merge);
for (Edge edge : value_edges) edge.UpdateTo(phi);
value_edges.clear();
}
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.cc b/chromium/v8/src/compiler/machine-operator-reducer.cc
index c174da2f7f2..19ea0620536 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.cc
+++ b/chromium/v8/src/compiler/machine-operator-reducer.cc
@@ -600,7 +600,8 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
node->ReplaceInput(2, Word32And(dividend, mask));
NodeProperties::ChangeOp(
- node, common()->Select(kMachInt32, BranchHint::kFalse));
+ node,
+ common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
} else {
Node* quotient = Int32Div(dividend, divisor);
DCHECK_EQ(dividend, node->InputAt(0));
@@ -650,7 +651,7 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
if (m.IsPhi()) {
Node* const phi = m.node();
- DCHECK_EQ(kRepFloat64, RepresentationOf(OpParameter<MachineType>(phi)));
+ DCHECK_EQ(MachineRepresentation::kFloat64, PhiRepresentationOf(phi->op()));
if (phi->OwnedBy(node)) {
// TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
// => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
@@ -665,8 +666,9 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
if (reduction.Changed()) input = reduction.replacement();
phi->ReplaceInput(i, input);
}
- NodeProperties::ChangeOp(phi,
- common()->Phi(kMachInt32, value_input_count));
+ NodeProperties::ChangeOp(
+ phi,
+ common()->Phi(MachineRepresentation::kWord32, value_input_count));
return Replace(phi);
}
}
@@ -675,15 +677,16 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
- MachineType const rep =
- RepresentationOf(StoreRepresentationOf(node->op()).machine_type());
+ MachineRepresentation const rep =
+ StoreRepresentationOf(node->op()).representation();
Node* const value = node->InputAt(2);
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
- if (m.right().HasValue() &&
- ((rep == kRepWord8 && (m.right().Value() & 0xff) == 0xff) ||
- (rep == kRepWord16 && (m.right().Value() & 0xffff) == 0xffff))) {
+ if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
+ (m.right().Value() & 0xff) == 0xff) ||
+ (rep == MachineRepresentation::kWord16 &&
+ (m.right().Value() & 0xffff) == 0xffff))) {
node->ReplaceInput(2, m.left().node());
return Changed(node);
}
@@ -691,9 +694,10 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher m(value);
- if (m.left().IsWord32Shl() &&
- ((rep == kRepWord8 && m.right().IsInRange(1, 24)) ||
- (rep == kRepWord16 && m.right().IsInRange(1, 16)))) {
+ if (m.left().IsWord32Shl() && ((rep == MachineRepresentation::kWord8 &&
+ m.right().IsInRange(1, 24)) ||
+ (rep == MachineRepresentation::kWord16 &&
+ m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
node->ReplaceInput(2, mleft.left().node());
@@ -811,12 +815,14 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
}
} else if (mleft.left().IsLoad()) {
LoadRepresentation const rep =
- OpParameter<LoadRepresentation>(mleft.left().node());
- if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
+ LoadRepresentationOf(mleft.left().node()->op());
+ if (m.right().Is(24) && mleft.right().Is(24) &&
+ rep == MachineType::Int8()) {
// Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
return Replace(mleft.left().node());
}
- if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
+ if (m.right().Is(16) && mleft.right().Is(16) &&
+ rep == MachineType::Int16()) {
        // Load[kMachInt16] << 16 >> 16 => Load[kMachInt16]
return Replace(mleft.left().node());
}
@@ -938,8 +944,8 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
- Node* shl = NULL;
- Node* shr = NULL;
+ Node* shl = nullptr;
+ Node* shr = nullptr;
// Recognize rotation, we are matching either:
// * x << y | x >>> (32 - y) => x ror (32 - y), i.e x rol y
// * x << (32 - y) | x >>> y => x ror y
@@ -962,8 +968,8 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
// Case where y is a constant.
if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
} else {
- Node* sub = NULL;
- Node* y = NULL;
+ Node* sub = nullptr;
+ Node* y = nullptr;
if (mshl.right().IsInt32Sub()) {
sub = mshl.right().node();
y = mshr.right().node();
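
The rotation matching in ReduceWord32Or relies on the identity (x << y) | (x >>> (32 - y)) == x ror (32 - y). A standalone check over all shift amounts 1..31:

#include <cassert>
#include <cstdint>

// Rotate right by r bits, r in 0..31.
static uint32_t ror(uint32_t x, unsigned r) {
  r &= 31u;
  return (x >> r) | (x << ((32u - r) & 31u));
}

int main() {
  const uint32_t x = 0xDEADBEEFu;
  for (unsigned y = 1; y < 32; ++y) {
    assert(((x << y) | (x >> (32u - y))) == ror(x, 32u - y));
  }
}
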
diff --git a/chromium/v8/src/compiler/machine-operator.cc b/chromium/v8/src/compiler/machine-operator.cc
index 38bb056157d..511a10dd029 100644
--- a/chromium/v8/src/compiler/machine-operator.cc
+++ b/chromium/v8/src/compiler/machine-operator.cc
@@ -34,6 +34,10 @@ std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
switch (kind) {
case kNoWriteBarrier:
return os << "NoWriteBarrier";
+ case kMapWriteBarrier:
+ return os << "MapWriteBarrier";
+ case kPointerWriteBarrier:
+ return os << "PointerWriteBarrier";
case kFullWriteBarrier:
return os << "FullWriteBarrier";
}
@@ -43,7 +47,7 @@ std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
- return lhs.machine_type() == rhs.machine_type() &&
+ return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
}
@@ -54,16 +58,22 @@ bool operator!=(StoreRepresentation lhs, StoreRepresentation rhs) {
size_t hash_value(StoreRepresentation rep) {
- return base::hash_combine(rep.machine_type(), rep.write_barrier_kind());
+ return base::hash_combine(rep.representation(), rep.write_barrier_kind());
}
std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
- return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
+ return os << "(" << rep.representation() << " : " << rep.write_barrier_kind()
<< ")";
}
+LoadRepresentation LoadRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kLoad, op->opcode());
+ return OpParameter<LoadRepresentation>(op);
+}
+
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStore, op->opcode());
return OpParameter<StoreRepresentation>(op);
@@ -99,6 +109,7 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
@@ -117,7 +128,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
+ 0, 2) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
@@ -130,7 +144,15 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
@@ -167,28 +189,48 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
+ V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundUp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundUp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1)
+ V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
#define MACHINE_TYPE_LIST(V) \
- V(MachFloat32) \
- V(MachFloat64) \
- V(MachInt8) \
- V(MachUint8) \
- V(MachInt16) \
- V(MachUint16) \
- V(MachInt32) \
- V(MachUint32) \
- V(MachInt64) \
- V(MachUint64) \
- V(MachPtr) \
- V(MachAnyTagged)
+ V(Float32) \
+ V(Float64) \
+ V(Int8) \
+ V(Uint8) \
+ V(Int16) \
+ V(Uint16) \
+ V(Int32) \
+ V(Uint32) \
+ V(Int64) \
+ V(Uint64) \
+ V(Pointer) \
+ V(AnyTagged)
+
+
+#define MACHINE_REPRESENTATION_LIST(V) \
+ V(kFloat32) \
+ V(kFloat64) \
+ V(kWord8) \
+ V(kWord16) \
+ V(kWord32) \
+ V(kWord64) \
+ V(kTagged)
struct MachineOperatorGlobalCache {
@@ -223,14 +265,14 @@ struct MachineOperatorGlobalCache {
Load##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, k##Type) {} \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct CheckedLoad##Type##Operator final \
: public Operator1<CheckedLoadRepresentation> { \
CheckedLoad##Type##Operator() \
: Operator1<CheckedLoadRepresentation>( \
IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {} \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
@@ -243,13 +285,24 @@ struct MachineOperatorGlobalCache {
: Operator1<StoreRepresentation>( \
IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
"Store", 3, 1, 1, 0, 1, 0, \
- StoreRepresentation(k##Type, write_barrier_kind)) {} \
+ StoreRepresentation(MachineRepresentation::Type, \
+ write_barrier_kind)) {} \
}; \
struct Store##Type##NoWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##NoWriteBarrier##Operator() \
: Store##Type##Operator(kNoWriteBarrier) {} \
}; \
+ struct Store##Type##MapWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##MapWriteBarrier##Operator() \
+ : Store##Type##Operator(kMapWriteBarrier) {} \
+ }; \
+ struct Store##Type##PointerWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##PointerWriteBarrier##Operator() \
+ : Store##Type##Operator(kPointerWriteBarrier) {} \
+ }; \
struct Store##Type##FullWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##FullWriteBarrier##Operator() \
@@ -260,12 +313,16 @@ struct MachineOperatorGlobalCache {
CheckedStore##Type##Operator() \
: Operator1<CheckedStoreRepresentation>( \
IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
- "CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {} \
+ "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
+ } \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
+ Store##Type##PointerWriteBarrier##Operator \
+ kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
CheckedStore##Type##Operator kCheckedStore##Type;
- MACHINE_TYPE_LIST(STORE)
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
};
@@ -274,10 +331,12 @@ static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
LAZY_INSTANCE_INITIALIZER;
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
+MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
+ MachineRepresentation word,
Flags flags)
: cache_(kCache.Get()), word_(word), flags_(flags) {
- DCHECK(word == kRepWord32 || word == kRepWord64);
+ DCHECK(word == MachineRepresentation::kWord32 ||
+ word == MachineRepresentation::kWord64);
}
@@ -310,35 +369,36 @@ const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
- switch (rep) {
-#define LOAD(Type) \
- case k##Type: \
- return &cache_.kLoad##Type;
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoad##Type; \
+ }
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
- default:
- break;
- }
UNREACHABLE();
return nullptr;
}
-const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
- switch (rep.machine_type()) {
-#define STORE(Type) \
- case k##Type: \
- switch (rep.write_barrier_kind()) { \
- case kNoWriteBarrier: \
- return &cache_.k##Store##Type##NoWriteBarrier; \
- case kFullWriteBarrier: \
- return &cache_.k##Store##Type##FullWriteBarrier; \
- } \
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ switch (store_rep.write_barrier_kind()) { \
+ case kNoWriteBarrier: \
+ return &cache_.k##Store##kRep##NoWriteBarrier; \
+ case kMapWriteBarrier: \
+ return &cache_.k##Store##kRep##MapWriteBarrier; \
+ case kPointerWriteBarrier: \
+ return &cache_.k##Store##kRep##PointerWriteBarrier; \
+ case kFullWriteBarrier: \
+ return &cache_.k##Store##kRep##FullWriteBarrier; \
+ } \
break;
- MACHINE_TYPE_LIST(STORE)
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
@@ -348,15 +408,12 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
const Operator* MachineOperatorBuilder::CheckedLoad(
CheckedLoadRepresentation rep) {
- switch (rep) {
-#define LOAD(Type) \
- case k##Type: \
- return &cache_.kCheckedLoad##Type;
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kCheckedLoad##Type; \
+ }
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
- default:
- break;
- }
UNREACHABLE();
return nullptr;
}
@@ -365,12 +422,13 @@ const Operator* MachineOperatorBuilder::CheckedLoad(
const Operator* MachineOperatorBuilder::CheckedStore(
CheckedStoreRepresentation rep) {
switch (rep) {
-#define STORE(Type) \
- case k##Type: \
- return &cache_.kCheckedStore##Type;
- MACHINE_TYPE_LIST(STORE)
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kCheckedStore##kRep;
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
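
Across these hunks the old bit-flag MachineType enum gives way to a small value class with static constructors plus a separate MachineRepresentation enum; class values cannot appear as switch cases, which is why Load() and CheckedLoad() above moved from switches over k##Type to if-chains over MachineType::Type(). A rough standalone sketch of the new shape (field and enumerator names are illustrative, not the real src/machine-type.h):

#include <cassert>

enum class MachineRepresentation { kNone, kWord8, kWord32, kWord64, kTagged };
enum class MachineSemantic { kNone, kInt32, kUint32, kAny };

// Sketch only: a (representation, semantics) pair behind named constructors.
class MachineType {
 public:
  constexpr MachineType(MachineRepresentation rep, MachineSemantic sem)
      : rep_(rep), sem_(sem) {}
  constexpr MachineRepresentation representation() const { return rep_; }
  bool operator==(MachineType other) const {
    return rep_ == other.rep_ && sem_ == other.sem_;
  }
  static constexpr MachineType Int8() {
    return {MachineRepresentation::kWord8, MachineSemantic::kInt32};
  }
  static constexpr MachineType AnyTagged() {
    return {MachineRepresentation::kTagged, MachineSemantic::kAny};
  }

 private:
  MachineRepresentation rep_;
  MachineSemantic sem_;
};

int main() {
  assert(MachineType::Int8() == MachineType::Int8());
  assert(MachineType::Int8().representation() ==
         MachineRepresentation::kWord8);
}
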
diff --git a/chromium/v8/src/compiler/machine-operator.h b/chromium/v8/src/compiler/machine-operator.h
index 27abfb4accc..00fefe3539b 100644
--- a/chromium/v8/src/compiler/machine-operator.h
+++ b/chromium/v8/src/compiler/machine-operator.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_MACHINE_OPERATOR_H_
#include "src/base/flags.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -49,7 +49,12 @@ TruncationMode TruncationModeOf(Operator const*);
// Supported write barrier modes.
-enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+enum WriteBarrierKind {
+ kNoWriteBarrier,
+ kMapWriteBarrier,
+ kPointerWriteBarrier,
+ kFullWriteBarrier
+};
std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
@@ -57,20 +62,22 @@ std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
+LoadRepresentation LoadRepresentationOf(Operator const*);
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
class StoreRepresentation final {
public:
- StoreRepresentation(MachineType machine_type,
+ StoreRepresentation(MachineRepresentation representation,
WriteBarrierKind write_barrier_kind)
- : machine_type_(machine_type), write_barrier_kind_(write_barrier_kind) {}
+ : representation_(representation),
+ write_barrier_kind_(write_barrier_kind) {}
- MachineType machine_type() const { return machine_type_; }
+ MachineRepresentation representation() const { return representation_; }
WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }
private:
- MachineType machine_type_;
+ MachineRepresentation representation_;
WriteBarrierKind write_barrier_kind_;
};
@@ -91,7 +98,7 @@ CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
// A CheckedStore needs a MachineType.
-typedef MachineType CheckedStoreRepresentation;
+typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
@@ -105,26 +112,41 @@ class MachineOperatorBuilder final : public ZoneObject {
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- // Note that Float*Max behaves like `(a < b) ? b : a`, not like Math.max().
+ // Note that Float*Max behaves like `(b < a) ? a : b`, not like Math.max().
// Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
kFloat32Max = 1u << 0,
kFloat32Min = 1u << 1,
kFloat64Max = 1u << 2,
kFloat64Min = 1u << 3,
- kFloat64RoundDown = 1u << 4,
- kFloat64RoundTruncate = 1u << 5,
- kFloat64RoundTiesAway = 1u << 6,
- kInt32DivIsSafe = 1u << 7,
- kUint32DivIsSafe = 1u << 8,
- kWord32ShiftIsSafe = 1u << 9,
+ kFloat32RoundDown = 1u << 4,
+ kFloat64RoundDown = 1u << 5,
+ kFloat32RoundUp = 1u << 6,
+ kFloat64RoundUp = 1u << 7,
+ kFloat32RoundTruncate = 1u << 8,
+ kFloat64RoundTruncate = 1u << 9,
+ kFloat32RoundTiesEven = 1u << 10,
+ kFloat64RoundTiesEven = 1u << 11,
+ kFloat64RoundTiesAway = 1u << 12,
+ kInt32DivIsSafe = 1u << 13,
+ kUint32DivIsSafe = 1u << 14,
+ kWord32ShiftIsSafe = 1u << 15,
+ kWord32Ctz = 1u << 16,
+ kWord64Ctz = 1u << 17,
+ kWord32Popcnt = 1u << 18,
+ kWord64Popcnt = 1u << 19,
kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
- kFloat64RoundDown | kFloat64RoundTruncate |
- kFloat64RoundTiesAway
+ kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat64RoundUp | kFloat32RoundTruncate |
+ kFloat64RoundTruncate | kFloat64RoundTiesAway |
+ kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
};
typedef base::Flags<Flag, unsigned> Flags;
- explicit MachineOperatorBuilder(Zone* zone, MachineType word = kMachPtr,
- Flags supportedOperators = kNoFlags);
+ explicit MachineOperatorBuilder(
+ Zone* zone,
+ MachineRepresentation word = MachineType::PointerRepresentation(),
+ Flags supportedOperators = kNoFlags);
const Operator* Word32And();
const Operator* Word32Or();
@@ -135,6 +157,9 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Word32Ror();
const Operator* Word32Equal();
const Operator* Word32Clz();
+ const OptionalOperator Word32Ctz();
+ const OptionalOperator Word32Popcnt();
+ const OptionalOperator Word64Popcnt();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -144,6 +169,8 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Word64Shr();
const Operator* Word64Sar();
const Operator* Word64Ror();
+ const Operator* Word64Clz();
+ const OptionalOperator Word64Ctz();
const Operator* Word64Equal();
const Operator* Int32Add();
@@ -165,7 +192,9 @@ class MachineOperatorBuilder final : public ZoneObject {
bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
const Operator* Int64Add();
+ const Operator* Int64AddWithOverflow();
const Operator* Int64Sub();
+ const Operator* Int64SubWithOverflow();
const Operator* Int64Mul();
const Operator* Int64Div();
const Operator* Int64Mod();
@@ -184,16 +213,24 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* TryTruncateFloat32ToInt64();
+ const Operator* TryTruncateFloat64ToInt64();
+ const Operator* TryTruncateFloat32ToUint64();
+ const Operator* TryTruncateFloat64ToUint64();
const Operator* ChangeInt32ToFloat64();
const Operator* ChangeInt32ToInt64();
const Operator* ChangeUint32ToFloat64();
const Operator* ChangeUint32ToUint64();
- // These operators truncate numbers, both changing the representation of
- // the number and mapping multiple input values onto the same output value.
+ // These operators truncate or round numbers, both changing the representation
+ // of the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ const Operator* RoundInt64ToFloat32();
+ const Operator* RoundInt64ToFloat64();
+ const Operator* RoundUint64ToFloat32();
+ const Operator* RoundUint64ToFloat64();
// These operators reinterpret the bits of a floating point number as an
// integer and vice versa.
@@ -244,9 +281,15 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Float64Abs();
// Floating point rounding.
+ const OptionalOperator Float32RoundDown();
const OptionalOperator Float64RoundDown();
+ const OptionalOperator Float32RoundUp();
+ const OptionalOperator Float64RoundUp();
+ const OptionalOperator Float32RoundTruncate();
const OptionalOperator Float64RoundTruncate();
const OptionalOperator Float64RoundTiesAway();
+ const OptionalOperator Float32RoundTiesEven();
+ const OptionalOperator Float64RoundTiesEven();
// Floating point bit representation.
const Operator* Float64ExtractLowWord32();
@@ -270,9 +313,9 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* CheckedStore(CheckedStoreRepresentation);
// Target machine word-size assumed by this builder.
- bool Is32() const { return word() == kRepWord32; }
- bool Is64() const { return word() == kRepWord64; }
- MachineType word() const { return word_; }
+ bool Is32() const { return word() == MachineRepresentation::kWord32; }
+ bool Is64() const { return word() == MachineRepresentation::kWord64; }
+ MachineRepresentation word() const { return word_; }
// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
@@ -305,7 +348,7 @@ class MachineOperatorBuilder final : public ZoneObject {
private:
MachineOperatorGlobalCache const& cache_;
- MachineType const word_;
+ MachineRepresentation const word_;
Flags const flags_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
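
StoreRepresentation now carries one of four write-barrier kinds, letting stores that know more about the value skip parts of the barrier. A hedged sketch of how a store site might pick a kind (the predicates here are illustrative; the actual selection logic lives elsewhere in the compiler):

#include <cstdio>

enum WriteBarrierKind {
  kNoWriteBarrier,       // value is a Smi: no barrier needed
  kMapWriteBarrier,      // value is known to be a Map
  kPointerWriteBarrier,  // value is a heap pointer, never a Smi
  kFullWriteBarrier      // nothing is known: emit the Smi check too
};

WriteBarrierKind ChooseBarrier(bool value_is_smi, bool value_is_map,
                               bool value_is_pointer) {
  if (value_is_smi) return kNoWriteBarrier;
  if (value_is_map) return kMapWriteBarrier;
  if (value_is_pointer) return kPointerWriteBarrier;
  return kFullWriteBarrier;
}

int main() {
  std::printf("%d\n", ChooseBarrier(false, false, true));  // prints 2
}
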
diff --git a/chromium/v8/src/compiler/machine-type.cc b/chromium/v8/src/compiler/machine-type.cc
deleted file mode 100644
index 7475a038cc0..00000000000
--- a/chromium/v8/src/compiler/machine-type.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/machine-type.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define PRINT(bit) \
- if (type & bit) { \
- if (before) os << "|"; \
- os << #bit; \
- before = true; \
- }
-
-
-std::ostream& operator<<(std::ostream& os, const MachineType& type) {
- bool before = false;
- PRINT(kRepBit);
- PRINT(kRepWord8);
- PRINT(kRepWord16);
- PRINT(kRepWord32);
- PRINT(kRepWord64);
- PRINT(kRepFloat32);
- PRINT(kRepFloat64);
- PRINT(kRepTagged);
-
- PRINT(kTypeBool);
- PRINT(kTypeInt32);
- PRINT(kTypeUint32);
- PRINT(kTypeInt64);
- PRINT(kTypeUint64);
- PRINT(kTypeNumber);
- PRINT(kTypeAny);
- return os;
-}
-
-
-#undef PRINT
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/compiler/machine-type.h b/chromium/v8/src/compiler/machine-type.h
deleted file mode 100644
index 0cd2a84010b..00000000000
--- a/chromium/v8/src/compiler/machine-type.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_MACHINE_TYPE_H_
-#define V8_COMPILER_MACHINE_TYPE_H_
-
-#include <iosfwd>
-
-#include "src/base/bits.h"
-#include "src/globals.h"
-#include "src/signature.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Machine-level types and representations.
-// TODO(titzer): Use the real type system instead of MachineType.
-enum MachineType : uint16_t {
- // Representations.
- kRepBit = 1u << 0,
- kRepWord8 = 1u << 1,
- kRepWord16 = 1u << 2,
- kRepWord32 = 1u << 3,
- kRepWord64 = 1u << 4,
- kRepFloat32 = 1u << 5,
- kRepFloat64 = 1u << 6,
- kRepTagged = 1u << 7,
-
- // Types.
- kTypeBool = 1u << 8,
- kTypeInt32 = 1u << 9,
- kTypeUint32 = 1u << 10,
- kTypeInt64 = 1u << 11,
- kTypeUint64 = 1u << 12,
- kTypeNumber = 1u << 13,
- kTypeAny = 1u << 14,
-
- // Machine types.
- kMachNone = 0u,
- kMachBool = kRepBit | kTypeBool,
- kMachFloat32 = kRepFloat32 | kTypeNumber,
- kMachFloat64 = kRepFloat64 | kTypeNumber,
- kMachInt8 = kRepWord8 | kTypeInt32,
- kMachUint8 = kRepWord8 | kTypeUint32,
- kMachInt16 = kRepWord16 | kTypeInt32,
- kMachUint16 = kRepWord16 | kTypeUint32,
- kMachInt32 = kRepWord32 | kTypeInt32,
- kMachUint32 = kRepWord32 | kTypeUint32,
- kMachInt64 = kRepWord64 | kTypeInt64,
- kMachUint64 = kRepWord64 | kTypeUint64,
- kMachIntPtr = (kPointerSize == 4) ? kMachInt32 : kMachInt64,
- kMachUintPtr = (kPointerSize == 4) ? kMachUint32 : kMachUint64,
- kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
- kMachAnyTagged = kRepTagged | kTypeAny
-};
-
-V8_INLINE size_t hash_value(MachineType type) {
- return static_cast<size_t>(type);
-}
-
-std::ostream& operator<<(std::ostream& os, const MachineType& type);
-
-typedef uint16_t MachineTypeUnion;
-
-// Globally useful machine types and constants.
-const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
- kRepWord32 | kRepWord64 | kRepFloat32 |
- kRepFloat64 | kRepTagged;
-const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
- kTypeInt64 | kTypeUint64 | kTypeNumber |
- kTypeAny;
-
-// Gets only the type of the given type.
-inline MachineType TypeOf(MachineType machine_type) {
- int result = machine_type & kTypeMask;
- return static_cast<MachineType>(result);
-}
-
-// Gets only the representation of the given type.
-inline MachineType RepresentationOf(MachineType machine_type) {
- int result = machine_type & kRepMask;
- CHECK(base::bits::IsPowerOfTwo32(result));
- return static_cast<MachineType>(result);
-}
-
-// Gets the log2 of the element size in bytes of the machine type.
-inline int ElementSizeLog2Of(MachineType machine_type) {
- switch (RepresentationOf(machine_type)) {
- case kRepBit:
- case kRepWord8:
- return 0;
- case kRepWord16:
- return 1;
- case kRepWord32:
- case kRepFloat32:
- return 2;
- case kRepWord64:
- case kRepFloat64:
- return 3;
- case kRepTagged:
- return kPointerSizeLog2;
- default:
- break;
- }
- UNREACHABLE();
- return -1;
-}
-
-// Gets the element size in bytes of the machine type.
-inline int ElementSizeOf(MachineType machine_type) {
- const int shift = ElementSizeLog2Of(machine_type);
- DCHECK_NE(-1, shift);
- return 1 << shift;
-}
-
-inline bool IsFloatingPoint(MachineType type) {
- MachineType rep = RepresentationOf(type);
- return rep == kRepFloat32 || rep == kRepFloat64;
-}
-
-typedef Signature<MachineType> MachineSignature;
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_MACHINE_TYPE_H_
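
For reference, the deleted header above packed representation and type into one 16-bit enum, with RepresentationOf()/TypeOf() masking the halves back apart. A standalone demo of that encoding, using constants taken from the deleted file:

#include <cassert>
#include <cstdint>

enum OldMachineType : uint16_t {
  kRepWord8 = 1u << 1,   // a representation bit (bits 0..7)
  kTypeInt32 = 1u << 9,  // a type bit (bits 8..14)
  kMachInt8 = kRepWord8 | kTypeInt32,
};

int main() {
  const uint16_t kRepMask = 0x00FFu;   // masks the representation bits
  const uint16_t kTypeMask = 0x7F00u;  // masks the type bits
  assert((kMachInt8 & kRepMask) == kRepWord8);    // RepresentationOf()
  assert((kMachInt8 & kTypeMask) == kTypeInt32);  // TypeOf()
}
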
diff --git a/chromium/v8/src/compiler/mips/OWNERS b/chromium/v8/src/compiler/mips/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/compiler/mips/OWNERS
+++ b/chromium/v8/src/compiler/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/compiler/mips/code-generator-mips.cc b/chromium/v8/src/compiler/mips/code-generator-mips.cc
index 7769b9e739e..75e4b9e7a8b 100644
--- a/chromium/v8/src/compiler/mips/code-generator-mips.cc
+++ b/chromium/v8/src/compiler/mips/code-generator-mips.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -54,6 +54,18 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
@@ -106,10 +118,10 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -180,24 +192,63 @@ class OutOfLineRound : public OutOfLineCode {
};
-class OutOfLineTruncate final : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
-class OutOfLineFloor final : public OutOfLineRound {
- public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ private:
+ DoubleRegister const result_;
};
-class OutOfLineCeil final : public OutOfLineRound {
+class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+    // and restore ra properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Addu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
};
@@ -248,19 +299,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
}
-Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
- switch (condition) {
- case kOverflow:
- return lt;
- case kNotOverflow:
- return ge;
- default:
- break;
- }
- UNREACHABLE();
- return kNoCondition;
-}
-
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
FlagsCondition condition) {
switch (condition) {
@@ -371,10 +409,15 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -382,22 +425,62 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
__ or_(at, at, kScratchReg2); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ }
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addiu(sp, sp, sp_slot_delta * kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -417,10 +500,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(at);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -428,6 +513,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -442,6 +528,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -452,16 +539,28 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -471,6 +570,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -483,12 +584,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -503,19 +607,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Addu(at, object, index);
+ __ sw(value, MemOperand(at));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
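+    // The write barrier is split into a cheap inline path and the
+    // OutOfLineRecordWrite stub: the sw itself always executes, and
+    // CheckPageFlag only branches out of line when the page holding |object|
+    // is flagged with kPointersFromHereAreInterestingMask, so the common
+    // case pays for one flag test after the store.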
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsAddOvf:
- __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsSub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsSubOvf:
- __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -528,9 +648,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kMipsDiv:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsDivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
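    // For both divisions above, the conditional move gives a zero divisor a
    // defined result, which is what lets the selector advertise
    // kInt32DivIsSafe/kUint32DivIsSafe: the output is constrained to the
    // first input register (DefineSameAsFirst in VisitInt32Div below), so on
    // r6 selnez keeps the quotient only when the divisor is non-zero, and on
    // older variants Movz(out, in1, in1) overwrites the quotient with the
    // (zero) divisor when in1 == 0. Either way, x / 0 yields 0.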
case kMipsMod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -544,6 +674,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsOr:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsNor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMipsXor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -574,6 +712,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
+ case kMipsExt:
+ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMipsIns:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
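+    // Ext and Ins are the MIPS32r2+ bit-field instructions: ext rd, rs, pos,
+    // size copies bits [pos, pos + size) of rs into the low bits of rd,
+    // while ins replaces bits [pos, pos + size) of rd with the low bits of
+    // rs. The zero_reg special case above clears a low bit-field in place,
+    // e.g. "ins rd, $zero, 0, 8" computes rd & ~0xff with no mask constant.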
case kMipsRor:
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -693,15 +843,91 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputDoubleRegister(1));
break;
case kMipsFloat64RoundDown: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMipsFloat32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
break;
}
case kMipsFloat64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ break;
+ }
+ case kMipsFloat32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
break;
}
case kMipsFloat64RoundUp: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMipsFloat32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMipsFloat64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMipsFloat32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ break;
+ }
+ case kMipsFloat64Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat64Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Max: {
+ // (b < a) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ // Left operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMipsFloat32Min: {
+ // (a < b) ? a : b
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+ // Right operand is result, passthrough if false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
break;
}
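    // For the four min/max cases above, the pre-r6 path depends on the
    // instruction selector emitting the operands swapped with the result
    // tied to the first input ("Reverse operands, and use same reg. for
    // result and right operand" in VisitFloat64Max below), so the output
    // already holds one candidate and a single movt_d/movt_s conditionally
    // replaces it with the other.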
case kMipsCvtSD: {
@@ -718,11 +944,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMipsCvtSW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMipsCvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
break;
}
+ case kMipsFloorWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
@@ -730,6 +980,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMipsFloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsTruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
@@ -794,12 +1068,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMipsStoreToStackSlot: {
@@ -810,18 +1087,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kMipsStoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ addu(index, object, index);
- __ sw(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -863,7 +1128,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
}
-}
+} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
@@ -915,11 +1180,34 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
- } else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
- cc = FlagsConditionToConditionOvf(branch->condition);
- __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kMipsAddOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kMipsSubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+          UNSUPPORTED_COND(kMipsSubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -927,14 +1215,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMipsCmpS, branch->condition);
}
- __ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
- i.InputSingleRegister(1));
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMipsCmpD) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMipsCmpD, branch->condition);
}
- __ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -968,30 +1266,34 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
__ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- __ xori(result, zero_reg, 1); // Create 1 for true.
- if (IsMipsArchVariant(kMips32r6)) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
- } else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
- cc = FlagsConditionToConditionOvf(condition);
- // Return 1 on overflow.
- __ Slt(result, kCompareReg, Operand(zero_reg));
- if (cc == ge) // Invert result on not overflow.
- __ xori(result, result, 1);
- return;
+ Label flabel, tlabel;
+ switch (instr->arch_opcode()) {
+ case kMipsAddOvf:
+ __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+        break;
+ case kMipsSubOvf:
+ __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
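+    // Both *BranchNoOvf helpers branch to |flabel| when the operation does
+    // not overflow, so the fall-through sets |result| to 1 (overflow) and
+    // the branch target sets it to 0.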
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
@@ -999,20 +1301,18 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case ne: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- __ Subu(kScratchReg, left, right);
- __ xori(result, zero_reg, 1);
- if (IsMipsArchVariant(kMips32r6)) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+ // Pass left operand if right is zero.
+ select = left;
} else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Subu(kScratchReg, left, right);
+ select = kScratchReg;
+ }
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+ // Sltu produces 0 for equality, invert the result.
+ __ xori(result, result, 1);
}
} break;
case lt:
@@ -1057,8 +1357,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kMipsCmpD ||
instr->arch_opcode() == kMipsCmpS) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
@@ -1081,8 +1385,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
DCHECK(instr->arch_opcode() == kMipsCmpS);
__ cmp(cc, W, kDoubleCompareReg, left, right);
}
- __ mfc1(at, kDoubleCompareReg);
- __ srl(result, at, 31); // Cmp returns all 1s for true.
+ __ mfc1(result, kDoubleCompareReg);
+ __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB.
if (!predicate) // Toggle result for not equal.
__ xori(result, result, 1);
}
@@ -1139,17 +1443,17 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_shrink_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1212,10 +1516,10 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopFPU(saves_fpu);
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(sp, fp);
__ Pop(ra, fp);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
@@ -1236,7 +1540,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1342,7 +1646,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/chromium/v8/src/compiler/mips/instruction-codes-mips.h b/chromium/v8/src/compiler/mips/instruction-codes-mips.h
index db8f2511e97..c9381775c8f 100644
--- a/chromium/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/chromium/v8/src/compiler/mips/instruction-codes-mips.h
@@ -25,11 +25,14 @@ namespace compiler {
V(MipsModU) \
V(MipsAnd) \
V(MipsOr) \
+ V(MipsNor) \
V(MipsXor) \
V(MipsClz) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsExt) \
+ V(MipsIns) \
V(MipsRor) \
V(MipsMov) \
V(MipsTst) \
@@ -54,15 +57,28 @@ namespace compiler {
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
V(MipsTruncUwD) \
V(MipsCvtDW) \
V(MipsCvtDUw) \
+ V(MipsCvtSW) \
V(MipsLb) \
V(MipsLbu) \
V(MipsSb) \
@@ -79,10 +95,13 @@ namespace compiler {
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64Max) \
+ V(MipsFloat64Min) \
+ V(MipsFloat32Max) \
+ V(MipsFloat32Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
- V(MipsStackClaim) \
- V(MipsStoreWriteBarrier)
+ V(MipsStackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/mips/instruction-scheduler-mips.cc b/chromium/v8/src/compiler/mips/instruction-scheduler-mips.cc
new file mode 100644
index 00000000000..af86a87ad78
--- /dev/null
+++ b/chromium/v8/src/compiler/mips/instruction-scheduler-mips.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/mips/instruction-selector-mips.cc
index 3c4b378553e..61cea76b22c 100644
--- a/chromium/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/chromium/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -44,11 +44,10 @@ class MipsOperandGenerator final : public OperandGenerator {
return is_uint16(value);
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
case kCheckedStoreFloat64:
- return is_int16(value + kIntSize);
+ return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
+ std::numeric_limits<int16_t>::max() >= (value + kIntSize);
default:
return is_int16(value);
}
@@ -128,32 +127,32 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMipsLwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMipsLdc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -178,59 +177,126 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMipsStoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMipsSwc1;
- break;
- case kRepFloat64:
- opcode = kMipsSdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMipsSb;
- break;
- case kRepWord16:
- opcode = kMipsSh;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kMipsSw;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+  // TODO(mips): This write-barrier lowering could likely be done in a
+  // cleaner way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsSwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsSdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMipsSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMipsSh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsSw;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
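// The barrier path keeps |base|, |index|, and (except for map writes)
// |value| in unique registers because kArchStoreWithWriteBarrier hands two
// scratch temps to the out-of-line RecordWrite code, and none of the inputs
// may alias them. The chosen RecordWriteMode travels to the code generator
// encoded in the opcode's MiscField.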
void InstructionSelector::VisitWord32And(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+      // Ext cannot extract bits past the register size; however, since the
+      // shift has already introduced zeros in the high bits, we can use Ext
+      // with a smaller width and the remaining bits are guaranteed to be
+      // zero.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+      // Insert zeros for the (x >> K) << K => x & ~(2^K - 1) reduction and
+      // avoid loading the inverted mask as a constant.
+ Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0), g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMipsAnd);
}
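// Concretely (illustrative values): (x >> 24) & 0xff matches the first
// pattern with lsb 24 and mask_width 8 and lowers to a single
// "ext rd, rs, 24, 8", while x & 0xffffff00 matches the second pattern (the
// inverted mask 0xff is contiguous from bit 0) and lowers to
// "ins rd, $zero, 0, 8".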
@@ -241,16 +307,81 @@ void InstructionSelector::VisitWord32Or(Node* node) {
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMipsXor);
}
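// In effect, x ^ -1 lowers to "nor rd, rs, $zero" (bitwise NOT with no
// constant load), and (a | b) ^ -1 fuses into a single "nor rd, ra, rb",
// since nor computes ~(ra | rb) directly.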
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMipsShl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMipsShl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ MipsOperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMipsShr, node);
}
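// Illustrative instance of the Shr(And(x, mask), imm) match: for
// (x & 0xff00) >> 8 the shifted-out mask stays contiguous and
// mask_msb + mask_width + lsb == 32 holds (16 + 8 + 8), so the pair lowers
// to a single "ext rd, rs, 8, 8".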
@@ -270,6 +401,12 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
@@ -332,7 +469,7 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
void InstructionSelector::VisitInt32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -340,7 +477,7 @@ void InstructionSelector::VisitInt32Div(Node* node) {
void InstructionSelector::VisitUint32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -377,6 +514,65 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMipsFloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMipsCeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMipsRoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMipsTruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMipsFloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMipsCeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMipsRoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
VisitRR(this, kMipsTruncWD, node);
}
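// The net effect of the covering checks above is instruction fusion: when a
// Float64RoundDown feeds only this ChangeFloat64ToInt32, the pair collapses
// into one kMipsFloorWD (floor_w_d plus mfc1) instead of a round-to-double
// followed by a separate truncating conversion, and the float32 rounding ops
// reach the same fusion through their ChangeFloat32ToFloat64 detour.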
@@ -387,6 +583,16 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMipsCvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
VisitRR(this, kMipsCvtSD, node);
}
@@ -476,16 +682,64 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+  } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+  } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ MipsOperandGenerator g(this);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -508,11 +762,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat32RoundDown, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kMipsFloat64RoundDown, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTruncate, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMipsFloat64RoundTruncate, node);
}
@@ -523,20 +797,20 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- MipsOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTiesEven, node);
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat64RoundTiesEven, node);
+}
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ MipsOperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -546,8 +820,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* input : buffer.pushed_nodes) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ for (PushParameter input : (*arguments)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -558,160 +832,47 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kMipsStackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(n << kPointerSizeLog2));
}
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- MipsOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
- // Possibly align stack here for functions.
- int push_count = static_cast<int>(descriptor->StackParameterCount());
- if (push_count > 0) {
- Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -732,27 +893,27 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
@@ -797,10 +958,14 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMipsCmpS, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
}
@@ -808,10 +973,14 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}
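// Both float comparison visitors special-case a literal zero operand: it is
// passed as an immediate, which the code generator's
// InputOrZeroDoubleRegister and InputOrZeroSingleRegister helpers map to
// kDoubleRegZero, so the common compare-against-zero pattern avoids
// materializing a floating-point constant.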
@@ -826,6 +995,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -840,6 +1019,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -920,7 +1109,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+      // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch.
Node* const node = value->InputAt(0);
@@ -1125,9 +1314,21 @@ InstructionSelector::SupportedMachineOperatorFlags() {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
- return flags;
+ return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
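// Of the unconditionally advertised flags, kInt32DivIsSafe/kUint32DivIsSafe
// are backed by the zero-divisor conditional moves in the code generator,
// and kWord32ShiftIsSafe reflects the hardware masking shift amounts to
// their low five bits, letting the compiler omit an explicit "& 0x1f" on
// variable shifts. The Float64Round* flags stay gated on r2/r6 plus FP64,
// presumably because the legacy rounding fallback relies on the 64-bit
// floor_l_d/trunc_l_d/ceil_l_d results.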
} // namespace compiler
diff --git a/chromium/v8/src/compiler/mips64/OWNERS b/chromium/v8/src/compiler/mips64/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/compiler/mips64/OWNERS
+++ b/chromium/v8/src/compiler/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/compiler/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/mips64/code-generator-mips64.cc
index 053434eec9a..1b81aa56980 100644
--- a/chromium/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -53,6 +53,18 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
@@ -106,10 +118,10 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -180,24 +192,63 @@ class OutOfLineRound : public OutOfLineCode {
};
-class OutOfLineTruncate final : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
-class OutOfLineFloor final : public OutOfLineRound {
- public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ private:
+ DoubleRegister const result_;
};
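
// Generate() covers the one case the inline rounding fast path cannot: when
// the rounded integer is zero, cvt_s_w would always produce +0.0, so the
// out-of-line code rebuilds a correctly signed zero by isolating the sign
// bit (srl 31 then sll 31) of the input bits that the fast path stashed in
// kScratchReg before branching here.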
-class OutOfLineCeil final : public OutOfLineRound {
+class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(index),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+    // and restore ra properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ Daddu(scratch1_, object_, index_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const index_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
};
@@ -372,10 +423,15 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
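+  /* On r6, set the FCSR rounding mode and use a single rint_d; pre-r6     \
+     rounds through the int64 unit, with an out-of-line path restoring the \
+     sign when the rounded result is zero. */                              \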
+ if (kArchVariant == kMips64r6) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -383,21 +439,60 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ dmfc1(at, i.OutputDoubleRegister()); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
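+  /* Same scheme for float32; the binary32 exponent constants are local    \
+     since no HeapNumber-style helpers exist for single precision. */      \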
+ if (kArchVariant == kMips64r6) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ }
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ daddiu(sp, sp, sp_slot_delta * kPointerSize);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
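+// Allocate extra parameter slots for the tail callee if needed, restore ra
+// and fp from the current frame, and switch slot addressing to sp.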
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -417,10 +512,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(at);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -428,6 +525,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -441,6 +539,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -450,16 +549,28 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -469,6 +580,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -481,12 +594,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -501,18 +617,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
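+      // Perform the store, then jump to the out-of-line write barrier when
+      // the object's page has the pointers-from-here flag set.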
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ Daddu(at, object, index);
+ __ sd(value, MemOperand(at));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dadd:
__ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DaddOvf:
+      // Pseudo-instruction: the overflow check is emitted at the branch or
+      // materialization site, so no code is generated here.
+ break;
case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dsub:
__ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DsubOvf:
+      // Pseudo-instruction; handled at the use site like kMips64DaddOvf above.
+ break;
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -522,11 +662,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64MulHighU:
__ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DMulHigh:
+ __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
case kMips64Div:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
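+      // Clear the result when dividing by zero. On r6, selnez relies on the
+      // output aliasing the first input (DefineSameAsFirst in the selector);
+      // the same pattern is repeated for DivU, Ddiv, and DdivU below.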
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64DivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64Mod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -539,9 +692,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kMips64Ddiv:
__ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64DdivU:
__ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64Dmod:
__ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -555,12 +718,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Nor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+        DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Clz:
__ Clz(i.OutputRegister(), i.InputRegister(0));
break;
+ case kMips64Dclz:
+ __ dclz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -592,9 +766,37 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
break;
- case kMips64Dext:
- __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
+ case kMips64Ins:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
+ case kMips64Dext: {
+ int16_t pos = i.InputInt8(1);
+ int16_t size = i.InputInt8(2);
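+      // MIPS64 splits extracts across three opcodes: Dext (size <= 32,
+      // pos < 32), Dextm (size > 32), and Dextu (pos >= 32).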
+ if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
+ __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ } else {
+ DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
+ __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
+ }
+ case kMips64Dins:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
break;
case kMips64Dshl:
if (instr->InputAt(1)->IsRegister()) {
@@ -758,15 +960,91 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputDoubleRegister(1));
break;
case kMips64Float64RoundDown: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMips64Float32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
break;
}
case kMips64Float64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ break;
+ }
+ case kMips64Float32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
break;
}
case kMips64Float64RoundUp: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMips64Float32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMips64Float64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMips64Float32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ break;
+ }
+ case kMips64Float64Max: {
+ // (b < a) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+        // Left operand is the result; it passes through when false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float64Min: {
+ // (a < b) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+        // Right operand is the result; it passes through when false.
+ __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float32Max: {
+ // (b < a) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+        // Left operand is the result; it passes through when false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
+ break;
+ }
+ case kMips64Float32Min: {
+ // (a < b) ? a : b
+ if (kArchVariant == kMips64r6) {
+ __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(0));
+ } else {
+ __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
+        // Right operand is the result; it passes through when false.
+ __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
+ }
break;
}
case kMips64CvtSD:
@@ -781,9 +1059,52 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMips64CvtSW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtSL: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ dmtc1(i.InputRegister(0), scratch);
+ __ cvt_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kMips64CvtDL: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ dmtc1(i.InputRegister(0), scratch);
+ __ cvt_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMips64CvtDUw: {
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64CvtDUl: {
+ __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64CvtSUl: {
+ __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64FloorWD: {
FPURegister scratch = kScratchDoubleReg;
- __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64CeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64RoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
break;
}
case kMips64TruncWD: {
@@ -793,12 +1114,108 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMips64FloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64CeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64RoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncLS: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register tmp_fcsr = kScratchReg;
+ Register result = kScratchReg2;
+
+ bool load_status = instr->OutputCount() > 1;
+ if (load_status) {
+ // Save FCSR.
+ __ cfc1(tmp_fcsr, FCSR);
+ // Clear FPU flags.
+ __ ctc1(zero_reg, FCSR);
+ }
+      // Other architectures use round-to-zero here, so we do the same.
+ __ trunc_l_s(scratch, i.InputDoubleRegister(0));
+ __ dmfc1(i.OutputRegister(), scratch);
+ if (load_status) {
+ __ cfc1(result, FCSR);
+ // Check for overflow and NaNs.
+ __ andi(result, result,
+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
+ __ Slt(result, zero_reg, result);
+ __ xori(result, result, 1);
+ __ mov(i.OutputRegister(1), result);
+        // Restore FCSR.
+ __ ctc1(tmp_fcsr, FCSR);
+ }
+ break;
+ }
+ case kMips64TruncLD: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register tmp_fcsr = kScratchReg;
+ Register result = kScratchReg2;
+
+ bool load_status = instr->OutputCount() > 1;
+ if (load_status) {
+ // Save FCSR.
+ __ cfc1(tmp_fcsr, FCSR);
+ // Clear FPU flags.
+ __ ctc1(zero_reg, FCSR);
+ }
+      // Other architectures use round-to-zero here, so we do the same.
+ __ trunc_l_d(scratch, i.InputDoubleRegister(0));
+ __ dmfc1(i.OutputRegister(0), scratch);
+ if (load_status) {
+ __ cfc1(result, FCSR);
+ // Check for overflow and NaNs.
+ __ andi(result, result,
+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
+ __ Slt(result, zero_reg, result);
+ __ xori(result, result, 1);
+ __ mov(i.OutputRegister(1), result);
+        // Restore FCSR.
+ __ ctc1(tmp_fcsr, FCSR);
+ }
+ break;
+ }
case kMips64TruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMips64TruncUlS: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function.
+ __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch,
+ result);
+ break;
+ }
+ case kMips64TruncUlD: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function.
+ __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch,
+ result);
+ break;
+ }
case kMips64BitcastDL:
__ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -869,12 +1286,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMips64StoreToStackSlot: {
@@ -885,18 +1305,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kMips64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
- Register value = i.InputRegister(2);
- __ daddu(index, object, index);
- __ sd(value, MemOperand(index));
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RAStatus ra_status = kRAHasNotBeenSaved;
- __ RecordWrite(object, index, value, ra_status, mode);
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -998,6 +1406,34 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
+ } else if (instr->arch_opcode() == kMips64DaddOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kMips64DsubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1005,14 +1441,24 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpS, branch->condition);
}
- __ BranchF32(tlabel, NULL, cc, i.InputSingleRegister(0),
- i.InputSingleRegister(1));
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
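+      // Load 0.0 into kDoubleRegZero on first use so that immediate zero
+      // operands can be compared without materializing a constant each time.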
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, branch->condition);
}
- __ BranchF64(tlabel, NULL, cc, i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -1046,19 +1492,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
__ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- __ xori(result, zero_reg, 1); // Create 1 for true.
- if (kArchVariant == kMips64r6) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
- } else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Sltu(result, zero_reg, kScratchReg);
+ if (cc == eq) {
+      // Sltu produces 0 for equality; invert the result.
+ __ xori(result, result, 1);
}
return;
} else if (instr->arch_opcode() == kMips64Dadd ||
@@ -1071,6 +1508,28 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (cc == eq) // Toggle result for not overflow.
__ xori(result, result, 1);
return;
+ } else if (instr->arch_opcode() == kMips64DaddOvf ||
+ instr->arch_opcode() == kMips64DsubOvf) {
+ Label flabel, tlabel;
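+    // Materialize the overflow bit: the NoOvf branch jumps to flabel when no
+    // overflow occurs, so the fall-through loads 1 and flabel loads 0.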
+ switch (instr->arch_opcode()) {
+ case kMips64DaddOvf:
+ __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+
+ break;
+ case kMips64DsubOvf:
+ __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
@@ -1078,20 +1537,18 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case ne: {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
- __ Dsubu(kScratchReg, left, right);
- __ xori(result, zero_reg, 1);
- if (kArchVariant == kMips64r6) {
- if (cc == eq) {
- __ seleqz(result, result, kScratchReg);
- } else {
- __ selnez(result, result, kScratchReg);
- }
+ Register select;
+ if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
+        // Use the left operand directly when the right operand is zero.
+ select = left;
} else {
- if (cc == eq) {
- __ Movn(result, zero_reg, kScratchReg);
- } else {
- __ Movz(result, zero_reg, kScratchReg);
- }
+ __ Dsubu(kScratchReg, left, right);
+ select = kScratchReg;
+ }
+ __ Sltu(result, zero_reg, select);
+ if (cc == eq) {
+        // Sltu produces 0 for equality; invert the result.
+ __ xori(result, result, 1);
}
} break;
case lt:
@@ -1136,8 +1593,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kMips64CmpD ||
instr->arch_opcode() == kMips64CmpS) {
- FPURegister left = i.InputDoubleRegister(0);
- FPURegister right = i.InputDoubleRegister(1);
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (kArchVariant != kMips64r6) {
@@ -1160,9 +1621,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
DCHECK(instr->arch_opcode() == kMips64CmpS);
__ cmp(cc, W, kDoubleCompareReg, left, right);
}
- __ dmfc1(at, kDoubleCompareReg);
- __ dsrl32(result, at, 31); // Cmp returns all 1s for true.
- if (!predicate) // Toggle result for not equal.
+ __ dmfc1(result, kDoubleCompareReg);
+    __ andi(result, result, 1);  // Cmp returns all 1s or all 0s; use the LSB.
+
+ if (!predicate) // Toggle result for not equal.
__ xori(result, result, 1);
}
return;
@@ -1196,9 +1658,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
__ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
// Ensure that dd-ed labels use 8 byte aligned addresses.
- if ((masm()->pc_offset() & 7) != 0) {
- __ nop();
- }
+ __ Align(8);
__ bal(&here);
__ dsll(at, input, 3); // Branch delay slot.
__ bind(&here);
@@ -1222,17 +1682,17 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1292,10 +1752,10 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopFPU(saves_fpu);
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(sp, fp);
__ Pop(ra, fp);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
@@ -1317,7 +1777,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1423,7 +1883,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/chromium/v8/src/compiler/mips64/instruction-codes-mips64.h b/chromium/v8/src/compiler/mips64/instruction-codes-mips64.h
index 38e4c46485e..778c6add0f7 100644
--- a/chromium/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/chromium/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -14,10 +14,13 @@ namespace compiler {
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Mips64Add) \
V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
V(Mips64Sub) \
V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
V(Mips64Mul) \
V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
V(Mips64MulHighU) \
V(Mips64Dmul) \
V(Mips64Div) \
@@ -30,13 +33,17 @@ namespace compiler {
V(Mips64DmodU) \
V(Mips64And) \
V(Mips64Or) \
+ V(Mips64Nor) \
V(Mips64Xor) \
V(Mips64Clz) \
V(Mips64Shl) \
V(Mips64Shr) \
V(Mips64Sar) \
V(Mips64Ext) \
+ V(Mips64Ins) \
V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
V(Mips64Dshl) \
V(Mips64Dshr) \
V(Mips64Dsar) \
@@ -68,12 +75,33 @@ namespace compiler {
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
V(Mips64CvtSD) \
V(Mips64CvtDS) \
V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
V(Mips64TruncUwD) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
V(Mips64Lb) \
V(Mips64Lbu) \
V(Mips64Sb) \
@@ -94,10 +122,13 @@ namespace compiler {
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float64Max) \
+ V(Mips64Float64Min) \
+ V(Mips64Float32Max) \
+ V(Mips64Float32Min) \
V(Mips64Push) \
V(Mips64StoreToStackSlot) \
- V(Mips64StackClaim) \
- V(Mips64StoreWriteBarrier)
+ V(Mips64StackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
new file mode 100644
index 00000000000..af86a87ad78
--- /dev/null
+++ b/chromium/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
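+// Instruction scheduling is not implemented for MIPS64 yet; with
+// SchedulerSupported() returning false, the remaining hooks are unreachable.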
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/mips64/instruction-selector-mips64.cc
index d20c1c72f66..1b12bd9aec7 100644
--- a/chromium/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/chromium/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -132,35 +132,34 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMips64Lwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMips64Ldc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -185,67 +184,180 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
- Emit(kMips64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, t0),
- g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kMips64Swc1;
- break;
- case kRepFloat64:
- opcode = kMips64Sdc1;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kMips64Sb;
- break;
- case kRepWord16:
- opcode = kMips64Sh;
- break;
- case kRepWord32:
- opcode = kMips64Sw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kMips64Sd;
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ // TODO(mips): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMips64Swc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMips64Sdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Sb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Sh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Sw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Sd;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
}
}
void InstructionSelector::VisitWord32And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+        // Ext cannot extract bits past the register size; however, since
+        // shifting the original value introduced zeros, we can still use
+        // Ext with a smaller mask and the remaining bits will be zero.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and avoid loading the inverted mask constant.
+ Emit(kMips64Ins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
void InstructionSelector::VisitWord64And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+
+        // Dext cannot extract bits past the register size; however, since
+        // shifting the original value introduced zeros, we can still use
+        // Dext with a smaller mask and the remaining bits will be zero.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation64(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and avoid loading the inverted mask constant. Dins cannot insert bits
+      // past the word size, so only shifts smaller than 32 are handled.
+ Emit(kMips64Dins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
@@ -261,21 +373,105 @@ void InstructionSelector::VisitWord64Or(Node* node) {
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+    // Use Nor for bit negation and avoid materializing the xori constant.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+    // Use Nor for bit negation and avoid materializing the xori constant.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+    // contiguous and the shift immediate is non-zero.
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Shl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Shl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Shr, node);
}
@@ -297,11 +493,56 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
g.UseImmediate(m.right().node()));
return;
}
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
+    // contiguous and the shift immediate is non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint64_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Dshl, node);
}
void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x3f;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Dshr, node);
}
@@ -321,11 +562,28 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kMips64Dclz, node);
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
// TODO(plind): Consider multiply & add optimization from arm port.
@@ -380,6 +638,21 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
}
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Dmul high.
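+        // When the shifted-out low words are zero (as after smi untagging),
+        // the high 64 bits of the full product are the desired result.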
+ Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMips64Mul, node);
}
@@ -390,12 +663,7 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- Mips64OperandGenerator g(this);
- InstructionOperand const dmul_operand = g.TempRegister();
- Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
- Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
- g.TempImmediate(32));
+ VisitRRR(this, kMips64MulHighU, node);
}
@@ -438,7 +706,22 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
void InstructionSelector::VisitInt32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Ddiv.
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -446,7 +729,7 @@ void InstructionSelector::VisitInt32Div(Node* node) {
void InstructionSelector::VisitUint32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -454,6 +737,21 @@ void InstructionSelector::VisitUint32Div(Node* node) {
void InstructionSelector::VisitInt32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Dmod.
+ Emit(kMips64Dmod, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -470,7 +768,7 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitInt64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -478,7 +776,7 @@ void InstructionSelector::VisitInt64Div(Node* node) {
void InstructionSelector::VisitUint64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -515,6 +813,65 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMips64FloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMips64CeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMips64RoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMips64TruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMips64FloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMips64CeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMips64RoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
VisitRR(this, kMips64TruncWD, node);
}
@@ -524,6 +881,71 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
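+  // The optional second projection receives a success bit derived from the
+  // FCSR overflow/invalid-operation flags in the code generator.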
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
+}
+
+
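
Each VisitTryTruncate* emitter above defines one mandatory value output and, only when the graph actually uses Projection(1), a second output reporting whether the truncation succeeded. A hedged standalone model of that value-plus-success contract (the helper name and range check are illustrative, not V8's implementation):

    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <utility>

    // Toy model of kMips64TruncLD's two outputs: the truncated value and
    // a success bit that is false for NaN and out-of-range inputs.
    std::pair<int64_t, bool> try_truncate_f64_to_i64(double x) {
      const double kMin =
          static_cast<double>(std::numeric_limits<int64_t>::min());
      const double kMax = 9223372036854775808.0;  // 2^63, first overflow
      if (!(x >= kMin && x < kMax)) return {0, false};  // also catches NaN
      return {static_cast<int64_t>(x), true};
    }

    int main() {
      auto result = try_truncate_f64_to_i64(1e30);
      std::printf("value=%lld success=%d\n",
                  static_cast<long long>(result.first), result.second);
    }
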
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
@@ -548,7 +970,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
if (m.right().IsInRange(32, 63)) {
// After shifting right by 32 or more (e.g. smi untagging), no truncate is
Emit(kMips64Dsar, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.TempImmediate(kSmiShift));
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
return;
}
break;
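
The one-line fix in this hunk matters: the old code always shifted by kSmiShift even though the pattern accepts any shift amount in [32, 63]; now the node's own immediate is used. A small sketch of why the separate truncate can be dropped (the low 32 bits of the 64-bit arithmetic shift are exactly what TruncateInt64ToInt32 would produce):

    #include <cassert>
    #include <cstdint>

    // Models the combine: TruncateInt64ToInt32(Word64Sar(x, k)) with
    // 32 <= k <= 63 is a single dsra on MIPS64; no extra truncate needed.
    int32_t truncate_after_sar(int64_t x, unsigned k) {
      return static_cast<int32_t>(x >> k);
    }

    int main() {
      const unsigned kSmiShift = 32;  // smi untagging is the k == 32 case
      int64_t tagged = int64_t{42} << kSmiShift;
      assert(truncate_after_sar(tagged, kSmiShift) == 42);
    }
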
@@ -563,6 +986,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMips64CvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
VisitRR(this, kMips64CvtSD, node);
}
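
The match added above is sound because widening an int32 to float64 is exact (float64 carries 53 mantissa bits), so int32 -> float64 -> float32 rounds exactly once, the same as a direct int32 -> float32 conversion (cvt.s.w). A quick standalone check:

    #include <cassert>
    #include <cstdint>

    // int32 -> float64 is exact, so both paths round once and agree, even
    // for values like 2^24 + 1 that are not representable in float32.
    float via_double(int32_t i) {
      return static_cast<float>(static_cast<double>(i));
    }
    float direct(int32_t i) { return static_cast<float>(i); }

    int main() {
      for (int32_t i : {0, 1, 16777217, 2147483647, -2147483647}) {
        assert(via_double(i) == direct(i));
      }
    }
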
@@ -578,6 +1011,26 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSL, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDL, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSUl, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDUl, node);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
@@ -663,16 +1116,64 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Max, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float32Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Mips64OperandGenerator g(this);
+ if (kArchVariant == kMips64r6) {
+ Emit(kMips64Float64Min, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+
+ } else {
+ // Reverse operands, and use same reg. for result and right operand.
+ Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
+ }
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -695,11 +1196,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMips64Float32RoundDown, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kMips64Float64RoundDown, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMips64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMips64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMips64Float32RoundTruncate, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMips64Float64RoundTruncate, node);
}
@@ -710,20 +1231,20 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- Mips64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float32RoundTiesEven, node);
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float64RoundTiesEven, node);
+}
+
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ Mips64OperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -733,8 +1254,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* input : buffer.pushed_nodes) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ for (PushParameter input : (*arguments)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -744,164 +1265,49 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Emit(kMips64StackClaim, g.NoOutput(),
g.TempImmediate(push_count << kPointerSizeLog2));
}
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(flags);
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
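
EmitPrepareArguments above writes stack arguments at byte offsets of slot << kPointerSizeLog2, with C calls starting past the fixed argument-slot area and JS calls first claiming the needed slots. A worked example of the offset math; kCArgSlotCount's value here is a placeholder for illustration, not taken from this diff:

    #include <cstdio>

    // Worked example of the offset computation. kPointerSizeLog2 is 3 on
    // a 64-bit target; kCArgSlotCount stands in for the fixed C argument
    // slot area that the loop above skips over.
    int main() {
      const int kPointerSizeLog2 = 3;
      const int kCArgSlotCount = 0;  // hypothetical value
      for (int slot = kCArgSlotCount; slot < kCArgSlotCount + 3; ++slot) {
        std::printf("stack arg %d -> sp + %d bytes\n", slot,
                    slot << kPointerSizeLog2);
      }
    }
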
-void InstructionSelector::VisitTailCall(Node* node) {
- Mips64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
-
- const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
- if (push_count > 0) {
- Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = push_count - 1;
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -922,33 +1328,35 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -990,10 +1398,14 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMips64CmpS, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
}
@@ -1001,10 +1413,14 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
}
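
What the Float32/Float64BinopMatcher change buys: a comparison against a literal zero no longer materializes 0.0 into a fresh register; the zero rides as an immediate operand (the MIPS ports conventionally keep a double register preloaded with zero, an assumption here rather than something shown in this diff). The shapes that now hit the immediate path:

    #include <cstdio>

    // Sketch of the new zero match: +-0.0 on either side of a float
    // compare can travel as an immediate operand.
    bool is_positive(double x) { return 0.0 < x; }  // zero on the left
    bool is_negative(double x) { return x < 0.0; }  // zero on the right

    int main() {
      std::printf("%d %d\n", is_positive(2.0), is_negative(-2.0));  // 1 1
    }
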
@@ -1019,6 +1435,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -1033,6 +1459,16 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
case kSignedLessThan:
case kSignedGreaterThanOrEqual:
case kUnsignedLessThan:
@@ -1159,12 +1595,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1172,6 +1608,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DaddOvf, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DsubOvf, cont);
default:
break;
}
@@ -1282,6 +1724,26 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DaddOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DaddOvf, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DsubOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DsubOvf, &cont);
+}
+
+
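
The new Int64AddWithOverflow/Int64SubWithOverflow visitors pair naturally with the branch fusion added to VisitWordCompareZero earlier: Projection(0) of the node is the wrapped result and Projection(1) is the overflow flag that the kMips64DaddOvf/DsubOvf flags continuation branches on. A toy model of the node's two projections, assuming the GCC/Clang overflow builtin:

    #include <cstdint>
    #include <cstdio>

    struct AddResult {
      int64_t value;  // Projection(0): the wrapped sum
      bool overflow;  // Projection(1): the overflow bit
    };

    AddResult int64_add_with_overflow(int64_t a, int64_t b) {
      AddResult r;
      r.overflow = __builtin_add_overflow(a, b, &r.value);
      return r;
    }

    int main() {
      AddResult r = int64_add_with_overflow(INT64_MAX, 1);
      std::printf("value=%lld overflow=%d\n",
                  static_cast<long long>(r.value), r.overflow);
    }
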
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int64BinopMatcher m(node);
@@ -1384,8 +1846,21 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ return MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler
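
The expanded flag set returned here is the back end's capability advertisement: the machine-operator builder consults it before handing the graph an operator such as Float32RoundDown, falling back to a slower lowering when the bit is absent. A toy model of the handshake (flag names illustrative):

    #include <cstdint>
    #include <cstdio>

    // The instruction selector returns a bit set; graph construction only
    // uses an operator if its bit is on.
    enum Flag : uint32_t {
      kFloat32RoundDown = 1u << 0,
      kFloat64RoundTiesEven = 1u << 1,
    };

    bool Supports(uint32_t flags, Flag f) { return (flags & f) != 0; }

    int main() {
      uint32_t flags = kFloat32RoundDown | kFloat64RoundTiesEven;
      std::printf("%d\n", Supports(flags, kFloat32RoundDown));  // 1
    }
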
diff --git a/chromium/v8/src/compiler/move-optimizer.cc b/chromium/v8/src/compiler/move-optimizer.cc
index 7c2bbe06b87..bde3f7fe36f 100644
--- a/chromium/v8/src/compiler/move-optimizer.cc
+++ b/chromium/v8/src/compiler/move-optimizer.cc
@@ -14,10 +14,17 @@ typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
struct MoveKeyCompare {
bool operator()(const MoveKey& a, const MoveKey& b) const {
- if (a.first.EqualsModuloType(b.first)) {
- return a.second.CompareModuloType(b.second);
+ if (a.first.EqualsCanonicalized(b.first)) {
+ return a.second.CompareCanonicalized(b.second);
}
- return a.first.CompareModuloType(b.first);
+ return a.first.CompareCanonicalized(b.first);
+ }
+};
+
+struct OperandCompare {
+ bool operator()(const InstructionOperand& a,
+ const InstructionOperand& b) const {
+ return a.CompareCanonicalized(b);
}
};
@@ -25,15 +32,45 @@ typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
-bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
+bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
+ if (instr->IsNop()) return true;
+ if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
+ instr->ClobbersDoubleRegisters()) {
+ return false;
+ }
+ if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
+
+ ZoneSet<InstructionOperand, OperandCompare> operands(zone);
+ for (size_t i = 0; i < instr->InputCount(); ++i) {
+ operands.insert(*instr->InputAt(i));
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i) {
+ operands.insert(*instr->OutputAt(i));
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i) {
+ operands.insert(*instr->TempAt(i));
+ }
+ for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
+ i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
+ ParallelMove* moves = instr->parallel_moves()[i];
+ if (moves == nullptr) continue;
+ for (MoveOperands* move : *moves) {
+ if (operands.count(move->source()) > 0 ||
+ operands.count(move->destination()) > 0) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
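
GapsCanMoveOver got stricter above: a pending gap move may hop over an arch nop only if the move touches none of the instruction's input, output, or temp operands, and none of the operands of moves already parked on it. A toy restatement of the rule, with made-up operand names (not V8 types):

    #include <cstdio>
    #include <set>
    #include <string>

    // Collect the operands an instruction touches, then reject any move
    // whose source or destination is in that set.
    bool CanMoveOver(const std::set<std::string>& touched,
                     const std::string& src, const std::string& dst) {
      return touched.count(src) == 0 && touched.count(dst) == 0;
    }

    int main() {
      std::set<std::string> touched = {"r1"};  // the nop reads/writes r1
      std::printf("%d %d\n",
                  CanMoveOver(touched, "r1", "r2"),   // 0: blocked
                  CanMoveOver(touched, "r3", "r2"));  // 1: allowed
    }
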
-int FindFirstNonEmptySlot(Instruction* instr) {
+int FindFirstNonEmptySlot(const Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
for (; i <= Instruction::LAST_GAP_POSITION; i++) {
- auto moves = instr->parallel_moves()[i];
+ ParallelMove* moves = instr->parallel_moves()[i];
if (moves == nullptr) continue;
- for (auto move : *moves) {
+ for (MoveOperands* move : *moves) {
if (!move->IsRedundant()) return i;
move->Eliminate();
}
@@ -49,93 +86,108 @@ MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
: local_zone_(local_zone),
code_(code),
to_finalize_(local_zone),
- temp_vector_0_(local_zone),
- temp_vector_1_(local_zone) {}
+ local_vector_(local_zone) {}
void MoveOptimizer::Run() {
- for (auto* block : code()->instruction_blocks()) {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
CompressBlock(block);
}
- for (auto block : code()->instruction_blocks()) {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
if (block->PredecessorCount() <= 1) continue;
- bool has_only_deferred = true;
- for (RpoNumber pred_id : block->predecessors()) {
- if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
- has_only_deferred = false;
- break;
+ if (!block->IsDeferred()) {
+ bool has_only_deferred = true;
+ for (RpoNumber& pred_id : block->predecessors()) {
+ if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
+ has_only_deferred = false;
+ break;
+ }
}
+ // Merging here would pull common moves down out of the predecessors.
+ // When all predecessors are deferred but the current block is not,
+ // that would forfeit the optimization of keeping spills and fills
+ // confined to deferred blocks, so skip the merge in that case.
+ if (has_only_deferred) continue;
}
- // This would pull down common moves. If the moves occur in deferred blocks,
- // and the closest common successor is not deferred, we lose the
- // optimization of just spilling/filling in deferred blocks.
- if (has_only_deferred) continue;
OptimizeMerge(block);
}
- for (auto gap : to_finalize_) {
+ for (Instruction* gap : to_finalize_) {
FinalizeMoves(gap);
}
}
-void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
- ParallelMove* right) {
- DCHECK(eliminated->empty());
+void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+ if (right == nullptr) return;
+
+ MoveOpVector& eliminated = local_vector();
+ DCHECK(eliminated.empty());
+
if (!left->empty()) {
// Modify the right moves in place and collect moves that will be killed by
// merging the two gaps.
- for (auto move : *right) {
+ for (MoveOperands* move : *right) {
if (move->IsRedundant()) continue;
- auto to_eliminate = left->PrepareInsertAfter(move);
- if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
+ MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
+ if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
}
// Eliminate dead moves.
- for (auto to_eliminate : *eliminated) {
+ for (MoveOperands* to_eliminate : eliminated) {
to_eliminate->Eliminate();
}
- eliminated->clear();
+ eliminated.clear();
}
// Add all possibly modified moves from right side.
- for (auto move : *right) {
+ for (MoveOperands* move : *right) {
if (move->IsRedundant()) continue;
left->push_back(move);
}
// Nuke right.
right->clear();
+ DCHECK(eliminated.empty());
}
// Smash all consecutive moves into the left most move slot and accumulate them
// as much as possible across instructions.
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
- auto temp_vector = temp_vector_0();
- DCHECK(temp_vector.empty());
Instruction* prev_instr = nullptr;
for (int index = block->code_start(); index < block->code_end(); ++index) {
- auto instr = code()->instructions()[index];
+ Instruction* instr = code()->instructions()[index];
int i = FindFirstNonEmptySlot(instr);
- if (i <= Instruction::LAST_GAP_POSITION) {
- // Move the first non-empty gap to position 0.
- std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]);
- auto left = instr->parallel_moves()[0];
- // Compress everything into position 0.
- for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) {
- auto move = instr->parallel_moves()[i];
- if (move == nullptr) continue;
- CompressMoves(&temp_vector, left, move);
- }
- if (prev_instr != nullptr) {
- // Smash left into prev_instr, killing left.
- auto pred_moves = prev_instr->parallel_moves()[0];
- CompressMoves(&temp_vector, pred_moves, left);
- }
+ bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+
+ if (i == Instruction::LAST_GAP_POSITION) {
+ std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+ } else if (i == Instruction::FIRST_GAP_POSITION) {
+ CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
}
+ // We either have no moves, or, after swapping or compressing, we have
+ // all the moves in the first gap position, and none in the second/end gap
+ // position.
+ ParallelMove* first =
+ instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+ ParallelMove* last =
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
+ USE(last);
+
+ DCHECK(!has_moves ||
+ (first != nullptr && (last == nullptr || last->empty())));
+
if (prev_instr != nullptr) {
+ if (has_moves) {
+ // Smash first into prev_instr's gap moves, emptying first.
+ ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
+ CompressMoves(pred_moves, first);
+ }
// Slide prev_instr down so we always know where to look for it.
std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
}
+
prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
- if (GapsCanMoveOver(instr)) continue;
+ if (GapsCanMoveOver(instr, local_zone())) continue;
if (prev_instr != nullptr) {
to_finalize_.push_back(prev_instr);
prev_instr = nullptr;
@@ -147,7 +199,8 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
}
-Instruction* MoveOptimizer::LastInstruction(InstructionBlock* block) {
+const Instruction* MoveOptimizer::LastInstruction(
+ const InstructionBlock* block) const {
return code()->instructions()[block->last_instruction_index()];
}
@@ -156,14 +209,15 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
DCHECK(block->PredecessorCount() > 1);
// Ensure that the last instruction in each incoming block doesn't contain
// anything that would prevent moving gap moves across it.
- for (auto pred_index : block->predecessors()) {
- auto pred = code()->InstructionBlockAt(pred_index);
- auto last_instr = code()->instructions()[pred->last_instruction_index()];
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ const Instruction* last_instr =
+ code()->instructions()[pred->last_instruction_index()];
if (last_instr->IsCall()) return;
if (last_instr->TempCount() != 0) return;
if (last_instr->OutputCount() != 0) return;
for (size_t i = 0; i < last_instr->InputCount(); ++i) {
- auto op = last_instr->InputAt(i);
+ const InstructionOperand* op = last_instr->InputAt(i);
if (!op->IsConstant() && !op->IsImmediate()) return;
}
}
@@ -171,17 +225,17 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
MoveMap move_map(local_zone());
size_t correct_counts = 0;
// Accumulate set of shared moves.
- for (auto pred_index : block->predecessors()) {
- auto pred = code()->InstructionBlockAt(pred_index);
- auto instr = LastInstruction(pred);
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ const Instruction* instr = LastInstruction(pred);
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->empty()) {
return;
}
- for (auto move : *instr->parallel_moves()[0]) {
+ for (const MoveOperands* move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
- auto src = move->source();
- auto dst = move->destination();
+ InstructionOperand src = move->source();
+ InstructionOperand dst = move->destination();
MoveKey key = {src, dst};
auto res = move_map.insert(std::make_pair(key, 1));
if (!res.second) {
@@ -198,9 +252,10 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
for (int i = block->first_instruction_index();
i <= block->last_instruction_index(); ++i) {
instr = code()->instructions()[i];
- if (!GapsCanMoveOver(instr) || !instr->AreMovesRedundant()) break;
+ if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
+ break;
}
- DCHECK(instr != nullptr);
+ DCHECK_NOT_NULL(instr);
bool gap_initialized = true;
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->empty()) {
@@ -210,13 +265,13 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
gap_initialized = false;
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
- auto moves = instr->GetOrCreateParallelMove(
+ ParallelMove* moves = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(0), code_zone());
// Delete relevant entries in predecessors and move everything to block.
bool first_iteration = true;
- for (auto pred_index : block->predecessors()) {
- auto pred = code()->InstructionBlockAt(pred_index);
- for (auto move : *LastInstruction(pred)->parallel_moves()[0]) {
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
MoveKey key = {move->source(), move->destination()};
auto it = move_map.find(key);
@@ -231,8 +286,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
// Compress.
if (!gap_initialized) {
- CompressMoves(&temp_vector_0(), instr->parallel_moves()[0],
- instr->parallel_moves()[1]);
+ CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
}
@@ -245,12 +299,12 @@ bool IsSlot(const InstructionOperand& op) {
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
- if (!a->source().EqualsModuloType(b->source())) {
- return a->source().CompareModuloType(b->source());
+ if (!a->source().EqualsCanonicalized(b->source())) {
+ return a->source().CompareCanonicalized(b->source());
}
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
- return a->destination().CompareModuloType(b->destination());
+ return a->destination().CompareCanonicalized(b->destination());
}
} // namespace
@@ -259,10 +313,11 @@ bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
- auto loads = temp_vector_0();
+ MoveOpVector& loads = local_vector();
DCHECK(loads.empty());
+
// Find all the loads.
- for (auto move : *instr->parallel_moves()[0]) {
+ for (MoveOperands* move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
if (move->source().IsConstant() || IsSlot(move->source())) {
loads.push_back(move);
@@ -273,17 +328,17 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
// beginning of the group.
std::sort(loads.begin(), loads.end(), LoadCompare);
MoveOperands* group_begin = nullptr;
- for (auto load : loads) {
+ for (MoveOperands* load : loads) {
// New group.
if (group_begin == nullptr ||
- !load->source().EqualsModuloType(group_begin->source())) {
+ !load->source().EqualsCanonicalized(group_begin->source())) {
group_begin = load;
continue;
}
// Nothing to be gained from splitting here.
if (IsSlot(group_begin->destination())) continue;
// Insert new move into slot 1.
- auto slot_1 = instr->GetOrCreateParallelMove(
+ ParallelMove* slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
slot_1->AddMove(group_begin->destination(), load->destination());
load->Eliminate();
diff --git a/chromium/v8/src/compiler/move-optimizer.h b/chromium/v8/src/compiler/move-optimizer.h
index 2fdecf31e95..c9a3289d6b2 100644
--- a/chromium/v8/src/compiler/move-optimizer.h
+++ b/chromium/v8/src/compiler/move-optimizer.h
@@ -24,21 +24,18 @@ class MoveOptimizer final {
InstructionSequence* code() const { return code_; }
Zone* local_zone() const { return local_zone_; }
Zone* code_zone() const { return code()->zone(); }
- MoveOpVector& temp_vector_0() { return temp_vector_0_; }
- MoveOpVector& temp_vector_1() { return temp_vector_1_; }
+ MoveOpVector& local_vector() { return local_vector_; }
void CompressBlock(InstructionBlock* block);
- void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
- ParallelMove* right);
- Instruction* LastInstruction(InstructionBlock* block);
+ void CompressMoves(ParallelMove* left, ParallelMove* right);
+ const Instruction* LastInstruction(const InstructionBlock* block) const;
void OptimizeMerge(InstructionBlock* block);
void FinalizeMoves(Instruction* instr);
Zone* const local_zone_;
InstructionSequence* const code_;
Instructions to_finalize_;
- MoveOpVector temp_vector_0_;
- MoveOpVector temp_vector_1_;
+ MoveOpVector local_vector_;
DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
};
diff --git a/chromium/v8/src/compiler/node-cache.h b/chromium/v8/src/compiler/node-cache.h
index adddb67eff4..a8f9071af09 100644
--- a/chromium/v8/src/compiler/node-cache.h
+++ b/chromium/v8/src/compiler/node-cache.h
@@ -35,7 +35,8 @@ class NodeCache final {
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
- // returned by this method contains a non-NULL node, the caller can use that
+ // returned by this method contains a non-nullptr node, the caller can
+ // use that
// node. Otherwise it is the responsibility of the caller to fill the entry
// with a new node.
// Note that a previous cache entry may be overwritten if the cache becomes
diff --git a/chromium/v8/src/compiler/node-matchers.h b/chromium/v8/src/compiler/node-matchers.h
index bafe3daa683..37d0e1a561b 100644
--- a/chromium/v8/src/compiler/node-matchers.h
+++ b/chromium/v8/src/compiler/node-matchers.h
@@ -384,19 +384,19 @@ template <class AddMatcher>
struct BaseWithIndexAndDisplacementMatcher {
BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
: matches_(false),
- index_(NULL),
+ index_(nullptr),
scale_(0),
- base_(NULL),
- displacement_(NULL) {
+ base_(nullptr),
+ displacement_(nullptr) {
Initialize(node, allow_input_swap);
}
explicit BaseWithIndexAndDisplacementMatcher(Node* node)
: matches_(false),
- index_(NULL),
+ index_(nullptr),
scale_(0),
- base_(NULL),
- displacement_(NULL) {
+ base_(nullptr),
+ displacement_(nullptr) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
@@ -434,10 +434,10 @@ struct BaseWithIndexAndDisplacementMatcher {
AddMatcher m(node, allow_input_swap);
Node* left = m.left().node();
Node* right = m.right().node();
- Node* displacement = NULL;
- Node* base = NULL;
- Node* index = NULL;
- Node* scale_expression = NULL;
+ Node* displacement = nullptr;
+ Node* base = nullptr;
+ Node* index = nullptr;
+ Node* scale_expression = nullptr;
bool power_of_two_plus_one = false;
int scale = 0;
if (m.HasIndexInput() && left->OwnedBy(node)) {
@@ -519,7 +519,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
int64_t value = 0;
- if (displacement != NULL) {
+ if (displacement != nullptr) {
switch (displacement->opcode()) {
case IrOpcode::kInt32Constant: {
value = OpParameter<int32_t>(displacement);
@@ -534,11 +534,11 @@ struct BaseWithIndexAndDisplacementMatcher {
break;
}
if (value == 0) {
- displacement = NULL;
+ displacement = nullptr;
}
}
if (power_of_two_plus_one) {
- if (base != NULL) {
+ if (base != nullptr) {
// If the scale requires explicitly using the index as the base, but a
// base is already part of the match, then the (1 << N + 1) scale factor
// can't be folded into the match and the entire index * scale
diff --git a/chromium/v8/src/compiler/node-properties.cc b/chromium/v8/src/compiler/node-properties.cc
index 0d061a36c4f..cb6c3c43d83 100644
--- a/chromium/v8/src/compiler/node-properties.cc
+++ b/chromium/v8/src/compiler/node-properties.cc
@@ -4,9 +4,11 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/verifier.h"
+#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -130,6 +132,24 @@ bool NodeProperties::IsExceptionalCall(Node* node) {
// static
+void NodeProperties::ReplaceValueInput(Node* node, Node* value, int index) {
+ DCHECK(index < node->op()->ValueInputCount());
+ node->ReplaceInput(FirstValueIndex(node) + index, value);
+}
+
+
+// static
+void NodeProperties::ReplaceValueInputs(Node* node, Node* value) {
+ int value_input_count = node->op()->ValueInputCount();
+ DCHECK_LE(1, value_input_count);
+ node->ReplaceInput(0, value);
+ while (--value_input_count > 0) {
+ node->RemoveInput(value_input_count);
+ }
+}
+
+
+// static
void NodeProperties::ReplaceContextInput(Node* node, Node* context) {
node->ReplaceInput(FirstContextIndex(node), context);
}
@@ -157,11 +177,27 @@ void NodeProperties::ReplaceFrameStateInput(Node* node, int index,
// static
+void NodeProperties::RemoveFrameStateInput(Node* node, int index) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->RemoveInput(FirstFrameStateIndex(node) + index);
+}
+
+
+// static
void NodeProperties::RemoveNonValueInputs(Node* node) {
node->TrimInputCount(node->op()->ValueInputCount());
}
+// static
+void NodeProperties::RemoveValueInputs(Node* node) {
+ int value_input_count = node->op()->ValueInputCount();
+ while (--value_input_count >= 0) {
+ node->RemoveInput(value_input_count);
+ }
+}
+
+
void NodeProperties::MergeControlToEnd(Graph* graph,
CommonOperatorBuilder* common,
Node* node) {
@@ -269,6 +305,96 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
// static
+MaybeHandle<Context> NodeProperties::GetSpecializationContext(
+ Node* node, MaybeHandle<Context> context) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return context;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return MaybeHandle<Context>();
+}
+
+
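
The parameter-index arithmetic in the comment above deserves a worked instance. For a function with two declared parameters, {Start} produces the value outputs closure, receiver, p0, p1, context; since {Parameter} indices begin at -1 for the closure, the context parameter sits at index value_output_count - 2. A tiny self-contained check (the counts are illustrative):

    #include <cassert>

    int main() {
      const int value_output_count = 5;  // closure, receiver, p0, p1, context
      const int context_parameter_index = 3;
      assert(context_parameter_index == value_output_count - 2);
    }
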
+// static
+MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
+ Node* node, MaybeHandle<Context> native_context) {
+ while (true) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreateBlockContext:
+ case IrOpcode::kJSCreateCatchContext:
+ case IrOpcode::kJSCreateFunctionContext:
+ case IrOpcode::kJSCreateModuleContext:
+ case IrOpcode::kJSCreateScriptContext:
+ case IrOpcode::kJSCreateWithContext: {
+ // Skip over the intermediate contexts; we're only interested in the
+ // very last context in the context chain anyway.
+ node = NodeProperties::GetContextInput(node);
+ break;
+ }
+ case IrOpcode::kHeapConstant: {
+ // Extract the native context from the actual {context}.
+ Handle<Context> context =
+ Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ return handle(context->native_context());
+ }
+ case IrOpcode::kOsrValue: {
+ int const index = OpParameter<int>(node);
+ if (index == Linkage::kOsrContextSpillSlotIndex) {
+ return native_context;
+ }
+ return MaybeHandle<Context>();
+ }
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function,
+ // and {Parameter} indices start at -1, so value outputs of {Start}
+ // look like this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return native_context;
+ }
+ return MaybeHandle<Context>();
+ }
+ default:
+ return MaybeHandle<Context>();
+ }
+ }
+}
+
+
+// static
+MaybeHandle<JSGlobalObject> NodeProperties::GetSpecializationGlobalObject(
+ Node* node, MaybeHandle<Context> native_context) {
+ Handle<Context> context;
+ if (GetSpecializationNativeContext(node, native_context).ToHandle(&context)) {
+ return handle(context->global_object());
+ }
+ return MaybeHandle<JSGlobalObject>();
+}
+
+
+// static
+Type* NodeProperties::GetTypeOrAny(Node* node) {
+ return IsTyped(node) ? node->type() : Type::Any();
+}
+
+
+// static
bool NodeProperties::AllValueInputsAreTyped(Node* node) {
int input_count = node->op()->ValueInputCount();
for (int index = 0; index < input_count; ++index) {
diff --git a/chromium/v8/src/compiler/node-properties.h b/chromium/v8/src/compiler/node-properties.h
index 313d3749bb7..58005a7153d 100644
--- a/chromium/v8/src/compiler/node-properties.h
+++ b/chromium/v8/src/compiler/node-properties.h
@@ -72,16 +72,24 @@ class NodeProperties final {
return IrOpcode::IsPhiOpcode(node->opcode());
}
+ // Determines whether exceptions thrown by the given node are handled locally
+ // within the graph (i.e. an IfException projection is present).
static bool IsExceptionalCall(Node* node);
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
+ static void ReplaceValueInput(Node* node, Node* value, int index);
static void ReplaceContextInput(Node* node, Node* context);
static void ReplaceControlInput(Node* node, Node* control);
static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
+ static void RemoveFrameStateInput(Node* node, int index);
static void RemoveNonValueInputs(Node* node);
+ static void RemoveValueInputs(Node* node);
+
+ // Replaces all value inputs of {node} with the single input {value}.
+ static void ReplaceValueInputs(Node* node, Node* value);
// Merge the control node {node} into the end of the graph, introducing a
// merge node or expanding an existing merge node if necessary.
@@ -89,7 +97,7 @@ class NodeProperties final {
Node* node);
// Replace all uses of {node} with the given replacement nodes. All occurring
- // use kinds need to be replaced, {NULL} is only valid if a use kind is
+ // use kinds need to be replaced, {nullptr} is only valid if a use kind is
// guaranteed not to exist.
static void ReplaceUses(Node* node, Node* value, Node* effect = nullptr,
Node* success = nullptr, Node* exception = nullptr);
@@ -111,6 +119,27 @@ class NodeProperties final {
static void CollectControlProjections(Node* node, Node** proj, size_t count);
// ---------------------------------------------------------------------------
+ // Context.
+
+ // Try to retrieve the specialization context from the given {node},
+ // optionally utilizing the knowledge about the (outermost) function
+ // {context}.
+ static MaybeHandle<Context> GetSpecializationContext(
+ Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
+
+ // Try to retrieve the specialization native context from the given
+ // {node}, optionally utilizing the knowledge about the (outermost)
+ // {native_context}.
+ static MaybeHandle<Context> GetSpecializationNativeContext(
+ Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
+
+ // Try to retrieve the specialization global object from the given
+ // {node}, optionally utilizing the knowledge about the (outermost)
+ // {native_context}.
+ static MaybeHandle<JSGlobalObject> GetSpecializationGlobalObject(
+ Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
+
+ // ---------------------------------------------------------------------------
// Type.
static bool IsTyped(Node* node) { return node->type() != nullptr; }
@@ -118,6 +147,7 @@ class NodeProperties final {
DCHECK(IsTyped(node));
return node->type();
}
+ static Type* GetTypeOrAny(Node* node);
static void SetType(Node* node, Type* type) {
DCHECK_NOT_NULL(type);
node->set_type(type);
diff --git a/chromium/v8/src/compiler/node.cc b/chromium/v8/src/compiler/node.cc
index 022c44db2dd..198c3530847 100644
--- a/chromium/v8/src/compiler/node.cc
+++ b/chromium/v8/src/compiler/node.cc
@@ -56,6 +56,16 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
Node* node;
bool is_inline;
+#if DEBUG
+ // Verify that none of the inputs are {nullptr}.
+ for (int i = 0; i < input_count; i++) {
+ if (inputs[i] == nullptr) {
+ V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is nullptr",
+ static_cast<int>(id), op->mnemonic(), i);
+ }
+ }
+#endif
+
if (input_count > kMaxInlineCapacity) {
// Allocate out-of-line inputs.
int capacity =
@@ -271,6 +281,12 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
}
+void Node::Print() const {
+ OFStream os(stdout);
+ os << *this << std::endl;
+}
+
+
Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
: op_(op),
type_(nullptr),
diff --git a/chromium/v8/src/compiler/node.h b/chromium/v8/src/compiler/node.h
index d6a9b39a564..c73482fa69f 100644
--- a/chromium/v8/src/compiler/node.h
+++ b/chromium/v8/src/compiler/node.h
@@ -186,6 +186,7 @@ class Node final {
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
+ void Print() const;
private:
struct Use;
diff --git a/chromium/v8/src/compiler/opcodes.h b/chromium/v8/src/compiler/opcodes.h
index 33e17f6dd0d..a97fdfa54b3 100644
--- a/chromium/v8/src/compiler/opcodes.h
+++ b/chromium/v8/src/compiler/opcodes.h
@@ -44,11 +44,13 @@
V(Phi) \
V(EffectSet) \
V(EffectPhi) \
- V(ValueEffect) \
- V(Finish) \
+ V(Guard) \
+ V(BeginRegion) \
+ V(FinishRegion) \
V(FrameState) \
V(StateValues) \
V(TypedStateValues) \
+ V(ObjectState) \
V(Call) \
V(Parameter) \
V(OsrValue) \
@@ -90,8 +92,6 @@
JS_BITWISE_BINOP_LIST(V) \
JS_ARITH_BINOP_LIST(V)
-#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
-
#define JS_CONVERSION_UNOP_LIST(V) \
V(JSToBoolean) \
V(JSToNumber) \
@@ -103,31 +103,32 @@
V(JSTypeOf)
#define JS_SIMPLE_UNOP_LIST(V) \
- JS_LOGIC_UNOP_LIST(V) \
JS_CONVERSION_UNOP_LIST(V) \
JS_OTHER_UNOP_LIST(V)
-#define JS_OBJECT_OP_LIST(V) \
- V(JSCreate) \
- V(JSCreateArguments) \
- V(JSCreateClosure) \
- V(JSCreateLiteralArray) \
- V(JSCreateLiteralObject) \
- V(JSLoadProperty) \
- V(JSLoadNamed) \
- V(JSLoadGlobal) \
- V(JSStoreProperty) \
- V(JSStoreNamed) \
- V(JSStoreGlobal) \
- V(JSDeleteProperty) \
- V(JSHasProperty) \
+#define JS_OBJECT_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateClosure) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateLiteralRegExp) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSLoadGlobal) \
+ V(JSStoreProperty) \
+ V(JSStoreNamed) \
+ V(JSStoreGlobal) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
V(JSInstanceOf)
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
V(JSStoreContext) \
- V(JSLoadDynamicGlobal) \
- V(JSLoadDynamicContext) \
+ V(JSLoadDynamic) \
V(JSCreateFunctionContext) \
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
@@ -139,10 +140,13 @@
V(JSCallConstruct) \
V(JSCallFunction) \
V(JSCallRuntime) \
+ V(JSConvertReceiver) \
V(JSForInDone) \
V(JSForInNext) \
V(JSForInPrepare) \
V(JSForInStep) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
V(JSYield) \
V(JSStackCheck)
@@ -172,11 +176,15 @@
V(NumberMultiply) \
V(NumberDivide) \
V(NumberModulus) \
+ V(NumberBitwiseOr) \
+ V(NumberBitwiseXor) \
+ V(NumberBitwiseAnd) \
V(NumberShiftLeft) \
V(NumberShiftRight) \
V(NumberShiftRightLogical) \
V(NumberToInt32) \
V(NumberToUint32) \
+ V(NumberIsHoleNaN) \
V(PlainPrimitiveToNumber) \
V(ChangeTaggedToInt32) \
V(ChangeTaggedToUint32) \
@@ -193,6 +201,7 @@
V(StoreField) \
V(StoreBuffer) \
V(StoreElement) \
+ V(ObjectIsNumber) \
V(ObjectIsSmi)
// Opcodes for Machine-level operators.
@@ -226,6 +235,9 @@
V(Word32Sar) \
V(Word32Ror) \
V(Word32Clz) \
+ V(Word32Ctz) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
V(Word64And) \
V(Word64Or) \
V(Word64Xor) \
@@ -233,6 +245,8 @@
V(Word64Shr) \
V(Word64Sar) \
V(Word64Ror) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
@@ -245,7 +259,9 @@
V(Uint32Mod) \
V(Uint32MulHigh) \
V(Int64Add) \
+ V(Int64AddWithOverflow) \
V(Int64Sub) \
+ V(Int64SubWithOverflow) \
V(Int64Mul) \
V(Int64Div) \
V(Int64Mod) \
@@ -254,6 +270,10 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
@@ -261,6 +281,10 @@
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
@@ -273,6 +297,7 @@
V(Float32Min) \
V(Float32Abs) \
V(Float32Sqrt) \
+ V(Float32RoundDown) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Mul) \
@@ -283,8 +308,13 @@
V(Float64Abs) \
V(Float64Sqrt) \
V(Float64RoundDown) \
+ V(Float32RoundUp) \
+ V(Float64RoundUp) \
+ V(Float32RoundTruncate) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
+ V(Float32RoundTiesEven) \
+ V(Float64RoundTiesEven) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
@@ -358,6 +388,11 @@ class IrOpcode {
return kIfTrue <= value && value <= kIfDefault;
}
+ // Returns true if the opcode is a call that can be inlined.
+ static bool IsInlineeOpcode(Value value) {
+ return value == kJSCallConstruct || value == kJSCallFunction;
+ }
+
// Returns true if the opcode is a comparison operator.
static bool IsComparisonOpcode(Value value) {
return (kJSEqual <= value && value <= kJSGreaterThanOrEqual) ||
diff --git a/chromium/v8/src/compiler/operator-properties.cc b/chromium/v8/src/compiler/operator-properties.cc
index 60e6ad76368..bd704a36502 100644
--- a/chromium/v8/src/compiler/operator-properties.cc
+++ b/chromium/v8/src/compiler/operator-properties.cc
@@ -34,9 +34,11 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSStrictNotEqual:
return 0;
- // Calls
- case IrOpcode::kJSCallFunction:
+ // We record the frame state immediately before and immediately after every
+ // construct/function call.
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallFunction:
+ return 2;
// Compare operations
case IrOpcode::kJSEqual:
@@ -45,21 +47,25 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSInstanceOf:
// Object operations
+ case IrOpcode::kJSCreate:
case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
// Context operations
- case IrOpcode::kJSLoadDynamicContext:
+ case IrOpcode::kJSLoadDynamic:
case IrOpcode::kJSCreateScriptContext:
- case IrOpcode::kJSCreateWithContext:
// Conversions
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSToNumber:
case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
// Misc operations
+ case IrOpcode::kJSConvertReceiver:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
@@ -74,7 +80,6 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSStoreGlobal:
- case IrOpcode::kJSLoadDynamicGlobal:
return 2;
// Binary operators that can deopt in the middle the operation (e.g.,
diff --git a/chromium/v8/src/compiler/operator.h b/chromium/v8/src/compiler/operator.h
index eba430f927c..fa85d599cdf 100644
--- a/chromium/v8/src/compiler/operator.h
+++ b/chromium/v8/src/compiler/operator.h
@@ -30,7 +30,7 @@ namespace compiler {
// meaningful to the operator itself.
class Operator : public ZoneObject {
public:
- typedef uint8_t Opcode;
+ typedef uint16_t Opcode;
// Properties inform the operator-independent optimizer about legal
// transformations for nodes that have this operator.
@@ -136,10 +136,19 @@ DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
std::ostream& operator<<(std::ostream& os, const Operator& op);
+// Default equality function for below Operator1<*> class.
+template <typename T>
+struct OpEqualTo : public std::equal_to<T> {};
+
+
+// Default hashing function for below Operator1<*> class.
+template <typename T>
+struct OpHash : public base::hash<T> {};
+
+
// A templatized implementation of Operator that has one static parameter of
-// type {T}.
-template <typename T, typename Pred = std::equal_to<T>,
- typename Hash = base::hash<T>>
+// type {T} with the proper default equality and hashing functions.
+template <typename T, typename Pred = OpEqualTo<T>, typename Hash = OpHash<T>>
class Operator1 : public Operator {
public:
Operator1(Opcode opcode, Properties properties, const char* mnemonic,
@@ -183,46 +192,38 @@ class Operator1 : public Operator {
// Helper to extract parameters from Operator1<*> operator.
template <typename T>
inline T const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<T>*>(op)->parameter();
+ return reinterpret_cast<const Operator1<T, OpEqualTo<T>, OpHash<T>>*>(op)
+ ->parameter();
}
+
// NOTE: We have to be careful to use the right equal/hash functions below, for
// float/double we always use the ones operating on the bit level, for Handle<>
// we always use the ones operating on the location level.
template <>
-inline float const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<float, base::bit_equal_to<float>,
- base::bit_hash<float>>*>(op)
- ->parameter();
-}
+struct OpEqualTo<float> : public base::bit_equal_to<float> {};
+template <>
+struct OpHash<float> : public base::bit_hash<float> {};
template <>
-inline double const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>*>(op)
- ->parameter();
-}
+struct OpEqualTo<double> : public base::bit_equal_to<double> {};
+template <>
+struct OpHash<double> : public base::bit_hash<double> {};
template <>
-inline Handle<HeapObject> const& OpParameter(const Operator* op) {
- return reinterpret_cast<
- const Operator1<Handle<HeapObject>, Handle<HeapObject>::equal_to,
- Handle<HeapObject>::hash>*>(op)->parameter();
-}
+struct OpEqualTo<Handle<HeapObject>> : public Handle<HeapObject>::equal_to {};
+template <>
+struct OpHash<Handle<HeapObject>> : public Handle<HeapObject>::hash {};
template <>
-inline Handle<String> const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<
- Handle<String>, Handle<String>::equal_to, Handle<String>::hash>*>(op)
- ->parameter();
-}
+struct OpEqualTo<Handle<String>> : public Handle<String>::equal_to {};
+template <>
+struct OpHash<Handle<String>> : public Handle<String>::hash {};
template <>
-inline Handle<ScopeInfo> const& OpParameter(const Operator* op) {
- return reinterpret_cast<
- const Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
- Handle<ScopeInfo>::hash>*>(op)->parameter();
-}
+struct OpEqualTo<Handle<ScopeInfo>> : public Handle<ScopeInfo>::equal_to {};
+template <>
+struct OpHash<Handle<ScopeInfo>> : public Handle<ScopeInfo>::hash {};
} // namespace compiler
} // namespace internal
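
The net effect of this hunk: the per-type OpParameter overloads are gone, replaced by specializable OpEqualTo/OpHash traits so a single generic accessor serves every parameter type. A standalone sketch of the pattern (simplified names using only the standard library; not the real v8::internal::compiler classes):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <functional>

    // Default traits, specialized only where default semantics are wrong.
    template <typename T>
    struct OpEqualTo : public std::equal_to<T> {};
    template <typename T>
    struct OpHash : public std::hash<T> {};

    // Bit-level semantics for float: unlike operator==, this distinguishes
    // -0.0f from +0.0f and treats identical NaN payloads as equal.
    template <>
    struct OpEqualTo<float> {
      bool operator()(float lhs, float rhs) const {
        uint32_t a, b;
        std::memcpy(&a, &lhs, sizeof(a));
        std::memcpy(&b, &rhs, sizeof(b));
        return a == b;
      }
    };
    template <>
    struct OpHash<float> {
      std::size_t operator()(float value) const {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        return std::hash<uint32_t>()(bits);
      }
    };

    // The parameter holder then needs no per-type accessor overloads.
    template <typename T, typename Pred = OpEqualTo<T>,
              typename Hash = OpHash<T>>
    class Operator1 {
     public:
      explicit Operator1(T parameter) : parameter_(parameter) {}
      const T& parameter() const { return parameter_; }

     private:
      T parameter_;
    };
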
diff --git a/chromium/v8/src/compiler/osr.cc b/chromium/v8/src/compiler/osr.cc
index 77eea3ce2ce..55431c201c6 100644
--- a/chromium/v8/src/compiler/osr.cc
+++ b/chromium/v8/src/compiler/osr.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
@@ -17,7 +18,6 @@
#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/compiler/pipeline-statistics.cc b/chromium/v8/src/compiler/pipeline-statistics.cc
index ba705ba1d8f..b98f837ee97 100644
--- a/chromium/v8/src/compiler/pipeline-statistics.cc
+++ b/chromium/v8/src/compiler/pipeline-statistics.cc
@@ -36,7 +36,7 @@ void PipelineStatistics::CommonStats::End(
diff->max_allocated_bytes_ + allocated_bytes_at_start_;
diff->total_allocated_bytes_ =
outer_zone_diff + scope_->GetTotalAllocatedBytes();
- scope_.Reset(NULL);
+ scope_.Reset(nullptr);
timer_.Stop();
}
@@ -48,8 +48,8 @@ PipelineStatistics::PipelineStatistics(CompilationInfo* info,
zone_pool_(zone_pool),
compilation_stats_(isolate_->GetTurboStatistics()),
source_size_(0),
- phase_kind_name_(NULL),
- phase_name_(NULL) {
+ phase_kind_name_(nullptr),
+ phase_name_(nullptr) {
if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
base::SmartArrayPointer<char> name =
diff --git a/chromium/v8/src/compiler/pipeline-statistics.h b/chromium/v8/src/compiler/pipeline-statistics.h
index 988327d1bb8..2b6563da403 100644
--- a/chromium/v8/src/compiler/pipeline-statistics.h
+++ b/chromium/v8/src/compiler/pipeline-statistics.h
@@ -76,10 +76,10 @@ class PhaseScope {
public:
PhaseScope(PipelineStatistics* pipeline_stats, const char* name)
: pipeline_stats_(pipeline_stats) {
- if (pipeline_stats_ != NULL) pipeline_stats_->BeginPhase(name);
+ if (pipeline_stats_ != nullptr) pipeline_stats_->BeginPhase(name);
}
~PhaseScope() {
- if (pipeline_stats_ != NULL) pipeline_stats_->EndPhase();
+ if (pipeline_stats_ != nullptr) pipeline_stats_->EndPhase();
}
private:
diff --git a/chromium/v8/src/compiler/pipeline.cc b/chromium/v8/src/compiler/pipeline.cc
index 209ddfdf0da..4d6aacd78a8 100644
--- a/chromium/v8/src/compiler/pipeline.cc
+++ b/chromium/v8/src/compiler/pipeline.cc
@@ -12,12 +12,15 @@
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
@@ -26,14 +29,15 @@
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-relaxation.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
-#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-global-object-specialization.h"
+#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
-#include "src/compiler/js-type-feedback.h"
-#include "src/compiler/js-type-feedback-lowering.h"
+#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/live-range-separator.h"
@@ -50,13 +54,16 @@
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/tail-call-optimization.h"
+#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-pool.h"
#include "src/ostreams.h"
+#include "src/register-configuration.h"
#include "src/type-info.h"
#include "src/utils.h"
@@ -80,11 +87,11 @@ class PipelineData {
graph_zone_(graph_zone_scope_.zone()),
graph_(nullptr),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- js_type_feedback_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
@@ -96,13 +103,14 @@ class PipelineData {
PhaseScope scope(pipeline_statistics, "init pipeline data");
graph_ = new (graph_zone_) Graph(graph_zone_);
source_positions_.Reset(new SourcePositionTable(graph_));
+ simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
- graph_zone_, kMachPtr,
+ graph_zone_, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags());
common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
- JSGraph(isolate_, graph_, common_, javascript_, machine_);
+ JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
}
// For machine graph testing entry point.
@@ -120,11 +128,11 @@ class PipelineData {
graph_(graph),
source_positions_(new SourcePositionTable(graph_)),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- js_type_feedback_(nullptr),
schedule_(schedule),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
@@ -148,11 +156,11 @@ class PipelineData {
graph_zone_(nullptr),
graph_(nullptr),
loop_assignment_(nullptr),
+ simplified_(nullptr),
machine_(nullptr),
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
- js_type_feedback_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(sequence->zone()),
@@ -192,9 +200,11 @@ class PipelineData {
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
- JSTypeFeedbackTable* js_type_feedback() { return js_type_feedback_; }
- void set_js_type_feedback(JSTypeFeedbackTable* js_type_feedback) {
- js_type_feedback_ = js_type_feedback;
+ MaybeHandle<Context> native_context() const {
+ if (info()->is_native_context_specializing()) {
+ return handle(info()->native_context(), isolate());
+ }
+ return MaybeHandle<Context>();
}
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
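
The removed JSTypeFeedbackTable plumbing gives way to a native_context() accessor that returns a possibly-empty handle. A sketch of the contract with std::optional standing in for MaybeHandle<Context> (types and field names illustrative):

    #include <optional>

    struct Context {};
    struct CompilationInfo {
      bool is_native_context_specializing = false;
      Context* native_context = nullptr;
    };

    // Empty unless native-context specialization is enabled; reducers that
    // take this must handle the "no context known at compile time" case.
    std::optional<Context*> NativeContextOf(const CompilationInfo& info) {
      if (info.is_native_context_specializing) return info.native_context;
      return std::nullopt;
    }
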
@@ -203,6 +213,12 @@ class PipelineData {
loop_assignment_ = loop_assignment;
}
+ TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
+ void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
+ DCHECK_NULL(type_hint_analysis_);
+ type_hint_analysis_ = type_hint_analysis;
+ }
+
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
DCHECK(!schedule_);
@@ -227,11 +243,12 @@ class PipelineData {
graph_zone_ = nullptr;
graph_ = nullptr;
loop_assignment_ = nullptr;
+ type_hint_analysis_ = nullptr;
+ simplified_ = nullptr;
machine_ = nullptr;
common_ = nullptr;
javascript_ = nullptr;
jsgraph_ = nullptr;
- js_type_feedback_ = nullptr;
schedule_ = nullptr;
}
@@ -266,12 +283,12 @@ class PipelineData {
DCHECK(register_allocation_data_ == nullptr);
int fixed_frame_size = 0;
if (descriptor != nullptr) {
- fixed_frame_size = (descriptor->kind() == CallDescriptor::kCallAddress)
+ fixed_frame_size = (descriptor->IsCFunctionCall())
? StandardFrameConstants::kFixedSlotCountAboveFp +
StandardFrameConstants::kCPSlotCount
: StandardFrameConstants::kFixedSlotCount;
}
- frame_ = new (instruction_zone()) Frame(fixed_frame_size);
+ frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
sequence(), debug_name);
@@ -287,22 +304,24 @@ class PipelineData {
Handle<Code> code_;
// All objects in the following group of fields are allocated in graph_zone_.
- // They are all set to NULL when the graph_zone_ is destroyed.
+ // They are all set to nullptr when the graph_zone_ is destroyed.
ZonePool::Scope graph_zone_scope_;
Zone* graph_zone_;
Graph* graph_;
// TODO(dcarney): make this into a ZoneObject.
base::SmartPointer<SourcePositionTable> source_positions_;
LoopAssignmentAnalysis* loop_assignment_;
+ TypeHintAnalysis* type_hint_analysis_ = nullptr;
+ SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
JSGraph* jsgraph_;
- JSTypeFeedbackTable* js_type_feedback_;
Schedule* schedule_;
// All objects in the following group of fields are allocated in
- // instruction_zone_. They are all set to NULL when the instruction_zone_ is
+ // instruction_zone_. They are all set to nullptr when the instruction_zone_
+ // is
// destroyed.
ZonePool::Scope instruction_zone_scope_;
Zone* instruction_zone_;
@@ -310,7 +329,7 @@ class PipelineData {
Frame* frame_;
// All objects in the following group of fields are allocated in
- // register_allocation_zone_. They are all set to NULL when the zone is
+ // register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZonePool::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
@@ -331,7 +350,7 @@ struct TurboCfgFile : public std::ofstream {
void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
+ FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
@@ -356,10 +375,10 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment,
- JSTypeFeedbackTable* js_type_feedback,
+ TypeHintAnalysis* type_hint_analysis,
SourcePositionTable* source_positions)
: AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
- js_type_feedback),
+ type_hint_analysis),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position()) {}
@@ -395,6 +414,8 @@ class SourcePositionWrapper final : public Reducer {
return reducer_->Reduce(node);
}
+ void Finalize() final { reducer_->Finalize(); }
+
private:
Reducer* const reducer_;
SourcePositionTable* const table_;
@@ -469,6 +490,18 @@ struct LoopAssignmentAnalysisPhase {
};
+struct TypeHintAnalysisPhase {
+ static const char* phase_name() { return "type hint analysis"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ TypeHintAnalyzer analyzer(data->graph_zone());
+ Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
+ TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
+ data->set_type_hint_analysis(type_hint_analysis);
+ }
+};
+
+
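
TypeHintAnalysisPhase follows the pipeline's uniform phase protocol: a struct with a static phase_name() and a Run(PipelineData*, Zone*) method, executed via the Run<Phase>() helper. A compilable sketch of that protocol (types stubbed out; the real helper also wires up PhaseScope statistics):

    #include <cstdio>

    struct PipelineData {};
    struct Zone {};

    struct ExamplePhase {
      static const char* phase_name() { return "example phase"; }
      void Run(PipelineData* data, Zone* temp_zone) {
        // analysis or lowering work happens here
      }
    };

    template <typename Phase>
    void RunPhase(PipelineData* data, Zone* temp_zone) {
      Phase phase;
      std::printf("phase: %s\n", Phase::phase_name());
      phase.Run(data, temp_zone);
    }

    int main() {
      PipelineData data;
      Zone zone;
      RunPhase<ExamplePhase>(&data, &zone);
      return 0;
    }
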
struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
@@ -483,7 +516,7 @@ struct GraphBuilderPhase {
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->js_type_feedback(), data->source_positions());
+ data->type_hint_analysis(), data->source_positions());
succeeded = graph_builder.CreateGraph(stack_check);
}
@@ -503,6 +536,11 @@ struct InliningPhase {
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ JSCallReducer call_reducer(data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSCallReducer::kDeoptimizationEnabled
+ : JSCallReducer::kNoFlags,
+ data->native_context());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(),
data->info()->is_function_context_specializing()
@@ -510,17 +548,33 @@ struct InliningPhase {
: MaybeHandle<Context>());
JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
data->jsgraph());
- JSInliner inliner(&graph_reducer, data->info()->is_inlining_enabled()
- ? JSInliner::kGeneralInlining
- : JSInliner::kRestrictedInlining,
- temp_zone, data->info(), data->jsgraph());
+ JSGlobalObjectSpecialization global_object_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
+ : JSGlobalObjectSpecialization::kNoFlags,
+ data->native_context(), data->info()->dependencies());
+ JSNativeContextSpecialization native_context_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSNativeContextSpecialization::kDeoptimizationEnabled
+ : JSNativeContextSpecialization::kNoFlags,
+ data->native_context(), data->info()->dependencies(), temp_zone);
+ JSInliningHeuristic inlining(&graph_reducer,
+ data->info()->is_inlining_enabled()
+ ? JSInliningHeuristic::kGeneralInlining
+ : JSInliningHeuristic::kRestrictedInlining,
+ temp_zone, data->info(), data->jsgraph());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
+ AddReducer(data, &graph_reducer, &global_object_specialization);
+ AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
- AddReducer(data, &graph_reducer, &inliner);
+ AddReducer(data, &graph_reducer, &call_reducer);
+ AddReducer(data, &graph_reducer, &inlining);
graph_reducer.ReduceGraph();
}
};
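
All of the new specializations (global object, native context, call reduction, inlining heuristic) plug into the same reducer framework: each is registered with AddReducer and the graph is reduced to a fixpoint. A heavily simplified sketch of that loop (the real GraphReducer keeps a per-node revisit worklist rather than making whole-graph passes):

    #include <vector>

    struct Node {};

    class Reducer {
     public:
      virtual ~Reducer() = default;
      virtual bool Reduce(Node* node) = 0;  // true if the graph changed
    };

    void ReduceGraph(const std::vector<Node*>& nodes,
                     const std::vector<Reducer*>& reducers) {
      bool changed = true;
      while (changed) {  // iterate until no reducer makes progress
        changed = false;
        for (Node* node : nodes) {
          for (Reducer* reducer : reducers) {
            changed |= reducer->Reduce(node);
          }
        }
      }
    }
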
@@ -547,34 +601,6 @@ struct OsrDeconstructionPhase {
};
-struct JSTypeFeedbackPhase {
- static const char* phase_name() { return "type feedback specializing"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- Handle<Context> native_context(data->info()->context()->native_context());
- TypeFeedbackOracle oracle(data->isolate(), temp_zone,
- data->info()->unoptimized_code(),
- data->info()->feedback_vector(), native_context);
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- Handle<GlobalObject> global_object = Handle<GlobalObject>::null();
- if (data->info()->has_global_object()) {
- global_object =
- Handle<GlobalObject>(data->info()->global_object(), data->isolate());
- }
- // TODO(titzer): introduce a specialization mode/flags enum to control
- // specializing to the global object here.
- JSTypeFeedbackSpecializer specializer(
- &graph_reducer, data->jsgraph(), data->js_type_feedback(), &oracle,
- global_object, data->info()->is_deoptimization_enabled()
- ? JSTypeFeedbackSpecializer::kDeoptimizationEnabled
- : JSTypeFeedbackSpecializer::kDeoptimizationDisabled,
- data->info()->dependencies());
- AddReducer(data, &graph_reducer, &specializer);
- graph_reducer.ReduceGraph();
- }
-};
-
-
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
@@ -584,12 +610,16 @@ struct TypedLoweringPhase {
data->common());
LoadElimination load_elimination(&graph_reducer);
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
- JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(), temp_zone);
- JSTypeFeedbackLowering type_feedback_lowering(
- &graph_reducer, data->info()->is_deoptimization_enabled()
- ? JSTypeFeedbackLowering::kDeoptimizationEnabled
- : JSTypeFeedbackLowering::kNoFlags,
- data->jsgraph());
+ JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
+ if (data->info()->is_deoptimization_enabled()) {
+ typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
+ }
+ if (data->info()->shared_info()->HasBytecodeArray()) {
+ typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
+ }
+ JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
+ typed_lowering_flags, data->jsgraph(),
+ temp_zone);
JSIntrinsicLowering intrinsic_lowering(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
@@ -601,7 +631,6 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
- AddReducer(data, &graph_reducer, &type_feedback_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
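
The single JSTypeFeedbackLowering flag argument becomes a composed Flags bitset. A sketch of the flag plumbing (enumerator values illustrative):

    #include <cstdint>

    typedef uint32_t Flags;
    enum : Flags {
      kNoFlags = 0u,
      kDeoptimizationEnabled = 1u << 0,
      kDisableBinaryOpReduction = 1u << 1,
    };

    Flags ComputeTypedLoweringFlags(bool deopt_enabled,
                                    bool has_bytecode_array) {
      Flags flags = kNoFlags;
      if (deopt_enabled) flags |= kDeoptimizationEnabled;
      // Per the phase above, functions compiled from a bytecode array skip
      // binary-op reduction.
      if (has_bytecode_array) flags |= kDisableBinaryOpReduction;
      return flags;
    }
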
@@ -609,6 +638,38 @@ struct TypedLoweringPhase {
};
+struct BranchEliminationPhase {
+ static const char* phase_name() { return "branch condition elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
+struct EscapeAnalysisPhase {
+ static const char* phase_name() { return "escape analysis"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
+ temp_zone);
+ escape_analysis.Run();
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
+ &escape_analysis, temp_zone);
+ AddReducer(data, &graph_reducer, &escape_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
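
A source-level illustration of what the escape analysis phase enables (the real analysis runs on IR nodes, not C++ objects): an allocation whose object never escapes can be scalar-replaced and kept entirely in registers.

    struct Point {
      int x;
      int y;
    };

    int DistanceSquared(int ax, int ay, int bx, int by) {
      Point a{ax, ay};  // no pointer to 'a' or 'b' leaves this function,
      Point b{bx, by};  // so neither object needs to exist in memory
      int dx = a.x - b.x;
      int dy = a.y - b.y;
      return dx * dx + dy * dy;
    }
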
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "simplified lowering"; }
@@ -698,7 +759,7 @@ struct StressLoopPeelingPhase {
// Peel the first outer loop for testing.
// TODO(titzer): peel all loops? the N'th loop? Innermost loops?
LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
- if (loop_tree != NULL && loop_tree->outer_loops().size() > 0) {
+ if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
loop_tree->outer_loops()[0], temp_zone);
}
@@ -945,16 +1006,8 @@ struct PrintGraphPhase {
CompilationInfo* info = data->info();
Graph* graph = data->graph();
- { // Print dot.
- FILE* dot_file = OpenVisualizerLogFile(info, phase, "dot", "w+");
- if (dot_file == nullptr) return;
- OFStream dot_of(dot_file);
- dot_of << AsDOT(*graph);
- fclose(dot_file);
- }
-
{ // Print JSON.
- FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
+ FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file == nullptr) return;
OFStream json_of(json_file);
json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
@@ -983,7 +1036,7 @@ struct VerifyGraphPhase {
void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
- if (data_->pipeline_statistics() != NULL) {
+ if (data_->pipeline_statistics() != nullptr) {
data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
}
}
@@ -1016,7 +1069,7 @@ Handle<Code> Pipeline::GenerateCode() {
}
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "w+");
+ FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
if (json_file != nullptr) {
OFStream json_of(json_file);
Handle<Script> script = info()->script();
@@ -1043,11 +1096,6 @@ Handle<Code> Pipeline::GenerateCode() {
PipelineData data(&zone_pool, info(), pipeline_statistics.get());
this->data_ = &data;
- if (info()->is_type_feedback_enabled()) {
- data.set_js_type_feedback(new (data.graph_zone())
- JSTypeFeedbackTable(data.graph_zone()));
- }
-
BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
@@ -1065,6 +1113,10 @@ Handle<Code> Pipeline::GenerateCode() {
Run<LoopAssignmentAnalysisPhase>();
}
+ if (info()->is_typing_enabled()) {
+ Run<TypeHintAnalysisPhase>();
+ }
+
Run<GraphBuilderPhase>();
if (data.compilation_failed()) return Handle<Code>::null();
RunPrintAndVerify("Initial untyped", true);
@@ -1075,7 +1127,7 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("OSR deconstruction", true);
}
- // Perform context specialization and inlining (if enabled).
+ // Perform function context specialization and inlining (if enabled).
Run<InliningPhase>();
RunPrintAndVerify("Inlined", true);
@@ -1091,7 +1143,11 @@ Handle<Code> Pipeline::GenerateCode() {
base::SmartPointer<Typer> typer;
if (info()->is_typing_enabled()) {
// Type the graph.
- typer.Reset(new Typer(isolate(), data.graph(), info()->function_type()));
+ typer.Reset(new Typer(isolate(), data.graph(),
+ info()->is_deoptimization_enabled()
+ ? Typer::kDeoptimizationEnabled
+ : Typer::kNoFlags,
+ info()->dependencies()));
Run<TyperPhase>(typer.get());
RunPrintAndVerify("Typed");
}
@@ -1108,15 +1164,18 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("Loop peeled");
}
- if (info()->is_type_feedback_enabled()) {
- Run<JSTypeFeedbackPhase>();
- RunPrintAndVerify("JSType feedback");
+ if (FLAG_turbo_escape) {
+ Run<EscapeAnalysisPhase>();
+ RunPrintAndVerify("Escape Analysed");
}
// Lower simplified operators and insert changes.
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
+ Run<BranchEliminationPhase>();
+ RunPrintAndVerify("Branch conditions eliminated");
+
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
@@ -1150,10 +1209,13 @@ Handle<Code> Pipeline::GenerateCode() {
}
-Handle<Code> Pipeline::GenerateCodeForInterpreter(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, const char* bytecode_name) {
- CompilationInfo info(bytecode_name, isolate, graph->zone());
+Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule,
+ Code::Kind kind,
+ const char* debug_name) {
+ CompilationInfo info(debug_name, isolate, graph->zone());
+ info.set_output_code_kind(kind);
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool;
@@ -1161,21 +1223,24 @@ Handle<Code> Pipeline::GenerateCodeForInterpreter(
base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
- pipeline_statistics->BeginPhaseKind("interpreter handler codegen");
+ pipeline_statistics->BeginPhaseKind("stub codegen");
}
+
+ Pipeline pipeline(&info);
+ pipeline.data_ = &data;
+ DCHECK_NOT_NULL(data.schedule());
+
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(&info, NULL, "json", "w+");
+ FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
fclose(json_file);
}
+ pipeline.Run<PrintGraphPhase>("Machine");
}
- Pipeline pipeline(&info);
- pipeline.data_ = &data;
- pipeline.RunPrintAndVerify("Machine", true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
@@ -1235,7 +1300,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
- BasicBlockProfiler::Data* profiler_data = NULL;
+ BasicBlockProfiler::Data* profiler_data = nullptr;
if (FLAG_turbo_profiling) {
profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
data->schedule());
@@ -1265,8 +1330,9 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
- AllocateRegisters(RegisterConfiguration::ArchDefault(), call_descriptor,
- run_verifier);
+ AllocateRegisters(
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ call_descriptor, run_verifier);
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>();
@@ -1283,10 +1349,10 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
Run<GenerateCodePhase>(&linkage);
Handle<Code> code = data->code();
- if (profiler_data != NULL) {
+ if (profiler_data != nullptr) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
- code->Disassemble(NULL, os);
+ code->Disassemble(nullptr, os);
profiler_data->SetCode(&os);
#endif
}
@@ -1295,14 +1361,14 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
v8::internal::CodeGenerator::PrintCode(code, info());
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "a+");
+ FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of
<< "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#if ENABLE_DISASSEMBLER
std::stringstream disassembly_stream;
- code->Disassemble(NULL, disassembly_stream);
+ code->Disassemble(nullptr, disassembly_stream);
std::string disassembly_string(disassembly_stream.str());
for (const auto& c : disassembly_string) {
json_of << AsEscapedUC16ForJSON(c);
@@ -1360,6 +1426,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
}
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
+ CHECK(data->register_allocation_data()
+ ->RangesDefinedInDeferredStayInDeferred());
}
if (FLAG_turbo_preprocess_ranges) {
diff --git a/chromium/v8/src/compiler/pipeline.h b/chromium/v8/src/compiler/pipeline.h
index 90c223f67ea..af94018f078 100644
--- a/chromium/v8/src/compiler/pipeline.h
+++ b/chromium/v8/src/compiler/pipeline.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class RegisterConfiguration;
+
namespace compiler {
class CallDescriptor;
@@ -18,7 +21,6 @@ class Graph;
class InstructionSequence;
class Linkage;
class PipelineData;
-class RegisterConfiguration;
class Schedule;
class Pipeline {
@@ -28,11 +30,13 @@ class Pipeline {
// Run the entire pipeline and generate a handle to a code object.
Handle<Code> GenerateCode();
- // Run the pipeline on an interpreter bytecode handler machine graph and
- // generate code.
- static Handle<Code> GenerateCodeForInterpreter(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, const char* bytecode_name);
+ // Run the pipeline on a machine graph and generate code. The {schedule} must
+ // be valid, hence the given {graph} does not need to be schedulable.
+ static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule,
+ Code::Kind kind,
+ const char* debug_name);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
diff --git a/chromium/v8/src/compiler/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/ppc/code-generator-ppc.cc
index df776fac682..6fe674e4f2c 100644
--- a/chromium/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/chromium/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -4,12 +4,12 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/ppc/macro-assembler-ppc.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -27,6 +27,8 @@ class PPCOperandConverter final : public InstructionOperandConverter {
PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
+ size_t OutputCount() { return instr_->OutputCount(); }
+
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
@@ -99,10 +101,10 @@ class PPCOperandConverter final : public InstructionOperandConverter {
}
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -157,7 +159,49 @@ class OutOfLineLoadZero final : public OutOfLineCode {
};
-Condition FlagsConditionToCondition(FlagsCondition condition) {
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ // TODO(turbofan): Once we get frame elision working, we need to save
+ // and restore lr properly here if the frame was elided.
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ add(scratch1_, object_, offset_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Register const offset_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
+
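
OutOfLineRecordWrite splits the generational write barrier between an inline fast path (the page-flag check on the object, emitted at the store site) and this out-of-line slow path (smi and value-page checks, then the RecordWriteStub call). A standalone sketch of the combined filtering (flag bits and the smi test are stand-ins for the real heap layout):

    #include <cstdint>

    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

    constexpr uint32_t kPointersToHereAreInteresting = 1u << 0;
    constexpr uint32_t kPointersFromHereAreInteresting = 1u << 1;

    bool NeedsRecordWriteStub(uint32_t object_page_flags,
                              uint32_t value_page_flags, bool value_is_smi,
                              RecordWriteMode mode) {
      // Inline check: no barrier if nobody tracks pointers leaving the
      // object's page (CheckPageFlag branching to the OOL entry).
      if (!(object_page_flags & kPointersFromHereAreInteresting)) return false;
      // Out-of-line checks, gated on how much is known about the value:
      if (mode > RecordWriteMode::kValueIsPointer && value_is_smi) return false;
      if (mode > RecordWriteMode::kValueIsMap &&
          !(value_page_flags & kPointersToHereAreInteresting)) return false;
      return true;  // fall through to the RecordWriteStub call
    }
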
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
switch (condition) {
case kEqual:
return eq;
@@ -176,17 +220,42 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnsignedGreaterThan:
return gt;
case kOverflow:
+ // Overflow checked for add/sub only.
+ switch (op) {
#if V8_TARGET_ARCH_PPC64
- return ne;
+ case kPPC_Add:
+ case kPPC_Sub:
+ return lt;
+#endif
+ case kPPC_AddWithOverflow32:
+ case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+ return ne;
#else
- return lt;
+ return lt;
#endif
+ default:
+ break;
+ }
+ break;
case kNotOverflow:
+ switch (op) {
#if V8_TARGET_ARCH_PPC64
- return eq;
+ case kPPC_Add:
+ case kPPC_Sub:
+ return ge;
+#endif
+ case kPPC_AddWithOverflow32:
+ case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+ return eq;
#else
- return ge;
+ return ge;
#endif
+ default:
+ break;
+ }
+ break;
default:
break;
}
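
The condition now depends on the opcode because PPC64 detects overflow differently per form: the 32-bit add/sub-with-overflow variants compute in a 64-bit register and then range-check the result (TestIfInt32, yielding an eq/ne-style test), while the 64-bit forms consult the overflow bit (lt/ge). A portable analogue of the 32-bit check:

    #include <cstdint>

    // Overflow occurred iff the 64-bit sum no longer fits in 32 bits.
    bool Add32Overflowed(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
      return wide != static_cast<int64_t>(static_cast<int32_t>(wide));
    }
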
@@ -246,13 +315,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-#if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_ADD_WITH_OVERFLOW() \
- do { \
- ASSEMBLE_BINOP(add, addi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
- } while (0)
-#else
#define ASSEMBLE_ADD_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -263,16 +325,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
i.InputInt32(1), kScratchReg, r0); \
} \
} while (0)
-#endif
-#if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_SUB_WITH_OVERFLOW() \
- do { \
- ASSEMBLE_BINOP(sub, subi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
- } while (0)
-#else
#define ASSEMBLE_SUB_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -283,6 +337,24 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
-i.InputInt32(1), kScratchReg, r0); \
} \
} while (0)
+
+
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(add, addi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(sub, subi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
@@ -563,27 +635,31 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-#define ASSEMBLE_STORE_WRITE_BARRIER() \
- do { \
- Register object = i.InputRegister(0); \
- Register index = i.InputRegister(1); \
- Register value = i.InputRegister(2); \
- __ add(index, object, index); \
- __ StoreP(value, MemOperand(index)); \
- SaveFPRegsMode mode = \
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
- LinkRegisterStatus lr_status = kLRHasNotBeenSaved; \
- __ RecordWrite(object, index, value, lr_status, mode); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
+ if (frame()->needs_frame()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadP(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ }
+ __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ mtlr(r0);
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
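
Both helpers above lean on the frame-access-state bookkeeping: while a tail call is being prepared the frame pointer has already been restored, so spill slots must temporarily be addressed SP-relative, tracking whatever has been pushed in the meantime. A sketch of that state machine (method names mirror the calls in this file, but the fields and types are illustrative):

    struct FrameAccessState {
      bool access_via_sp = false;  // default: FP-relative when a frame exists
      int sp_delta = 0;            // slots pushed since the last known height

      void SetFrameAccessToSP() { access_via_sp = true; }
      void SetFrameAccessToFP() { access_via_sp = false; }
      void SetFrameAccessToDefault() { access_via_sp = false; }
      void IncreaseSPDelta(int slots) { sp_delta += slots; }
      void ClearSPDelta() { sp_delta = 0; }
    };
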
@@ -594,6 +670,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
switch (opcode) {
case kArchCallCodeObject: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
@@ -605,10 +683,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -621,9 +701,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RelocInfo::CODE_TARGET);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -637,6 +720,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(ip);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -648,17 +732,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -668,6 +766,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -683,13 +783,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -709,6 +812,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StorePX(value, MemOperand(object, offset));
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -811,31 +931,47 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
#endif
case kPPC_Add:
- if (HasRegisterInput(instr, 1)) {
- __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
- LeaveOE, i.OutputRCBit());
+#if V8_TARGET_ARCH_PPC64
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_ADD_WITH_OVERFLOW();
} else {
- __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
+#endif
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+#if V8_TARGET_ARCH_PPC64
}
+#endif
break;
case kPPC_AddWithOverflow32:
- ASSEMBLE_ADD_WITH_OVERFLOW();
+ ASSEMBLE_ADD_WITH_OVERFLOW32();
break;
case kPPC_AddDouble:
ASSEMBLE_FLOAT_BINOP_RC(fadd);
break;
case kPPC_Sub:
- if (HasRegisterInput(instr, 1)) {
- __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
- LeaveOE, i.OutputRCBit());
+#if V8_TARGET_ARCH_PPC64
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_SUB_WITH_OVERFLOW();
} else {
- __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
+#endif
+ if (HasRegisterInput(instr, 1)) {
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+#if V8_TARGET_ARCH_PPC64
}
+#endif
break;
case kPPC_SubWithOverflow32:
- ASSEMBLE_SUB_WITH_OVERFLOW();
+ ASSEMBLE_SUB_WITH_OVERFLOW32();
break;
case kPPC_SubDouble:
ASSEMBLE_FLOAT_BINOP_RC(fsub);
@@ -939,6 +1075,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cntlzw_(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Cntlz64:
+ __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
+ case kPPC_Popcnt32:
+ __ popcntw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Popcnt64:
+ __ popcntd(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+#endif
case kPPC_Cmp32:
ASSEMBLE_COMPARE(cmpw, cmplw);
break;
@@ -974,8 +1126,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
@@ -1018,8 +1172,25 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Int64ToInt32:
- // TODO(mbrandy): sign extend?
- __ Move(i.OutputRegister(), i.InputRegister(0));
+ __ extsw(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToFloat32:
+ __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Int64ToDouble:
+ __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint64ToFloat32:
+ __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint64ToDouble:
+ __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
#endif
@@ -1034,13 +1205,52 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kPPC_DoubleToInt32:
case kPPC_DoubleToUint32:
+ case kPPC_DoubleToInt64: {
+#if V8_TARGET_ARCH_PPC64
+ bool check_conversion =
+ (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
+ if (check_conversion) {
+ __ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ }
+#endif
__ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_PPC64
kScratchReg,
#endif
- i.OutputRegister(), kScratchDoubleReg);
+ i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_PPC64
+ if (check_conversion) {
+ // Set 2nd output to zero if conversion fails.
+ CRBit crbit = static_cast<CRBit>(VXCVI % CRWIDTH);
+ __ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1),
+ v8::internal::Assembler::encode_crbit(cr7, crbit));
+ }
+#endif
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_DoubleToUint64: {
+ bool check_conversion = (i.OutputCount() > 1);
+ if (check_conversion) {
+ __ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ }
+ __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+ i.OutputRegister(0), kScratchDoubleReg);
+ if (check_conversion) {
+ // Set 2nd output to zero if conversion fails.
+ CRBit crbit = static_cast<CRBit>(VXCVI % CRWIDTH);
+ __ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1),
+ v8::internal::Assembler::encode_crbit(cr7, crbit));
+ }
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ }
+#endif
case kPPC_DoubleToFloat32:
ASSEMBLE_FLOAT_UNOP_RC(frsp);
break;
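
The new checked conversions pair the result with a success output: VXCVI in the FPSCR is cleared before the convert and tested afterwards, and the second output register materializes 1 or 0 accordingly. A portable analogue of the same contract (assumes the platform reports invalid conversions via FE_INVALID; a sketch, not the V8 code):

    #include <cfenv>
    #include <cmath>
    #include <cstdint>
    #include <utility>

    std::pair<int64_t, bool> TryTruncateDoubleToInt64(double value) {
      std::feclearexcept(FE_INVALID);  // analogous to clearing FPSCR:VXCVI
      int64_t result = std::llrint(std::trunc(value));
      bool ok = std::fetestexcept(FE_INVALID) == 0;  // raised on NaN/overflow
      return {result, ok};
    }
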
@@ -1136,9 +1346,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kPPC_StoreWriteBarrier:
- ASSEMBLE_STORE_WRITE_BARRIER();
- break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
@@ -1206,11 +1413,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
FlagsCondition condition = branch->condition;
CRegister cr = cr0;
- // Overflow checked for add/sub only.
- DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
-
- Condition cond = FlagsConditionToCondition(condition);
+ Condition cond = FlagsConditionToCondition(condition, op);
if (op == kPPC_CmpDouble) {
// check for unordered if necessary
if (cond == le) {
@@ -1240,16 +1443,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
bool check_unordered = (op == kPPC_CmpDouble);
CRegister cr = cr0;
- // Overflow checked for add/sub only.
- DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
-
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cond = FlagsConditionToCondition(condition);
+ Condition cond = FlagsConditionToCondition(condition, op);
switch (cond) {
case eq:
case lt:
@@ -1332,8 +1531,7 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ function_descriptor();
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -1345,13 +1543,18 @@ void CodeGenerator::AssemblePrologue() {
__ mr(fp, sp);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
- __ StubPrologue();
+ __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+ } else if (frame()->needs_frame()) {
+ if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
+ // TODO(mbrandy): Restrict only to the wasm wrapper case.
+ __ StubPrologue();
+ } else {
+ __ StubPrologue(ip);
+ }
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1421,9 +1624,9 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopDoubles(double_saves);
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -1441,7 +1644,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- PPCOperandConverter g(this, NULL);
+ PPCOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1543,7 +1746,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- PPCOperandConverter g(this, NULL);
+ PPCOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1632,6 +1835,9 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block trampoline pool emission for the duration of the padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/chromium/v8/src/compiler/ppc/instruction-codes-ppc.h b/chromium/v8/src/compiler/ppc/instruction-codes-ppc.h
index ed9bbcd91cf..a3bf80e5032 100644
--- a/chromium/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/chromium/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -62,6 +62,9 @@ namespace compiler {
V(PPC_MinDouble) \
V(PPC_AbsDouble) \
V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
V(PPC_Cmp32) \
V(PPC_Cmp64) \
V(PPC_CmpDouble) \
@@ -75,11 +78,17 @@ namespace compiler {
V(PPC_ExtendSignWord32) \
V(PPC_Uint32ToUint64) \
V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
V(PPC_Int32ToDouble) \
V(PPC_Uint32ToDouble) \
V(PPC_Float32ToDouble) \
V(PPC_DoubleToInt32) \
V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
V(PPC_DoubleToFloat32) \
V(PPC_DoubleExtractLowWord32) \
V(PPC_DoubleExtractHighWord32) \
@@ -103,8 +112,7 @@ namespace compiler {
V(PPC_StoreWord32) \
V(PPC_StoreWord64) \
V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_StoreWriteBarrier)
+ V(PPC_StoreDouble)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/chromium/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/chromium/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
new file mode 100644
index 00000000000..fc90cdd628e
--- /dev/null
+++ b/chromium/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -0,0 +1,143 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kPPC_And:
+ case kPPC_AndComplement:
+ case kPPC_Or:
+ case kPPC_OrComplement:
+ case kPPC_Xor:
+ case kPPC_ShiftLeft32:
+ case kPPC_ShiftLeft64:
+ case kPPC_ShiftRight32:
+ case kPPC_ShiftRight64:
+ case kPPC_ShiftRightAlg32:
+ case kPPC_ShiftRightAlg64:
+ case kPPC_RotRight32:
+ case kPPC_RotRight64:
+ case kPPC_Not:
+ case kPPC_RotLeftAndMask32:
+ case kPPC_RotLeftAndClear64:
+ case kPPC_RotLeftAndClearLeft64:
+ case kPPC_RotLeftAndClearRight64:
+ case kPPC_Add:
+ case kPPC_AddWithOverflow32:
+ case kPPC_AddDouble:
+ case kPPC_Sub:
+ case kPPC_SubWithOverflow32:
+ case kPPC_SubDouble:
+ case kPPC_Mul32:
+ case kPPC_Mul64:
+ case kPPC_MulHigh32:
+ case kPPC_MulHighU32:
+ case kPPC_MulDouble:
+ case kPPC_Div32:
+ case kPPC_Div64:
+ case kPPC_DivU32:
+ case kPPC_DivU64:
+ case kPPC_DivDouble:
+ case kPPC_Mod32:
+ case kPPC_Mod64:
+ case kPPC_ModU32:
+ case kPPC_ModU64:
+ case kPPC_ModDouble:
+ case kPPC_Neg:
+ case kPPC_NegDouble:
+ case kPPC_SqrtDouble:
+ case kPPC_FloorDouble:
+ case kPPC_CeilDouble:
+ case kPPC_TruncateDouble:
+ case kPPC_RoundDouble:
+ case kPPC_MaxDouble:
+ case kPPC_MinDouble:
+ case kPPC_AbsDouble:
+ case kPPC_Cntlz32:
+ case kPPC_Cntlz64:
+ case kPPC_Popcnt32:
+ case kPPC_Popcnt64:
+ case kPPC_Cmp32:
+ case kPPC_Cmp64:
+ case kPPC_CmpDouble:
+ case kPPC_Tst32:
+ case kPPC_Tst64:
+ case kPPC_ExtendSignWord8:
+ case kPPC_ExtendSignWord16:
+ case kPPC_ExtendSignWord32:
+ case kPPC_Uint32ToUint64:
+ case kPPC_Int64ToInt32:
+ case kPPC_Int64ToFloat32:
+ case kPPC_Int64ToDouble:
+ case kPPC_Uint64ToFloat32:
+ case kPPC_Uint64ToDouble:
+ case kPPC_Int32ToDouble:
+ case kPPC_Uint32ToDouble:
+ case kPPC_Float32ToDouble:
+ case kPPC_DoubleToInt32:
+ case kPPC_DoubleToUint32:
+ case kPPC_DoubleToInt64:
+ case kPPC_DoubleToUint64:
+ case kPPC_DoubleToFloat32:
+ case kPPC_DoubleExtractLowWord32:
+ case kPPC_DoubleExtractHighWord32:
+ case kPPC_DoubleInsertLowWord32:
+ case kPPC_DoubleInsertHighWord32:
+ case kPPC_DoubleConstruct:
+ case kPPC_BitcastInt32ToFloat32:
+ case kPPC_BitcastFloat32ToInt32:
+ case kPPC_BitcastInt64ToDouble:
+ case kPPC_BitcastDoubleToInt64:
+ return kNoOpcodeFlags;
+
+ case kPPC_LoadWordS8:
+ case kPPC_LoadWordU8:
+ case kPPC_LoadWordS16:
+ case kPPC_LoadWordU16:
+ case kPPC_LoadWordS32:
+ case kPPC_LoadWord64:
+ case kPPC_LoadFloat32:
+ case kPPC_LoadDouble:
+ return kIsLoadOperation;
+
+ case kPPC_StoreWord8:
+ case kPPC_StoreWord16:
+ case kPPC_StoreWord32:
+ case kPPC_StoreWord64:
+ case kPPC_StoreFloat32:
+ case kPPC_StoreDouble:
+ case kPPC_Push:
+ case kPPC_PushFrame:
+ case kPPC_StoreToStackSlot:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
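
The three flag classes drive what the new scheduler may reorder: pure computations move freely (subject to data dependencies), loads may reorder among themselves but never across a side effect, and side effects stay mutually ordered. A simplified sketch of the constraint the flags encode (the real scheduler builds a dependency graph rather than doing pairwise checks):

    enum InstructionFlags {
      kNoOpcodeFlags = 0,
      kIsLoadOperation = 1 << 0,
      kHasSideEffect = 1 << 1,
    };

    struct Instr {
      int flags;
    };

    bool MustStayOrdered(const Instr& earlier, const Instr& later) {
      bool earlier_effect = (earlier.flags & kHasSideEffect) != 0;
      bool later_effect = (later.flags & kHasSideEffect) != 0;
      bool earlier_load = (earlier.flags & kIsLoadOperation) != 0;
      bool later_load = (later.flags & kIsLoadOperation) != 0;
      if (earlier_effect && later_effect) return true;  // effect/effect
      if (earlier_effect && later_load) return true;    // load after effect
      if (earlier_load && later_effect) return true;    // effect after load
      return false;  // load/load and pure instructions may reorder
    }
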
diff --git a/chromium/v8/src/compiler/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 91c65d14c1b..f6ebbdf5d6a 100644
--- a/chromium/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/chromium/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -95,6 +95,25 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
}
+#if V8_TARGET_ARCH_PPC64
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
@@ -142,32 +161,30 @@ void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
-
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
- switch (rep) {
- case kRepFloat32:
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kPPC_LoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kPPC_LoadDouble;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = (typ == kTypeInt32) ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
break;
- case kRepWord16:
- opcode = (typ == kTypeInt32) ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
break;
#if !V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
#endif
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kPPC_LoadWordS32;
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): this applies to signed loads only (lwa)
@@ -175,13 +192,15 @@ void InstructionSelector::VisitLoad(Node* node) {
#endif
break;
#if V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
mode = kInt16Imm_4ByteAligned;
break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
#endif
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
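
The two immediate modes used above mirror PowerPC load encodings: D-form loads accept any signed 16-bit displacement, while 64-bit DS-form loads such as ld require the displacement to be a multiple of 4, since its low two bits are reused by the encoding. A stand-in sketch of the check this implies:

    #include <cstdint>

    bool IsInt16(int64_t v) { return v >= -32768 && v <= 32767; }

    // kInt16Imm corresponds to needs_4byte_alignment == false,
    // kInt16Imm_4ByteAligned to true.
    bool CanBeImmediate(int64_t offset, bool needs_4byte_alignment) {
      if (!IsInt16(offset)) return false;
      return !needs_4byte_alignment || (offset & 3) == 0;
    }
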
@@ -204,94 +223,124 @@ void InstructionSelector::VisitStore(Node* node) {
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK(rep == kRepTagged);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
- // TODO(dcarney): handle immediate indices.
- InstructionOperand temps[] = {g.TempRegister(r8), g.TempRegister(r9)};
- Emit(kPPC_StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, r7),
- g.UseFixed(offset, r8), g.UseFixed(value, r9), arraysize(temps),
- temps);
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- ImmediateMode mode = kInt16Imm;
- switch (rep) {
- case kRepFloat32:
- opcode = kPPC_StoreFloat32;
- break;
- case kRepFloat64:
- opcode = kPPC_StoreDouble;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kPPC_StoreWord8;
- break;
- case kRepWord16:
- opcode = kPPC_StoreWord16;
- break;
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+  // TODO(ppc): This could likely be structured more cleanly.
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(offset);
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ ImmediateMode mode = kInt16Imm;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kPPC_StoreFloat32;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kPPC_StoreDouble;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kPPC_StoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kPPC_StoreWord16;
+ break;
#if !V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
#endif
- case kRepWord32:
- opcode = kPPC_StoreWord32;
- break;
+ case MachineRepresentation::kWord32:
+ opcode = kPPC_StoreWord32;
+ break;
#if V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kPPC_StoreWord64;
- mode = kInt16Imm_4ByteAligned;
- break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kPPC_StoreWord64;
+ mode = kInt16Imm_4ByteAligned;
+ break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
#endif
- default:
- UNREACHABLE();
- return;
- }
- if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
- } else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(offset, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
+ } else if (g.CanBeImmediate(base, mode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
+ }
}
}
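
The barrier path exists because a generational collector must learn about pointers from old-generation objects to new-generation objects created by stores of tagged values; RecordWriteMode only narrows which value checks the barrier stub can skip when the value is known to be a map or a pointer. A conceptual sketch of the idea, not V8's implementation:

    #include <unordered_set>

    // Hypothetical stand-ins: the space checks are assumed to be done by
    // the caller and passed in as booleans.
    std::unordered_set<void**> g_remembered_slots;

    void StoreWithBarrier(void** slot, void* value,
                          bool slot_in_old_space, bool value_in_new_space) {
      *slot = value;                      // the store itself
      if (slot_in_old_space && value_in_new_space) {
        g_remembered_slots.insert(slot);  // record the old-to-new pointer
      }
    }
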
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+#endif
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -303,33 +352,40 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+#endif
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
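
Checked loads and stores carry an explicit length operand and bounds-check at runtime; they back asm.js-style typed-array accesses. A sketch of the load semantics (plain C++; returning zero on an out-of-bounds access is the conventional asm.js behaviour and an assumption of this sketch):

    #include <cstdint>
    #include <cstring>

    int32_t CheckedLoadWord32(const uint8_t* base, uint32_t offset,
                              uint32_t length) {
      // Overflow-safe bounds check against the length operand.
      if (offset > length || length - offset < sizeof(int32_t)) {
        return 0;  // out of bounds: yield a default value, do not trap
      }
      int32_t value;
      std::memcpy(&value, base + offset, sizeof(value));
      return value;
    }
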
@@ -417,8 +473,8 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
void InstructionSelector::VisitWord32And(Node* node) {
PPCOperandGenerator g(this);
Int32BinopMatcher m(node);
- int mb;
- int me;
+ int mb = 0;
+ int me = 0;
if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
@@ -456,8 +512,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
void InstructionSelector::VisitWord64And(Node* node) {
PPCOperandGenerator g(this);
Int64BinopMatcher m(node);
- int mb;
- int me;
+ int mb = 0;
+ int me = 0;
if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
int sh = 0;
Node* left = m.left().node();
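
IsContiguousMask32/64 succeed only for masks that are a single contiguous run of ones, because PPC's rotate-and-mask instructions (rlwinm and the rldic* family) can express exactly those masks via mask-begin/mask-end bit indices. One way to compute them (GCC/Clang builtins; the MSB-first bit numbering follows the Power convention and is an assumption of this sketch, not lifted from V8's helper):

    #include <cstdint>

    bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
      if (value == 0) return false;
      int shift = __builtin_ctz(value);          // trailing zeros
      uint32_t run = value >> shift;
      if ((run & (run + 1)) != 0) return false;  // not one run of ones
      int ones = __builtin_popcount(run);
      *me = 31 - shift;      // lowest set bit, numbered from the MSB
      *mb = *me - ones + 1;  // highest set bit, numbered from the MSB
      return true;
    }
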
@@ -737,6 +793,38 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Cntlz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Popcnt32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Popcnt64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+#endif
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+#endif
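
Ctz is rejected because classic PowerPC provides count-leading-zeros (cntlzw/cntlzd) but no count-trailing-zeros instruction, so the selector never advertises these operators. Where ctz is needed it can be synthesized from clz, e.g.:

    #include <cstdint>

    int CountTrailingZeros32(uint32_t x) {
      if (x == 0) return 32;
      uint32_t lowest = x & (0u - x);     // isolate the lowest set bit
      return 31 - __builtin_clz(lowest);  // GCC/Clang builtin
    }
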
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
}
@@ -873,6 +961,26 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_ExtendSignWord32, node);
@@ -907,6 +1015,26 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_Int64ToInt32, node);
}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Int64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Int64ToDouble, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Uint64ToDouble, node);
+}
#endif
@@ -1044,11 +1172,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kPPC_FloorDouble, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kPPC_FloorDouble, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kPPC_TruncateDouble, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kPPC_TruncateDouble, node);
}
@@ -1059,6 +1207,16 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
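
RoundTiesEven stays unimplemented because the PPC round-to-integer instructions used here (the frin family behind the kPPC_*Double opcodes) do not provide round-to-nearest-even; frin rounds halfway cases away from zero, which is why only kFloat64RoundTiesAway appears in the supported flags below. For reference, ties-to-even in portable C++:

    #include <cmath>

    // Assumes the default FE_TONEAREST rounding mode, under which
    // std::nearbyint rounds halfway cases to the even neighbour.
    double RoundTiesEven(double x) { return std::nearbyint(x); }
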
+
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
@@ -1083,6 +1241,30 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
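
Like their 32-bit counterparts, these produce the sum or difference as projection 0 and a signed-overflow flag as projection 1. A portable sketch of the semantics (GCC/Clang builtin):

    #include <cstdint>
    #include <utility>

    std::pair<int64_t, bool> Int64AddWithOverflow(int64_t a, int64_t b) {
      int64_t sum;
      bool overflow = __builtin_add_overflow(a, b, &sum);
      return {sum, overflow};  // {projection 0, projection 1}
    }
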
+
+
static bool CompareLogical(FlagsContinuation* cont) {
switch (cont->condition()) {
case kUnsignedLessThan:
@@ -1249,12 +1431,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+        // <Operation>) is either nullptr, which means there's no use of the
         // actual value, or was already defined, which means it is scheduled
         // *AFTER* this branch.
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1265,6 +1447,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitBinop<Int32BinopMatcher>(selector, node,
kPPC_SubWithOverflow32,
kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+ kInt16Imm, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Sub,
+ kInt16Imm_Negate, cont);
+#endif
default:
break;
}
@@ -1471,23 +1663,10 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
PPCOperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
-
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on PPC it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -1497,8 +1676,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
- for (Node* node : buffer.pushed_nodes) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ for (PushParameter input : (*arguments)) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot));
++slot;
}
@@ -1506,149 +1685,25 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// Push any stack arguments.
int num_slots = static_cast<int>(descriptor->StackParameterCount());
int slot = 0;
- for (Node* input : buffer.pushed_nodes) {
+ for (PushParameter input : (*arguments)) {
if (slot == 0) {
- DCHECK(input);
- Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
+ DCHECK(input.node());
+ Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(num_slots));
} else {
// Skip any alignment holes in pushed nodes.
- if (input) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ if (input.node()) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot));
}
}
++slot;
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- PPCOperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on PPC it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on PPC it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- int num_slots = static_cast<int>(descriptor->StackParameterCount());
- int slot = 0;
- for (Node* input : buffer.pushed_nodes) {
- if (slot == 0) {
- Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(num_slots));
- } else {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot));
- }
- ++slot;
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
@@ -1700,9 +1755,15 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTiesAway;
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
}
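
The omitted kWord32ShiftIsSafe flag is worth spelling out: JavaScript shift counts are taken mod 32, but slw/srw consume six bits of the count, so counts of 32..63 produce zero instead of wrapping. A model of the mismatch:

    #include <cstdint>

    uint32_t JsShr(uint32_t value, uint32_t count) {
      return value >> (count & 31);  // what the IR requires
    }

    uint32_t PpcSrwModel(uint32_t value, uint32_t count) {
      uint32_t c = count & 63;       // slw/srw look at six count bits
      return c >= 32 ? 0u : value >> c;
    }
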
diff --git a/chromium/v8/src/compiler/raw-machine-assembler.cc b/chromium/v8/src/compiler/raw-machine-assembler.cc
index f590902df38..4df2bde448d 100644
--- a/chromium/v8/src/compiler/raw-machine-assembler.cc
+++ b/chromium/v8/src/compiler/raw-machine-assembler.cc
@@ -5,6 +5,7 @@
#include "src/compiler/raw-machine-assembler.h"
#include "src/code-factory.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
@@ -14,7 +15,7 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
CallDescriptor* call_descriptor,
- MachineType word,
+ MachineRepresentation word,
MachineOperatorBuilder::Flags flags)
: isolate_(isolate),
graph_(graph),
@@ -22,18 +23,16 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
machine_(zone(), word, flags),
common_(zone()),
call_descriptor_(call_descriptor),
- parameters_(nullptr),
+ parameters_(parameter_count(), zone()),
current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
- // Add an extra input node for the JSFunction parameter to the start node.
- Node* s = graph->NewNode(common_.Start(param_count + 1));
- graph->SetStart(s);
- if (parameter_count() == 0) return;
- parameters_ = zone()->NewArray<Node*>(param_count);
+ // Add an extra input for the JSFunction parameter to the start node.
+ graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
for (size_t i = 0; i < parameter_count(); ++i) {
parameters_[i] =
AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
+ graph->SetEnd(graph->NewNode(common_.End(0)));
}
@@ -54,15 +53,15 @@ Node* RawMachineAssembler::Parameter(size_t index) {
}
-void RawMachineAssembler::Goto(Label* label) {
+void RawMachineAssembler::Goto(RawMachineLabel* label) {
DCHECK(current_block_ != schedule()->end());
schedule()->AddGoto(CurrentBlock(), Use(label));
current_block_ = nullptr;
}
-void RawMachineAssembler::Branch(Node* condition, Label* true_val,
- Label* false_val) {
+void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
+ RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
Node* branch = AddNode(common()->Branch(), condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
@@ -70,8 +69,9 @@ void RawMachineAssembler::Branch(Node* condition, Label* true_val,
}
-void RawMachineAssembler::Switch(Node* index, Label* default_label,
- int32_t* case_values, Label** case_labels,
+void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
+ int32_t* case_values,
+ RawMachineLabel** case_labels,
size_t case_count) {
DCHECK_NE(schedule()->end(), current_block_);
size_t succ_count = case_count + 1;
@@ -96,6 +96,25 @@ void RawMachineAssembler::Switch(Node* index, Label* default_label,
void RawMachineAssembler::Return(Node* value) {
Node* ret = MakeNode(common()->Return(), 1, &value);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+
+void RawMachineAssembler::Return(Node* v1, Node* v2) {
+ Node* values[] = {v1, v2};
+ Node* ret = MakeNode(common()->Return(2), 2, values);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+
+void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
+ Node* values[] = {v1, v2, v3};
+ Node* ret = MakeNode(common()->Return(3), 3, values);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -105,15 +124,13 @@ Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
Node** args) {
int param_count =
static_cast<int>(desc->GetMachineSignature()->parameter_count());
- int input_count = param_count + 3;
+ int input_count = param_count + 1;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
buffer[index++] = function;
for (int i = 0; i < param_count; i++) {
buffer[index++] = args[i];
}
- buffer[index++] = graph()->start();
- buffer[index++] = graph()->start();
return AddNode(common()->Call(desc), input_count, buffer);
}
@@ -124,7 +141,7 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
DCHECK(desc->NeedsFrameState());
int param_count =
static_cast<int>(desc->GetMachineSignature()->parameter_count());
- int input_count = param_count + 4;
+ int input_count = param_count + 2;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
buffer[index++] = function;
@@ -132,72 +149,121 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
buffer[index++] = args[i];
}
buffer[index++] = frame_state;
- buffer[index++] = graph()->start();
- buffer[index++] = graph()->start();
return AddNode(common()->Call(desc), input_count, buffer);
}
+Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
+ Node* arg1, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(1);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
+}
+
+
+Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(2);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
+ context);
+}
+
+
+Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(4);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
+ ref, arity, context);
+}
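
The three helpers share one input layout: {centry, args..., ref, arity, context}. A hypothetical CallRuntime3, not part of the patch, written only to make the pattern explicit:

    Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
                                            Node* arg1, Node* arg2, Node* arg3,
                                            Node* context) {
      CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
          zone(), function, 3, Operator::kNoProperties,
          CallDescriptor::kNoFlags);
      int return_count = static_cast<int>(descriptor->ReturnCount());

      Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
      Node* ref = AddNode(
          common()->ExternalConstant(ExternalReference(function, isolate())));
      Node* arity = Int32Constant(3);

      return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3,
                     ref, arity, context);
    }
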
+
+
Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
Node** args) {
int param_count =
static_cast<int>(desc->GetMachineSignature()->parameter_count());
- int input_count = param_count + 3;
+ int input_count = param_count + 1;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
buffer[index++] = function;
for (int i = 0; i < param_count; i++) {
buffer[index++] = args[i];
}
- buffer[index++] = graph()->start();
- buffer[index++] = graph()->start();
Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
}
-Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
- Node* context, Node* frame_state,
- CallFunctionFlags flags) {
- Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 1,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
- Node* stub_code = HeapConstant(callable.code());
- return AddNode(common()->Call(desc), stub_code, function, receiver, context,
- frame_state, graph()->start(), graph()->start());
-}
-
-
-Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
- Node* arg1, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 1, Operator::kNoProperties, false);
+Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
+ Node* arg1, Node* context) {
+ const int kArity = 1;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
- Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(1);
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context,
- graph()->start(), graph()->start());
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
}
-Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 2, Operator::kNoProperties, false);
+Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
+ Node* arg1, Node* arg2,
+ Node* context) {
+ const int kArity = 2;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
- Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(2);
+ Node* arity = Int32Constant(kArity);
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
- context, graph()->start(), graph()->start());
+ Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
}
@@ -208,8 +274,7 @@ Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, graph()->start(),
- graph()->start());
+ return AddNode(common()->Call(descriptor), function);
}
@@ -222,8 +287,7 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, graph()->start(),
- graph()->start());
+ return AddNode(common()->Call(descriptor), function, arg0);
}
@@ -238,8 +302,7 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1,
- graph()->start(), graph()->start());
+ return AddNode(common()->Call(descriptor), function, arg0, arg1);
}
@@ -259,24 +322,14 @@ Node* RawMachineAssembler::CallCFunction8(
builder.AddParam(arg5_type);
builder.AddParam(arg6_type);
builder.AddParam(arg7_type);
- Node* args[] = {function,
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5,
- arg6,
- arg7,
- graph()->start(),
- graph()->start()};
+ Node* args[] = {function, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->Call(descriptor), arraysize(args), args);
}
-void RawMachineAssembler::Bind(Label* label) {
+void RawMachineAssembler::Bind(RawMachineLabel* label) {
DCHECK(current_block_ == nullptr);
DCHECK(!label->bound_);
label->bound_ = true;
@@ -284,13 +337,13 @@ void RawMachineAssembler::Bind(Label* label) {
}
-BasicBlock* RawMachineAssembler::Use(Label* label) {
+BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
label->used_ = true;
return EnsureBlock(label);
}
-BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
+BasicBlock* RawMachineAssembler::EnsureBlock(RawMachineLabel* label) {
if (label->block_ == nullptr) label->block_ = schedule()->NewBasicBlock();
return label->block_;
}
@@ -305,7 +358,7 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
Node** inputs) {
DCHECK_NOT_NULL(schedule_);
- DCHECK(current_block_ != nullptr);
+ DCHECK_NOT_NULL(current_block_);
Node* node = MakeNode(op, input_count, inputs);
schedule()->AddNode(CurrentBlock(), node);
return node;
@@ -319,6 +372,13 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
return graph()->NewNodeUnchecked(op, input_count, inputs);
}
+
+RawMachineLabel::RawMachineLabel()
+ : block_(nullptr), used_(false), bound_(false) {}
+
+
+RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
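
A hypothetical usage fragment for the control-flow API touched above (assumes an already-configured RawMachineAssembler m, an existing Node* condition, and the compiler namespace in scope; includes and setup elided):

    void BuildDiamond(RawMachineAssembler& m, Node* condition) {
      RawMachineLabel if_true, if_false, merge;
      m.Branch(condition, &if_true, &if_false);
      m.Bind(&if_true);
      m.Goto(&merge);
      m.Bind(&if_false);
      m.Goto(&merge);
      m.Bind(&merge);
      // Every used label is now bound, which ~RawMachineLabel's
      // DCHECK(bound_ || !used_) enforces at destruction.
      m.Return(m.Int32Constant(0));
    }
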
diff --git a/chromium/v8/src/compiler/raw-machine-assembler.h b/chromium/v8/src/compiler/raw-machine-assembler.h
index 291f69f3b0a..5c232ed1d12 100644
--- a/chromium/v8/src/compiler/raw-machine-assembler.h
+++ b/chromium/v8/src/compiler/raw-machine-assembler.h
@@ -19,8 +19,10 @@ namespace internal {
namespace compiler {
class BasicBlock;
+class RawMachineLabel;
class Schedule;
+
// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
// into a graph and also placed into a schedule immediately, hence subsequent
// code generation can happen without the need for scheduling.
@@ -34,37 +36,19 @@ class Schedule;
// non-schedulable due to missing control and effect dependencies.
class RawMachineAssembler {
public:
- class Label {
- public:
- Label() : block_(NULL), used_(false), bound_(false) {}
- ~Label() { DCHECK(bound_ || !used_); }
-
- private:
- BasicBlock* block_;
- bool used_;
- bool bound_;
- friend class RawMachineAssembler;
- DISALLOW_COPY_AND_ASSIGN(Label);
- };
-
- RawMachineAssembler(Isolate* isolate, Graph* graph,
- CallDescriptor* call_descriptor,
- MachineType word = kMachPtr,
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::Flag::kNoFlags);
+ RawMachineAssembler(
+ Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
+ MachineRepresentation word = MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags);
~RawMachineAssembler() {}
Isolate* isolate() const { return isolate_; }
Graph* graph() const { return graph_; }
- Schedule* schedule() { return schedule_; }
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- size_t parameter_count() const { return machine_sig()->parameter_count(); }
- const MachineSignature* machine_sig() const {
- return call_descriptor_->GetMachineSignature();
- }
// Finalizes the schedule and exports it to be used for code generation. Note
// that this RawMachineAssembler becomes invalid after export.
@@ -75,6 +59,10 @@ class RawMachineAssembler {
// place them into the current basic block. They don't perform control flow,
// hence will not switch the current basic block.
+ Node* NullConstant() {
+ return HeapConstant(isolate()->factory()->null_value());
+ }
+
Node* UndefinedConstant() {
return HeapConstant(isolate()->factory()->undefined_value());
}
@@ -123,15 +111,16 @@ class RawMachineAssembler {
return Load(rep, base, IntPtrConstant(0));
}
Node* Load(MachineType rep, Node* base, Node* index) {
- return AddNode(machine()->Load(rep), base, index, graph()->start(),
- graph()->start());
+ return AddNode(machine()->Load(rep), base, index);
}
- Node* Store(MachineType rep, Node* base, Node* value) {
- return Store(rep, base, IntPtrConstant(0), value);
+ Node* Store(MachineRepresentation rep, Node* base, Node* value,
+ WriteBarrierKind write_barrier) {
+ return Store(rep, base, IntPtrConstant(0), value, write_barrier);
}
- Node* Store(MachineType rep, Node* base, Node* index, Node* value) {
- return AddNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
- base, index, value, graph()->start(), graph()->start());
+ Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
+ WriteBarrierKind write_barrier) {
+ return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
+ base, index, value);
}
// Arithmetic Operations.
@@ -227,6 +216,7 @@ class RawMachineAssembler {
Node* Word64Ror(Node* a, Node* b) {
return AddNode(machine()->Word64Ror(), a, b);
}
+ Node* Word64Clz(Node* a) { return AddNode(machine()->Word64Clz(), a); }
Node* Word64Equal(Node* a, Node* b) {
return AddNode(machine()->Word64Equal(), a, b);
}
@@ -255,10 +245,10 @@ class RawMachineAssembler {
return AddNode(machine()->Int32MulHigh(), a, b);
}
Node* Int32Div(Node* a, Node* b) {
- return AddNode(machine()->Int32Div(), a, b, graph()->start());
+ return AddNode(machine()->Int32Div(), a, b);
}
Node* Int32Mod(Node* a, Node* b) {
- return AddNode(machine()->Int32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Int32Mod(), a, b);
}
Node* Int32LessThan(Node* a, Node* b) {
return AddNode(machine()->Int32LessThan(), a, b);
@@ -267,7 +257,7 @@ class RawMachineAssembler {
return AddNode(machine()->Int32LessThanOrEqual(), a, b);
}
Node* Uint32Div(Node* a, Node* b) {
- return AddNode(machine()->Uint32Div(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Div(), a, b);
}
Node* Uint32LessThan(Node* a, Node* b) {
return AddNode(machine()->Uint32LessThan(), a, b);
@@ -276,7 +266,7 @@ class RawMachineAssembler {
return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
}
Node* Uint32Mod(Node* a, Node* b) {
- return AddNode(machine()->Uint32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Mod(), a, b);
}
Node* Uint32MulHigh(Node* a, Node* b) {
return AddNode(machine()->Uint32MulHigh(), a, b);
@@ -290,9 +280,15 @@ class RawMachineAssembler {
Node* Int64Add(Node* a, Node* b) {
return AddNode(machine()->Int64Add(), a, b);
}
+ Node* Int64AddWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int64AddWithOverflow(), a, b);
+ }
Node* Int64Sub(Node* a, Node* b) {
return AddNode(machine()->Int64Sub(), a, b);
}
+ Node* Int64SubWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int64SubWithOverflow(), a, b);
+ }
Node* Int64Mul(Node* a, Node* b) {
return AddNode(machine()->Int64Mul(), a, b);
}
@@ -355,6 +351,12 @@ class RawMachineAssembler {
Node* Float32Div(Node* a, Node* b) {
return AddNode(machine()->Float32Div(), a, b);
}
+ Node* Float32Max(Node* a, Node* b) {
+ return AddNode(machine()->Float32Max().op(), a, b);
+ }
+ Node* Float32Min(Node* a, Node* b) {
+ return AddNode(machine()->Float32Min().op(), a, b);
+ }
Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
Node* Float32Equal(Node* a, Node* b) {
@@ -389,6 +391,12 @@ class RawMachineAssembler {
Node* Float64Mod(Node* a, Node* b) {
return AddNode(machine()->Float64Mod(), a, b);
}
+ Node* Float64Max(Node* a, Node* b) {
+ return AddNode(machine()->Float64Max().op(), a, b);
+ }
+ Node* Float64Min(Node* a, Node* b) {
+ return AddNode(machine()->Float64Min().op(), a, b);
+ }
Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
Node* Float64Equal(Node* a, Node* b) {
@@ -424,6 +432,38 @@ class RawMachineAssembler {
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
+ Node* TruncateFloat32ToInt64(Node* a) {
+    // TODO(ahaas): Remove this function as soon as WebAssembly no longer
+    // uses it.
+ return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ }
+ Node* TryTruncateFloat32ToInt64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ }
+ Node* TruncateFloat64ToInt64(Node* a) {
+    // TODO(ahaas): Remove this function as soon as WebAssembly no longer
+    // uses it.
+ return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
+ }
+ Node* TryTruncateFloat64ToInt64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
+ }
+ Node* TruncateFloat32ToUint64(Node* a) {
+    // TODO(ahaas): Remove this function as soon as WebAssembly no longer
+    // uses it.
+ return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
+ }
+ Node* TryTruncateFloat32ToUint64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
+ }
+ Node* TruncateFloat64ToUint64(Node* a) {
+    // TODO(ahaas): Remove this function as soon as WebAssembly no longer
+    // uses it.
+ return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
+ }
+ Node* TryTruncateFloat64ToUint64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
+ }
Node* ChangeInt32ToInt64(Node* a) {
return AddNode(machine()->ChangeInt32ToInt64(), a);
}
@@ -439,6 +479,18 @@ class RawMachineAssembler {
Node* TruncateInt64ToInt32(Node* a) {
return AddNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* RoundInt64ToFloat32(Node* a) {
+ return AddNode(machine()->RoundInt64ToFloat32(), a);
+ }
+ Node* RoundInt64ToFloat64(Node* a) {
+ return AddNode(machine()->RoundInt64ToFloat64(), a);
+ }
+ Node* RoundUint64ToFloat32(Node* a) {
+ return AddNode(machine()->RoundUint64ToFloat32(), a);
+ }
+ Node* RoundUint64ToFloat64(Node* a) {
+ return AddNode(machine()->RoundUint64ToFloat64(), a);
+ }
Node* BitcastFloat32ToInt32(Node* a) {
return AddNode(machine()->BitcastFloat32ToInt32(), a);
}
@@ -451,15 +503,33 @@ class RawMachineAssembler {
Node* BitcastInt64ToFloat64(Node* a) {
return AddNode(machine()->BitcastInt64ToFloat64(), a);
}
+ Node* Float32RoundDown(Node* a) {
+ return AddNode(machine()->Float32RoundDown().op(), a);
+ }
Node* Float64RoundDown(Node* a) {
return AddNode(machine()->Float64RoundDown().op(), a);
}
+ Node* Float32RoundUp(Node* a) {
+ return AddNode(machine()->Float32RoundUp().op(), a);
+ }
+ Node* Float64RoundUp(Node* a) {
+ return AddNode(machine()->Float64RoundUp().op(), a);
+ }
+ Node* Float32RoundTruncate(Node* a) {
+ return AddNode(machine()->Float32RoundTruncate().op(), a);
+ }
Node* Float64RoundTruncate(Node* a) {
return AddNode(machine()->Float64RoundTruncate().op(), a);
}
Node* Float64RoundTiesAway(Node* a) {
return AddNode(machine()->Float64RoundTiesAway().op(), a);
}
+ Node* Float32RoundTiesEven(Node* a) {
+ return AddNode(machine()->Float32RoundTiesEven().op(), a);
+ }
+ Node* Float64RoundTiesEven(Node* a) {
+ return AddNode(machine()->Float64RoundTiesEven().op(), a);
+ }
// Float64 bit operations.
Node* Float64ExtractLowWord32(Node* a) {
@@ -486,8 +556,8 @@ class RawMachineAssembler {
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return Load(rep, PointerConstant(address), Int32Constant(offset));
}
- Node* StoreToPointer(void* address, MachineType rep, Node* node) {
- return Store(rep, PointerConstant(address), node);
+ Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
+ return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
}
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
@@ -498,16 +568,14 @@ class RawMachineAssembler {
// Call a given call descriptor and the given arguments and frame-state.
Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
Node* frame_state);
- // Tail call the given call descriptor and the given arguments.
- Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
- // Call through CallFunctionStub with lazy deopt and frame-state.
- Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
- Node* frame_state, CallFunctionFlags flags);
// Call to a runtime function with one arguments.
Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
// Call to a runtime function with two arguments.
Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* context);
+ // Call to a runtime function with four arguments.
+ Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* context);
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
@@ -526,28 +594,41 @@ class RawMachineAssembler {
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
+ // Tail call the given call descriptor and the given arguments.
+ Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
+ // Tail call to a runtime function with one argument.
+ Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
+ Node* context);
+ // Tail call to a runtime function with two arguments.
+ Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* context);
+
+
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
// the current basic block or create new basic blocks for labels.
// Control flow.
- void Goto(Label* label);
- void Branch(Node* condition, Label* true_val, Label* false_val);
- void Switch(Node* index, Label* default_label, int32_t* case_values,
- Label** case_labels, size_t case_count);
+ void Goto(RawMachineLabel* label);
+ void Branch(Node* condition, RawMachineLabel* true_val,
+ RawMachineLabel* false_val);
+ void Switch(Node* index, RawMachineLabel* default_label, int32_t* case_values,
+ RawMachineLabel** case_labels, size_t case_count);
void Return(Node* value);
- void Bind(Label* label);
+ void Return(Node* v1, Node* v2);
+ void Return(Node* v1, Node* v2, Node* v3);
+ void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
// Variables.
- Node* Phi(MachineType type, Node* n1, Node* n2) {
- return AddNode(common()->Phi(type, 2), n1, n2);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
+ return AddNode(common()->Phi(rep, 2), n1, n2);
}
- Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
- return AddNode(common()->Phi(type, 3), n1, n2, n3);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
+ return AddNode(common()->Phi(rep, 3), n1, n2, n3);
}
- Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
- return AddNode(common()->Phi(type, 4), n1, n2, n3, n4);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
+ return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4);
}
// ===========================================================================
@@ -569,22 +650,42 @@ class RawMachineAssembler {
private:
Node* MakeNode(const Operator* op, int input_count, Node** inputs);
- BasicBlock* Use(Label* label);
- BasicBlock* EnsureBlock(Label* label);
+ BasicBlock* Use(RawMachineLabel* label);
+ BasicBlock* EnsureBlock(RawMachineLabel* label);
BasicBlock* CurrentBlock();
+ Schedule* schedule() { return schedule_; }
+ size_t parameter_count() const { return machine_sig()->parameter_count(); }
+ const MachineSignature* machine_sig() const {
+ return call_descriptor_->GetMachineSignature();
+ }
+
Isolate* isolate_;
Graph* graph_;
Schedule* schedule_;
MachineOperatorBuilder machine_;
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
- Node** parameters_;
+ NodeVector parameters_;
BasicBlock* current_block_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
+
+class RawMachineLabel final {
+ public:
+ RawMachineLabel();
+ ~RawMachineLabel();
+
+ private:
+ BasicBlock* block_;
+ bool used_;
+ bool bound_;
+ friend class RawMachineAssembler;
+ DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
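
The reworked Store makes the write-barrier decision explicit at every call site instead of implying it from a MachineType. A hypothetical caller-side fragment (assumes an assembler m and nodes object, offset, value, base, raw_value already exist):

    // Tagged field in a heap object: a GC-visible pointer may be created,
    // so request the full barrier.
    m.Store(MachineRepresentation::kTagged, object, offset, value,
            kFullWriteBarrier);

    // Raw machine word: no pointer the GC needs to see, no barrier.
    m.Store(MachineRepresentation::kWord32, base, raw_value, kNoWriteBarrier);
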
diff --git a/chromium/v8/src/compiler/register-allocator-verifier.cc b/chromium/v8/src/compiler/register-allocator-verifier.cc
index 0b775d29e1d..463795ecf22 100644
--- a/chromium/v8/src/compiler/register-allocator-verifier.cc
+++ b/chromium/v8/src/compiler/register-allocator-verifier.cc
@@ -48,7 +48,7 @@ void VerifyAllocatedGaps(const Instruction* instr) {
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate) {
+ if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -59,6 +59,7 @@ void RegisterAllocatorVerifier::VerifyTemp(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
}
@@ -66,6 +67,7 @@ void RegisterAllocatorVerifier::VerifyTemp(
void RegisterAllocatorVerifier::VerifyOutput(
const OperandConstraint& constraint) {
CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
}
@@ -143,6 +145,8 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->type_ = kConstant;
constraint->value_ = ConstantOperand::cast(op)->virtual_register();
constraint->virtual_register_ = constraint->value_;
+ } else if (op->IsExplicit()) {
+ constraint->type_ = kExplicit;
} else if (op->IsImmediate()) {
auto imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE ? imm->inline_value()
@@ -160,8 +164,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
} else {
switch (unallocated->extended_policy()) {
case UnallocatedOperand::ANY:
- CHECK(false);
- break;
case UnallocatedOperand::NONE:
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kNoneDouble;
@@ -170,7 +172,12 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
}
break;
case UnallocatedOperand::FIXED_REGISTER:
- constraint->type_ = kFixedRegister;
+ if (unallocated->HasSecondaryStorage()) {
+ constraint->type_ = kRegisterAndSlot;
+ constraint->spilled_slot_ = unallocated->GetSecondaryStorage();
+ } else {
+ constraint->type_ = kFixedRegister;
+ }
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
@@ -216,20 +223,26 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kRegister:
CHECK(op->IsRegister());
return;
- case kFixedRegister:
- CHECK(op->IsRegister());
- CHECK_EQ(RegisterOperand::cast(op)->index(), constraint->value_);
- return;
case kDoubleRegister:
CHECK(op->IsDoubleRegister());
return;
+ case kExplicit:
+ CHECK(op->IsExplicit());
+ return;
+ case kFixedRegister:
+ case kRegisterAndSlot:
+ CHECK(op->IsRegister());
+ CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
+ constraint->value_);
+ return;
case kFixedDoubleRegister:
CHECK(op->IsDoubleRegister());
- CHECK_EQ(DoubleRegisterOperand::cast(op)->index(), constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
+ constraint->value_);
return;
case kFixedSlot:
CHECK(op->IsStackSlot());
- CHECK_EQ(StackSlotOperand::cast(op)->index(), constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
return;
case kSlot:
CHECK(op->IsStackSlot());
@@ -282,7 +295,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
struct OperandLess {
bool operator()(const InstructionOperand* a,
const InstructionOperand* b) const {
- return a->CompareModuloType(*b);
+ return a->CompareCanonicalized(*b);
}
};
@@ -316,7 +329,7 @@ class OperandMap : public ZoneObject {
this->erase(it++);
if (it == this->end()) return;
}
- if (it->first->EqualsModuloType(*o.first)) {
+ if (it->first->EqualsCanonicalized(*o.first)) {
++it;
if (it == this->end()) return;
} else {
@@ -379,11 +392,13 @@ class OperandMap : public ZoneObject {
}
}
- void Define(Zone* zone, const InstructionOperand* op, int virtual_register) {
+ MapValue* Define(Zone* zone, const InstructionOperand* op,
+ int virtual_register) {
auto value = new (zone) MapValue();
value->define_vreg = virtual_register;
auto res = map().insert(std::make_pair(op, value));
if (!res.second) res.first->second = value;
+ return value;
}
void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
@@ -676,7 +691,10 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
const auto op_constraints = instr_constraint.operand_constraints_;
size_t count = 0;
for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
- if (op_constraints[count].type_ == kImmediate) continue;
+ if (op_constraints[count].type_ == kImmediate ||
+ op_constraints[count].type_ == kExplicit) {
+ continue;
+ }
int virtual_register = op_constraints[count].virtual_register_;
auto op = instr->InputAt(i);
if (!block_maps->IsPhi(virtual_register)) {
@@ -694,7 +712,20 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
int virtual_register = op_constraints[count].virtual_register_;
- current->Define(zone(), instr->OutputAt(i), virtual_register);
+ OperandMap::MapValue* value =
+ current->Define(zone(), instr->OutputAt(i), virtual_register);
+ if (op_constraints[count].type_ == kRegisterAndSlot) {
+ const AllocatedOperand* reg_op =
+ AllocatedOperand::cast(instr->OutputAt(i));
+ MachineRepresentation rep = reg_op->representation();
+ const AllocatedOperand* stack_op = AllocatedOperand::New(
+ zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
+          op_constraints[count].spilled_slot_);
+ auto insert_result =
+ current->map().insert(std::make_pair(stack_op, value));
+ DCHECK(insert_result.second);
+ USE(insert_result);
+ }
}
}
}
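
The kRegisterAndSlot handling above enters one definition under two keys, the register operand and a freshly built stack-slot operand, both sharing the same MapValue. A self-contained sketch of that aliasing, with string keys standing in for V8's operands:

#include <cassert>
#include <map>
#include <string>

struct MapValue {
  int define_vreg;  // virtual register defined at this location
};

int main() {
  std::map<std::string, MapValue*> operand_map;
  MapValue value{7};  // the instruction's output defines vreg 7

  // One definition, two locations: the fixed register and its
  // preassigned secondary spill slot share the same MapValue.
  operand_map["r1"] = &value;
  operand_map["slot3"] = &value;

  // A later use through either location resolves to the same vreg.
  assert(operand_map["r1"]->define_vreg == operand_map["slot3"]->define_vreg);
  return 0;
}
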
diff --git a/chromium/v8/src/compiler/register-allocator-verifier.h b/chromium/v8/src/compiler/register-allocator-verifier.h
index 15db782a684..f3ab54f0181 100644
--- a/chromium/v8/src/compiler/register-allocator-verifier.h
+++ b/chromium/v8/src/compiler/register-allocator-verifier.h
@@ -35,12 +35,15 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kFixedSlot,
kNone,
kNoneDouble,
- kSameAsFirst
+ kExplicit,
+ kSameAsFirst,
+ kRegisterAndSlot
};
struct OperandConstraint {
ConstraintType type_;
int value_; // subkind index when relevant
+ int spilled_slot_;
int virtual_register_;
};
diff --git a/chromium/v8/src/compiler/register-allocator.cc b/chromium/v8/src/compiler/register-allocator.cc
index 840c13b1a77..232ad9fec1f 100644
--- a/chromium/v8/src/compiler/register-allocator.cc
+++ b/chromium/v8/src/compiler/register-allocator.cc
@@ -27,14 +27,29 @@ void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? cfg->num_aliased_double_registers()
+ return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
: cfg->num_general_registers();
}
+int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS
+ ? cfg->num_allocatable_aliased_double_registers()
+ : cfg->num_allocatable_general_registers();
+}
+
+
+const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
+ : cfg->allocatable_general_codes();
+}
+
+
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
- auto index = block->loop_header();
+ RpoNumber index = block->loop_header();
if (!index.IsValid()) return nullptr;
return sequence->InstructionBlockAt(index);
}
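
GetAllocatableRegisterCodes returns an indirection table: the allocator iterates allocatable indices 0..count-1 and maps each through the code array to an architectural register, so reserved registers never come up for allocation at all. A sketch with invented numbers (six allocatable general registers, r4 reserved):

#include <cstdio>

// Hypothetical configuration: r4 is reserved, so it is absent here.
const int kAllocatableGeneralCodes[] = {0, 1, 2, 3, 5, 6};
const int kNumAllocatableGeneralRegisters = 6;

int main() {
  // The allocator's loop variable is an *allocatable index*; only the
  // final mapping produces a real register code.
  for (int i = 0; i < kNumAllocatableGeneralRegisters; ++i) {
    std::printf("allocatable %d -> r%d\n", i, kAllocatableGeneralCodes[i]);
  }
  return 0;
}
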
@@ -52,11 +67,11 @@ Instruction* GetLastInstruction(InstructionSequence* code,
}
-bool IsOutputRegisterOf(Instruction* instr, int index) {
+bool IsOutputRegisterOf(Instruction* instr, Register reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
+ InstructionOperand* output = instr->OutputAt(i);
if (output->IsRegister() &&
- RegisterOperand::cast(output)->index() == index) {
+ LocationOperand::cast(output)->GetRegister().is(reg)) {
return true;
}
}
@@ -64,11 +79,11 @@ bool IsOutputRegisterOf(Instruction* instr, int index) {
}
-bool IsOutputDoubleRegisterOf(Instruction* instr, int index) {
+bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
+ InstructionOperand* output = instr->OutputAt(i);
if (output->IsDoubleRegister() &&
- DoubleRegisterOperand::cast(output)->index() == index) {
+ LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
return true;
}
}
@@ -77,23 +92,23 @@ bool IsOutputDoubleRegisterOf(Instruction* instr, int index) {
// TODO(dcarney): fix frame to allow frame accesses to half-size locations.
-int GetByteWidth(MachineType machine_type) {
- DCHECK_EQ(RepresentationOf(machine_type), machine_type);
- switch (machine_type) {
- case kRepBit:
- case kRepWord8:
- case kRepWord16:
- case kRepWord32:
- case kRepTagged:
+int GetByteWidth(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTagged:
return kPointerSize;
- case kRepFloat32:
- case kRepWord64:
- case kRepFloat64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat64:
return 8;
- default:
- UNREACHABLE();
- return 0;
+ case MachineRepresentation::kNone:
+ break;
}
+ UNREACHABLE();
+ return 0;
}
} // namespace
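
Rewriting GetByteWidth without a default clause is the standard exhaustive-switch idiom: once every MachineRepresentation is spelled out, -Wswitch-style diagnostics will flag any enumerator added later, and the trailing UNREACHABLE() covers release builds. A standalone sketch of the idiom, with local stand-in names rather than the V8 enum:

#include <cstdlib>

enum class Rep { kWord32, kWord64, kFloat64, kNone };

int ByteWidthOf(Rep rep) {
  switch (rep) {  // no default: the compiler now checks exhaustiveness
    case Rep::kWord32:
      return 4;
    case Rep::kWord64:
    case Rep::kFloat64:
      return 8;
    case Rep::kNone:
      break;  // deliberately falls out to the failure path below
  }
  std::abort();  // UNREACHABLE() in V8; a plain abort in this sketch
}
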
@@ -129,30 +144,35 @@ bool UsePosition::HasHint() const {
}
-bool UsePosition::HintRegister(int* register_index) const {
+bool UsePosition::HintRegister(int* register_code) const {
if (hint_ == nullptr) return false;
switch (HintTypeField::decode(flags_)) {
case UsePositionHintType::kNone:
case UsePositionHintType::kUnresolved:
return false;
case UsePositionHintType::kUsePos: {
- auto use_pos = reinterpret_cast<UsePosition*>(hint_);
+ UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
if (assigned_register == kUnassignedRegister) return false;
- *register_index = assigned_register;
+ *register_code = assigned_register;
return true;
}
case UsePositionHintType::kOperand: {
- auto operand = reinterpret_cast<InstructionOperand*>(hint_);
- int assigned_register = AllocatedOperand::cast(operand)->index();
- *register_index = assigned_register;
+ InstructionOperand* operand =
+ reinterpret_cast<InstructionOperand*>(hint_);
+ int assigned_register =
+ operand->IsRegister()
+ ? LocationOperand::cast(operand)->GetRegister().code()
+ : LocationOperand::cast(operand)->GetDoubleRegister().code();
+ *register_code = assigned_register;
return true;
}
case UsePositionHintType::kPhi: {
- auto phi = reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
+ RegisterAllocationData::PhiMapValue* phi =
+ reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
int assigned_register = phi->assigned_register();
if (assigned_register == kUnassignedRegister) return false;
- *register_index = assigned_register;
+ *register_code = assigned_register;
return true;
}
}
@@ -166,17 +186,16 @@ UsePositionHintType UsePosition::HintTypeForOperand(
switch (op.kind()) {
case InstructionOperand::CONSTANT:
case InstructionOperand::IMMEDIATE:
+ case InstructionOperand::EXPLICIT:
return UsePositionHintType::kNone;
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
case InstructionOperand::ALLOCATED:
- switch (AllocatedOperand::cast(op).allocated_kind()) {
- case AllocatedOperand::REGISTER:
- case AllocatedOperand::DOUBLE_REGISTER:
- return UsePositionHintType::kOperand;
- case AllocatedOperand::STACK_SLOT:
- case AllocatedOperand::DOUBLE_STACK_SLOT:
- return UsePositionHintType::kNone;
+ if (op.IsRegister() || op.IsDoubleRegister()) {
+ return UsePositionHintType::kOperand;
+ } else {
+ DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
+ return UsePositionHintType::kNone;
}
case InstructionOperand::INVALID:
break;
@@ -206,7 +225,7 @@ void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
DCHECK(Contains(pos) && pos != start());
- auto after = new (zone) UseInterval(pos, end_);
+ UseInterval* after = new (zone) UseInterval(pos, end_);
after->next_ = next_;
next_ = nullptr;
end_ = pos;
@@ -214,6 +233,12 @@ UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
}
+void LifetimePosition::Print() const {
+ OFStream os(stdout);
+ os << *this << std::endl;
+}
+
+
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
os << '@' << pos.ToInstructionIndex();
if (pos.IsGapPosition()) {
@@ -234,7 +259,7 @@ const float LiveRange::kInvalidWeight = -1;
const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-LiveRange::LiveRange(int relative_id, MachineType machine_type,
+LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level)
: relative_id_(relative_id),
bits_(0),
@@ -246,30 +271,43 @@ LiveRange::LiveRange(int relative_id, MachineType machine_type,
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr),
+ splitting_pointer_(nullptr),
size_(kInvalidSize),
weight_(kInvalidWeight),
group_(nullptr) {
- DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
+ DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
- MachineTypeField::encode(machine_type);
+ RepresentationField::encode(rep);
}
-void LiveRange::Verify() const {
+void LiveRange::VerifyPositions() const {
// Walk the positions, verifying that each is in an interval.
- auto interval = first_interval_;
- for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) {
+ UseInterval* interval = first_interval_;
+ for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
CHECK(Start() <= pos->pos());
CHECK(pos->pos() <= End());
- CHECK(interval != nullptr);
+ CHECK_NOT_NULL(interval);
while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
interval = interval->next();
- CHECK(interval != nullptr);
+ CHECK_NOT_NULL(interval);
}
}
}
+void LiveRange::VerifyIntervals() const {
+ DCHECK(first_interval()->start() == Start());
+ LifetimePosition last_end = first_interval()->end();
+ for (UseInterval* interval = first_interval()->next(); interval != nullptr;
+ interval = interval->next()) {
+ DCHECK(last_end <= interval->start());
+ last_end = interval->end();
+ }
+ DCHECK(last_end == End());
+}
+
+
void LiveRange::set_assigned_register(int reg) {
DCHECK(!HasRegisterAssigned() && !spilled());
bits_ = AssignedRegisterField::update(bits_, reg);
@@ -291,19 +329,13 @@ void LiveRange::Spill() {
RegisterKind LiveRange::kind() const {
- switch (RepresentationOf(machine_type())) {
- case kRepFloat32:
- case kRepFloat64:
- return DOUBLE_REGISTERS;
- default:
- break;
- }
- return GENERAL_REGISTERS;
+ return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
+ : GENERAL_REGISTERS;
}
UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
- for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
if (pos->HintRegister(register_index)) return pos;
}
return nullptr;
@@ -335,7 +367,7 @@ UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
LifetimePosition start) const {
- auto pos = first_pos();
+ UsePosition* pos = first_pos();
UsePosition* prev = nullptr;
while (pos != nullptr && pos->pos() < start) {
if (pos->RegisterIsBeneficial()) prev = pos;
@@ -367,7 +399,7 @@ UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
- auto use_pos = NextRegisterPosition(pos);
+ UsePosition* use_pos = NextRegisterPosition(pos);
if (use_pos == nullptr) return true;
return use_pos->pos() > pos.NextStart().End();
}
@@ -379,17 +411,13 @@ bool LiveRange::IsTopLevel() const { return top_level_ == this; }
InstructionOperand LiveRange::GetAssignedOperand() const {
if (HasRegisterAssigned()) {
DCHECK(!spilled());
- switch (kind()) {
- case GENERAL_REGISTERS:
- return RegisterOperand(machine_type(), assigned_register());
- case DOUBLE_REGISTERS:
- return DoubleRegisterOperand(machine_type(), assigned_register());
- }
+ return AllocatedOperand(LocationOperand::REGISTER, representation(),
+ assigned_register());
}
DCHECK(spilled());
DCHECK(!HasRegisterAssigned());
if (TopLevel()->HasSpillOperand()) {
- auto op = TopLevel()->GetSpillOperand();
+ InstructionOperand* op = TopLevel()->GetSpillOperand();
DCHECK(!op->IsUnallocated());
return *op;
}
@@ -412,8 +440,9 @@ void LiveRange::AdvanceLastProcessedMarker(
UseInterval* to_start_of, LifetimePosition but_not_past) const {
if (to_start_of == nullptr) return;
if (to_start_of->start() > but_not_past) return;
- auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
- : current_interval_->start();
+ LifetimePosition start = current_interval_ == nullptr
+ ? LifetimePosition::Invalid()
+ : current_interval_->start();
if (to_start_of->start() > start) {
current_interval_ = to_start_of;
}
@@ -422,28 +451,25 @@ void LiveRange::AdvanceLastProcessedMarker(
LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
int new_id = TopLevel()->GetNextChildId();
- LiveRange* child = new (zone) LiveRange(new_id, machine_type(), TopLevel());
+ LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
DetachAt(position, child, zone);
child->top_level_ = TopLevel();
child->next_ = next_;
next_ = child;
- if (child->next() == nullptr) {
- TopLevel()->set_last_child(child);
- }
return child;
}
-void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
- Zone* zone) {
+UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone) {
DCHECK(Start() < position);
DCHECK(End() > position);
DCHECK(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
- auto current = FirstSearchIntervalForPosition(position);
+ UseInterval* current = FirstSearchIntervalForPosition(position);
  // If the split position coincides with the beginning of a use interval,
  // we need to split use positions in a special way.
@@ -460,7 +486,7 @@ void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
after = current->SplitAt(position, zone);
break;
}
- auto next = current->next();
+ UseInterval* next = current->next();
if (next->start() >= position) {
split_at_start = (next->start() == position);
after = next;
@@ -472,7 +498,7 @@ void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
DCHECK(nullptr != after);
// Partition original use intervals to the two live ranges.
- auto before = current;
+ UseInterval* before = current;
result->last_interval_ =
(last_interval_ == before)
? after // Only interval in the range after split.
@@ -482,7 +508,10 @@ void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
// Find the last use position before the split and the first use
// position after it.
- auto use_after = first_pos_;
+ UsePosition* use_after =
+ splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
+ ? first_pos()
+ : splitting_pointer_;
UsePosition* use_before = nullptr;
if (split_at_start) {
// The split position coincides with the beginning of a use interval (the
@@ -517,18 +546,10 @@ void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
size_ = kInvalidSize;
weight_ = kInvalidWeight;
#ifdef DEBUG
- Verify();
- result->Verify();
+ VerifyChildStructure();
+ result->VerifyChildStructure();
#endif
-}
-
-
-void LiveRange::AppendAsChild(TopLevelLiveRange* other) {
- next_ = other;
-
- other->UpdateParentForAllChildren(TopLevel());
- TopLevel()->UpdateSpillRangePostMerge(other);
- TopLevel()->set_last_child(other->last_child());
+ return use_before;
}
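
DetachAt now returns the last use position left on the head side of the split; Splinter caches it as splitting_pointer_ so a later DetachAt can resume its scan there instead of walking from first_pos_ again. The underlying list operation, sketched on a bare singly linked list (Node and SplitAt are local stand-ins, not the V8 types):

#include <cassert>

struct Node {
  int pos;
  Node* next;
};

// Split the sorted list at `position`: nodes with pos < position stay in
// the head list, the rest form the returned tail. `last_before` receives
// the final retained node, the analogue of DetachAt's return value.
Node* SplitAt(Node* head, int position, Node** last_before) {
  *last_before = nullptr;
  Node* cur = head;
  while (cur != nullptr && cur->pos < position) {
    *last_before = cur;
    cur = cur->next;
  }
  if (*last_before != nullptr) (*last_before)->next = nullptr;
  return cur;  // first node at or after the split position
}

int main() {
  Node c{30, nullptr}, b{20, &c}, a{10, &b};
  Node* last = nullptr;
  Node* tail = SplitAt(&a, 25, &last);
  assert(last == &b && tail == &c && b.next == nullptr);
  return 0;
}
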
@@ -542,7 +563,7 @@ void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
const InstructionOperand& spill_op) {
- for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
DCHECK(Start() <= pos->pos() && pos->pos() <= End());
if (!pos->HasOperand()) continue;
switch (pos->type()) {
@@ -581,7 +602,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
void LiveRange::SetUseHints(int register_index) {
- for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
@@ -603,8 +624,8 @@ bool LiveRange::CanCover(LifetimePosition position) const {
bool LiveRange::Covers(LifetimePosition position) const {
if (!CanCover(position)) return false;
- auto start_search = FirstSearchIntervalForPosition(position);
- for (auto interval = start_search; interval != nullptr;
+ UseInterval* start_search = FirstSearchIntervalForPosition(position);
+ for (UseInterval* interval = start_search; interval != nullptr;
interval = interval->next()) {
DCHECK(interval->next() == nullptr ||
interval->next()->start() >= interval->start());
@@ -617,14 +638,14 @@ bool LiveRange::Covers(LifetimePosition position) const {
LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
- auto b = other->first_interval();
+ UseInterval* b = other->first_interval();
if (b == nullptr) return LifetimePosition::Invalid();
- auto advance_last_processed_up_to = b->start();
- auto a = FirstSearchIntervalForPosition(b->start());
+ LifetimePosition advance_last_processed_up_to = b->start();
+ UseInterval* a = FirstSearchIntervalForPosition(b->start());
while (a != nullptr && b != nullptr) {
if (a->start() > other->End()) break;
if (b->start() > End()) break;
- auto cur_intersection = a->Intersect(b);
+ LifetimePosition cur_intersection = a->Intersect(b);
if (cur_intersection.IsValid()) {
return cur_intersection;
}
@@ -643,7 +664,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
unsigned LiveRange::GetSize() {
if (size_ == kInvalidSize) {
size_ = 0;
- for (auto interval = first_interval(); interval != nullptr;
+ for (const UseInterval* interval = first_interval(); interval != nullptr;
interval = interval->next()) {
size_ += (interval->end().value() - interval->start().value());
}
@@ -653,81 +674,70 @@ unsigned LiveRange::GetSize() {
}
-struct TopLevelLiveRange::SpillAtDefinitionList : ZoneObject {
- SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
- SpillAtDefinitionList* next)
+void LiveRange::Print(const RegisterConfiguration* config,
+ bool with_children) const {
+ OFStream os(stdout);
+ PrintableLiveRange wrapper;
+ wrapper.register_configuration_ = config;
+ for (const LiveRange* i = this; i != nullptr; i = i->next()) {
+ wrapper.range_ = i;
+ os << wrapper << std::endl;
+ if (!with_children) break;
+ }
+}
+
+
+void LiveRange::Print(bool with_children) const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config, with_children);
+}
+
+
+struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
+ SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
+ SpillMoveInsertionList* next)
: gap_index(gap_index), operand(operand), next(next) {}
const int gap_index;
InstructionOperand* const operand;
- SpillAtDefinitionList* const next;
+ SpillMoveInsertionList* const next;
};
-TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type)
- : LiveRange(0, machine_type, this),
+TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
+ : LiveRange(0, rep, this),
vreg_(vreg),
last_child_id_(0),
splintered_from_(nullptr),
spill_operand_(nullptr),
- spills_at_definition_(nullptr),
+ spill_move_insertion_locations_(nullptr),
spilled_in_deferred_blocks_(false),
spill_start_index_(kMaxInt),
- last_child_(this),
- last_insertion_point_(this) {
+ last_pos_(nullptr),
+ splinter_(nullptr),
+ has_preassigned_slot_(false) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
-void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand) {
+#if DEBUG
+int TopLevelLiveRange::debug_virt_reg() const {
+ return IsSplinter() ? splintered_from()->vreg() : vreg();
+}
+#endif
+
+
+void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
+ InstructionOperand* operand) {
DCHECK(HasNoSpillType());
- spills_at_definition_ = new (zone)
- SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
+ spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
+ gap_index, operand, spill_move_insertion_locations_);
}
bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
InstructionSequence* code, const InstructionOperand& spill_operand) {
- if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
- spill_operand.IsConstant() || spill_operand.IsImmediate()) {
- return false;
- }
-
- int count = 0;
- for (const LiveRange* child = this; child != nullptr; child = child->next()) {
- int first_instr = child->Start().ToInstructionIndex();
-
- // If the range starts at instruction end, the first instruction index is
- // the next one.
- if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
- ++first_instr;
- }
-
- // We only look at where the range starts. It doesn't matter where it ends:
- // if it ends past this block, then either there is a phi there already,
- // or ResolveControlFlow will adapt the last instruction gap of this block
- // as if there were a phi. In either case, data flow will be correct.
- const InstructionBlock* block = code->GetInstructionBlock(first_instr);
-
- // If we have slot uses in a subrange, bail out, because we need the value
- // on the stack before that use.
- bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
- if (!block->IsDeferred()) {
- if (child->spilled() || has_slot_use) {
- TRACE(
- "Live Range %d must be spilled at definition: found a "
- "slot-requiring non-deferred child range %d.\n",
- TopLevel()->vreg(), child->relative_id());
- return false;
- }
- } else {
- if (child->spilled() || has_slot_use) ++count;
- }
- }
- if (count == 0) return false;
-
- spill_start_index_ = -1;
- spilled_in_deferred_blocks_ = true;
+ if (!IsSpilledOnlyInDeferredBlocks()) return false;
TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
// If we have ranges that aren't spilled but require the operand on the stack,
@@ -735,14 +745,15 @@ bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
if (!child->spilled() &&
child->NextSlotPosition(child->Start()) != nullptr) {
- auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
+ Instruction* instr =
+ code->InstructionAt(child->Start().ToInstructionIndex());
// Insert spill at the end to let live range connections happen at START.
- auto move =
+ ParallelMove* move =
instr->GetOrCreateParallelMove(Instruction::END, code->zone());
InstructionOperand assigned = child->GetAssignedOperand();
if (TopLevel()->has_slot_use()) {
bool found = false;
- for (auto move_op : *move) {
+ for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(assigned) &&
move_op->destination().Equals(spill_operand)) {
@@ -761,31 +772,35 @@ bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
}
-void TopLevelLiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
- const InstructionOperand& op,
- bool might_be_duplicated) {
- DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
- auto zone = sequence->zone();
+void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
+ const InstructionOperand& op,
+ bool might_be_duplicated) {
+ DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+ Zone* zone = sequence->zone();
- for (auto to_spill = spills_at_definition_; to_spill != nullptr;
- to_spill = to_spill->next) {
- auto instr = sequence->InstructionAt(to_spill->gap_index);
- auto move = instr->GetOrCreateParallelMove(Instruction::START, zone);
+ for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+ to_spill != nullptr; to_spill = to_spill->next) {
+ Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
+ ParallelMove* move =
+ instr->GetOrCreateParallelMove(Instruction::START, zone);
// Skip insertion if it's possible that the move exists already as a
// constraint move from a fixed output register to a slot.
- if (might_be_duplicated) {
+ if (might_be_duplicated || has_preassigned_slot()) {
bool found = false;
- for (auto move_op : *move) {
+ for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(*to_spill->operand) &&
move_op->destination().Equals(op)) {
found = true;
+ if (has_preassigned_slot()) move_op->Eliminate();
break;
}
}
if (found) continue;
}
- move->AddMove(*to_spill->operand, op);
+ if (!has_preassigned_slot()) {
+ move->AddMove(*to_spill->operand, op);
+ }
}
}
@@ -806,49 +821,37 @@ void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
- auto spill_range = GetSpillRange();
+ SpillRange* spill_range = GetSpillRange();
int index = spill_range->assigned_slot();
- switch (kind()) {
- case GENERAL_REGISTERS:
- return StackSlotOperand(machine_type(), index);
- case DOUBLE_REGISTERS:
- return DoubleStackSlotOperand(machine_type(), index);
- }
- UNREACHABLE();
- return StackSlotOperand(kMachNone, 0);
+ return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
}
void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
- TopLevelLiveRange* result, Zone* zone) {
+ Zone* zone) {
DCHECK(start != Start() || end != End());
DCHECK(start < end);
- result->set_spill_type(spill_type());
-
- if (start <= Start()) {
- // TODO(mtrofin): here, the TopLevel part is in the deferred range, so we
- // may want to continue processing the splinter. However, if the value is
- // defined in a cold block, and then used in a hot block, it follows that
- // it should terminate on the RHS of a phi, defined on the hot path. We
- // should check this, however, this may not be the place, because we don't
- // have access to the instruction sequence.
- DCHECK(end < End());
- DetachAt(end, result, zone);
- next_ = nullptr;
- } else if (end >= End()) {
+ TopLevelLiveRange splinter_temp(-1, representation());
+ UsePosition* last_in_splinter = nullptr;
+ // Live ranges defined in deferred blocks stay in deferred blocks, so we
+ // don't need to splinter them. That means that start should always be
+ // after the beginning of the range.
+ DCHECK(start > Start());
+
+ if (end >= End()) {
DCHECK(start > Start());
- DetachAt(start, result, zone);
+ DetachAt(start, &splinter_temp, zone);
next_ = nullptr;
} else {
DCHECK(start < End() && Start() < end);
const int kInvalidId = std::numeric_limits<int>::max();
- DetachAt(start, result, zone);
+ UsePosition* last = DetachAt(start, &splinter_temp, zone);
- LiveRange end_part(kInvalidId, this->machine_type(), nullptr);
- result->DetachAt(end, &end_part, zone);
+ LiveRange end_part(kInvalidId, this->representation(), nullptr);
+ last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
next_ = end_part.next_;
last_interval_->set_next(end_part.first_interval_);
@@ -858,30 +861,46 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
current_interval_ = last_interval_;
last_interval_ = end_part.last_interval_;
-
if (first_pos_ == nullptr) {
first_pos_ = end_part.first_pos_;
} else {
- UsePosition* pos = first_pos_;
- for (; pos->next() != nullptr; pos = pos->next()) {
- }
- pos->set_next(end_part.first_pos_);
+ splitting_pointer_ = last;
+ if (last != nullptr) last->set_next(end_part.first_pos_);
}
}
- result->next_ = nullptr;
- result->top_level_ = result;
- result->SetSplinteredFrom(this);
- // Ensure the result's relative ID is unique within the IDs used for this
- // virtual register's children and splinters.
- result->relative_id_ = GetNextChildId();
+ if (splinter()->IsEmpty()) {
+ splinter()->first_interval_ = splinter_temp.first_interval_;
+ splinter()->last_interval_ = splinter_temp.last_interval_;
+ } else {
+ splinter()->last_interval_->set_next(splinter_temp.first_interval_);
+ splinter()->last_interval_ = splinter_temp.last_interval_;
+ }
+ if (splinter()->first_pos() == nullptr) {
+ splinter()->first_pos_ = splinter_temp.first_pos_;
+ } else {
+ splinter()->last_pos_->set_next(splinter_temp.first_pos_);
+ }
+ if (last_in_splinter != nullptr) {
+ splinter()->last_pos_ = last_in_splinter;
+ } else {
+ if (splinter()->first_pos() != nullptr &&
+ splinter()->last_pos_ == nullptr) {
+ splinter()->last_pos_ = splinter()->first_pos();
+ for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ splinter()->last_pos_ = pos;
+ }
+ }
+ }
+#if DEBUG
+ Verify();
+ splinter()->Verify();
+#endif
}
void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
- // The splinter parent is always the original "Top".
- DCHECK(splinter_parent->Start() < Start());
-
splintered_from_ = splinter_parent;
if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
SetSpillRange(splinter_parent->spill_range_);
@@ -906,43 +925,73 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
DCHECK(Start() < other->Start());
DCHECK(other->splintered_from() == this);
- LiveRange* last_other = other->last_child();
- LiveRange* last_me = last_child();
+ LiveRange* first = this;
+ LiveRange* second = other;
+ DCHECK(first->Start() < second->Start());
+ while (first != nullptr && second != nullptr) {
+ DCHECK(first != second);
+ // Make sure the ranges are in order each time we iterate.
+ if (second->Start() < first->Start()) {
+ LiveRange* tmp = second;
+ second = first;
+ first = tmp;
+ continue;
+ }
- // Simple case: we just append at the end.
- if (last_me->End() <= other->Start()) return last_me->AppendAsChild(other);
+ if (first->End() <= second->Start()) {
+ if (first->next() == nullptr ||
+ first->next()->Start() > second->Start()) {
+ // First is in order before second.
+ LiveRange* temp = first->next();
+ first->next_ = second;
+ first = temp;
+ } else {
+ // First is in order before its successor (or second), so advance first.
+ first = first->next();
+ }
+ continue;
+ }
- DCHECK(last_me->End() > last_other->End());
+ DCHECK(first->Start() < second->Start());
+ // If first and second intersect, split first.
+ if (first->Start() < second->End() && second->Start() < first->End()) {
+ LiveRange* temp = first->SplitAt(second->Start(), zone);
+ CHECK(temp != first);
+ temp->set_spilled(first->spilled());
+ if (!temp->spilled())
+ temp->set_assigned_register(first->assigned_register());
- // In the more general case, we need to find the ranges between which to
- // insert.
- if (other->Start() < last_insertion_point_->Start()) {
- last_insertion_point_ = this;
+ first->next_ = second;
+ first = temp;
+ continue;
+ }
+ DCHECK(first->End() <= second->Start());
}
- for (; last_insertion_point_->next() != nullptr &&
- last_insertion_point_->next()->Start() <= other->Start();
- last_insertion_point_ = last_insertion_point_->next()) {
- }
+ TopLevel()->UpdateParentForAllChildren(TopLevel());
+ TopLevel()->UpdateSpillRangePostMerge(other);
- // When we splintered the original range, we reconstituted the original range
- // into one range without children, but with discontinuities. To merge the
- // splinter back in, we need to split the range - or a child obtained after
- // register allocation splitting.
- LiveRange* after = last_insertion_point_->next();
- if (last_insertion_point_->End() > other->Start()) {
- LiveRange* new_after = last_insertion_point_->SplitAt(other->Start(), zone);
- new_after->set_spilled(last_insertion_point_->spilled());
- if (!new_after->spilled())
- new_after->set_assigned_register(
- last_insertion_point_->assigned_register());
- after = new_after;
+#if DEBUG
+ Verify();
+#endif
+}
+
+
+void TopLevelLiveRange::VerifyChildrenInOrder() const {
+ LifetimePosition last_end = End();
+ for (const LiveRange* child = this->next(); child != nullptr;
+ child = child->next()) {
+ DCHECK(last_end <= child->Start());
+ last_end = child->End();
}
+}
- last_other->next_ = after;
- last_insertion_point_->next_ = other;
- other->UpdateParentForAllChildren(TopLevel());
- TopLevel()->UpdateSpillRangePostMerge(other);
+
+void TopLevelLiveRange::Verify() const {
+ VerifyChildrenInOrder();
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+    child->VerifyChildStructure();
+ }
}
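
The rewritten Merge is, at heart, an in-place merge of two start-sorted, singly linked child lists, with the extra step of splitting children that intersect. A compilable sketch of just the merge discipline, assuming the children already do not overlap (the SplitAt step is omitted):

#include <cassert>

struct Range {
  int start;
  int end;  // half-open: [start, end)
  Range* next;
};

// Merge two lists sorted by start into one sorted list, in place. The
// swap keeps `first` pointing at the earlier-starting list, mirroring
// the "make sure the ranges are in order" step in Merge.
Range* MergeSorted(Range* first, Range* second) {
  Range head{0, 0, nullptr};
  Range* tail = &head;
  while (first != nullptr && second != nullptr) {
    if (second->start < first->start) {
      Range* tmp = first;
      first = second;
      second = tmp;
    }
    assert(first->end <= second->start);  // children must not overlap here
    tail->next = first;
    tail = first;
    first = first->next;
  }
  tail->next = (first != nullptr) ? first : second;
  return head.next;
}

int main() {
  Range d{40, 50, nullptr}, b{20, 30, &d};  // list one: [20,30) [40,50)
  Range c{30, 40, nullptr}, a{10, 20, &c};  // list two: [10,20) [30,40)
  Range* merged = MergeSorted(&b, &a);
  for (Range* r = merged; r->next != nullptr; r = r->next) {
    assert(r->start < r->next->start);      // sorted after the merge
  }
  return 0;
}
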
@@ -959,7 +1008,7 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
LifetimePosition end, Zone* zone) {
TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
end.value());
- auto new_end = end;
+ LifetimePosition new_end = end;
while (first_interval_ != nullptr && first_interval_->start() <= end) {
if (first_interval_->end() > end) {
new_end = first_interval_->end();
@@ -967,7 +1016,7 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
first_interval_ = first_interval_->next();
}
- auto new_interval = new (zone) UseInterval(start, new_end);
+ UseInterval* new_interval = new (zone) UseInterval(start, new_end);
new_interval->set_next(first_interval_);
first_interval_ = new_interval;
if (new_interval->next() == nullptr) {
@@ -981,14 +1030,14 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
end.value());
if (first_interval_ == nullptr) {
- auto interval = new (zone) UseInterval(start, end);
+ UseInterval* interval = new (zone) UseInterval(start, end);
first_interval_ = interval;
last_interval_ = interval;
} else {
if (end == first_interval_->start()) {
first_interval_->set_start(start);
} else if (end < first_interval_->start()) {
- auto interval = new (zone) UseInterval(start, end);
+ UseInterval* interval = new (zone) UseInterval(start, end);
interval->set_next(first_interval_);
first_interval_ = interval;
} else {
@@ -1004,11 +1053,11 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
- auto pos = use_pos->pos();
+ LifetimePosition pos = use_pos->pos();
TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
- auto current = first_pos_;
+ UsePosition* current = first_pos_;
while (current != nullptr && current->pos() < pos) {
prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
@@ -1057,8 +1106,8 @@ std::ostream& operator<<(std::ostream& os,
if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
os << "{" << std::endl;
- auto interval = range->first_interval();
- auto use_pos = range->first_pos();
+ UseInterval* interval = range->first_interval();
+ UsePosition* use_pos = range->first_pos();
PrintableInstructionOperand pio;
pio.register_configuration_ = printable_range.register_configuration_;
while (use_pos != nullptr) {
@@ -1083,7 +1132,7 @@ std::ostream& operator<<(std::ostream& os,
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
- byte_width_(GetByteWidth(parent->machine_type())),
+ byte_width_(GetByteWidth(parent->representation())),
kind_(parent->kind()) {
// Spill ranges are created for top level, non-splintered ranges. This is so
// that, when merging decisions are made, we consider the full extent of the
@@ -1093,9 +1142,9 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
UseInterval* node = nullptr;
// Copy the intervals for all ranges.
for (LiveRange* range = parent; range != nullptr; range = range->next()) {
- auto src = range->first_interval();
+ UseInterval* src = range->first_interval();
while (src != nullptr) {
- auto new_node = new (zone) UseInterval(src->start(), src->end());
+ UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
if (result == nullptr) {
result = new_node;
} else {
@@ -1113,7 +1162,7 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
int SpillRange::ByteWidth() const {
- return GetByteWidth(live_ranges_[0]->machine_type());
+ return GetByteWidth(live_ranges_[0]->representation());
}
@@ -1128,13 +1177,14 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
bool SpillRange::TryMerge(SpillRange* other) {
+ if (HasSlot() || other->HasSlot()) return false;
// TODO(dcarney): byte widths should be compared here not kinds.
if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
IsIntersectingWith(other)) {
return false;
}
- auto max = LifetimePosition::MaxPosition();
+ LifetimePosition max = LifetimePosition::MaxPosition();
if (End() < other->End() && other->End() != max) {
end_position_ = other->End();
}
@@ -1143,7 +1193,7 @@ bool SpillRange::TryMerge(SpillRange* other) {
MergeDisjointIntervals(other->use_interval_);
other->use_interval_ = nullptr;
- for (auto range : other->live_ranges()) {
+ for (TopLevelLiveRange* range : other->live_ranges()) {
DCHECK(range->GetSpillRange() == other);
range->SetSpillRange(this);
}
@@ -1158,7 +1208,7 @@ bool SpillRange::TryMerge(SpillRange* other) {
void SpillRange::MergeDisjointIntervals(UseInterval* other) {
UseInterval* tail = nullptr;
- auto current = use_interval_;
+ UseInterval* current = use_interval_;
while (other != nullptr) {
// Make sure the 'current' list starts first
if (current == nullptr || current->start() > other->start()) {
@@ -1179,6 +1229,21 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) {
}
+void SpillRange::Print() const {
+ OFStream os(stdout);
+ os << "{" << std::endl;
+ for (TopLevelLiveRange* range : live_ranges()) {
+ os << range->vreg() << " ";
+ }
+ os << std::endl;
+
+ for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
+ os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
+ }
+ os << "}" << std::endl;
+}
+
+
RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
const InstructionBlock* block,
Zone* zone)
@@ -1198,7 +1263,7 @@ void RegisterAllocationData::PhiMapValue::AddOperand(
void RegisterAllocationData::PhiMapValue::CommitAssignment(
const InstructionOperand& assigned) {
- for (auto operand : incoming_operands_) {
+ for (InstructionOperand* operand : incoming_operands_) {
InstructionOperand::ReplaceWith(operand, &assigned);
}
}
@@ -1213,6 +1278,10 @@ RegisterAllocationData::RegisterAllocationData(
debug_name_(debug_name),
config_(config),
phi_map_(allocation_zone()),
+ allocatable_codes_(this->config()->num_general_registers(), -1,
+ allocation_zone()),
+ allocatable_double_codes_(this->config()->num_double_registers(), -1,
+ allocation_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
@@ -1225,7 +1294,8 @@ RegisterAllocationData::RegisterAllocationData(
delayed_references_(allocation_zone()),
assigned_registers_(nullptr),
assigned_double_registers_(nullptr),
- virtual_register_count_(code->VirtualRegisterCount()) {
+ virtual_register_count_(code->VirtualRegisterCount()),
+ preassigned_slot_ranges_(zone) {
DCHECK(this->config()->num_general_registers() <=
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK(this->config()->num_double_registers() <=
@@ -1233,7 +1303,7 @@ RegisterAllocationData::RegisterAllocationData(
assigned_registers_ = new (code_zone())
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
- BitVector(this->config()->num_aliased_double_registers(), code_zone());
+ BitVector(this->config()->num_double_registers(), code_zone());
this->frame()->SetAllocatedRegisters(assigned_registers_);
this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
}
@@ -1242,13 +1312,14 @@ RegisterAllocationData::RegisterAllocationData(
MoveOperands* RegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
- auto instr = code()->InstructionAt(index);
- auto moves = instr->GetOrCreateParallelMove(position, code_zone());
+ Instruction* instr = code()->InstructionAt(index);
+ ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
return moves->AddMove(from, to);
}
-MachineType RegisterAllocationData::MachineTypeFor(int virtual_register) {
+MachineRepresentation RegisterAllocationData::RepresentationFor(
+ int virtual_register) {
DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
return code()->GetRepresentation(virtual_register);
}
@@ -1258,9 +1329,9 @@ TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
- auto result = live_ranges()[index];
+ TopLevelLiveRange* result = live_ranges()[index];
if (result == nullptr) {
- result = NewLiveRange(index, MachineTypeFor(index));
+ result = NewLiveRange(index, RepresentationFor(index));
live_ranges()[index] = result;
}
return result;
@@ -1268,8 +1339,8 @@ TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
- int index, MachineType machine_type) {
- return new (allocation_zone()) TopLevelLiveRange(index, machine_type);
+ int index, MachineRepresentation rep) {
+ return new (allocation_zone()) TopLevelLiveRange(index, rep);
}
@@ -1283,16 +1354,16 @@ int RegisterAllocationData::GetNextLiveRangeId() {
TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
- MachineType machine_type) {
+ MachineRepresentation rep) {
int vreg = GetNextLiveRangeId();
- TopLevelLiveRange* ret = NewLiveRange(vreg, machine_type);
+ TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
return ret;
}
RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
const InstructionBlock* block, PhiInstruction* phi) {
- auto map_value = new (allocation_zone())
+ RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
auto res =
phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
@@ -1337,6 +1408,37 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
}
+// If a range is defined in a deferred block, we can expect the entire range
+// to cover only positions in deferred blocks. Otherwise, a block on the
+// hot path would be dominated by a deferred block, meaning it would be
+// unreachable without passing through the deferred block, which is a
+// contradiction. In particular, when such a range contributes a result back
+// on the hot path, it does so as one of the inputs of a phi. In that case,
+// the value is transferred via a move in the Instruction::END gap position
+// of the last instruction of a deferred block.
+bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
+ for (const TopLevelLiveRange* range : live_ranges()) {
+ if (range == nullptr || range->IsEmpty() ||
+ !code()
+ ->GetInstructionBlock(range->Start().ToInstructionIndex())
+ ->IsDeferred()) {
+ continue;
+ }
+ for (const UseInterval* i = range->first_interval(); i != nullptr;
+ i = i->next()) {
+ int first = i->FirstGapIndex();
+ int last = i->LastGapIndex();
+ for (int instr = first; instr <= last;) {
+ const InstructionBlock* block = code()->GetInstructionBlock(instr);
+ if (!block->IsDeferred()) return false;
+ instr = block->last_instruction_index() + 1;
+ }
+ }
+ }
+ return true;
+}
+
+
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
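
RangesDefinedInDeferredStayInDeferred scans each use interval block by block rather than instruction by instruction: as soon as a block is known to be deferred, the loop jumps past its last instruction. A standalone model of that loop, with a flat block table standing in for the instruction sequence:

#include <cstddef>
#include <vector>

struct Block {
  int last_instruction_index;  // blocks cover contiguous, increasing indices
  bool deferred;
};

// True iff every instruction in [first, last] lies in a deferred block.
// The inner advance costs one step per *block*, not per instruction.
bool AllDeferred(const std::vector<Block>& blocks, int first, int last) {
  std::size_t b = 0;
  for (int instr = first; instr <= last;) {
    while (blocks[b].last_instruction_index < instr) ++b;  // owning block
    if (!blocks[b].deferred) return false;
    instr = blocks[b].last_instruction_index + 1;  // skip the whole block
  }
  return true;
}
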
@@ -1361,7 +1463,7 @@ SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
DCHECK(!range->IsSplinter());
- auto spill_range =
+ SpillRange* spill_range =
new (allocation_zone()) SpillRange(range, allocation_zone());
return spill_range;
}
@@ -1384,74 +1486,6 @@ bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
}
-void RegisterAllocationData::Print(
- const InstructionSequence* instructionSequence) {
- OFStream os(stdout);
- PrintableInstructionSequence wrapper;
- wrapper.register_configuration_ = config();
- wrapper.sequence_ = instructionSequence;
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const Instruction* instruction) {
- OFStream os(stdout);
- PrintableInstruction wrapper;
- wrapper.instr_ = instruction;
- wrapper.register_configuration_ = config();
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const LiveRange* range, bool with_children) {
- OFStream os(stdout);
- PrintableLiveRange wrapper;
- wrapper.register_configuration_ = config();
- for (const LiveRange* i = range; i != nullptr; i = i->next()) {
- wrapper.range_ = i;
- os << wrapper << std::endl;
- if (!with_children) break;
- }
-}
-
-
-void RegisterAllocationData::Print(const InstructionOperand& op) {
- OFStream os(stdout);
- PrintableInstructionOperand wrapper;
- wrapper.register_configuration_ = config();
- wrapper.op_ = op;
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const MoveOperands* move) {
- OFStream os(stdout);
- PrintableInstructionOperand wrapper;
- wrapper.register_configuration_ = config();
- wrapper.op_ = move->destination();
- os << wrapper << " = ";
- wrapper.op_ = move->source();
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const SpillRange* spill_range) {
- OFStream os(stdout);
- os << "{" << std::endl;
- for (TopLevelLiveRange* range : spill_range->live_ranges()) {
- os << range->vreg() << " ";
- }
- os << std::endl;
-
- for (UseInterval* interval = spill_range->interval(); interval != nullptr;
- interval = interval->next()) {
- os << '[' << interval->start() << ", " << interval->end() << ')'
- << std::endl;
- }
- os << "}" << std::endl;
-}
-
-
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
@@ -1461,31 +1495,30 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
DCHECK(operand->HasFixedPolicy());
InstructionOperand allocated;
- MachineType machine_type = InstructionSequence::DefaultRepresentation();
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
int virtual_register = operand->virtual_register();
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
- machine_type = data()->MachineTypeFor(virtual_register);
+ rep = data()->RepresentationFor(virtual_register);
}
if (operand->HasFixedSlotPolicy()) {
- AllocatedOperand::AllocatedKind kind =
- IsFloatingPoint(machine_type) ? AllocatedOperand::DOUBLE_STACK_SLOT
- : AllocatedOperand::STACK_SLOT;
- allocated =
- AllocatedOperand(kind, machine_type, operand->fixed_slot_index());
+ allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
+ operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
- allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
+ DCHECK(!IsFloatingPoint(rep));
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
} else if (operand->HasFixedDoubleRegisterPolicy()) {
+ DCHECK(IsFloatingPoint(rep));
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
- allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER,
- machine_type, operand->fixed_register_index());
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
+ operand->fixed_register_index());
} else {
UNREACHABLE();
}
InstructionOperand::ReplaceWith(operand, &allocated);
if (is_tagged) {
TRACE("Fixed reg is tagged at %d\n", pos);
- auto instr = code()->InstructionAt(pos);
+ Instruction* instr = code()->InstructionAt(pos);
if (instr->HasReferenceMap()) {
instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
}
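
AllocateFixed now always builds a REGISTER-kind AllocatedOperand and lets the MachineRepresentation say which register file the index names, where the removed code picked between REGISTER and DOUBLE_REGISTER kinds. A simplified stand-in for that encoding (not V8's actual operand bitfields):

enum class Kind { kRegister, kStackSlot };
enum class Rep { kWord32, kTagged, kFloat64 };

// One location kind; the representation decides which register file the
// index refers to, mirroring GetRegister() vs. GetDoubleRegister().
struct Allocated {
  Kind kind;
  Rep rep;
  int index;

  bool IsRegister() const { return kind == Kind::kRegister; }
  bool IsDoubleRegister() const {
    return IsRegister() && rep == Rep::kFloat64;
  }
};
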
@@ -1495,7 +1528,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
void ConstraintBuilder::MeetRegisterConstraints() {
- for (auto block : code()->instruction_blocks()) {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
MeetRegisterConstraints(block);
}
}
@@ -1517,26 +1550,26 @@ void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block) {
int end = block->last_instruction_index();
- auto last_instruction = code()->InstructionAt(end);
+ Instruction* last_instruction = code()->InstructionAt(end);
for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
- auto output_operand = last_instruction->OutputAt(i);
+ InstructionOperand* output_operand = last_instruction->OutputAt(i);
DCHECK(!output_operand->IsConstant());
- auto output = UnallocatedOperand::cast(output_operand);
+ UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
int output_vreg = output->virtual_register();
- auto range = data()->GetOrCreateLiveRangeFor(output_vreg);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
bool assigned = false;
if (output->HasFixedPolicy()) {
AllocateFixed(output, -1, false);
       // This value is produced on the stack; we never need to spill it.
if (output->IsStackSlot()) {
- DCHECK(StackSlotOperand::cast(output)->index() <
+ DCHECK(LocationOperand::cast(output)->index() <
data()->frame()->GetSpillSlotCount());
- range->SetSpillOperand(StackSlotOperand::cast(output));
+ range->SetSpillOperand(LocationOperand::cast(output));
range->SetSpillStartIndex(end);
assigned = true;
}
- for (auto succ : block->successors()) {
+ for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
int gap_index = successor->first_instruction_index();
@@ -1548,11 +1581,11 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
}
if (!assigned) {
- for (auto succ : block->successors()) {
+ for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
int gap_index = successor->first_instruction_index();
- range->SpillAtDefinition(allocation_zone(), gap_index, output);
+ range->RecordSpillLocation(allocation_zone(), gap_index, output);
range->SetSpillStartIndex(gap_index);
}
}
@@ -1561,10 +1594,10 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
- auto first = code()->InstructionAt(instr_index);
+ Instruction* first = code()->InstructionAt(instr_index);
// Handle fixed temporaries.
for (size_t i = 0; i < first->TempCount(); i++) {
- auto temp = UnallocatedOperand::cast(first->TempAt(i));
+ UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
}
// Handle constant/fixed output operands.
@@ -1572,26 +1605,31 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
InstructionOperand* output = first->OutputAt(i);
if (output->IsConstant()) {
int output_vreg = ConstantOperand::cast(output)->virtual_register();
- auto range = data()->GetOrCreateLiveRangeFor(output_vreg);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
range->SetSpillStartIndex(instr_index + 1);
range->SetSpillOperand(output);
continue;
}
- auto first_output = UnallocatedOperand::cast(output);
- auto range =
+ UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+ TopLevelLiveRange* range =
data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
int output_vreg = first_output->virtual_register();
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
bool is_tagged = code()->IsReference(output_vreg);
+ if (first_output->HasSecondaryStorage()) {
+ range->MarkHasPreassignedSlot();
+ data()->preassigned_slot_ranges().push_back(
+ std::make_pair(range, first_output->GetSecondaryStorage()));
+ }
AllocateFixed(first_output, instr_index, is_tagged);
       // This value is produced on the stack; we never need to spill it.
if (first_output->IsStackSlot()) {
- DCHECK(StackSlotOperand::cast(first_output)->index() <
+ DCHECK(LocationOperand::cast(first_output)->index() <
data()->frame()->GetTotalFrameSlotCount());
- range->SetSpillOperand(StackSlotOperand::cast(first_output));
+ range->SetSpillOperand(LocationOperand::cast(first_output));
range->SetSpillStartIndex(instr_index + 1);
assigned = true;
}
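
The preassigned_slot_ranges() bookkeeping above pairs a range that carries secondary storage with its fixed slot index, so the assignment phase can commit the slot without re-deriving it. A sketch of the data shape, with stand-in types:

#include <utility>
#include <vector>

struct TopLevelRange {
  int vreg;
  bool has_preassigned_slot = false;
};

using PreassignedSlots = std::vector<std::pair<TopLevelRange*, int>>;

// Analogue of the MarkHasPreassignedSlot() + push_back pair above.
void RecordPreassignedSlot(PreassignedSlots* slots, TopLevelRange* range,
                           int slot_index) {
  range->has_preassigned_slot = true;
  slots->emplace_back(range, slot_index);
}
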
@@ -1601,8 +1639,8 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
// Make sure we add a gap move for spilling (if we have not done
// so already).
if (!assigned) {
- range->SpillAtDefinition(allocation_zone(), instr_index + 1,
- first_output);
+ range->RecordSpillLocation(allocation_zone(), instr_index + 1,
+ first_output);
range->SetSpillStartIndex(instr_index + 1);
}
}
@@ -1610,12 +1648,14 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
- auto second = code()->InstructionAt(instr_index);
+ Instruction* second = code()->InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
- auto input = second->InputAt(i);
- if (input->IsImmediate()) continue; // Ignore immediates.
- auto cur_input = UnallocatedOperand::cast(input);
+ InstructionOperand* input = second->InputAt(i);
+ if (input->IsImmediate() || input->IsExplicit()) {
+ continue; // Ignore immediates and explicitly reserved registers.
+ }
+ UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
@@ -1626,9 +1666,9 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
}
// Handle "output same as input" for second instruction.
for (size_t i = 0; i < second->OutputCount(); i++) {
- auto output = second->OutputAt(i);
+ InstructionOperand* output = second->OutputAt(i);
if (!output->IsUnallocated()) continue;
- auto second_output = UnallocatedOperand::cast(output);
+ UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (!second_output->HasSameAsInputPolicy()) continue;
DCHECK(i == 0); // Only valid for first output.
UnallocatedOperand* cur_input =
@@ -1637,8 +1677,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
cur_input->set_virtual_register(second_output->virtual_register());
- auto gap_move = data()->AddGapMove(instr_index, Instruction::END,
- input_copy, *cur_input);
+ MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
+ input_copy, *cur_input);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
RegisterAllocationData::DelayedReference delayed_reference = {
@@ -1667,25 +1707,26 @@ void ConstraintBuilder::ResolvePhis() {
void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
- for (auto phi : block->phis()) {
+ for (PhiInstruction* phi : block->phis()) {
int phi_vreg = phi->virtual_register();
- auto map_value = data()->InitializePhiMap(block, phi);
- auto& output = phi->output();
+ RegisterAllocationData::PhiMapValue* map_value =
+ data()->InitializePhiMap(block, phi);
+ InstructionOperand& output = phi->output();
// Map the destination operands, so the commitment phase can find them.
for (size_t i = 0; i < phi->operands().size(); ++i) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
- auto move = data()->AddGapMove(cur_block->last_instruction_index(),
- Instruction::END, input, output);
+ MoveOperands* move = data()->AddGapMove(
+ cur_block->last_instruction_index(), Instruction::END, input, output);
map_value->AddOperand(&move->destination());
DCHECK(!code()
->InstructionAt(cur_block->last_instruction_index())
->HasReferenceMap());
}
- auto live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
+ TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
int gap_index = block->first_instruction_index();
- live_range->SpillAtDefinition(allocation_zone(), gap_index, &output);
+ live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
live_range->SetSpillStartIndex(gap_index);
// We use the phi-ness of some nodes in some later heuristics.
live_range->set_is_phi(true);
@@ -1720,7 +1761,7 @@ BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
// All phi input operands corresponding to this successor edge are live
// out from this block.
- auto successor = code->InstructionBlockAt(succ);
+ const InstructionBlock* successor = code->InstructionBlockAt(succ);
size_t index = successor->PredecessorIndexOf(block->rpo_number());
DCHECK(index < successor->PredecessorCount());
for (PhiInstruction* phi : successor->phis()) {
@@ -1737,14 +1778,15 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
BitVector* live_out) {
// Add an interval that includes the entire block to the live range for
// each live_out value.
- auto start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- auto end = LifetimePosition::InstructionFromInstructionIndex(
- block->last_instruction_index()).NextStart();
+ LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
+ block->last_instruction_index())
+ .NextStart();
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
- auto range = data()->GetOrCreateLiveRangeFor(operand_index);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->AddUseInterval(start, end, allocation_zone());
iterator.Advance();
}
@@ -1758,7 +1800,7 @@ int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
- auto result = data()->fixed_live_ranges()[index];
+ TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedLiveRangeID(index),
InstructionSequence::DefaultRepresentation());
@@ -1772,10 +1814,11 @@ TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < config()->num_aliased_double_registers());
- auto result = data()->fixed_double_live_ranges()[index];
+ DCHECK(index < config()->num_double_registers());
+ TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
- result = data()->NewLiveRange(FixedDoubleLiveRangeID(index), kRepFloat64);
+ result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
+ MachineRepresentation::kFloat64);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(DOUBLE_REGISTERS, index);
@@ -1793,10 +1836,11 @@ TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
return data()->GetOrCreateLiveRangeFor(
ConstantOperand::cast(operand)->virtual_register());
} else if (operand->IsRegister()) {
- return FixedLiveRangeFor(RegisterOperand::cast(operand)->index());
+ return FixedLiveRangeFor(
+ LocationOperand::cast(operand)->GetRegister().code());
} else if (operand->IsDoubleRegister()) {
return FixedDoubleLiveRangeFor(
- DoubleRegisterOperand::cast(operand)->index());
+ LocationOperand::cast(operand)->GetDoubleRegister().code());
} else {
return nullptr;
}
@@ -1814,7 +1858,7 @@ UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type) {
- auto range = LiveRangeFor(operand);
+ TopLevelLiveRange* range = LiveRangeFor(operand);
if (range == nullptr) return nullptr;
if (range->IsEmpty() || range->Start() > position) {
@@ -1825,8 +1869,9 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
range->ShortenTo(position);
}
if (!operand->IsUnallocated()) return nullptr;
- auto unalloc_operand = UnallocatedOperand::cast(operand);
- auto use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ UsePosition* use_pos =
+ NewUsePosition(position, unalloc_operand, hint, hint_type);
range->AddUsePosition(use_pos);
return use_pos;
}
@@ -1836,7 +1881,7 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type) {
- auto range = LiveRangeFor(operand);
+ TopLevelLiveRange* range = LiveRangeFor(operand);
if (range == nullptr) return nullptr;
UsePosition* use_pos = nullptr;
if (operand->IsUnallocated()) {
@@ -1852,19 +1897,19 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
BitVector* live) {
int block_start = block->first_instruction_index();
- auto block_start_position =
+ LifetimePosition block_start_position =
LifetimePosition::GapFromInstructionIndex(block_start);
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
- auto curr_position =
+ LifetimePosition curr_position =
LifetimePosition::InstructionFromInstructionIndex(index);
- auto instr = code()->InstructionAt(index);
+ Instruction* instr = code()->InstructionAt(index);
DCHECK(instr != nullptr);
DCHECK(curr_position.IsInstructionPosition());
// Process output, inputs, and temps of this instruction.
for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
+ InstructionOperand* output = instr->OutputAt(i);
if (output->IsUnallocated()) {
// Unsupported.
DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
@@ -1874,7 +1919,10 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int out_vreg = ConstantOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
}
- if (block->IsHandler() && index == block_start) {
+ if (block->IsHandler() && index == block_start && output->IsAllocated() &&
+ output->IsRegister() &&
+ AllocatedOperand::cast(output)->GetRegister().is(
+ v8::internal::kReturnRegister0)) {
// The register defined here is blocked from gap start - it is the
// exception value.
// TODO(mtrofin): should we explore an explicit opcode for
@@ -1886,9 +1934,10 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersRegisters()) {
- for (int i = 0; i < config()->num_general_registers(); ++i) {
- if (!IsOutputRegisterOf(instr, i)) {
- auto range = FixedLiveRangeFor(i);
+ for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
+ int code = config()->GetAllocatableGeneralCode(i);
+ if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
+ TopLevelLiveRange* range = FixedLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
@@ -1896,9 +1945,11 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
- if (!IsOutputDoubleRegisterOf(instr, i)) {
- auto range = FixedDoubleLiveRangeFor(i);
+ for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
+ ++i) {
+ int code = config()->GetAllocatableDoubleCode(i);
+ if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
+ TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
@@ -1906,8 +1957,10 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
for (size_t i = 0; i < instr->InputCount(); i++) {
- auto input = instr->InputAt(i);
- if (input->IsImmediate()) continue; // Ignore immediates.
+ InstructionOperand* input = instr->InputAt(i);
+ if (input->IsImmediate() || input->IsExplicit()) {
+ continue; // Ignore immediates and explicitly reserved registers.
+ }
LifetimePosition use_pos;
if (input->IsUnallocated() &&
UnallocatedOperand::cast(input)->IsUsedAtStart()) {
@@ -1928,7 +1981,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
for (size_t i = 0; i < instr->TempCount(); i++) {
- auto temp = instr->TempAt(i);
+ InstructionOperand* temp = instr->TempAt(i);
// Unsupported.
DCHECK_IMPLIES(temp->IsUnallocated(),
!UnallocatedOperand::cast(temp)->HasSlotPolicy());
@@ -1950,24 +2003,25 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
Instruction::START};
curr_position = curr_position.PrevStart();
DCHECK(curr_position.IsGapPosition());
- for (auto position : kPositions) {
- auto move = instr->GetParallelMove(position);
+ for (const Instruction::GapPosition& position : kPositions) {
+ ParallelMove* move = instr->GetParallelMove(position);
if (move == nullptr) continue;
if (position == Instruction::END) {
curr_position = curr_position.End();
} else {
curr_position = curr_position.Start();
}
- for (auto cur : *move) {
- auto& from = cur->source();
- auto& to = cur->destination();
+ for (MoveOperands* cur : *move) {
+ InstructionOperand& from = cur->source();
+ InstructionOperand& to = cur->destination();
void* hint = &to;
UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
UsePosition* to_use = nullptr;
int phi_vreg = -1;
if (to.IsUnallocated()) {
int to_vreg = UnallocatedOperand::cast(to).virtual_register();
- auto to_range = data()->GetOrCreateLiveRangeFor(to_vreg);
+ TopLevelLiveRange* to_range =
+ data()->GetOrCreateLiveRangeFor(to_vreg);
if (to_range->is_phi()) {
phi_vreg = to_vreg;
if (to_range->is_non_loop_phi()) {
@@ -1991,7 +2045,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
} else {
Define(curr_position, &to);
}
- auto from_use =
+ UsePosition* from_use =
Use(block_start_position, curr_position, &from, hint, hint_type);
// Mark range live.
if (from.IsUnallocated()) {
@@ -2014,16 +2068,16 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
BitVector* live) {
- for (auto phi : block->phis()) {
+ for (PhiInstruction* phi : block->phis()) {
// The live range interval already ends at the first instruction of the
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
InstructionOperand* hint = nullptr;
- auto instr = GetLastInstruction(
+ Instruction* instr = GetLastInstruction(
code(), code()->InstructionBlockAt(block->predecessors()[0]));
- for (auto move : *instr->GetParallelMove(Instruction::END)) {
- auto& to = move->destination();
+ for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
+ InstructionOperand& to = move->destination();
if (to.IsUnallocated() &&
UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
hint = &move->source();
@@ -2031,10 +2085,10 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
}
}
DCHECK(hint != nullptr);
- auto block_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- auto use_pos = Define(block_start, &phi->output(), hint,
- UsePosition::HintTypeForOperand(*hint));
+ UsePosition* use_pos = Define(block_start, &phi->output(), hint,
+ UsePosition::HintTypeForOperand(*hint));
MapPhiHint(hint, use_pos);
}
}
@@ -2046,10 +2100,11 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
// Add a live range stretching from the first loop instruction to the last
// for each value live on entry to the header.
BitVector::Iterator iterator(live);
- auto start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- auto end = LifetimePosition::GapFromInstructionIndex(
- code()->LastLoopInstructionIndex(block)).NextFullStart();
+ LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
+ code()->LastLoopInstructionIndex(block))
+ .NextFullStart();
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
@@ -2068,8 +2123,9 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
- auto block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
- auto live = ComputeLiveOut(block, data());
+ InstructionBlock* block =
+ code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ BitVector* live = ComputeLiveOut(block, data());
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
AddInitialIntervals(block, live);
@@ -2084,7 +2140,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
live_in_sets()[block_id] = live;
}
// Postprocess the ranges.
- for (auto range : data()->live_ranges()) {
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
// Give slots to all ranges with a non fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
@@ -2095,7 +2151,8 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Without this hack, all uses with "any" policy would get the constant
// operand assigned.
if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
- for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = range->first_pos(); pos != nullptr;
+ pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot) continue;
UsePositionType new_type = UsePositionType::kAny;
// Can't mark phis as needing a register.
@@ -2106,6 +2163,14 @@ void LiveRangeBuilder::BuildLiveRanges() {
}
}
}
+ for (auto preassigned : data()->preassigned_slot_ranges()) {
+ TopLevelLiveRange* range = preassigned.first;
+ int slot_id = preassigned.second;
+ SpillRange* spill = range->HasSpillRange()
+ ? range->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range);
+ spill->set_assigned_slot(slot_id);
+ }
#ifdef DEBUG
Verify();
#endif
@@ -2134,8 +2199,8 @@ void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
}
- for (LiveRange* current : data()->live_ranges()) {
- if (current != nullptr) current->Verify();
+ for (TopLevelLiveRange* current : data()->live_ranges()) {
+ if (current != nullptr && !current->IsEmpty()) current->Verify();
}
}
@@ -2144,7 +2209,63 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
RegisterKind kind)
: data_(data),
mode_(kind),
- num_registers_(GetRegisterCount(data->config(), kind)) {}
+ num_registers_(GetRegisterCount(data->config(), kind)),
+ num_allocatable_registers_(
+ GetAllocatableRegisterCount(data->config(), kind)),
+ allocatable_register_codes_(
+ GetAllocatableRegisterCodes(data->config(), kind)) {}
+
+
+LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
+ const LiveRange* range, int instruction_index) {
+ LifetimePosition ret = LifetimePosition::Invalid();
+
+ ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+ if (range->Start() >= ret || ret >= range->End()) {
+ return LifetimePosition::Invalid();
+ }
+ return ret;
+}
+
+
+void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand(
+ bool operands_only) {
+ size_t initial_range_count = data()->live_ranges().size();
+ for (size_t i = 0; i < initial_range_count; ++i) {
+ TopLevelLiveRange* range = data()->live_ranges()[i];
+ if (!CanProcessRange(range)) continue;
+ if (range->HasNoSpillType() || (operands_only && range->HasSpillRange())) {
+ continue;
+ }
+
+ LifetimePosition start = range->Start();
+ TRACE("Live range %d:%d is defined by a spill operand.\n",
+ range->TopLevel()->vreg(), range->relative_id());
+ LifetimePosition next_pos = start;
+ if (next_pos.IsGapPosition()) {
+ next_pos = next_pos.NextStart();
+ }
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ // If the range already has a spill operand and it doesn't need a
+ // register immediately, split it and spill the first part of the range.
+ if (pos == nullptr) {
+ Spill(range);
+ } else if (pos->pos() > range->Start().NextStart()) {
+ // Do not spill the live range eagerly if the use position that can
+ // benefit from the register is too close to the start of the live range.
+ LifetimePosition split_pos = GetSplitPositionForInstruction(
+ range, pos->pos().ToInstructionIndex());
+ // There is no place to split, so we can't split and spill.
+ if (!split_pos.IsValid()) continue;
+
+ split_pos =
+ FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
+
+ SplitRangeAt(range, split_pos);
+ Spill(range);
+ }
+ }
+}
LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
@@ -2174,7 +2295,7 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
range->TopLevel()->vreg(), range->relative_id(), start.value(),
end.value());
- auto split_pos = FindOptimalSplitPos(start, end);
+ LifetimePosition split_pos = FindOptimalSplitPos(start, end);
DCHECK(split_pos >= start);
return SplitRangeAt(range, split_pos);
}
@@ -2189,8 +2310,8 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
// We have no choice
if (start_instr == end_instr) return end;
- auto start_block = GetInstructionBlock(code(), start);
- auto end_block = GetInstructionBlock(code(), end);
+ const InstructionBlock* start_block = GetInstructionBlock(code(), start);
+ const InstructionBlock* end_block = GetInstructionBlock(code(), end);
if (end_block == start_block) {
// The interval is split in the same basic block. Split at the latest
@@ -2198,7 +2319,7 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
return end;
}
- auto block = end_block;
+ const InstructionBlock* block = end_block;
// Find header of outermost loop.
// TODO(titzer): fix redundancy below.
while (GetContainingLoop(code(), block) != nullptr &&
@@ -2218,19 +2339,20 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* range, LifetimePosition pos) {
- auto block = GetInstructionBlock(code(), pos.Start());
- auto loop_header =
+ const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
+ const InstructionBlock* loop_header =
block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
if (loop_header == nullptr) return pos;
- auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+ const UsePosition* prev_use =
+ range->PreviousUsePositionRegisterIsBeneficial(pos);
while (loop_header != nullptr) {
// We are going to spill live range inside the loop.
// If possible try to move spilling position backwards to loop header.
// This will reduce number of memory moves on the back edge.
- auto loop_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
loop_header->first_instruction_index());
if (range->Covers(loop_start)) {
@@ -2267,11 +2389,11 @@ const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
}
-const char* RegisterAllocator::RegisterName(int allocation_index) const {
+const char* RegisterAllocator::RegisterName(int register_code) const {
if (mode() == GENERAL_REGISTERS) {
- return data()->config()->general_register_name(allocation_index);
+ return data()->config()->GetGeneralRegisterName(register_code);
} else {
- return data()->config()->double_register_name(allocation_index);
+ return data()->config()->GetDoubleRegisterName(register_code);
}
}
@@ -2298,17 +2420,23 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
- for (LiveRange* range : data()->live_ranges()) {
- if (range == nullptr) continue;
- if (range->kind() == mode()) {
- AddToUnhandledUnsorted(range);
+ SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
+ num_allocatable_registers());
+
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (!CanProcessRange(range)) continue;
+ for (LiveRange* to_add = range; to_add != nullptr;
+ to_add = to_add->next()) {
+ if (!to_add->spilled()) {
+ AddToUnhandledUnsorted(to_add);
+ }
}
}
SortUnhandled();
DCHECK(UnhandledIsSorted());
auto& fixed_ranges = GetFixedRegisters();
- for (auto current : fixed_ranges) {
+ for (TopLevelLiveRange* current : fixed_ranges) {
if (current != nullptr) {
DCHECK_EQ(mode(), current->kind());
AddToInactive(current);
@@ -2317,43 +2445,21 @@ void LinearScanAllocator::AllocateRegisters() {
while (!unhandled_live_ranges().empty()) {
DCHECK(UnhandledIsSorted());
- auto current = unhandled_live_ranges().back();
+ LiveRange* current = unhandled_live_ranges().back();
unhandled_live_ranges().pop_back();
DCHECK(UnhandledIsSorted());
- auto position = current->Start();
+ LifetimePosition position = current->Start();
#ifdef DEBUG
allocation_finger_ = position;
#endif
TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
current->relative_id(), position.value());
- if (current->IsTopLevel() && !current->TopLevel()->HasNoSpillType()) {
- TRACE("Live range %d:%d already has a spill operand\n",
- current->TopLevel()->vreg(), current->relative_id());
- auto next_pos = position;
- if (next_pos.IsGapPosition()) {
- next_pos = next_pos.NextStart();
- }
- auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
- // If the range already has a spill operand and it doesn't need a
- // register immediately, split it and spill the first part of the range.
- if (pos == nullptr) {
- Spill(current);
- continue;
- } else if (pos->pos() > current->Start().NextStart()) {
- // Do not spill live range eagerly if use position that can benefit from
- // the register is too close to the start of live range.
- SpillBetween(current, current->Start(), pos->pos());
- DCHECK(UnhandledIsSorted());
- continue;
- }
- }
-
if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
continue;
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- auto cur_active = active_live_ranges()[i];
+ LiveRange* cur_active = active_live_ranges()[i];
if (cur_active->End() <= position) {
ActiveToHandled(cur_active);
--i; // The live range was removed from the list of active live ranges.
@@ -2364,7 +2470,7 @@ void LinearScanAllocator::AllocateRegisters() {
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- auto cur_inactive = inactive_live_ranges()[i];
+ LiveRange* cur_inactive = inactive_live_ranges()[i];
if (cur_inactive->End() <= position) {
InactiveToHandled(cur_inactive);
--i; // Live range was removed from the list of inactive live ranges.
@@ -2416,7 +2522,7 @@ void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
DCHECK(allocation_finger_ <= range->Start());
for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
--i) {
- auto cur_range = unhandled_live_ranges().at(i);
+ LiveRange* cur_range = unhandled_live_ranges().at(i);
if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
TRACE("Add live range %d:%d to unhandled at %d\n",
range->TopLevel()->vreg(), range->relative_id(), i + 1);
@@ -2462,8 +2568,8 @@ void LinearScanAllocator::SortUnhandled() {
bool LinearScanAllocator::UnhandledIsSorted() {
size_t len = unhandled_live_ranges().size();
for (size_t i = 1; i < len; i++) {
- auto a = unhandled_live_ranges().at(i - 1);
- auto b = unhandled_live_ranges().at(i);
+ LiveRange* a = unhandled_live_ranges().at(i - 1);
+ LiveRange* b = unhandled_live_ranges().at(i);
if (a->Start() < b->Start()) return false;
}
return true;
@@ -2507,17 +2613,23 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
- for (auto cur_active : active_live_ranges()) {
+ for (LiveRange* cur_active : active_live_ranges()) {
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::GapFromInstructionIndex(0);
+ TRACE("Register %s is free until pos %d (1)\n",
+ RegisterName(cur_active->assigned_register()),
+ LifetimePosition::GapFromInstructionIndex(0).value());
}
- for (auto cur_inactive : inactive_live_ranges()) {
+ for (LiveRange* cur_inactive : inactive_live_ranges()) {
DCHECK(cur_inactive->End() > current->Start());
- auto next_intersection = cur_inactive->FirstIntersection(current);
+ LifetimePosition next_intersection =
+ cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ Min(free_until_pos[cur_reg], next_intersection).value());
}
int hint_register;
@@ -2539,14 +2651,15 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
}
// Find the register which stays free for the longest time.
- int reg = 0;
- for (int i = 1; i < num_registers(); ++i) {
- if (free_until_pos[i] > free_until_pos[reg]) {
- reg = i;
+ int reg = allocatable_register_code(0);
+ for (int i = 1; i < num_allocatable_registers(); ++i) {
+ int code = allocatable_register_code(i);
+ if (free_until_pos[code] > free_until_pos[reg]) {
+ reg = code;
}
}
- auto pos = free_until_pos[reg];
+ LifetimePosition pos = free_until_pos[reg];
if (pos <= current->Start()) {
// All registers are blocked.
@@ -2556,7 +2669,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
if (pos < current->End()) {
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
- auto tail = SplitRangeAt(current, pos);
+ LiveRange* tail = SplitRangeAt(current, pos);
AddToUnhandledSorted(tail);
}
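
As context for the hunk above: the patched TryAllocateFreeReg no longer scans a dense 0..num_registers() index space; it walks the configuration's explicit list of allocatable register codes while free_until_pos stays indexed by code, so reserved registers are never candidates. Below is a minimal, self-contained C++ sketch of that selection shape only; it is an illustration, and every name in it (PickRegisterFreeLongest and its parameters) is invented here rather than taken from V8.

    #include <cstddef>
    #include <vector>

    // Pick the register code whose free_until position is furthest away,
    // considering only the allocatable codes -- mirroring the shape of the
    // patched loop (allocatable_register_code(0), then codes 1..n-1).
    int PickRegisterFreeLongest(const std::vector<int>& allocatable_codes,
                                const std::vector<int>& free_until_pos) {
      int reg = allocatable_codes[0];
      for (std::size_t i = 1; i < allocatable_codes.size(); ++i) {
        int code = allocatable_codes[i];
        // free_until_pos is indexed by register code, not by list position.
        if (free_until_pos[code] > free_until_pos[reg]) reg = code;
      }
      return reg;  // a register code, not a dense allocation index
    }
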
@@ -2572,7 +2685,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
- auto register_use = current->NextRegisterPosition(current->Start());
+ UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
// We can just spill it.
@@ -2587,14 +2700,14 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
- for (auto range : active_live_ranges()) {
+ for (LiveRange* range : active_live_ranges()) {
int cur_reg = range->assigned_register();
if (range->TopLevel()->IsFixed() ||
!range->CanBeSpilled(current->Start())) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
- auto next_use =
+ UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[cur_reg] = range->End();
@@ -2604,9 +2717,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- for (auto range : inactive_live_ranges()) {
+ for (LiveRange* range : inactive_live_ranges()) {
DCHECK(range->End() > current->Start());
- auto next_intersection = range->FirstIntersection(current);
+ LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
if (range->TopLevel()->IsFixed()) {
@@ -2617,14 +2730,15 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- int reg = 0;
- for (int i = 1; i < num_registers(); ++i) {
- if (use_pos[i] > use_pos[reg]) {
- reg = i;
+ int reg = allocatable_register_code(0);
+ for (int i = 1; i < num_allocatable_registers(); ++i) {
+ int code = allocatable_register_code(i);
+ if (use_pos[code] > use_pos[reg]) {
+ reg = code;
}
}
- auto pos = use_pos[reg];
+ LifetimePosition pos = use_pos[reg];
if (pos < register_use->pos()) {
// All registers are blocked before the first use that requires a register.
@@ -2657,12 +2771,12 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
- auto split_pos = current->Start();
+ LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- auto range = active_live_ranges()[i];
+ LiveRange* range = active_live_ranges()[i];
if (range->assigned_register() == reg) {
- auto next_pos = range->NextRegisterPosition(current->Start());
- auto spill_pos = FindOptimalSpillingPos(range, split_pos);
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
if (next_pos == nullptr) {
SpillAfter(range, spill_pos);
} else {
@@ -2682,7 +2796,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- auto range = inactive_live_ranges()[i];
+ LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
LifetimePosition next_intersection = range->FirstIntersection(current);
@@ -2706,9 +2820,10 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
if (!range->is_phi()) return false;
DCHECK(!range->HasSpillOperand());
- auto phi_map_value = data()->GetPhiMapValueFor(range);
- auto phi = phi_map_value->phi();
- auto block = phi_map_value->block();
+ RegisterAllocationData::PhiMapValue* phi_map_value =
+ data()->GetPhiMapValueFor(range);
+ const PhiInstruction* phi = phi_map_value->phi();
+ const InstructionBlock* block = phi_map_value->block();
// Count the number of spilled operands.
size_t spilled_count = 0;
LiveRange* first_op = nullptr;
@@ -2716,9 +2831,11 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
int op = phi->operands()[i];
LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
if (!op_range->TopLevel()->HasSpillRange()) continue;
- auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
+ const InstructionBlock* pred =
+ code()->InstructionBlockAt(block->predecessors()[i]);
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
while (op_range != nullptr && !op_range->CanCover(pred_end)) {
op_range = op_range->next();
}
@@ -2738,13 +2855,13 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
// Try to merge the spilled operands and count the number of merged spilled
// operands.
DCHECK(first_op != nullptr);
- auto first_op_spill = first_op->TopLevel()->GetSpillRange();
+ SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
size_t num_merged = 1;
for (size_t i = 1; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
- auto op_range = data()->GetOrCreateLiveRangeFor(op);
+ TopLevelLiveRange* op_range = data()->live_ranges()[op];
if (!op_range->HasSpillRange()) continue;
- auto op_spill = op_range->GetSpillRange();
+ SpillRange* op_spill = op_range->GetSpillRange();
if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
num_merged++;
}
@@ -2760,11 +2877,11 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
// If the range does not need register soon, spill it to the merged
// spill range.
- auto next_pos = range->Start();
+ LifetimePosition next_pos = range->Start();
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- auto spill_range =
+ SpillRange* spill_range =
range->TopLevel()->HasSpillRange()
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
@@ -2773,7 +2890,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
Spill(range);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
- auto spill_range =
+ SpillRange* spill_range =
range->TopLevel()->HasSpillRange()
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
@@ -2788,7 +2905,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- auto second_part = SplitRangeAt(range, pos);
+ LiveRange* second_part = SplitRangeAt(range, pos);
Spill(second_part);
}
@@ -2804,17 +2921,17 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition until,
LifetimePosition end) {
CHECK(start < end);
- auto second_part = SplitRangeAt(range, start);
+ LiveRange* second_part = SplitRangeAt(range, start);
if (second_part->Start() < end) {
// The split result intersects with [start, end[.
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
- auto third_part_end = end.PrevStart().End();
+ LifetimePosition third_part_end = end.PrevStart().End();
if (data()->IsBlockBoundary(end.Start())) {
third_part_end = end.Start();
}
- auto third_part = SplitBetween(
+ LiveRange* third_part = SplitBetween(
second_part, Max(second_part->Start().End(), until), third_part_end);
DCHECK(third_part != second_part);
@@ -2834,15 +2951,25 @@ SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
void SpillSlotLocator::LocateSpillSlots() {
- auto code = data()->code();
+ const InstructionSequence* code = data()->code();
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
if (!range->HasSpillRange()) continue;
- auto spills = range->spills_at_definition();
- DCHECK_NOT_NULL(spills);
- for (; spills != nullptr; spills = spills->next) {
- code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
+ if (range->IsSpilledOnlyInDeferredBlocks()) {
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ if (child->spilled()) {
+ code->GetInstructionBlock(child->Start().ToInstructionIndex())
+ ->mark_needs_frame();
+ }
+ }
+ } else {
+ TopLevelLiveRange::SpillMoveInsertionList* spills =
+ range->spill_move_insertion_locations();
+ DCHECK_NOT_NULL(spills);
+ for (; spills != nullptr; spills = spills->next) {
+ code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
+ }
}
}
}
@@ -2869,9 +2996,11 @@ void OperandAssigner::AssignSpillSlots() {
for (SpillRange* range : spill_ranges) {
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
- int byte_width = range->ByteWidth();
- int index = data()->frame()->AllocateSpillSlot(byte_width);
- range->set_assigned_slot(index);
+ if (!range->HasSlot()) {
+ int byte_width = range->ByteWidth();
+ int index = data()->frame()->AllocateSpillSlot(byte_width);
+ range->set_assigned_slot(index);
+ }
}
}
@@ -2891,7 +3020,7 @@ void OperandAssigner::CommitAssignment() {
}
for (LiveRange* range = top_range; range != nullptr;
range = range->next()) {
- auto assigned = range->GetAssignedOperand();
+ InstructionOperand assigned = range->GetAssignedOperand();
range->ConvertUsesToOperand(assigned, spill_operand);
}
@@ -2911,7 +3040,7 @@ void OperandAssigner::CommitAssignment() {
spill_operand)) {
// Spill at definition if the range isn't spilled only in deferred
// blocks.
- top_range->CommitSpillsAtDefinition(
+ top_range->CommitSpillMoves(
data()->code(), spill_operand,
top_range->has_slot_use() || top_range->spilled());
}
@@ -2926,7 +3055,7 @@ ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
bool ReferenceMapPopulator::SafePointsAreInOrder() const {
int safe_point = 0;
- for (auto map : *data()->code()->reference_maps()) {
+ for (ReferenceMap* map : *data()->code()->reference_maps()) {
if (safe_point > map->instruction_position()) return false;
safe_point = map->instruction_position();
}
@@ -2937,14 +3066,15 @@ bool ReferenceMapPopulator::SafePointsAreInOrder() const {
void ReferenceMapPopulator::PopulateReferenceMaps() {
DCHECK(SafePointsAreInOrder());
// Map all delayed references.
- for (auto& delayed_reference : data()->delayed_references()) {
+ for (RegisterAllocationData::DelayedReference& delayed_reference :
+ data()->delayed_references()) {
delayed_reference.map->RecordReference(
AllocatedOperand::cast(*delayed_reference.operand));
}
// Iterate over all safe point positions and record a pointer
// for all spilled live ranges at this point.
int last_range_start = 0;
- auto reference_maps = data()->code()->reference_maps();
+ const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
@@ -2952,12 +3082,13 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
if (!data()->IsReference(range)) continue;
// Skip empty live ranges.
if (range->IsEmpty()) continue;
+ if (range->has_preassigned_slot()) continue;
// Find the extent of the range and its children.
int start = range->Start().ToInstructionIndex();
int end = 0;
for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
- auto this_end = cur->End();
+ LifetimePosition this_end = cur->End();
if (this_end.ToInstructionIndex() > end)
end = this_end.ToInstructionIndex();
DCHECK(cur->Start().ToInstructionIndex() >= start);
@@ -2971,7 +3102,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Step across all the safe points that are before the start of this range,
// recording how far we step in order to save doing this for the next range.
for (; first_it != reference_maps->end(); ++first_it) {
- auto map = *first_it;
+ ReferenceMap* map = *first_it;
if (map->instruction_position() >= start) break;
}
@@ -2985,13 +3116,14 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
spill_operand = range->GetSpillRangeOperand();
}
DCHECK(spill_operand.IsStackSlot());
- DCHECK_EQ(kRepTagged,
- AllocatedOperand::cast(spill_operand).machine_type());
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ AllocatedOperand::cast(spill_operand).representation());
}
+ LiveRange* cur = range;
// Step through the safe points to see whether they are in the range.
for (auto it = first_it; it != reference_maps->end(); ++it) {
- auto map = *it;
+ ReferenceMap* map = *it;
int safe_point = map->instruction_position();
// The safe points are sorted so we can stop searching here.
@@ -2999,13 +3131,33 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Advance to the next active range that covers the current
// safe point position.
- auto safe_point_pos =
+ LifetimePosition safe_point_pos =
LifetimePosition::InstructionFromInstructionIndex(safe_point);
- LiveRange* cur = range;
- while (cur != nullptr && !cur->Covers(safe_point_pos)) {
- cur = cur->next();
+
+ // Search for the child range (cur) that covers safe_point_pos. If we
+ // don't find it before the children pass safe_point_pos, keep cur at
+ // the last child, because the next safe_point_pos may be covered by cur.
+ // This may happen if cur has more than one interval, and the current
+ // safe_point_pos is in between intervals.
+ // For that reason, cur may be at most the last child.
+ DCHECK_NOT_NULL(cur);
+ DCHECK(safe_point_pos >= cur->Start() || range == cur);
+ bool found = false;
+ while (!found) {
+ if (cur->Covers(safe_point_pos)) {
+ found = true;
+ } else {
+ LiveRange* next = cur->next();
+ if (next == nullptr || next->Start() > safe_point_pos) {
+ break;
+ }
+ cur = next;
+ }
+ }
+
+ if (!found) {
+ continue;
}
- if (cur == nullptr) continue;
// Check if the live range is spilled and the safe point is after
// the spill position.
@@ -3025,9 +3177,10 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
"at safe point %d\n",
range->vreg(), cur->relative_id(), cur->Start().value(),
safe_point);
- auto operand = cur->GetAssignedOperand();
+ InstructionOperand operand = cur->GetAssignedOperand();
DCHECK(!operand.IsStackSlot());
- DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ AllocatedOperand::cast(operand).representation());
map->RecordReference(AllocatedOperand::cast(operand));
}
}
@@ -3039,8 +3192,8 @@ namespace {
class LiveRangeBound {
public:
- explicit LiveRangeBound(const LiveRange* range)
- : range_(range), start_(range->Start()), end_(range->End()) {
+ explicit LiveRangeBound(const LiveRange* range, bool skip)
+ : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
@@ -3051,6 +3204,7 @@ class LiveRangeBound {
const LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
+ const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
@@ -3069,14 +3223,17 @@ class LiveRangeBoundArray {
bool ShouldInitialize() { return start_ == nullptr; }
- void Initialize(Zone* zone, const LiveRange* const range) {
- size_t length = 0;
- for (auto i = range; i != nullptr; i = i->next()) length++;
- start_ = zone->NewArray<LiveRangeBound>(length);
- length_ = length;
- auto curr = start_;
- for (auto i = range; i != nullptr; i = i->next(), ++curr) {
- new (curr) LiveRangeBound(i);
+ void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
+ length_ = range->GetChildCount();
+
+ start_ = zone->NewArray<LiveRangeBound>(length_);
+ LiveRangeBound* curr = start_;
+ // Normally, spilled ranges do not need connecting moves, because the spill
+ // location has been assigned at definition. For ranges spilled in deferred
+ // blocks, that is not the case, so we need to connect the spilled children.
+ bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
+ for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+ new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
}
}
@@ -3086,7 +3243,7 @@ class LiveRangeBoundArray {
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
- auto bound = &start_[current_index];
+ LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
@@ -3098,32 +3255,41 @@ class LiveRangeBoundArray {
}
LiveRangeBound* FindPred(const InstructionBlock* pred) {
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
- auto succ_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
}
- void Find(const InstructionBlock* block, const InstructionBlock* pred,
- FindResult* result) const {
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- auto bound = Find(pred_end);
+ bool FindConnectableSubranges(const InstructionBlock* block,
+ const InstructionBlock* pred,
+ FindResult* result) const {
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
- auto cur_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- // Common case.
+
if (bound->CanCover(cur_start)) {
- result->cur_cover_ = bound->range_;
- return;
+ // Both blocks are covered by the same range, so there is nothing to
+ // connect.
+ return false;
}
- result->cur_cover_ = Find(cur_start)->range_;
+ bound = Find(cur_start);
+ if (bound->skip_) {
+ return false;
+ }
+ result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+ return (result->cur_cover_ != result->pred_cover_);
}
private:
@@ -3148,9 +3314,9 @@ class LiveRangeFinder {
LiveRangeBoundArray* ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
- auto range = data_->live_ranges()[operand_index];
+ TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
- auto array = &bounds_[operand_index];
+ LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
@@ -3201,25 +3367,28 @@ bool LiveRangeConnector::CanEagerlyResolveControlFlow(
void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
// Lazily linearize live ranges in memory for fast lookup.
LiveRangeFinder finder(data(), local_zone);
- auto& live_in_sets = data()->live_in_sets();
- for (auto block : code()->instruction_blocks()) {
+ ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
+ for (const InstructionBlock* block : code()->instruction_blocks()) {
if (CanEagerlyResolveControlFlow(block)) continue;
- auto live = live_in_sets[block->rpo_number().ToInt()];
+ BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
- auto* array = finder.ArrayFor(iterator.Current());
- for (auto pred : block->predecessors()) {
+ LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+ for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
- const auto* pred_block = code()->InstructionBlockAt(pred);
- array->Find(block, pred_block, &result);
- if (result.cur_cover_ == result.pred_cover_ ||
- (!result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
- result.cur_cover_->spilled()))
+ const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+ if (!array->FindConnectableSubranges(block, pred_block, &result)) {
continue;
- auto pred_op = result.pred_cover_->GetAssignedOperand();
- auto cur_op = result.cur_cover_->GetAssignedOperand();
+ }
+ InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
+ InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
if (pred_op.Equals(cur_op)) continue;
- ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ USE(move_loc);
+ DCHECK_IMPLIES(
+ result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+ !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
+ code()->GetInstructionBlock(move_loc)->IsDeferred());
}
iterator.Advance();
}
@@ -3227,10 +3396,10 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
}
-void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
- const InstructionOperand& cur_op,
- const InstructionBlock* pred,
- const InstructionOperand& pred_op) {
+int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
+ const InstructionBlock* pred,
+ const InstructionOperand& pred_op) {
DCHECK(!pred_op.Equals(cur_op));
int gap_index;
Instruction::GapPosition position;
@@ -3246,6 +3415,7 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
position = Instruction::END;
}
data()->AddGapMove(gap_index, position, pred_op, cur_op);
+ return gap_index;
}
@@ -3257,7 +3427,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
LiveRange* first_range = top_range;
for (LiveRange *second_range = first_range->next(); second_range != nullptr;
first_range = second_range, second_range = second_range->next()) {
- auto pos = second_range->Start();
+ LifetimePosition pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
// boundary.
if (!connect_spilled && second_range->spilled()) continue;
@@ -3266,8 +3436,8 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
continue;
}
- auto prev_operand = first_range->GetAssignedOperand();
- auto cur_operand = second_range->GetAssignedOperand();
+ InstructionOperand prev_operand = first_range->GetAssignedOperand();
+ InstructionOperand cur_operand = second_range->GetAssignedOperand();
if (prev_operand.Equals(cur_operand)) continue;
bool delay_insertion = false;
Instruction::GapPosition gap_pos;
@@ -3282,8 +3452,16 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
gap_pos = delay_insertion ? Instruction::END : Instruction::START;
}
- auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
- gap_pos, code_zone());
+ // Fills or spills for ranges spilled in deferred blocks must happen
+ // only in deferred blocks.
+ DCHECK_IMPLIES(
+ connect_spilled &&
+ !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
+ code()->GetInstructionBlock(gap_index)->IsDeferred());
+
+ ParallelMove* move =
+ code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
+ gap_pos, code_zone());
if (!delay_insertion) {
move->AddMove(prev_operand, cur_operand);
} else {
@@ -3298,15 +3476,15 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
ZoneVector<MoveOperands*> to_eliminate(local_zone);
to_insert.reserve(4);
to_eliminate.reserve(4);
- auto moves = delayed_insertion_map.begin()->first.first;
+ ParallelMove* moves = delayed_insertion_map.begin()->first.first;
for (auto it = delayed_insertion_map.begin();; ++it) {
bool done = it == delayed_insertion_map.end();
if (done || it->first.first != moves) {
// Commit the MoveOperands for current ParallelMove.
- for (auto move : to_eliminate) {
+ for (MoveOperands* move : to_eliminate) {
move->Eliminate();
}
- for (auto move : to_insert) {
+ for (MoveOperands* move : to_insert) {
moves->push_back(move);
}
if (done) break;
@@ -3316,8 +3494,9 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
moves = it->first.first;
}
// Gather all MoveOperands for a single ParallelMove.
- auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
- auto eliminate = moves->PrepareInsertAfter(move);
+ MoveOperands* move =
+ new (code_zone()) MoveOperands(it->first.second, it->second);
+ MoveOperands* eliminate = moves->PrepareInsertAfter(move);
to_insert.push_back(move);
if (eliminate != nullptr) to_eliminate.push_back(eliminate);
}
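
Before the header diff: the LiveRangeBoundArray hunks above depend on the bounds array being a sorted sequence of disjoint [start, end) intervals, which is what lets Find binary-search for the child range covering a given position (and lets FindConnectableSubranges bail out early on skipped, spilled children). The following is a minimal C++ sketch of that search under the sorted-disjoint assumption; the Bound struct and function names are simplified stand-ins, not the real classes, and unlike the real Find it returns nullptr instead of asserting when nothing covers the position.

    #include <cstddef>

    struct Bound {
      int start;  // inclusive
      int end;    // exclusive
    };

    // Binary search over sorted, disjoint [start, end) intervals for the one
    // covering `position`, narrowing [left, right) as in the patched Find.
    const Bound* FindCovering(const Bound* bounds, std::size_t length,
                              int position) {
      std::size_t left = 0, right = length;
      while (left < right) {
        std::size_t mid = left + (right - left) / 2;
        const Bound& b = bounds[mid];
        if (b.start <= position) {
          if (position < b.end) return &b;  // covering interval found
          left = mid + 1;                   // position lies to the right
        } else {
          right = mid;                      // position lies to the left
        }
      }
      return nullptr;  // no interval covers position
    }
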
diff --git a/chromium/v8/src/compiler/register-allocator.h b/chromium/v8/src/compiler/register-allocator.h
index 117ddedbcd2..b96a43ccec4 100644
--- a/chromium/v8/src/compiler/register-allocator.h
+++ b/chromium/v8/src/compiler/register-allocator.h
@@ -7,6 +7,7 @@
#include "src/compiler/instruction.h"
#include "src/ostreams.h"
+#include "src/register-configuration.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -57,6 +58,8 @@ class LifetimePosition final {
// Returns true if this lifetime position corresponds to a START value
bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
+ // Returns true if this lifetime position corresponds to an END value
+ bool IsEnd() const { return (value_ & (kHalfStep - 1)) == 1; }
// Returns true if this lifetime position corresponds to a gap START value
bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }
@@ -132,6 +135,8 @@ class LifetimePosition final {
return this->value_ >= that.value_;
}
+ void Print() const;
+
static inline LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition MaxPosition() {
@@ -191,6 +196,24 @@ class UseInterval final : public ZoneObject {
return start_ <= point && point < end_;
}
+ // Returns the index of the first gap covered by this interval.
+ int FirstGapIndex() const {
+ int ret = start_.ToInstructionIndex();
+ if (start_.IsInstructionPosition()) {
+ ++ret;
+ }
+ return ret;
+ }
+
+ // Returns the index of the last gap covered by this interval.
+ int LastGapIndex() const {
+ int ret = end_.ToInstructionIndex();
+ if (end_.IsGapPosition() && end_.IsStart()) {
+ --ret;
+ }
+ return ret;
+ }
+
private:
LifetimePosition start_;
LifetimePosition end_;
@@ -241,15 +264,15 @@ class UsePosition final : public ZoneObject {
void set_next(UsePosition* next) { next_ = next; }
// For hinting only.
- void set_assigned_register(int register_index) {
- flags_ = AssignedRegisterField::update(flags_, register_index);
+ void set_assigned_register(int register_code) {
+ flags_ = AssignedRegisterField::update(flags_, register_code);
}
UsePositionHintType hint_type() const {
return HintTypeField::decode(flags_);
}
bool HasHint() const;
- bool HintRegister(int* register_index) const;
+ bool HintRegister(int* register_code) const;
void ResolveHint(UsePosition* use_pos);
bool IsResolved() const {
return hint_type() != UsePositionHintType::kUnresolved;
@@ -296,7 +319,9 @@ class LiveRange : public ZoneObject {
InstructionOperand GetAssignedOperand() const;
- MachineType machine_type() const { return MachineTypeField::decode(bits_); }
+ MachineRepresentation representation() const {
+ return RepresentationField::decode(bits_);
+ }
int assigned_register() const { return AssignedRegisterField::decode(bits_); }
bool HasRegisterAssigned() const {
@@ -341,7 +366,8 @@ class LiveRange : public ZoneObject {
// live range to the result live range.
// The current range will terminate at position, while result will start from
// position.
- void DetachAt(LifetimePosition position, LiveRange* result, Zone* zone);
+ UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone);
// Detaches at position, and then links the resulting ranges. Returns the
// child, which starts at position.
@@ -374,7 +400,10 @@ class LiveRange : public ZoneObject {
bool Covers(LifetimePosition position) const;
LifetimePosition FirstIntersection(LiveRange* other) const;
- void Verify() const;
+ void VerifyChildStructure() const {
+ VerifyIntervals();
+ VerifyPositions();
+ }
void ConvertUsesToOperand(const InstructionOperand& op,
const InstructionOperand& spill_op);
@@ -387,6 +416,8 @@ class LiveRange : public ZoneObject {
void set_weight(float weight) { weight_ = weight; }
LiveRangeGroup* group() const { return group_; }
void set_group(LiveRangeGroup* group) { group_ = group; }
+ void Print(const RegisterConfiguration* config, bool with_children) const;
+ void Print(bool with_children) const;
static const int kInvalidSize = -1;
static const float kInvalidWeight;
@@ -394,10 +425,9 @@ class LiveRange : public ZoneObject {
private:
friend class TopLevelLiveRange;
- explicit LiveRange(int relative_id, MachineType machine_type,
+ explicit LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level);
- void AppendAsChild(TopLevelLiveRange* other);
void UpdateParentForAllChildren(TopLevelLiveRange* new_top_level);
void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
@@ -406,9 +436,12 @@ class LiveRange : public ZoneObject {
void AdvanceLastProcessedMarker(UseInterval* to_start_of,
LifetimePosition but_not_past) const;
+ void VerifyPositions() const;
+ void VerifyIntervals() const;
+
typedef BitField<bool, 0, 1> SpilledField;
typedef BitField<int32_t, 6, 6> AssignedRegisterField;
- typedef BitField<MachineType, 12, 15> MachineTypeField;
+ typedef BitField<MachineRepresentation, 12, 8> RepresentationField;
// Unique among children and splinters of the same virtual register.
int relative_id_;
@@ -424,7 +457,8 @@ class LiveRange : public ZoneObject {
mutable UsePosition* last_processed_use_;
// This is used as a cache, it's invalid outside of BuildLiveRanges.
mutable UsePosition* current_hint_position_;
-
+ // Cache the last position splintering stopped at.
+ mutable UsePosition* splitting_pointer_;
// greedy: the number of LifetimePositions covered by this range. Used to
// prioritize selecting live ranges for register assignment, as well as
// in weight calculations.
@@ -460,7 +494,7 @@ class LiveRangeGroup final : public ZoneObject {
class TopLevelLiveRange final : public LiveRange {
public:
- explicit TopLevelLiveRange(int vreg, MachineType machine_type);
+ explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
int spill_start_index() const { return spill_start_index_; }
bool IsFixed() const { return vreg_ < 0; }
@@ -490,8 +524,7 @@ class TopLevelLiveRange final : public LiveRange {
// result.
// The current range is pointed to as "splintered_from". No parent/child
// relationship is established between this and result.
- void Splinter(LifetimePosition start, LifetimePosition end,
- TopLevelLiveRange* result, Zone* zone);
+ void Splinter(LifetimePosition start, LifetimePosition end, Zone* zone);
// Assuming other was splintered from this range, embeds other and its
// children as part of the children sequence of this range.
@@ -528,17 +561,16 @@ class TopLevelLiveRange final : public LiveRange {
AllocatedOperand GetSpillRangeOperand() const;
- void SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand);
+ void RecordSpillLocation(Zone* zone, int gap_index,
+ InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
}
- void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
- void CommitSpillsAtDefinition(InstructionSequence* sequence,
- const InstructionOperand& operand,
- bool might_be_duplicated);
+ void CommitSpillMoves(InstructionSequence* sequence,
+ const InstructionOperand& operand,
+ bool might_be_duplicated);
// If all the children of this range are spilled in deferred blocks, and if
// for any non-spilled child with a use position requiring a slot, that range
@@ -547,6 +579,12 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
+ void MarkSpilledInDeferredBlock() {
+ spill_start_index_ = -1;
+ spilled_in_deferred_blocks_ = true;
+ spill_move_insertion_locations_ = nullptr;
+ }
+
bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
const InstructionOperand& spill_operand);
@@ -559,24 +597,46 @@ class TopLevelLiveRange final : public LiveRange {
void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
int vreg() const { return vreg_; }
+#if DEBUG
+ int debug_virt_reg() const;
+#endif
+
+ void Verify() const;
+ void VerifyChildrenInOrder() const;
+
int GetNextChildId() {
return IsSplinter() ? splintered_from()->GetNextChildId()
: ++last_child_id_;
}
+ int GetChildCount() const { return last_child_id_ + 1; }
+
bool IsSpilledOnlyInDeferredBlocks() const {
return spilled_in_deferred_blocks_;
}
- struct SpillAtDefinitionList;
+ struct SpillMoveInsertionList;
- SpillAtDefinitionList* spills_at_definition() const {
- return spills_at_definition_;
+ SpillMoveInsertionList* spill_move_insertion_locations() const {
+ return spill_move_insertion_locations_;
}
- void set_last_child(LiveRange* range) { last_child_ = range; }
- LiveRange* last_child() const { return last_child_; }
+ TopLevelLiveRange* splinter() const { return splinter_; }
+ void SetSplinter(TopLevelLiveRange* splinter) {
+ DCHECK_NULL(splinter_);
+ DCHECK_NOT_NULL(splinter);
+
+ splinter_ = splinter;
+ splinter->relative_id_ = GetNextChildId();
+ splinter->set_spill_type(spill_type());
+ splinter->SetSplinteredFrom(this);
+ }
+
+ void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
+ bool has_preassigned_slot() const { return has_preassigned_slot_; }
private:
+ void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
+
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
typedef BitField<bool, 3, 1> IsNonLoopPhiField;
@@ -590,13 +650,14 @@ class TopLevelLiveRange final : public LiveRange {
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
};
- SpillAtDefinitionList* spills_at_definition_;
+ SpillMoveInsertionList* spill_move_insertion_locations_;
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
bool spilled_in_deferred_blocks_;
int spill_start_index_;
- LiveRange* last_child_;
- LiveRange* last_insertion_point_;
+ UsePosition* last_pos_;
+ TopLevelLiveRange* splinter_;
+ bool has_preassigned_slot_;
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
@@ -622,6 +683,7 @@ class SpillRange final : public ZoneObject {
int ByteWidth() const;
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
+ bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
void set_assigned_slot(int index) {
DCHECK_EQ(kUnassignedSlot, assigned_slot_);
@@ -637,6 +699,7 @@ class SpillRange final : public ZoneObject {
ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
int byte_width() const { return byte_width_; }
RegisterKind kind() const { return kind_; }
+ void Print() const;
private:
LifetimePosition End() const { return end_position_; }
@@ -666,9 +729,9 @@ class RegisterAllocationData final : public ZoneObject {
// For hinting.
int assigned_register() const { return assigned_register_; }
- void set_assigned_register(int register_index) {
+ void set_assigned_register(int register_code) {
DCHECK_EQ(assigned_register_, kUnassignedRegister);
- assigned_register_ = register_index;
+ assigned_register_ = register_code;
}
void UnsetAssignedRegister() { assigned_register_ = kUnassignedRegister; }
@@ -688,6 +751,8 @@ class RegisterAllocationData final : public ZoneObject {
InstructionOperand* operand;
};
typedef ZoneVector<DelayedReference> DelayedReferences;
+ typedef ZoneVector<std::pair<TopLevelLiveRange*, int>>
+ RangesWithPreassignedSlots;
RegisterAllocationData(const RegisterConfiguration* config,
Zone* allocation_zone, Frame* frame,
@@ -725,12 +790,12 @@ class RegisterAllocationData final : public ZoneObject {
const char* debug_name() const { return debug_name_; }
const RegisterConfiguration* config() const { return config_; }
- MachineType MachineTypeFor(int virtual_register);
+ MachineRepresentation RepresentationFor(int virtual_register);
TopLevelLiveRange* GetOrCreateLiveRangeFor(int index);
// Creates a new live range.
- TopLevelLiveRange* NewLiveRange(int index, MachineType machine_type);
- TopLevelLiveRange* NextLiveRange(MachineType machine_type);
+ TopLevelLiveRange* NewLiveRange(int index, MachineRepresentation rep);
+ TopLevelLiveRange* NextLiveRange(MachineRepresentation rep);
SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range);
SpillRange* CreateSpillRangeForLiveRange(TopLevelLiveRange* range);
@@ -744,6 +809,7 @@ class RegisterAllocationData final : public ZoneObject {
}
bool ExistsUseWithoutDefinition();
+ bool RangesDefinedInDeferredStayInDeferred();
void MarkAllocated(RegisterKind kind, int index);
@@ -753,12 +819,9 @@ class RegisterAllocationData final : public ZoneObject {
PhiMapValue* GetPhiMapValueFor(int virtual_register);
bool IsBlockBoundary(LifetimePosition pos) const;
- void Print(const InstructionSequence* instructionSequence);
- void Print(const Instruction* instruction);
- void Print(const LiveRange* range, bool with_children = false);
- void Print(const InstructionOperand& op);
- void Print(const MoveOperands* move);
- void Print(const SpillRange* spill_range);
+ RangesWithPreassignedSlots& preassigned_slot_ranges() {
+ return preassigned_slot_ranges_;
+ }
private:
int GetNextLiveRangeId();
@@ -769,6 +832,8 @@ class RegisterAllocationData final : public ZoneObject {
const char* const debug_name_;
const RegisterConfiguration* const config_;
PhiMap phi_map_;
+ ZoneVector<int> allocatable_codes_;
+ ZoneVector<int> allocatable_double_codes_;
ZoneVector<BitVector*> live_in_sets_;
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
@@ -779,6 +844,7 @@ class RegisterAllocationData final : public ZoneObject {
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
int virtual_register_count_;
+ RangesWithPreassignedSlots preassigned_slot_ranges_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -886,9 +952,21 @@ class RegisterAllocator : public ZoneObject {
InstructionSequence* code() const { return data()->code(); }
RegisterKind mode() const { return mode_; }
int num_registers() const { return num_registers_; }
+ int num_allocatable_registers() const { return num_allocatable_registers_; }
+ int allocatable_register_code(int allocatable_index) const {
+ return allocatable_register_codes_[allocatable_index];
+ }
+
+ // TODO(mtrofin): explain why splitting in gap START is always OK.
+ LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+ int instruction_index);
Zone* allocation_zone() const { return data()->allocation_zone(); }
+ // Find the optimal split for ranges defined by a memory operand, e.g.
+ // constants or function parameters passed on the stack.
+ void SplitAndSpillRangesDefinedByMemoryOperand(bool operands_only);
+
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
@@ -897,6 +975,11 @@ class RegisterAllocator : public ZoneObject {
// still be owned by the original range after splitting.
LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+ bool CanProcessRange(LiveRange* range) const {
+ return range != nullptr && !range->IsEmpty() && range->kind() == mode();
+ }
+
+
// Split the given range in a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
LifetimePosition end);
@@ -921,6 +1004,8 @@ class RegisterAllocator : public ZoneObject {
RegisterAllocationData* const data_;
const RegisterKind mode_;
const int num_registers_;
+ int num_allocatable_registers_;
+ const int* allocatable_register_codes_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
@@ -1067,10 +1152,10 @@ class LiveRangeConnector final : public ZoneObject {
bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
- void ResolveControlFlow(const InstructionBlock* block,
- const InstructionOperand& cur_op,
- const InstructionBlock* pred,
- const InstructionOperand& pred_op);
+ int ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
+ const InstructionBlock* pred,
+ const InstructionOperand& pred_op);
RegisterAllocationData* const data_;
diff --git a/chromium/v8/src/compiler/register-configuration.cc b/chromium/v8/src/compiler/register-configuration.cc
deleted file mode 100644
index ebe6cfe23c3..00000000000
--- a/chromium/v8/src/compiler/register-configuration.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/register-configuration.h"
-#include "src/globals.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
- Register::kNumRegisters);
-STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
- DoubleRegister::kMaxNumRegisters);
-
-class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
- public:
- ArchDefaultRegisterConfiguration()
- : RegisterConfiguration(Register::kMaxNumAllocatableRegisters,
-#if V8_TARGET_ARCH_X87
- 1,
- 1,
-#else
- DoubleRegister::NumAllocatableRegisters(),
- DoubleRegister::NumAllocatableAliasedRegisters(),
-#endif
- general_register_name_table_,
- double_register_name_table_) {
- DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
- Register::NumAllocatableRegisters());
- for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
- general_register_name_table_[i] = Register::AllocationIndexToString(i);
- }
- DCHECK_GE(DoubleRegister::kMaxNumAllocatableRegisters,
- DoubleRegister::NumAllocatableRegisters());
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_name_table_[i] =
- DoubleRegister::AllocationIndexToString(i);
- }
- }
-
- const char*
- general_register_name_table_[Register::kMaxNumAllocatableRegisters];
- const char*
- double_register_name_table_[DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-static base::LazyInstance<ArchDefaultRegisterConfiguration>::type
- kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-
-const RegisterConfiguration* RegisterConfiguration::ArchDefault() {
- return &kDefaultRegisterConfiguration.Get();
-}
-
-RegisterConfiguration::RegisterConfiguration(
- int num_general_registers, int num_double_registers,
- int num_aliased_double_registers, const char* const* general_register_names,
- const char* const* double_register_names)
- : num_general_registers_(num_general_registers),
- num_double_registers_(num_double_registers),
- num_aliased_double_registers_(num_aliased_double_registers),
- general_register_names_(general_register_names),
- double_register_names_(double_register_names) {}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/compiler/register-configuration.h b/chromium/v8/src/compiler/register-configuration.h
deleted file mode 100644
index f0d58735ba7..00000000000
--- a/chromium/v8/src/compiler/register-configuration.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
-#define V8_COMPILER_REGISTER_CONFIGURATION_H_
-
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// An architecture independent representation of the sets of registers available
-// for instruction creation.
-class RegisterConfiguration {
- public:
- // Architecture independent maxes.
- static const int kMaxGeneralRegisters = 32;
- static const int kMaxDoubleRegisters = 32;
-
- static const RegisterConfiguration* ArchDefault();
-
- RegisterConfiguration(int num_general_registers, int num_double_registers,
- int num_aliased_double_registers,
- const char* const* general_register_name,
- const char* const* double_register_name);
-
- int num_general_registers() const { return num_general_registers_; }
- int num_double_registers() const { return num_double_registers_; }
- int num_aliased_double_registers() const {
- return num_aliased_double_registers_;
- }
-
- const char* general_register_name(int offset) const {
- DCHECK(offset >= 0 && offset < kMaxGeneralRegisters);
- return general_register_names_[offset];
- }
- const char* double_register_name(int offset) const {
- DCHECK(offset >= 0 && offset < kMaxDoubleRegisters);
- return double_register_names_[offset];
- }
-
- private:
- const int num_general_registers_;
- const int num_double_registers_;
- const int num_aliased_double_registers_;
- const char* const* general_register_names_;
- const char* const* double_register_names_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
diff --git a/chromium/v8/src/compiler/representation-change.cc b/chromium/v8/src/compiler/representation-change.cc
new file mode 100644
index 00000000000..5dab60f6a32
--- /dev/null
+++ b/chromium/v8/src/compiler/representation-change.cc
@@ -0,0 +1,537 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/representation-change.h"
+
+#include <sstream>
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const char* Truncation::description() const {
+ switch (kind()) {
+ case TruncationKind::kNone:
+ return "no-value-use";
+ case TruncationKind::kBool:
+ return "truncate-to-bool";
+ case TruncationKind::kWord32:
+ return "truncate-to-word32";
+ case TruncationKind::kWord64:
+ return "truncate-to-word64";
+ case TruncationKind::kFloat32:
+ return "truncate-to-float32";
+ case TruncationKind::kFloat64:
+ return "truncate-to-float64";
+ case TruncationKind::kAny:
+ return "no-truncation";
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+// Partial order for truncations:
+//
+//  kWord64       kAny
+//     ^            ^
+//     \            |
+//      \         kFloat64  <--+
+//       \        ^    ^       |
+//        \       /    |       |
+//         kWord32  kFloat32  kBool
+//               ^     ^      ^
+//               \     |     /
+//                \    |    /
+//                 \   |   /
+//                  \  |  /
+//                   \ | /
+//                   kNone
+
+// static
+Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
+ TruncationKind rep2) {
+ if (LessGeneral(rep1, rep2)) return rep2;
+ if (LessGeneral(rep2, rep1)) return rep1;
+ // Handle the generalization of float64-representable values.
+ if (LessGeneral(rep1, TruncationKind::kFloat64) &&
+ LessGeneral(rep2, TruncationKind::kFloat64)) {
+ return TruncationKind::kFloat64;
+ }
+ // All other combinations are illegal.
+ FATAL("Tried to combine incompatible truncations");
+ return TruncationKind::kNone;
+}
+
+
+// static
+bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
+ switch (rep1) {
+ case TruncationKind::kNone:
+ return true;
+ case TruncationKind::kBool:
+ return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord32:
+ return rep2 == TruncationKind::kWord32 ||
+ rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord64:
+ return rep2 == TruncationKind::kWord64;
+ case TruncationKind::kFloat32:
+ return rep2 == TruncationKind::kFloat32 ||
+ rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kFloat64:
+ return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kAny:
+ return rep2 == TruncationKind::kAny;
+ }
+ UNREACHABLE();
+ return false;
+}
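[Editor's aside, not part of the patch] A minimal sketch of how the lattice above surfaces through the public Truncation API declared in representation-change.h: kWord32 and kFloat32 are incomparable, but both sit below kFloat64, so Generalize joins them there.

    Truncation joined =
        Truncation::Generalize(Truncation::Word32(), Truncation::Float32());
    DCHECK(joined == Truncation::Float64());
    // kNone is the bottom element, so it is less general than everything.
    DCHECK(Truncation::None().IsLessGeneralThan(Truncation::Any()));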
+
+
+namespace {
+
+// TODO(titzer): should Word64 also be implicitly convertible to others?
+bool IsWord(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kWord16 ||
+ rep == MachineRepresentation::kWord32;
+}
+
+} // namespace
+
+
+// Changes representation from {output_rep} to {use_rep}. The {truncation}
+// parameter is only used for sanity checking - if the changer cannot figure
+// out signedness for the word32->float64 conversion, then we check that the
+// uses truncate to word32 (so they do not care about signedness).
+Node* RepresentationChanger::GetRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ MachineRepresentation use_rep, Truncation truncation) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // The output representation should be set.
+ return TypeError(node, output_rep, output_type, use_rep);
+ }
+ if (use_rep == output_rep) {
+ // Representations are the same. That's a no-op.
+ return node;
+ }
+ if (IsWord(use_rep) && IsWord(output_rep)) {
+ // Both are words less than or equal to 32-bits.
+ // Since loads of integers from memory implicitly sign or zero extend the
+ // value to the full machine word size and stores implicitly truncate,
+ // no representation change is necessary.
+ return node;
+ }
+ switch (use_rep) {
+ case MachineRepresentation::kTagged:
+ return GetTaggedRepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kFloat32:
+ return GetFloat32RepresentationFor(node, output_rep, output_type,
+ truncation);
+ case MachineRepresentation::kFloat64:
+ return GetFloat64RepresentationFor(node, output_rep, output_type,
+ truncation);
+ case MachineRepresentation::kBit:
+ return GetBitRepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return GetWord32RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kWord64:
+ return GetWord64RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kNone:
+ return node;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
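[Editor's aside, not part of the patch] A hypothetical call site; `changer` and `value` are assumptions, not taken from the patch:

    // Assumes a RepresentationChanger changer(jsgraph, isolate) and a
    // Node* value whose output is kTagged and typed as Number.
    Node* as_float64 = changer.GetRepresentationFor(
        value, MachineRepresentation::kTagged, Type::Number(),
        MachineRepresentation::kFloat64, Truncation::Any());
    // Dispatches to GetFloat64RepresentationFor below, which, for a
    // non-constant tagged input, inserts a ChangeTaggedToFloat64 node.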
+
+
+Node* RepresentationChanger::GetTaggedRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kHeapConstant:
+ return node; // No change necessary.
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Constant(value);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Constant(static_cast<double>(value));
+ } else if (output_rep == MachineRepresentation::kBit) {
+ return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ case IrOpcode::kFloat64Constant:
+ return jsgraph()->Constant(OpParameter<double>(node));
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Tagged operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ op = simplified()->ChangeBitToBool();
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeUint32ToTagged();
+ } else if (output_type->Is(Type::Signed32())) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ } else if (output_rep ==
+ MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTagged();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ op = simplified()->ChangeFloat64ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
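[Editor's aside, not part of the patch] The constant-folding arms above mean a constant input never materializes a change operator; for instance, an Int32Constant typed Signed32 comes back as jsgraph()->Constant(value), a tagged-level constant, rather than being wrapped in ChangeInt32ToTagged.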
+
+
+Node* RepresentationChanger::GetFloat32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float32Constant(
+ DoubleToFloat32(OpParameter<double>(node)));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Unsigned32())) {
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ } else {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ }
+ case IrOpcode::kFloat32Constant:
+ return node; // No change necessary.
+ default:
+ break;
+ }
+ // Select the correct X -> Float32 operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat32);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToFloat64();
+ } else {
+ // Either the output is uint32 or the uses only care about the
+ // low 32 bits (so we can pick uint32 safely).
+ DCHECK(output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32());
+ op = machine()->ChangeUint32ToFloat64();
+ }
+ // int32 -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ op = machine()->TruncateFloat64ToFloat32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetFloat64RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float64Constant(OpParameter<double>(node));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float64Constant(value);
+ } else {
+ DCHECK(output_type->Is(Type::Unsigned32()));
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float64Constant(static_cast<double>(value));
+ }
+ case IrOpcode::kFloat64Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Float64Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Float64 operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat64);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToFloat64();
+ } else {
+ // Either the output is uint32 or the uses only care about the
+ // low 32 bits (so we can pick uint32 safely).
+ DCHECK(output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32());
+ op = machine()->ChangeUint32ToFloat64();
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToFloat64();
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ op = machine()->ChangeFloat32ToFloat64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat64);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
+ return jsgraph()->Int32Constant(DoubleToInt32(value));
+}
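[Editor's aside, not part of the patch] DoubleToInt32 applies the ECMAScript ToInt32 conversion, so folded word32 constants truncate toward zero and wrap modulo 2^32:

    MakeTruncatedInt32Constant(4294967301.0);  // Int32Constant(5): 2^32 + 5
    MakeTruncatedInt32Constant(-1.5);          // Int32Constant(-1)
    // NaN and +/-Infinity both fold to Int32Constant(0).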
+
+
+Node* RepresentationChanger::GetWord32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return MakeTruncatedInt32Constant(OpParameter<float>(node));
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant:
+ return MakeTruncatedInt32Constant(OpParameter<double>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Word32 operator.
+ const Operator* op;
+ Type* type = NodeProperties::GetType(node);
+
+ if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word32
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ // TODO(jarin) Use only output_type here, once we intersect it with the
+ // type inferred by the typer.
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else {
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else {
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = simplified()->ChangeTaggedToInt32();
+ } else {
+ node = InsertChangeTaggedToFloat64(node);
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
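[Editor's aside, not part of the patch] All three non-bit arms above resolve signedness the same way: a provable Unsigned32 value gets ChangeFloat64ToUint32, a provable Signed32 value gets ChangeFloat64ToInt32, and anything else falls back to TruncateFloat64ToInt32(TruncationMode::kJavaScript), i.e. the wrap-modulo-2^32 semantics of MakeTruncatedInt32Constant above.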
+
+
+Node* RepresentationChanger::GetBitRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ DCHECK(value.is_identical_to(factory()->true_value()) ||
+ value.is_identical_to(factory()->false_value()));
+ return jsgraph()->Int32Constant(
+ value.is_identical_to(factory()->true_value()) ? 1 : 0);
+ }
+ default:
+ break;
+ }
+ // Select the correct X -> Bit operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeBoolToBit();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kBit);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetWord64RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word64
+ }
+ // Can't really convert Word64 to anything else. Purported to be internal.
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+}
+
+
+const Operator* RepresentationChanger::Int32OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Int32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Int32Mod();
+ case IrOpcode::kNumberBitwiseOr:
+ return machine()->Word32Or();
+ case IrOpcode::kNumberBitwiseXor:
+ return machine()->Word32Xor();
+ case IrOpcode::kNumberBitwiseAnd:
+ return machine()->Word32And();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Int32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Int32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+const Operator* RepresentationChanger::Uint32OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Uint32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Uint32Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Uint32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Uint32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
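[Editor's aside, not part of the patch] kNumberAdd, kNumberSubtract and kNumberMultiply map to the same Int32Add/Int32Sub/Int32Mul operators in both tables because two's-complement add, subtract and multiply are sign-agnostic; only division, modulus and the ordered comparisons need distinct Uint32 variants, and equality is likewise shared as Word32Equal.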
+
+
+const Operator* RepresentationChanger::Float64OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Float64Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Float64Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Float64Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Float64Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Float64Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Float64Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Float64LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Float64LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* RepresentationChanger::TypeError(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type,
+ MachineRepresentation use) {
+ type_error_ = true;
+ if (!testing_type_errors_) {
+ std::ostringstream out_str;
+ out_str << output_rep << " (";
+ output_type->PrintTo(out_str, Type::SEMANTIC_DIM);
+ out_str << ")";
+
+ std::ostringstream use_str;
+ use_str << use;
+
+ V8_Fatal(__FILE__, __LINE__,
+ "RepresentationChangerError: node #%d:%s of "
+ "%s cannot be changed to %s",
+ node->id(), node->op()->mnemonic(), out_str.str().c_str(),
+ use_str.str().c_str());
+ }
+ return node;
+}
+
+
+Node* RepresentationChanger::InsertChangeFloat32ToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
+}
+
+
+Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ node);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/representation-change.h b/chromium/v8/src/compiler/representation-change.h
index 0c38e020add..62ea3b4684b 100644
--- a/chromium/v8/src/compiler/representation-change.h
+++ b/chromium/v8/src/compiler/representation-change.h
@@ -5,415 +5,106 @@
#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
#define V8_COMPILER_REPRESENTATION_CHANGE_H_
-#include <sstream>
-
-#include "src/base/bits.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Contains logic related to changing the representation of values for constants
-// and other nodes, as well as lowering Simplified->Machine operators.
-// Eagerly folds any representation changes for constants.
-class RepresentationChanger {
+class Truncation final {
public:
- RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
- Isolate* isolate)
- : jsgraph_(jsgraph),
- simplified_(simplified),
- isolate_(isolate),
- testing_type_errors_(false),
- type_error_(false) {}
-
- // TODO(titzer): should Word64 also be implicitly convertable to others?
- static bool IsWord(MachineTypeUnion type) {
- return (type & (kRepWord8 | kRepWord16 | kRepWord32)) != 0;
- }
+ // Constructors.
+ static Truncation None() { return Truncation(TruncationKind::kNone); }
+ static Truncation Bool() { return Truncation(TruncationKind::kBool); }
+ static Truncation Word32() { return Truncation(TruncationKind::kWord32); }
+ static Truncation Word64() { return Truncation(TruncationKind::kWord64); }
+ static Truncation Float32() { return Truncation(TruncationKind::kFloat32); }
+ static Truncation Float64() { return Truncation(TruncationKind::kFloat64); }
+ static Truncation Any() { return Truncation(TruncationKind::kAny); }
- Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
- MachineTypeUnion use_type) {
- if (!base::bits::IsPowerOfTwo32(output_type & kRepMask)) {
- // There should be only one output representation.
- return TypeError(node, output_type, use_type);
- }
- if ((use_type & kRepMask) == (output_type & kRepMask)) {
- // Representations are the same. That's a no-op.
- return node;
- }
- if (IsWord(use_type) && IsWord(output_type)) {
- // Both are words less than or equal to 32-bits.
- // Since loads of integers from memory implicitly sign or zero extend the
- // value to the full machine word size and stores implicitly truncate,
- // no representation change is necessary.
- return node;
- }
- if (use_type & kRepTagged) {
- return GetTaggedRepresentationFor(node, output_type);
- } else if (use_type & kRepFloat32) {
- return GetFloat32RepresentationFor(node, output_type);
- } else if (use_type & kRepFloat64) {
- return GetFloat64RepresentationFor(node, output_type);
- } else if (use_type & kRepBit) {
- return GetBitRepresentationFor(node, output_type);
- } else if (IsWord(use_type)) {
- return GetWord32RepresentationFor(node, output_type,
- use_type & kTypeUint32);
- } else if (use_type & kRepWord64) {
- return GetWord64RepresentationFor(node, output_type);
- } else {
- return node;
- }
+ static Truncation Generalize(Truncation t1, Truncation t2) {
+ return Truncation(Generalize(t1.kind(), t2.kind()));
}
- Node* GetTaggedRepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- case IrOpcode::kHeapConstant:
- return node; // No change necessary.
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Constant(static_cast<double>(value));
- } else if (output_type & kTypeInt32) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Constant(value);
- } else if (output_type & kRepBit) {
- return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
- : jsgraph()->TrueConstant();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- case IrOpcode::kFloat64Constant:
- return jsgraph()->Constant(OpParameter<double>(node));
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Constant(OpParameter<float>(node));
- default:
- break;
- }
- // Select the correct X -> Tagged operator.
- const Operator* op;
- if (output_type & kRepBit) {
- op = simplified()->ChangeBitToBool();
- } else if (IsWord(output_type)) {
- if (output_type & kTypeUint32) {
- op = simplified()->ChangeUint32ToTagged();
- } else if (output_type & kTypeInt32) {
- op = simplified()->ChangeInt32ToTagged();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- } else if (output_type & kRepFloat32) { // float32 -> float64 -> tagged
- node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTagged();
- } else if (output_type & kRepFloat64) {
- op = simplified()->ChangeFloat64ToTagged();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- return jsgraph()->graph()->NewNode(op, node);
+ // Queries.
+ bool TruncatesToWord32() const {
+ return LessGeneral(kind_, TruncationKind::kWord32);
}
-
- Node* GetFloat32RepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float32Constant(
- DoubleToFloat32(OpParameter<double>(node)));
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float32Constant(static_cast<float>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float32Constant(static_cast<float>(value));
- }
- case IrOpcode::kFloat32Constant:
- return node; // No change necessary.
- default:
- break;
- }
- // Select the correct X -> Float32 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return TypeError(node, output_type, kRepFloat32);
- } else if (IsWord(output_type)) {
- if (output_type & kTypeUint32) {
- op = machine()->ChangeUint32ToFloat64();
- } else {
- op = machine()->ChangeInt32ToFloat64();
- }
- // int32 -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
- } else if (output_type & kRepTagged) {
- op = simplified()
- ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
- } else if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToFloat32();
- } else {
- return TypeError(node, output_type, kRepFloat32);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node));
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float64Constant(static_cast<double>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float64Constant(value);
- }
- case IrOpcode::kFloat64Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Float64Constant(OpParameter<float>(node));
- default:
- break;
- }
- // Select the correct X -> Float64 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return TypeError(node, output_type, kRepFloat64);
- } else if (IsWord(output_type)) {
- if (output_type & kTypeUint32) {
- op = machine()->ChangeUint32ToFloat64();
- } else {
- op = machine()->ChangeInt32ToFloat64();
- }
- } else if (output_type & kRepTagged) {
- op = simplified()->ChangeTaggedToFloat64();
- } else if (output_type & kRepFloat32) {
- op = machine()->ChangeFloat32ToFloat64();
- } else {
- return TypeError(node, output_type, kRepFloat64);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* MakeInt32Constant(double value) {
- if (value < 0) {
- DCHECK(IsInt32Double(value));
- int32_t iv = static_cast<int32_t>(value);
- return jsgraph()->Int32Constant(iv);
- } else {
- DCHECK(IsUint32Double(value));
- int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
- return jsgraph()->Int32Constant(iv);
- }
+ bool TruncatesNaNToZero() {
+ return LessGeneral(kind_, TruncationKind::kWord32) ||
+ LessGeneral(kind_, TruncationKind::kBool);
}
-
- Node* GetTruncatedWord32For(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold truncations for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Int32Constant(
- DoubleToInt32(OpParameter<float>(node)));
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return jsgraph()->Int32Constant(
- DoubleToInt32(OpParameter<double>(node)));
- default:
- break;
- }
- // Select the correct X -> Word32 truncation operator.
- const Operator* op = NULL;
- if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
- } else if (output_type & kRepFloat32) {
- node = InsertChangeFloat32ToFloat64(node);
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
- } else if (output_type & kRepTagged) {
- node = InsertChangeTaggedToFloat64(node);
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
- } else {
- return TypeError(node, output_type, kRepWord32);
- }
- return jsgraph()->graph()->NewNode(op, node);
+ bool TruncatesUndefinedToZeroOrNaN() {
+ return LessGeneral(kind_, TruncationKind::kFloat64) ||
+ LessGeneral(kind_, TruncationKind::kWord64);
}
- Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
- bool use_unsigned) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return MakeInt32Constant(OpParameter<float>(node));
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return MakeInt32Constant(OpParameter<double>(node));
- default:
- break;
- }
- // Select the correct X -> Word32 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return node; // Sloppy comparison -> word32
- } else if (output_type & kRepFloat64) {
- if (output_type & kTypeUint32 || use_unsigned) {
- op = machine()->ChangeFloat64ToUint32();
- } else {
- op = machine()->ChangeFloat64ToInt32();
- }
- } else if (output_type & kRepFloat32) {
- node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
- if (output_type & kTypeUint32 || use_unsigned) {
- op = machine()->ChangeFloat64ToUint32();
- } else {
- op = machine()->ChangeFloat64ToInt32();
- }
- } else if (output_type & kRepTagged) {
- if (output_type & kTypeUint32 || use_unsigned) {
- op = simplified()->ChangeTaggedToUint32();
- } else {
- op = simplified()->ChangeTaggedToInt32();
- }
- } else {
- return TypeError(node, output_type, kRepWord32);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
+ // Operators.
+ bool operator==(Truncation other) const { return kind() == other.kind(); }
+ bool operator!=(Truncation other) const { return !(*this == other); }
- Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- DCHECK(value.is_identical_to(factory()->true_value()) ||
- value.is_identical_to(factory()->false_value()));
- return jsgraph()->Int32Constant(
- value.is_identical_to(factory()->true_value()) ? 1 : 0);
- }
- default:
- break;
- }
- // Select the correct X -> Bit operator.
- const Operator* op;
- if (output_type & kRepTagged) {
- op = simplified()->ChangeBoolToBit();
- } else {
- return TypeError(node, output_type, kRepBit);
- }
- return jsgraph()->graph()->NewNode(op, node);
+ // Debug utilities.
+ const char* description() const;
+ bool IsLessGeneralThan(Truncation other) {
+ return LessGeneral(kind(), other.kind());
}
- Node* GetWord64RepresentationFor(Node* node, MachineTypeUnion output_type) {
- if (output_type & kRepBit) {
- return node; // Sloppy comparison -> word64
- }
- // Can't really convert Word64 to anything else. Purported to be internal.
- return TypeError(node, output_type, kRepWord64);
- }
+ private:
+ enum class TruncationKind : uint8_t {
+ kNone,
+ kBool,
+ kWord32,
+ kWord64,
+ kFloat32,
+ kFloat64,
+ kAny
+ };
+
+ explicit Truncation(TruncationKind kind) : kind_(kind) {}
+ TruncationKind kind() const { return kind_; }
+
+ TruncationKind kind_;
+
+ static TruncationKind Generalize(TruncationKind rep1, TruncationKind rep2);
+ static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
+};
- const Operator* Int32OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Int32Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Int32Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Int32Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Int32Div();
- case IrOpcode::kNumberModulus:
- return machine()->Int32Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Word32Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Int32LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Int32LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- const Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Int32Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Int32Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Int32Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Uint32Div();
- case IrOpcode::kNumberModulus:
- return machine()->Uint32Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Word32Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Uint32LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Uint32LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
+// Contains logic related to changing the representation of values for constants
+// and other nodes, as well as lowering Simplified->Machine operators.
+// Eagerly folds any representation changes for constants.
+class RepresentationChanger final {
+ public:
+ RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
+ : jsgraph_(jsgraph),
+ isolate_(isolate),
+ testing_type_errors_(false),
+ type_error_(false) {}
- const Operator* Float64OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Float64Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Float64Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Float64Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Float64Div();
- case IrOpcode::kNumberModulus:
- return machine()->Float64Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Float64Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Float64LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Float64LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
+ // Changes representation from {output_rep} to {use_rep}. The {truncation}
+ // parameter is only used for sanity checking - if the changer cannot figure
+ // out signedness for the word32->float64 conversion, then we check that the
+ // uses truncate to word32 (so they do not care about signedness).
+ Node* GetRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type, MachineRepresentation use_rep,
+ Truncation truncation = Truncation::None());
+ const Operator* Int32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Float64OperatorFor(IrOpcode::Value opcode);
MachineType TypeForBasePointer(const FieldAccess& access) {
- return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+ return access.tag() != 0 ? MachineType::AnyTagged()
+ : MachineType::Pointer();
}
MachineType TypeForBasePointer(const ElementAccess& access) {
- return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
- }
-
- MachineType TypeFromUpperBound(Type* type) {
- if (type->Is(Type::None()))
- return kTypeAny; // TODO(titzer): should be an error
- if (type->Is(Type::Signed32())) return kTypeInt32;
- if (type->Is(Type::Unsigned32())) return kTypeUint32;
- if (type->Is(Type::Number())) return kTypeNumber;
- if (type->Is(Type::Boolean())) return kTypeBool;
- return kTypeAny;
+ return access.tag() != 0 ? MachineType::AnyTagged()
+ : MachineType::Pointer();
}
private:
JSGraph* jsgraph_;
- SimplifiedOperatorBuilder* simplified_;
Isolate* isolate_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -421,39 +112,30 @@ class RepresentationChanger {
bool testing_type_errors_; // If {true}, don't abort on a type error.
bool type_error_; // Set when a type error is detected.
- Node* TypeError(Node* node, MachineTypeUnion output_type,
- MachineTypeUnion use) {
- type_error_ = true;
- if (!testing_type_errors_) {
- std::ostringstream out_str;
- out_str << static_cast<MachineType>(output_type);
-
- std::ostringstream use_str;
- use_str << static_cast<MachineType>(use);
-
- V8_Fatal(__FILE__, __LINE__,
- "RepresentationChangerError: node #%d:%s of "
- "%s cannot be changed to %s",
- node->id(), node->op()->mnemonic(), out_str.str().c_str(),
- use_str.str().c_str());
- }
- return node;
- }
-
- Node* InsertChangeFloat32ToFloat64(Node* node) {
- return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(),
- node);
- }
-
- Node* InsertChangeTaggedToFloat64(Node* node) {
- return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- node);
- }
+ Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetFloat32RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Truncation truncation);
+ Node* GetFloat64RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Truncation truncation);
+ Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* TypeError(Node* node, MachineRepresentation output_rep,
+ Type* output_type, MachineRepresentation use);
+ Node* MakeTruncatedInt32Constant(double value);
+ Node* InsertChangeFloat32ToFloat64(Node* node);
+ Node* InsertChangeTaggedToFloat64(Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate()->factory(); }
- SimplifiedOperatorBuilder* simplified() { return simplified_; }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
};
diff --git a/chromium/v8/src/compiler/schedule.cc b/chromium/v8/src/compiler/schedule.cc
index 63f148d926e..455fcd120e1 100644
--- a/chromium/v8/src/compiler/schedule.cc
+++ b/chromium/v8/src/compiler/schedule.cc
@@ -34,7 +34,7 @@ bool BasicBlock::LoopContains(BasicBlock* block) const {
// RPO numbers must be initialized.
DCHECK(rpo_number_ >= 0);
DCHECK(block->rpo_number_ >= 0);
- if (loop_end_ == NULL) return false; // This is not a loop.
+ if (loop_end_ == nullptr) return false; // This is not a loop.
return block->rpo_number_ >= rpo_number_ &&
block->rpo_number_ < loop_end_->rpo_number_;
}
@@ -140,13 +140,13 @@ BasicBlock* Schedule::block(Node* node) const {
if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
return nodeid_to_block_[node->id()];
}
- return NULL;
+ return nullptr;
}
bool Schedule::IsScheduled(Node* node) {
if (node->id() >= nodeid_to_block_.size()) return false;
- return nodeid_to_block_[node->id()] != NULL;
+ return nodeid_to_block_[node->id()] != nullptr;
}
@@ -158,7 +158,7 @@ BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) {
bool Schedule::SameBasicBlock(Node* a, Node* b) const {
BasicBlock* block = this->block(a);
- return block != NULL && block == this->block(b);
+ return block != nullptr && block == this->block(b);
}
@@ -176,7 +176,7 @@ void Schedule::PlanNode(BasicBlock* block, Node* node) {
os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
<< " for future add to B" << block->id() << "\n";
}
- DCHECK(this->block(node) == NULL);
+ DCHECK(this->block(node) == nullptr);
SetBlockForNode(block, node);
}
@@ -187,7 +187,7 @@ void Schedule::AddNode(BasicBlock* block, Node* node) {
os << "Adding #" << node->id() << ":" << node->op()->mnemonic() << " to B"
<< block->id() << "\n";
}
- DCHECK(this->block(node) == NULL || this->block(node) == block);
+ DCHECK(this->block(node) == nullptr || this->block(node) == block);
block->AddNode(node);
SetBlockForNode(block, node);
}
@@ -354,7 +354,7 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
BasicBlock::Control control = block->control();
if (control != BasicBlock::kNone) {
os << " ";
- if (block->control_input() != NULL) {
+ if (block->control_input() != nullptr) {
os << *block->control_input();
} else {
os << "Goto";
diff --git a/chromium/v8/src/compiler/schedule.h b/chromium/v8/src/compiler/schedule.h
index 37ce76299ed..9624ff5a4ff 100644
--- a/chromium/v8/src/compiler/schedule.h
+++ b/chromium/v8/src/compiler/schedule.h
@@ -138,7 +138,7 @@ class BasicBlock final : public ZoneObject {
void set_rpo_number(int32_t rpo_number);
// Loop membership helpers.
- inline bool IsLoopHeader() const { return loop_end_ != NULL; }
+ inline bool IsLoopHeader() const { return loop_end_ != nullptr; }
bool LoopContains(BasicBlock* block) const;
// Computes the immediate common dominator of {b1} and {b2}. The worst time
@@ -153,8 +153,8 @@ class BasicBlock final : public ZoneObject {
BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* rpo_next_; // Link to next block in special RPO order.
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
- // NULL if none. For loop headers, this points to
- // enclosing loop header.
+ // nullptr if none. For loop headers, this points to
+ // enclosing loop header.
BasicBlock* loop_end_; // end of the loop, if this block is a loop header.
int32_t loop_depth_; // loop nesting, 0 is top-level
diff --git a/chromium/v8/src/compiler/scheduler.cc b/chromium/v8/src/compiler/scheduler.cc
index aa9a7cfdb2e..80ce8b17112 100644
--- a/chromium/v8/src/compiler/scheduler.cc
+++ b/chromium/v8/src/compiler/scheduler.cc
@@ -221,9 +221,9 @@ class CFGBuilder : public ZoneObject {
queued_(scheduler->graph_, 2),
queue_(zone),
control_(zone),
- component_entry_(NULL),
- component_start_(NULL),
- component_end_(NULL) {}
+ component_entry_(nullptr),
+ component_start_(nullptr),
+ component_end_(nullptr) {}
// Run the control flow graph construction algorithm by walking the graph
// backwards from end through control edges, building and connecting the
@@ -253,7 +253,7 @@ class CFGBuilder : public ZoneObject {
ResetDataStructures();
Queue(exit);
- component_entry_ = NULL;
+ component_entry_ = nullptr;
component_start_ = block;
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
@@ -377,7 +377,7 @@ class CFGBuilder : public ZoneObject {
BasicBlock* BuildBlockForNode(Node* node) {
BasicBlock* block = schedule_->block(node);
- if (block == NULL) {
+ if (block == nullptr) {
block = schedule_->NewBasicBlock();
TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
node->op()->mnemonic());
@@ -501,34 +501,34 @@ class CFGBuilder : public ZoneObject {
void ConnectTailCall(Node* call) {
Node* call_control = NodeProperties::GetControlInput(call);
BasicBlock* call_block = FindPredecessorBlock(call_control);
- TraceConnect(call, call_block, NULL);
+ TraceConnect(call, call_block, nullptr);
schedule_->AddTailCall(call_block, call);
}
void ConnectReturn(Node* ret) {
Node* return_control = NodeProperties::GetControlInput(ret);
BasicBlock* return_block = FindPredecessorBlock(return_control);
- TraceConnect(ret, return_block, NULL);
+ TraceConnect(ret, return_block, nullptr);
schedule_->AddReturn(return_block, ret);
}
void ConnectDeoptimize(Node* deopt) {
Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
- TraceConnect(deopt, deoptimize_block, NULL);
+ TraceConnect(deopt, deoptimize_block, nullptr);
schedule_->AddDeoptimize(deoptimize_block, deopt);
}
void ConnectThrow(Node* thr) {
Node* throw_control = NodeProperties::GetControlInput(thr);
BasicBlock* throw_block = FindPredecessorBlock(throw_control);
- TraceConnect(thr, throw_block, NULL);
+ TraceConnect(thr, throw_block, nullptr);
schedule_->AddThrow(throw_block, thr);
}
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
DCHECK_NOT_NULL(block);
- if (succ == NULL) {
+ if (succ == nullptr) {
TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
node->op()->mnemonic(), block->id().ToInt());
} else {
@@ -602,8 +602,8 @@ class SpecialRPONumberer : public ZoneObject {
SpecialRPONumberer(Zone* zone, Schedule* schedule)
: zone_(zone),
schedule_(schedule),
- order_(NULL),
- beyond_end_(NULL),
+ order_(nullptr),
+ beyond_end_(nullptr),
loops_(zone),
backedges_(zone),
stack_(zone),
@@ -630,7 +630,7 @@ class SpecialRPONumberer : public ZoneObject {
// numbering for basic blocks into the final schedule.
void SerializeRPOIntoSchedule() {
int32_t number = 0;
- for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
+ for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
b->set_rpo_number(number++);
schedule_->rpo_order()->push_back(b);
}
@@ -677,7 +677,7 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* start;
void AddOutgoing(Zone* zone, BasicBlock* block) {
- if (outgoing == NULL) {
+ if (outgoing == nullptr) {
outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
ZoneVector<BasicBlock*>(zone);
}
@@ -713,7 +713,7 @@ class SpecialRPONumberer : public ZoneObject {
// use the schedule's end block in actual control flow (e.g. with end having
// successors). Once this has been cleaned up we can use the end block here.
BasicBlock* BeyondEndSentinel() {
- if (beyond_end_ == NULL) {
+ if (beyond_end_ == nullptr) {
BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
}
@@ -777,7 +777,7 @@ class SpecialRPONumberer : public ZoneObject {
// Initialize the "loop stack". Note the entry could be a loop header.
LoopInfo* loop =
- HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+ HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
order = insertion_point;
// Perform an iterative post-order traversal, visiting loop bodies before
@@ -788,7 +788,7 @@ class SpecialRPONumberer : public ZoneObject {
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
- BasicBlock* succ = NULL;
+ BasicBlock* succ = nullptr;
if (block != end && frame->index < block->SuccessorCount()) {
// Process the next normal successor.
@@ -798,7 +798,7 @@ class SpecialRPONumberer : public ZoneObject {
if (block->rpo_number() == kBlockOnStack) {
// Finish the loop body the first time the header is left on the
// stack.
- DCHECK(loop != NULL && loop->header == block);
+ DCHECK(loop != nullptr && loop->header == block);
loop->start = PushFront(order, block);
order = loop->end;
block->set_rpo_number(kBlockVisited2);
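
Stripped of the loop special-casing, the traversal in this hunk is an ordinary iterative DFS post-order with explicit (block, successor-index) stack frames. A standalone sketch of that core, omitting the loop-body reordering the numberer adds:

#include <algorithm>
#include <cstdio>
#include <stack>
#include <utility>
#include <vector>

struct Block {
  int id;
  std::vector<Block*> successors;
  bool visited = false;
};

// Iterative DFS: each frame remembers which successor to try next; a block
// is emitted once all successors are done, and the post-order is reversed.
std::vector<Block*> ReversePostOrder(Block* entry) {
  std::vector<Block*> post_order;
  std::stack<std::pair<Block*, size_t>> stack;
  entry->visited = true;
  stack.push({entry, 0});
  while (!stack.empty()) {
    auto& [block, index] = stack.top();
    if (index < block->successors.size()) {
      Block* succ = block->successors[index++];
      if (!succ->visited) {
        succ->visited = true;
        stack.push({succ, 0});
      }
    } else {
      post_order.push_back(block);
      stack.pop();
    }
  }
  std::reverse(post_order.begin(), post_order.end());
  return post_order;
}

int main() {
  Block a{0}, b{1}, c{2};
  a.successors = {&b, &c};
  b.successors = {&c};
  for (Block* blk : ReversePostOrder(&a)) std::printf("B%d ", blk->id);
}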
@@ -813,19 +813,19 @@ class SpecialRPONumberer : public ZoneObject {
size_t outgoing_index = frame->index - block->SuccessorCount();
LoopInfo* info = &loops_[GetLoopNumber(block)];
DCHECK(loop != info);
- if (block != entry && info->outgoing != NULL &&
+ if (block != entry && info->outgoing != nullptr &&
outgoing_index < info->outgoing->size()) {
succ = info->outgoing->at(outgoing_index);
frame->index++;
}
}
- if (succ != NULL) {
+ if (succ != nullptr) {
// Process the next successor.
if (succ->rpo_number() == kBlockOnStack) continue;
if (succ->rpo_number() == kBlockVisited2) continue;
DCHECK(succ->rpo_number() == kBlockUnvisited2);
- if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
+ if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
// The successor is not in the current loop or any nested loop.
// Add it to the outgoing edges of this loop and visit it later.
loop->AddOutgoing(zone_, succ);
@@ -865,10 +865,10 @@ class SpecialRPONumberer : public ZoneObject {
}
// Publish new order the first time.
- if (order_ == NULL) order_ = order;
+ if (order_ == nullptr) order_ = order;
// Compute the correct loop headers and set the correct loop ends.
- LoopInfo* current_loop = NULL;
+ LoopInfo* current_loop = nullptr;
BasicBlock* current_header = entry->loop_header();
int32_t loop_depth = entry->loop_depth();
if (entry->IsLoopHeader()) --loop_depth; // Entry might be a loop header.
@@ -879,11 +879,13 @@ class SpecialRPONumberer : public ZoneObject {
current->set_rpo_number(kBlockUnvisited1);
// Finish the previous loop(s) if we just exited them.
- while (current_header != NULL && current == current_header->loop_end()) {
+ while (current_header != nullptr &&
+ current == current_header->loop_end()) {
DCHECK(current_header->IsLoopHeader());
- DCHECK(current_loop != NULL);
+ DCHECK_NOT_NULL(current_loop);
current_loop = current_loop->prev;
- current_header = current_loop == NULL ? NULL : current_loop->header;
+ current_header =
+ current_loop == nullptr ? nullptr : current_loop->header;
--loop_depth;
}
current->set_loop_header(current_header);
@@ -893,7 +895,7 @@ class SpecialRPONumberer : public ZoneObject {
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
BasicBlock* end = current_loop->end;
- current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
+ current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
current_header = current_loop->header;
TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -901,7 +903,7 @@ class SpecialRPONumberer : public ZoneObject {
current->set_loop_depth(loop_depth);
- if (current->loop_header() == NULL) {
+ if (current->loop_header() == nullptr) {
TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
current->loop_depth());
} else {
@@ -932,7 +934,7 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* member = backedges->at(i).first;
BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
size_t loop_num = GetLoopNumber(header);
- if (loops_[loop_num].header == NULL) {
+ if (loops_[loop_num].header == nullptr) {
loops_[loop_num].header = header;
loops_[loop_num].members = new (zone_)
BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
@@ -979,7 +981,8 @@ class SpecialRPONumberer : public ZoneObject {
}
os << ":\n";
- for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
+ for (BasicBlock* block = order_; block != nullptr;
+ block = block->rpo_next()) {
os << std::setw(5) << "B" << block->rpo_number() << ":";
for (size_t i = 0; i < loops_.size(); i++) {
bool range = loops_[i].header->LoopContains(block);
@@ -988,11 +991,11 @@ class SpecialRPONumberer : public ZoneObject {
os << (range ? "x" : " ");
}
os << " id:" << block->id() << ": ";
- if (block->loop_end() != NULL) {
+ if (block->loop_end() != nullptr) {
os << " range: [B" << block->rpo_number() << ", B"
<< block->loop_end()->rpo_number() << ")";
}
- if (block->loop_header() != NULL) {
+ if (block->loop_header() != nullptr) {
os << " header: id:" << block->loop_header()->id();
}
if (block->loop_depth() > 0) {
@@ -1012,10 +1015,10 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* header = loop->header;
BasicBlock* end = header->loop_end();
- DCHECK(header != NULL);
+ DCHECK_NOT_NULL(header);
DCHECK(header->rpo_number() >= 0);
DCHECK(header->rpo_number() < static_cast<int>(order->size()));
- DCHECK(end != NULL);
+ DCHECK_NOT_NULL(end);
DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
DCHECK(end->rpo_number() > header->rpo_number());
DCHECK(header->loop_header() != header);
@@ -1026,7 +1029,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK_EQ(header, block);
bool end_found;
while (true) {
- if (block == NULL || block == loop->end) {
+ if (block == nullptr || block == loop->end) {
end_found = (loop->end == block);
break;
}
@@ -1042,7 +1045,7 @@ class SpecialRPONumberer : public ZoneObject {
// Check loop depth of the header.
int loop_depth = 0;
- for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
+ for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
loop_depth++;
}
DCHECK_EQ(loop_depth, header->loop_depth());
@@ -1096,7 +1099,7 @@ void Scheduler::ComputeSpecialRPONumbering() {
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
- for (/*nop*/; block != NULL; block = block->rpo_next()) {
+ for (/*nop*/; block != nullptr; block = block->rpo_next()) {
auto pred = block->predecessors().begin();
auto end = block->predecessors().end();
DCHECK(pred != end); // All blocks except start have predecessors.
@@ -1153,7 +1156,7 @@ class PrepareUsesVisitor {
opcode == IrOpcode::kParameter
? schedule_->start()
: schedule_->block(NodeProperties::GetControlInput(node));
- DCHECK(block != NULL);
+ DCHECK_NOT_NULL(block);
schedule_->AddNode(block, node);
}
}
@@ -1243,7 +1246,7 @@ class ScheduleEarlyNodeVisitor {
if (data->minimum_block_ == schedule_->start()) return;
// Propagate schedule early position.
- DCHECK(data->minimum_block_ != NULL);
+ DCHECK_NOT_NULL(data->minimum_block_);
for (auto use : node->uses()) {
PropagateMinimumPositionToNode(data->minimum_block_, use);
}
@@ -1394,6 +1397,8 @@ class ScheduleLateNodeVisitor {
// Schedule the node or a floating control structure.
if (IrOpcode::IsMergeOpcode(node->opcode())) {
ScheduleFloatingControl(block, node);
+ } else if (node->opcode() == IrOpcode::kFinishRegion) {
+ ScheduleRegion(block, node);
} else {
ScheduleNode(block, node);
}
@@ -1519,10 +1524,11 @@ class ScheduleLateNodeVisitor {
BasicBlock* block = nullptr;
for (Edge edge : node->use_edges()) {
BasicBlock* use_block = GetBlockForUse(edge);
- block = block == NULL ? use_block : use_block == NULL
- ? block
- : BasicBlock::GetCommonDominator(
- block, use_block);
+ block = block == nullptr
+ ? use_block
+ : use_block == nullptr
+ ? block
+ : BasicBlock::GetCommonDominator(block, use_block);
}
return block;
}
@@ -1562,7 +1568,7 @@ class ScheduleLateNodeVisitor {
}
}
BasicBlock* result = schedule_->block(use);
- if (result == NULL) return NULL;
+ if (result == nullptr) return nullptr;
TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
use->op()->mnemonic(), result->id().ToInt());
return result;
@@ -1572,6 +1578,34 @@ class ScheduleLateNodeVisitor {
scheduler_->FuseFloatingControl(block, node);
}
+ void ScheduleRegion(BasicBlock* block, Node* region_end) {
+ // We only allow regions of instructions connected into a linear
+ // effect chain. The only value a node in the chain may produce is
+ // the value consumed by the FinishRegion node.
+
+ // We schedule back to front; we first schedule FinishRegion.
+ CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
+ ScheduleNode(block, region_end);
+
+ // Schedule the chain.
+ Node* node = NodeProperties::GetEffectInput(region_end);
+ while (node->opcode() != IrOpcode::kBeginRegion) {
+ DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ DCHECK_EQ(0, node->op()->ControlOutputCount());
+ // The value output (if there is any) must be consumed
+ // by the FinishRegion node.
+ DCHECK(node->op()->ValueOutputCount() == 0 ||
+ node == region_end->InputAt(0));
+ ScheduleNode(block, node);
+ node = NodeProperties::GetEffectInput(node);
+ }
+ // Schedule the BeginRegion node.
+ DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
+ ScheduleNode(block, node);
+ }
+
void ScheduleNode(BasicBlock* block, Node* node) {
schedule_->PlanNode(block, node);
scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
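
A compact illustration of the back-to-front walk ScheduleRegion performs over the linear effect chain, using toy nodes with a single effect input rather than the TurboFan Node API:

#include <cassert>
#include <vector>

enum class Opcode { kBeginRegion, kOther, kFinishRegion };

// Toy node carrying a single effect input, mirroring the linear effect
// chain that ScheduleRegion requires.
struct Node {
  Opcode opcode;
  Node* effect_input = nullptr;
};

// Walk from FinishRegion back through effect inputs until BeginRegion,
// collecting the whole region in back-to-front (scheduling) order.
std::vector<Node*> CollectRegion(Node* region_end) {
  assert(region_end->opcode == Opcode::kFinishRegion);
  std::vector<Node*> region{region_end};
  Node* node = region_end->effect_input;
  while (node->opcode != Opcode::kBeginRegion) {
    region.push_back(node);
    node = node->effect_input;
  }
  region.push_back(node);  // the BeginRegion node
  return region;
}

int main() {
  Node begin{Opcode::kBeginRegion};
  Node mid{Opcode::kOther, &begin};
  Node end{Opcode::kFinishRegion, &mid};
  assert(CollectRegion(&end).size() == 3);
}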
@@ -1655,9 +1689,9 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// Iterate on phase 2: Compute special RPO and dominator tree.
special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
// TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
- for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
+ for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
b->set_dominator_depth(-1);
- b->set_dominator(NULL);
+ b->set_dominator(nullptr);
}
PropagateImmediateDominators(block->rpo_next());
diff --git a/chromium/v8/src/compiler/select-lowering.cc b/chromium/v8/src/compiler/select-lowering.cc
index 28a5d922b79..0e8b36fa73a 100644
--- a/chromium/v8/src/compiler/select-lowering.cc
+++ b/chromium/v8/src/compiler/select-lowering.cc
@@ -55,7 +55,7 @@ Reduction SelectLowering::Reduce(Node* node) {
node->ReplaceInput(0, vthen);
node->ReplaceInput(1, velse);
node->ReplaceInput(2, merge);
- NodeProperties::ChangeOp(node, common()->Phi(p.type(), 2));
+ NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
return Changed(node);
}
diff --git a/chromium/v8/src/compiler/simplified-lowering.cc b/chromium/v8/src/compiler/simplified-lowering.cc
index 7d495bf9839..653fea80eac 100644
--- a/chromium/v8/src/compiler/simplified-lowering.cc
+++ b/chromium/v8/src/compiler/simplified-lowering.cc
@@ -18,6 +18,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
#include "src/objects.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -56,14 +57,279 @@ enum Phase {
};
+namespace {
+
+// The {UseInfo} class is used to describe a use of an input of a node.
+//
+// This information is used in two different ways, based on the phase:
+//
+// 1. During propagation, the use info is used to inform the input node
+// about what part of the input is used (we call this truncation) and what
+// is the preferred representation.
+//
+// 2. During lowering, the use info is used to properly convert the input
+// to the preferred representation. The preferred representation might be
+ // insufficient to do the conversion (e.g. word32->float64), so we also
+// need the signedness information to produce the correct value.
+class UseInfo {
+ public:
+ UseInfo(MachineRepresentation preferred, Truncation truncation)
+ : preferred_(preferred), truncation_(truncation) {}
+ static UseInfo TruncatingWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
+ }
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo Bool() {
+ return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
+ }
+ static UseInfo Float32() {
+ return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+ }
+ static UseInfo Float64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
+ }
+ static UseInfo PointerInt() {
+ return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+ }
+ static UseInfo AnyTagged() {
+ return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+ }
+
+ // Undetermined representation.
+ static UseInfo Any() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Any());
+ }
+ static UseInfo None() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::None());
+ }
+
+ // Truncation to a representation that is smaller than the preferred
+ // one.
+ static UseInfo Float64TruncatingToWord32() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Word32());
+ }
+ static UseInfo Word64TruncatingToWord32() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word32());
+ }
+ static UseInfo AnyTruncatingToBool() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+ }
+
+ MachineRepresentation preferred() const { return preferred_; }
+ Truncation truncation() const { return truncation_; }
+
+ private:
+ MachineRepresentation preferred_;
+ Truncation truncation_;
+};
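
The truncation half of UseInfo drives the propagation fixpoint: a use can only generalize a node's recorded truncation, and a change is the signal to requeue. A minimal sketch with a deliberately tiny three-point truncation lattice (the real Truncation lattice has more points; the names here are illustrative):

#include <cassert>

// Ordered by how much of the value a use observes: None < Word32 < Any.
enum class Trunc { kNone = 0, kWord32 = 1, kAny = 2 };

Trunc Generalize(Trunc a, Trunc b) { return a < b ? b : a; }  // lattice join

struct NodeInfo {
  Trunc truncation = Trunc::kNone;
  // Returns true if the node has to be requeued, i.e. the join moved up.
  bool AddUse(Trunc use) {
    Trunc old_truncation = truncation;
    truncation = Generalize(truncation, use);
    return truncation != old_truncation;
  }
};

int main() {
  NodeInfo info;
  assert(info.AddUse(Trunc::kWord32));   // new information -> requeue
  assert(!info.AddUse(Trunc::kWord32));  // no change -> converged
  assert(info.AddUse(Trunc::kAny));      // generalized further -> requeue
}

Because AddUse only ever moves a node up a finite lattice, the propagation phase is guaranteed to terminate at a fixpoint.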
+
+
+UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kTagged:
+ return UseInfo::AnyTagged();
+ case MachineRepresentation::kFloat64:
+ return UseInfo::Float64();
+ case MachineRepresentation::kFloat32:
+ return UseInfo::Float32();
+ case MachineRepresentation::kWord64:
+ return UseInfo::TruncatingWord64();
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return UseInfo::TruncatingWord32();
+ case MachineRepresentation::kBit:
+ return UseInfo::Bool();
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return UseInfo::None();
+}
+
+
+UseInfo UseInfoForBasePointer(const FieldAccess& access) {
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+}
+
+
+UseInfo UseInfoForBasePointer(const ElementAccess& access) {
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+}
+
+
+#ifdef DEBUG
+// Helpers for monotonicity checking.
+bool MachineRepresentationIsSubtype(MachineRepresentation r1,
+ MachineRepresentation r2) {
+ switch (r1) {
+ case MachineRepresentation::kNone:
+ return true;
+ case MachineRepresentation::kBit:
+ return r2 == MachineRepresentation::kBit ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord8:
+ return r2 == MachineRepresentation::kWord8 ||
+ r2 == MachineRepresentation::kWord16 ||
+ r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord16:
+ return r2 == MachineRepresentation::kWord16 ||
+ r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord32:
+ return r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord64:
+ return r2 == MachineRepresentation::kWord64;
+ case MachineRepresentation::kFloat32:
+ return r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kFloat64:
+ return r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kTagged:
+ return r2 == MachineRepresentation::kTagged;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+class InputUseInfos {
+ public:
+ explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
+
+ void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
+ if (input_use_infos_.empty()) {
+ input_use_infos_.resize(node->InputCount(), UseInfo::None());
+ }
+ // Check that the new use information is a super-type of the old
+ // one.
+ CHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
+ input_use_infos_[index] = use_info;
+ }
+
+ private:
+ ZoneVector<UseInfo> input_use_infos_;
+
+ static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
+ return MachineRepresentationIsSubtype(use1.preferred(), use2.preferred()) &&
+ use1.truncation().IsLessGeneralThan(use2.truncation());
+ }
+};
+
+#endif // DEBUG
+
+} // namespace
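
The hand-written switch in MachineRepresentationIsSubtype encodes a partial order on representations; the same order can be expressed as one supertype bitmask per representation, making the DEBUG check table-driven. A sketch under that encoding (enumerator names are illustrative, not the MachineRepresentation enum):

#include <cassert>
#include <cstdint>

enum Rep : unsigned {
  kNone = 0, kBit, kWord8, kWord16, kWord32, kWord64,
  kFloat32, kFloat64, kTagged, kNumReps
};

constexpr uint16_t B(Rep r) { return uint16_t{1} << r; }

// For each representation, the set of representations it may widen to.
constexpr uint16_t kSupertypes[kNumReps] = {
    /* kNone    */ 0xFFFF,  // subtype of everything
    /* kBit     */ B(kBit) | B(kTagged),
    /* kWord8   */ B(kWord8) | B(kWord16) | B(kWord32) | B(kWord64) |
                   B(kFloat32) | B(kFloat64) | B(kTagged),
    /* kWord16  */ B(kWord16) | B(kWord32) | B(kWord64) |
                   B(kFloat32) | B(kFloat64) | B(kTagged),
    /* kWord32  */ B(kWord32) | B(kWord64) | B(kFloat64) | B(kTagged),
    /* kWord64  */ B(kWord64),
    /* kFloat32 */ B(kFloat32) | B(kFloat64) | B(kTagged),
    /* kFloat64 */ B(kFloat64) | B(kTagged),
    /* kTagged  */ B(kTagged),
};

bool IsSubtype(Rep r1, Rep r2) { return (kSupertypes[r1] & B(r2)) != 0; }

int main() {
  assert(IsSubtype(kWord32, kFloat64));
  assert(!IsSubtype(kFloat64, kWord32));
}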
+
+
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
- struct NodeInfo {
- MachineTypeUnion use : 15; // Union of all usages for the node.
- bool queued : 1; // Bookkeeping for the traversal.
- bool visited : 1; // Bookkeeping for the traversal.
- MachineTypeUnion output : 15; // Output type of the node.
+ class NodeOutputInfo {
+ public:
+ NodeOutputInfo(MachineRepresentation representation, Type* type)
+ : type_(type), representation_(representation) {}
+ NodeOutputInfo()
+ : type_(Type::None()), representation_(MachineRepresentation::kNone) {}
+
+ MachineRepresentation representation() const { return representation_; }
+ Type* type() const { return type_; }
+
+ static NodeOutputInfo None() {
+ return NodeOutputInfo(MachineRepresentation::kNone, Type::None());
+ }
+
+ static NodeOutputInfo Float32() {
+ return NodeOutputInfo(MachineRepresentation::kFloat32, Type::Number());
+ }
+
+ static NodeOutputInfo Float64() {
+ return NodeOutputInfo(MachineRepresentation::kFloat64, Type::Number());
+ }
+
+ static NodeOutputInfo NumberTruncatedToWord32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Number());
+ }
+
+ static NodeOutputInfo Int32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Signed32());
+ }
+
+ static NodeOutputInfo Uint32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Unsigned32());
+ }
+
+ static NodeOutputInfo Bool() {
+ return NodeOutputInfo(MachineRepresentation::kBit, Type::Boolean());
+ }
+
+ static NodeOutputInfo Int64() {
+ // TODO(jarin) Fix once we have a real int64 type.
+ return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
+ }
+
+ static NodeOutputInfo Uint64() {
+ // TODO(jarin) Fix once we have a real uint64 type.
+ return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
+ }
+
+ static NodeOutputInfo AnyTagged() {
+ return NodeOutputInfo(MachineRepresentation::kTagged, Type::Any());
+ }
+
+ static NodeOutputInfo NumberTagged() {
+ return NodeOutputInfo(MachineRepresentation::kTagged, Type::Number());
+ }
+
+ static NodeOutputInfo Pointer() {
+ return NodeOutputInfo(MachineType::PointerRepresentation(), Type::Any());
+ }
+
+ private:
+ Type* type_;
+ MachineRepresentation representation_;
+ };
+
+ class NodeInfo {
+ public:
+ // Adds new use to the node. Returns true if something has changed
+ // and the node has to be requeued.
+ bool AddUse(UseInfo info) {
+ Truncation old_truncation = truncation_;
+ truncation_ = Truncation::Generalize(truncation_, info.truncation());
+ return truncation_ != old_truncation;
+ }
+
+ void set_queued(bool value) { queued_ = value; }
+ bool queued() const { return queued_; }
+ void set_visited() { visited_ = true; }
+ bool visited() const { return visited_; }
+ Truncation truncation() const { return truncation_; }
+ void set_output_type(NodeOutputInfo output) { output_ = output; }
+
+ Type* output_type() const { return output_.type(); }
+ MachineRepresentation representation() const {
+ return output_.representation();
+ }
+
+ private:
+ bool queued_ = false; // Bookkeeping for the traversal.
+ bool visited_ = false; // Bookkeeping for the traversal.
+ NodeOutputInfo output_; // Output type and representation.
+ Truncation truncation_ = Truncation::None(); // Information about uses.
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
@@ -71,34 +337,34 @@ class RepresentationSelector {
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
count_(jsgraph->graph()->NodeCount()),
- info_(zone->NewArray<NodeInfo>(count_)),
+ info_(count_, zone),
+#ifdef DEBUG
+ node_input_use_infos_(count_, InputUseInfos(zone), zone),
+#endif
nodes_(zone),
replacements_(zone),
phase_(PROPAGATE),
changer_(changer),
queue_(zone),
- source_positions_(source_positions) {
- memset(info_, 0, sizeof(NodeInfo) * count_);
-
- safe_int_additive_range_ =
- Type::Range(-std::pow(2.0, 52.0), std::pow(2.0, 52.0), zone);
+ source_positions_(source_positions),
+ type_cache_(TypeCache::Get()) {
}
void Run(SimplifiedLowering* lowering) {
// Run propagation phase to a fixpoint.
TRACE("--{Propagation phase}--\n");
phase_ = PROPAGATE;
- Enqueue(jsgraph_->graph()->end());
+ EnqueueInitial(jsgraph_->graph()->end());
// Process nodes from the queue until it is empty.
while (!queue_.empty()) {
Node* node = queue_.front();
NodeInfo* info = GetInfo(node);
queue_.pop();
- info->queued = false;
+ info->set_queued(false);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode(node, info->use, NULL);
+ VisitNode(node, info->truncation(), nullptr);
TRACE(" ==> output ");
- PrintInfo(info->output);
+ PrintOutputInfo(info);
TRACE("\n");
}
@@ -108,11 +374,12 @@ class RepresentationSelector {
// Process nodes from the collected {nodes_} vector.
for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
Node* node = *i;
+ NodeInfo* info = GetInfo(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
// Reuse {VisitNode()} so the representation rules are in one place.
SourcePositionTable::Scope scope(
source_positions_, source_positions_->GetSourcePosition(node));
- VisitNode(node, GetUseInfo(node), lowering);
+ VisitNode(node, info->truncation(), lowering);
}
// Perform the final replacements.
@@ -129,105 +396,173 @@ class RepresentationSelector {
}
}
- // Enqueue {node} if the {use} contains new information for that node.
- // Add {node} to {nodes_} if this is the first time it's been visited.
- void Enqueue(Node* node, MachineTypeUnion use = 0) {
+ void EnqueueInitial(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ info->set_queued(true);
+ nodes_.push_back(node);
+ queue_.push(node);
+ }
+
+ // Enqueue {use_node}'s {index} input if the {use} contains new information
+ // for that input node. Add the input to {nodes_} if this is the first time
+ // it's been visited.
+ void EnqueueInput(Node* use_node, int index,
+ UseInfo use_info = UseInfo::None()) {
+ Node* node = use_node->InputAt(index);
if (phase_ != PROPAGATE) return;
NodeInfo* info = GetInfo(node);
- if (!info->visited) {
+#ifdef DEBUG
+ // Check monotonicity of input requirements.
+ node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
+ use_info);
+#endif // DEBUG
+ if (!info->visited()) {
// First visit of this node.
- info->visited = true;
- info->queued = true;
+ info->set_visited();
+ info->set_queued(true);
nodes_.push_back(node);
queue_.push(node);
TRACE(" initial: ");
- info->use |= use;
- PrintUseInfo(node);
+ info->AddUse(use_info);
+ PrintTruncation(info->truncation());
return;
}
TRACE(" queue?: ");
- PrintUseInfo(node);
- if ((info->use & use) != use) {
+ PrintTruncation(info->truncation());
+ if (info->AddUse(use_info)) {
// New usage information for the node is available.
- if (!info->queued) {
+ if (!info->queued()) {
queue_.push(node);
- info->queued = true;
+ info->set_queued(true);
TRACE(" added: ");
} else {
TRACE(" inqueue: ");
}
- info->use |= use;
- PrintUseInfo(node);
+ PrintTruncation(info->truncation());
}
}
bool lower() { return phase_ == LOWER; }
- void Enqueue(Node* node, MachineType use) {
- Enqueue(node, static_cast<MachineTypeUnion>(use));
+ void EnqueueUses(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge)) {
+ Node* const user = edge.from();
+ if (user->id() < count_) {
+ // New type information for the node is available.
+ NodeInfo* info = GetInfo(user);
+ // Enqueue the node only if we are sure it is reachable from
+ // the end and it has not been queued yet.
+ if (info->visited() && !info->queued()) {
+ queue_.push(user);
+ info->set_queued(true);
+ }
+ }
+ }
+ }
+ }
+
+ void SetOutputFromMachineType(Node* node, MachineType machine_type) {
+ Type* type = Type::None();
+ switch (machine_type.semantic()) {
+ case MachineSemantic::kNone:
+ type = Type::None();
+ break;
+ case MachineSemantic::kBool:
+ type = Type::Boolean();
+ break;
+ case MachineSemantic::kInt32:
+ type = Type::Signed32();
+ break;
+ case MachineSemantic::kUint32:
+ type = Type::Unsigned32();
+ break;
+ case MachineSemantic::kInt64:
+ // TODO(jarin) Fix once we have proper int64.
+ type = Type::Internal();
+ break;
+ case MachineSemantic::kUint64:
+ // TODO(jarin) Fix once we have proper uint64.
+ type = Type::Internal();
+ break;
+ case MachineSemantic::kNumber:
+ type = Type::Number();
+ break;
+ case MachineSemantic::kAny:
+ type = Type::Any();
+ break;
+ }
+ return SetOutput(node, NodeOutputInfo(machine_type.representation(), type));
}
- void SetOutput(Node* node, MachineTypeUnion output) {
+ void SetOutput(Node* node, NodeOutputInfo output_info) {
// Every node should have at most one output representation. Note that
// phis can have 0, if they have not been used in a representation-inducing
// instruction.
- DCHECK((output & kRepMask) == 0 ||
- base::bits::IsPowerOfTwo32(output & kRepMask));
- GetInfo(node)->output = output;
+ Type* output_type = output_info.type();
+ if (NodeProperties::IsTyped(node)) {
+ output_type = Type::Intersect(NodeProperties::GetType(node),
+ output_info.type(), jsgraph_->zone());
+ }
+ NodeInfo* info = GetInfo(node);
+ DCHECK(info->output_type()->Is(output_type));
+ DCHECK(MachineRepresentationIsSubtype(info->representation(),
+ output_info.representation()));
+ if (!output_type->Is(info->output_type()) ||
+ output_info.representation() != info->representation()) {
+ EnqueueUses(node);
+ }
+ info->set_output_type(
+ NodeOutputInfo(output_info.representation(), output_type));
+ }
+
+ bool BothInputsAreSigned32(Node* node) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(Type::Signed32()) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(Type::Signed32());
+ }
+
+ bool BothInputsAreUnsigned32(Node* node) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(Type::Unsigned32()) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(Type::Unsigned32());
}
bool BothInputsAre(Node* node, Type* type) {
DCHECK_EQ(2, node->InputCount());
- return NodeProperties::GetType(node->InputAt(0))->Is(type) &&
- NodeProperties::GetType(node->InputAt(1))->Is(type);
+ return GetInfo(node->InputAt(0))->output_type()->Is(type) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(type);
}
- void ProcessTruncateWord32Input(Node* node, int index, MachineTypeUnion use) {
+ void ConvertInput(Node* node, int index, UseInfo use) {
Node* input = node->InputAt(index);
- if (phase_ == PROPAGATE) {
- // In the propagate phase, propagate the usage information backward.
- Enqueue(input, use);
- } else {
- // In the change phase, insert a change before the use if necessary.
- MachineTypeUnion output = GetInfo(input)->output;
- if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
- // Output representation doesn't match usage.
- TRACE(" truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic());
- TRACE(" from ");
- PrintInfo(output);
- TRACE(" to ");
- PrintInfo(use);
- TRACE("\n");
- Node* n = changer_->GetTruncatedWord32For(input, output);
- node->ReplaceInput(index, n);
- }
+ // In the change phase, insert a change before the use if necessary.
+ if (use.preferred() == MachineRepresentation::kNone)
+ return; // No input requirement on the use.
+ NodeInfo* input_info = GetInfo(input);
+ MachineRepresentation input_rep = input_info->representation();
+ if (input_rep != use.preferred()) {
+ // Output representation doesn't match usage.
+ TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
+ index, input->id(), input->op()->mnemonic());
+ TRACE(" from ");
+ PrintOutputInfo(input_info);
+ TRACE(" to ");
+ PrintUseInfo(use);
+ TRACE("\n");
+ Node* n = changer_->GetRepresentationFor(
+ input, input_info->representation(), input_info->output_type(),
+ use.preferred(), use.truncation());
+ node->ReplaceInput(index, n);
}
}
- void ProcessInput(Node* node, int index, MachineTypeUnion use) {
- Node* input = node->InputAt(index);
+ void ProcessInput(Node* node, int index, UseInfo use) {
if (phase_ == PROPAGATE) {
- // In the propagate phase, propagate the usage information backward.
- Enqueue(input, use);
+ EnqueueInput(node, index, use);
} else {
- // In the change phase, insert a change before the use if necessary.
- if ((use & kRepMask) == 0) return; // No input requirement on the use.
- MachineTypeUnion output = GetInfo(input)->output;
- if ((output & kRepMask & use) == 0) {
- // Output representation doesn't match usage.
- TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic());
- TRACE(" from ");
- PrintInfo(output);
- TRACE(" to ");
- PrintInfo(use);
- TRACE("\n");
- Node* n = changer_->GetRepresentationFor(input, output, use);
- node->ReplaceInput(index, n);
- }
+ ConvertInput(node, index, use);
}
}
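
The enqueue helpers in this hunk implement a standard monotone worklist: a node is requeued only when new use information actually changes its state, and the queued flag prevents duplicate queue entries. A self-contained miniature of that loop, with a plain int standing in for the truncation lattice:

#include <algorithm>
#include <cassert>
#include <queue>
#include <vector>

// A bigger "use" value is more general. Visiting a node propagates its use
// to its inputs, enqueueing an input only when its use actually grows.
struct Node {
  std::vector<Node*> inputs;
  int use = 0;
  bool queued = false;
};

void Propagate(Node* end) {
  std::queue<Node*> queue;
  end->use = 1;
  end->queued = true;
  queue.push(end);
  while (!queue.empty()) {
    Node* node = queue.front();
    queue.pop();
    node->queued = false;
    for (Node* input : node->inputs) {
      int generalized = std::max(input->use, node->use);
      if (generalized != input->use) {
        input->use = generalized;
        if (!input->queued) {  // already in queue: don't add twice
          input->queued = true;
          queue.push(input);
        }
      }
    }
  }
}

int main() {
  Node a, b, end;
  end.inputs = {&a, &b};
  a.inputs = {&b};
  Propagate(&end);
  assert(a.use == 1 && b.use == 1);
}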
@@ -236,11 +571,11 @@ class RepresentationSelector {
DCHECK_GE(index, NodeProperties::PastContextIndex(node));
for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
i < NodeProperties::PastEffectIndex(node); ++i) {
- Enqueue(node->InputAt(i)); // Effect inputs: just visit
+ EnqueueInput(node, i); // Effect inputs: just visit
}
for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
i < NodeProperties::PastControlIndex(node); ++i) {
- Enqueue(node->InputAt(i)); // Control inputs: just visit
+ EnqueueInput(node, i); // Control inputs: just visit
}
}
@@ -253,167 +588,165 @@ class RepresentationSelector {
OperatorProperties::GetContextInputCount(node->op());
// Visit value and context inputs as tagged.
for (int i = 0; i < tagged_count; i++) {
- ProcessInput(node, i, kMachAnyTagged);
+ ProcessInput(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (framestates, effects, control).
for (int i = tagged_count; i < node->InputCount(); i++) {
- Enqueue(node->InputAt(i));
+ EnqueueInput(node, i);
}
- // Assume the output is tagged.
- SetOutput(node, kMachAnyTagged);
}
// Helper for binops of the R x L -> O variety.
- void VisitBinop(Node* node, MachineTypeUnion left_use,
- MachineTypeUnion right_use, MachineTypeUnion output) {
+ void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
+ NodeOutputInfo output) {
DCHECK_EQ(2, node->op()->ValueInputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
for (int i = 2; i < node->InputCount(); i++) {
- Enqueue(node->InputAt(i));
+ EnqueueInput(node, i);
}
SetOutput(node, output);
}
// Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, MachineTypeUnion input_use,
- MachineTypeUnion output) {
+ void VisitBinop(Node* node, UseInfo input_use, NodeOutputInfo output) {
VisitBinop(node, input_use, input_use, output);
}
// Helper for unops of the I -> O variety.
- void VisitUnop(Node* node, MachineTypeUnion input_use,
- MachineTypeUnion output) {
+ void VisitUnop(Node* node, UseInfo input_use, NodeOutputInfo output) {
DCHECK_EQ(1, node->InputCount());
ProcessInput(node, 0, input_use);
SetOutput(node, output);
}
// Helper for leaf nodes.
- void VisitLeaf(Node* node, MachineTypeUnion output) {
+ void VisitLeaf(Node* node, NodeOutputInfo output) {
DCHECK_EQ(0, node->InputCount());
SetOutput(node, output);
}
// Helpers for specific types of binops.
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, kMachFloat64, kMachFloat64);
+ VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+ }
+ void VisitInt32Binop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ }
+ void VisitWord32TruncatingBinop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::NumberTruncatedToWord32());
}
- void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
void VisitUint32Binop(Node* node) {
- VisitBinop(node, kMachUint32, kMachUint32);
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ }
+ void VisitInt64Binop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Int64());
}
- void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
void VisitUint64Binop(Node* node) {
- VisitBinop(node, kMachUint64, kMachUint64);
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Uint64());
+ }
+ void VisitFloat64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ }
+ void VisitInt32Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ }
+ void VisitUint32Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ }
+ void VisitInt64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+ }
+ void VisitUint64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
}
- void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
- void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
- void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
- void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
- void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
// Infer representation for phi-like nodes.
- MachineType GetRepresentationForPhi(Node* node, MachineTypeUnion use) {
- // Phis adapt to the output representation their uses demand.
- Type* upper = NodeProperties::GetType(node);
- if ((use & kRepMask) == kRepFloat32) {
- // only float32 uses.
- return kRepFloat32;
- } else if ((use & kRepMask) == kRepFloat64) {
- // only float64 uses.
- return kRepFloat64;
- } else if ((use & kRepMask) == kRepTagged) {
- // only tagged uses.
- return kRepTagged;
- } else if (upper->Is(Type::Integral32())) {
- // Integer within [-2^31, 2^32[ range.
- if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
- // multiple uses, but we are within 32 bits range => pick kRepWord32.
- return kRepWord32;
- } else if (((use & kRepMask) == kRepWord32 &&
- !CanObserveNonWord32(use)) ||
- (use & kTypeMask) == kTypeInt32 ||
- (use & kTypeMask) == kTypeUint32) {
- // We only use 32 bits or we use the result consistently.
- return kRepWord32;
- } else {
- return kRepFloat64;
+ NodeOutputInfo GetOutputInfoForPhi(Node* node, Truncation use) {
+ // Compute the type.
+ Type* type = GetInfo(node->InputAt(0))->output_type();
+ for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+ type = Type::Union(type, GetInfo(node->InputAt(i))->output_type(),
+ jsgraph_->zone());
+ }
+
+ // Compute the representation.
+ MachineRepresentation rep = MachineRepresentation::kTagged;
+ if (type->Is(Type::None())) {
+ rep = MachineRepresentation::kNone;
+ } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
+ rep = MachineRepresentation::kWord32;
+ } else if (use.TruncatesToWord32()) {
+ rep = MachineRepresentation::kWord32;
+ } else if (type->Is(Type::Boolean())) {
+ rep = MachineRepresentation::kBit;
+ } else if (type->Is(Type::Number())) {
+ rep = MachineRepresentation::kFloat64;
+ } else if (type->Is(Type::Internal())) {
+ // We mark (u)int64 as Type::Internal.
+ // TODO(jarin) This is a workaround for our lack of (u)int64
+ // types. This can be removed once we can represent (u)int64
+ // unambiguously. (At the moment internal objects, such as the hole,
+ // are also Type::Internal()).
+ bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
+ MachineRepresentation::kWord64;
+#ifdef DEBUG
+ // Check that all the inputs agree on being Word64.
+ for (int i = 1; i < node->op()->ValueInputCount(); i++) {
+ DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
+ MachineRepresentation::kWord64);
}
- } else if (upper->Is(Type::Boolean())) {
- // multiple uses => pick kRepBit.
- return kRepBit;
- } else if (upper->Is(Type::Number())) {
- // multiple uses => pick kRepFloat64.
- return kRepFloat64;
- } else if (upper->Is(Type::Internal())) {
- return kMachPtr;
+#endif
+ rep = is_word64 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kTagged;
}
- return kRepTagged;
+ return NodeOutputInfo(rep, type);
}
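
The ladder in GetOutputInfoForPhi amounts to: pick the cheapest representation that the union of the input types (or the uses' truncation) permits, falling back to tagged. A condensed sketch with toy type predicates rather than the real Type API (the Internal/word64 special case is omitted):

#include <cassert>

enum class Rep { kNone, kBit, kWord32, kWord64, kFloat64, kTagged };

// Simplified stand-in for the type queries used above.
struct TypeInfo {
  bool is_none = false;
  bool is_integer32 = false;  // Signed32 or Unsigned32
  bool is_boolean = false;
  bool is_number = false;
};

// Same decision order as the ladder above: integer types and truncating
// uses win word32, then bit, then float64, then tagged.
Rep RepresentationForPhi(const TypeInfo& type, bool truncates_to_word32) {
  if (type.is_none) return Rep::kNone;
  if (type.is_integer32) return Rep::kWord32;
  if (truncates_to_word32) return Rep::kWord32;
  if (type.is_boolean) return Rep::kBit;
  if (type.is_number) return Rep::kFloat64;
  return Rep::kTagged;
}

int main() {
  TypeInfo number;
  number.is_number = true;
  assert(RepresentationForPhi(number, false) == Rep::kFloat64);
  assert(RepresentationForPhi(number, true) == Rep::kWord32);
}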
// Helper for handling selects.
- void VisitSelect(Node* node, MachineTypeUnion use,
+ void VisitSelect(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- ProcessInput(node, 0, kRepBit);
- MachineType output = GetRepresentationForPhi(node, use);
+ ProcessInput(node, 0, UseInfo::Bool());
- Type* upper = NodeProperties::GetType(node);
- MachineType output_type =
- static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
- SetOutput(node, output_type);
+ NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ SetOutput(node, output);
if (lower()) {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
- MachineType type = static_cast<MachineType>(output_type);
- if (type != p.type()) {
- NodeProperties::ChangeOp(node,
- lowering->common()->Select(type, p.hint()));
+ if (output.representation() != p.representation()) {
+ NodeProperties::ChangeOp(node, lowering->common()->Select(
+ output.representation(), p.hint()));
}
-
- // Convert inputs to the output representation of this select.
- ProcessInput(node, 1, output_type);
- ProcessInput(node, 2, output_type);
- } else {
- // Propagate {use} of the select to value inputs.
- MachineType use_type =
- static_cast<MachineType>((use & kTypeMask) | output);
- ProcessInput(node, 1, use_type);
- ProcessInput(node, 2, use_type);
}
+ // Convert inputs to the output representation of this select, pass the
+ // truncation along.
+ UseInfo input_use(output.representation(), truncation);
+ ProcessInput(node, 1, input_use);
+ ProcessInput(node, 2, input_use);
}
// Helper for handling phis.
- void VisitPhi(Node* node, MachineTypeUnion use,
+ void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- MachineType output = GetRepresentationForPhi(node, use);
-
- Type* upper = NodeProperties::GetType(node);
- MachineType output_type =
- static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
- SetOutput(node, output_type);
+ NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ SetOutput(node, output);
int values = node->op()->ValueInputCount();
-
if (lower()) {
// Update the phi operator.
- MachineType type = static_cast<MachineType>(output_type);
- if (type != OpParameter<MachineType>(node)) {
- NodeProperties::ChangeOp(node, lowering->common()->Phi(type, values));
+ if (output.representation() != PhiRepresentationOf(node->op())) {
+ NodeProperties::ChangeOp(
+ node, lowering->common()->Phi(output.representation(), values));
}
+ }
- // Convert inputs to the output representation of this phi.
- for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, i < values ? output_type : 0);
- }
- } else {
- // Propagate {use} of the phi to value inputs, and 0 to control.
- MachineType use_type =
- static_cast<MachineType>((use & kTypeMask) | output);
- for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, i < values ? use_type : 0);
- }
+ // Convert inputs to the output representation of this phi, pass the
+ // truncation along.
+ UseInfo input_use(output.representation(), truncation);
+ for (int i = 0; i < node->InputCount(); i++) {
+ ProcessInput(node, i, i < values ? input_use : UseInfo::None());
}
}
@@ -425,25 +758,38 @@ class RepresentationSelector {
for (int i = 0; i < node->InputCount(); i++) {
if (i == 0) {
// The target of the call.
- ProcessInput(node, i, 0);
+ ProcessInput(node, i, UseInfo::None());
} else if ((i - 1) < params) {
- ProcessInput(node, i, sig->GetParam(i - 1));
+ ProcessInput(node, i, TruncatingUseInfoFromRepresentation(
+ sig->GetParam(i - 1).representation()));
} else {
- ProcessInput(node, i, 0);
+ ProcessInput(node, i, UseInfo::None());
}
}
if (sig->return_count() > 0) {
- SetOutput(node, desc->GetMachineSignature()->GetReturn());
+ SetOutputFromMachineType(node, desc->GetMachineSignature()->GetReturn());
} else {
- SetOutput(node, kMachAnyTagged);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
+ }
+ }
+
+ MachineSemantic DeoptValueSemanticOf(Type* type) {
+ CHECK(!type->Is(Type::None()));
+ // We only need signedness to do deopt correctly.
+ if (type->Is(Type::Signed32())) {
+ return MachineSemantic::kInt32;
+ } else if (type->Is(Type::Unsigned32())) {
+ return MachineSemantic::kUint32;
+ } else {
+ return MachineSemantic::kAny;
}
}
void VisitStateValues(Node* node) {
if (phase_ == PROPAGATE) {
for (int i = 0; i < node->InputCount(); i++) {
- Enqueue(node->InputAt(i), kTypeAny);
+ EnqueueInput(node, i, UseInfo::Any());
}
} else {
Zone* zone = jsgraph_->zone();
@@ -451,13 +797,20 @@ class RepresentationSelector {
new (zone->New(sizeof(ZoneVector<MachineType>)))
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
- MachineTypeUnion input_type = GetInfo(node->InputAt(i))->output;
- (*types)[i] = static_cast<MachineType>(input_type);
+ NodeInfo* input_info = GetInfo(node->InputAt(i));
+ MachineType machine_type(
+ input_info->representation(),
+ DeoptValueSemanticOf(input_info->output_type()));
+ DCHECK(machine_type.representation() !=
+ MachineRepresentation::kWord32 ||
+ machine_type.semantic() == MachineSemantic::kInt32 ||
+ machine_type.semantic() == MachineSemantic::kUint32);
+ (*types)[i] = machine_type;
}
NodeProperties::ChangeOp(node,
jsgraph_->common()->TypedStateValues(types));
}
- SetOutput(node, kMachAnyTagged);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
}
const Operator* Int32Op(Node* node) {
@@ -472,52 +825,9 @@ class RepresentationSelector {
return changer_->Float64OperatorFor(node->opcode());
}
- bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Signed32()) &&
- (!CanObserveNonInt32(use) ||
- NodeProperties::GetType(node)->Is(Type::Signed32()));
- }
-
- bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, safe_int_additive_range_) &&
- !CanObserveNonInt32(use);
- }
-
- bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Unsigned32()) &&
- (!CanObserveNonUint32(use) ||
- NodeProperties::GetType(node)->Is(Type::Unsigned32()));
- }
-
- bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, safe_int_additive_range_) &&
- !CanObserveNonUint32(use);
- }
-
- bool CanObserveNonWord32(MachineTypeUnion use) {
- return (use & ~(kTypeUint32 | kTypeInt32)) != 0;
- }
-
- bool CanObserveNonInt32(MachineTypeUnion use) {
- return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveMinusZero(MachineTypeUnion use) {
- // TODO(turbofan): technically Uint32 cannot observe minus zero either.
- return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveNaN(MachineTypeUnion use) {
- return (use & (kTypeNumber | kTypeAny)) != 0;
- }
-
- bool CanObserveNonUint32(MachineTypeUnion use) {
- return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
- }
-
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
- void VisitNode(Node* node, MachineTypeUnion use,
+ void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
switch (node->opcode()) {
//------------------------------------------------------------------
@@ -525,41 +835,41 @@ class RepresentationSelector {
//------------------------------------------------------------------
case IrOpcode::kStart:
case IrOpcode::kDead:
- return VisitLeaf(node, 0);
+ return VisitLeaf(node, NodeOutputInfo::None());
case IrOpcode::kParameter: {
// TODO(titzer): use representation from linkage.
- Type* upper = NodeProperties::GetType(node);
- ProcessInput(node, 0, 0);
- SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
+ Type* type = NodeProperties::GetType(node);
+ ProcessInput(node, 0, UseInfo::None());
+ SetOutput(node, NodeOutputInfo(MachineRepresentation::kTagged, type));
return;
}
case IrOpcode::kInt32Constant:
- return VisitLeaf(node, kRepWord32);
+ return VisitLeaf(node, NodeOutputInfo::Int32());
case IrOpcode::kInt64Constant:
- return VisitLeaf(node, kRepWord64);
+ return VisitLeaf(node, NodeOutputInfo::Int64());
case IrOpcode::kFloat32Constant:
- return VisitLeaf(node, kRepFloat32);
+ return VisitLeaf(node, NodeOutputInfo::Float32());
case IrOpcode::kFloat64Constant:
- return VisitLeaf(node, kRepFloat64);
+ return VisitLeaf(node, NodeOutputInfo::Float64());
case IrOpcode::kExternalConstant:
- return VisitLeaf(node, kMachPtr);
+ return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kNumberConstant:
- return VisitLeaf(node, kRepTagged);
+ return VisitLeaf(node, NodeOutputInfo::NumberTagged());
case IrOpcode::kHeapConstant:
- return VisitLeaf(node, kRepTagged);
+ return VisitLeaf(node, NodeOutputInfo::AnyTagged());
case IrOpcode::kBranch:
- ProcessInput(node, 0, kRepBit);
- Enqueue(NodeProperties::GetControlInput(node, 0));
+ ProcessInput(node, 0, UseInfo::Bool());
+ EnqueueInput(node, NodeProperties::FirstControlIndex(node));
break;
case IrOpcode::kSwitch:
- ProcessInput(node, 0, kRepWord32);
- Enqueue(NodeProperties::GetControlInput(node, 0));
+ ProcessInput(node, 0, UseInfo::TruncatingWord32());
+ EnqueueInput(node, NodeProperties::FirstControlIndex(node));
break;
case IrOpcode::kSelect:
- return VisitSelect(node, use, lowering);
+ return VisitSelect(node, truncation, lowering);
case IrOpcode::kPhi:
- return VisitPhi(node, use, lowering);
+ return VisitPhi(node, truncation, lowering);
case IrOpcode::kCall:
return VisitCall(node, lowering);
@@ -575,15 +885,15 @@ class RepresentationSelector {
JS_OP_LIST(DEFINE_JS_CASE)
#undef DEFINE_JS_CASE
VisitInputs(node);
- return SetOutput(node, kRepTagged);
+ return SetOutput(node, NodeOutputInfo::AnyTagged());
//------------------------------------------------------------------
// Simplified operators.
//------------------------------------------------------------------
case IrOpcode::kBooleanNot: {
if (lower()) {
- MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & kRepBit) {
+ NodeInfo* input_info = GetInfo(node->InputAt(0));
+ if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
@@ -594,15 +904,15 @@ class RepresentationSelector {
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, kTypeBool);
- SetOutput(node, kRepBit);
+ ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput(node, NodeOutputInfo::Bool());
}
break;
}
case IrOpcode::kBooleanToNumber: {
if (lower()) {
- MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & kRepBit) {
+ NodeInfo* input_info = GetInfo(node->InputAt(0));
+ if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanToNumber(x: kRepBit) => x
DeferReplacement(node, node->InputAt(0));
} else {
@@ -612,8 +922,8 @@ class RepresentationSelector {
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, kTypeBool);
- SetOutput(node, kMachInt32);
+ ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput(node, NodeOutputInfo::Int32());
}
break;
}
@@ -621,11 +931,11 @@ class RepresentationSelector {
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual: {
// Number comparisons reduce to integer comparisons for integer inputs.
- if (BothInputsAre(node, Type::Signed32())) {
+ if (BothInputsAreSigned32(node)) {
// => signed Int32Cmp
VisitInt32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (BothInputsAre(node, Type::Unsigned32())) {
+ } else if (BothInputsAreUnsigned32(node)) {
// => unsigned Int32Cmp
VisitUint32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
@@ -638,28 +948,18 @@ class RepresentationSelector {
}
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
- // Add and subtract reduce to Int32Add/Sub if the inputs
- // are already integers and all uses are truncating.
- if (CanLowerToInt32Binop(node, use)) {
+ if (BothInputsAre(node, Type::Signed32()) &&
+ NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // int32 + int32 = int32
// => signed Int32Add/Sub
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (CanLowerToInt32AdditiveBinop(node, use)) {
- // => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeInt32);
- ProcessTruncateWord32Input(node, 1, kTypeInt32);
- SetOutput(node, kMachInt32);
+ } else if (BothInputsAre(node, type_cache_.kAdditiveSafeInteger) &&
+ truncation.TruncatesToWord32()) {
+ // safe-int + safe-int = x (truncated to int32)
+ // => signed Int32Add/Sub (truncated)
+ VisitWord32TruncatingBinop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (CanLowerToUint32Binop(node, use)) {
- // => unsigned Int32Add/Sub
- VisitUint32Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- } else if (CanLowerToUint32AdditiveBinop(node, use)) {
- // => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeUint32);
- ProcessTruncateWord32Input(node, 1, kTypeUint32);
- SetOutput(node, kMachUint32);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else {
// => Float64Add/Sub
VisitFloat64Binop(node);
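
The "safe-int + safe-int = x (truncated to int32)" case above rests on a numeric fact: for inputs in the additive-safe range (|x| <= 2^52), Float64 addition is exact, so truncating the exact sum to 32 bits agrees with wraparound 32-bit addition of the truncated inputs. A minimal demonstration, where TruncateToWord32 is an illustrative stand-in for ToInt32-style truncation:

#include <cassert>
#include <cmath>
#include <cstdint>

// ToInt32-style truncation of an integral double (|d| < 2^63 assumed):
// reduce modulo 2^32. Unsigned arithmetic keeps the wraparound defined.
uint32_t TruncateToWord32(double d) {
  return static_cast<uint32_t>(static_cast<int64_t>(d));
}

int main() {
  // Both inputs lie in the additive-safe range, so the double addition
  // below is exact and commutes with 32-bit truncation.
  double a = std::pow(2.0, 52.0) - 3.0;
  double b = -std::pow(2.0, 40.0) + 7.0;
  uint32_t lowered = TruncateToWord32(a) + TruncateToWord32(b);  // Int32Add
  assert(TruncateToWord32(a + b) == lowered);                    // Float64Add
}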
@@ -668,14 +968,23 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberMultiply: {
- NumberMatcher right(node->InputAt(1));
- if (right.IsInRange(-1048576, 1048576)) { // must fit double mantissa.
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Mul
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // Multiply reduces to Int32Mul if the inputs and the output
+ // are integers.
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
break;
}
+ if (truncation.TruncatesToWord32() &&
+ NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger)) {
+ // Multiply reduces to Int32Mul if the inputs are integers,
+ // the uses are truncating and the result is in the safe
+ // integer range.
+ VisitWord32TruncatingBinop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ break;
+ }
}
// => Float64Mul
VisitFloat64Binop(node);
@@ -683,15 +992,23 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberDivide: {
- if (CanLowerToInt32Binop(node, use)) {
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Div
VisitInt32Binop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
break;
+ }
+ if (truncation.TruncatesToWord32()) {
+ // => signed Int32Div
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ break;
+ }
}
- if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+ if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Div
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
break;
}
@@ -701,15 +1018,23 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberModulus: {
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Mod
- VisitInt32Binop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // => signed Int32Mod
+ VisitInt32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
+ if (truncation.TruncatesToWord32()) {
+ // => signed Int32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
}
- if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+ if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Mod
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
break;
}
@@ -718,85 +1043,69 @@ class RepresentationSelector {
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
+ case IrOpcode::kNumberBitwiseOr:
+ case IrOpcode::kNumberBitwiseXor:
+ case IrOpcode::kNumberBitwiseAnd: {
+ VisitInt32Binop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ break;
+ }
case IrOpcode::kNumberShiftLeft: {
- VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
- if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shl());
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ }
break;
}
case IrOpcode::kNumberShiftRight: {
- VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
- if (lower()) lowering->DoShift(node, lowering->machine()->Word32Sar());
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ }
break;
}
case IrOpcode::kNumberShiftRightLogical: {
- VisitBinop(node, kMachUint32, kMachUint32, kMachUint32);
- if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shr());
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ }
break;
}
case IrOpcode::kNumberToInt32: {
- MachineTypeUnion use_rep = use & kRepMask;
- Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetType(input);
- MachineTypeUnion in = GetInfo(input)->output;
- if (in_upper->Is(Type::Signed32())) {
- // If the input has type int32, pass through representation.
- VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- in_upper->Is(Type::Unsigned32())) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
- (in & kRepMask) == kRepWord32) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- // Require the input in float64 format and perform truncation.
- // TODO(turbofan): avoid a truncation with a smi check.
- VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
- if (lower()) {
- NodeProperties::ChangeOp(
- node, lowering->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript));
- }
- }
+ // Just change representation if necessary.
+ VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) DeferReplacement(node, node->InputAt(0));
break;
}
case IrOpcode::kNumberToUint32: {
- MachineTypeUnion use_rep = use & kRepMask;
- Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetType(input);
- MachineTypeUnion in = GetInfo(input)->output;
- if (in_upper->Is(Type::Unsigned32())) {
- // If the input has type uint32, pass through representation.
- VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
- in_upper->Is(Type::Signed32())) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- (in & kRepMask) == kRepWord32) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- // Require the input in float64 format and perform truncation.
- // TODO(turbofan): avoid a truncation with a smi check.
- VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
- if (lower()) {
- NodeProperties::ChangeOp(
- node, lowering->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript));
- }
+ // Just change representation if necessary.
+ VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ break;
+ }
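
Both conversions can now simply pass the input through: UseInfo::TruncatingWord32() obliges whoever produces the input to deliver ECMAScript ToInt32/ToUint32 bits. A standalone sketch of that truncation, valid for |x| < 2^63 (full ToInt32 also maps NaN and infinities to 0):

#include <cstdint>
#include <cstdio>

int32_t ToInt32(double x) {
  return static_cast<int32_t>(static_cast<uint32_t>(static_cast<int64_t>(x)));
}

int main() { std::printf("%d\n", ToInt32(4294967298.0)); }  // 2^32 + 2 -> 2
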
+ case IrOpcode::kNumberIsHoleNaN: {
+ VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ if (lower()) {
+ // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
+ // #HoleNaNLower32)
+ node->ReplaceInput(0,
+ jsgraph_->graph()->NewNode(
+ lowering->machine()->Float64ExtractLowWord32(),
+ node->InputAt(0)));
+ node->AppendInput(jsgraph_->zone(),
+ jsgraph_->Int32Constant(kHoleNanLower32));
+ NodeProperties::ChangeOp(node, jsgraph_->machine()->Word32Equal());
}
break;
}
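
The rewrite relies on the hole being a NaN with a distinguished payload, so inspecting the low 32 bits of the float64 suffices. A sketch of the bit test; the concrete kHoleNanLower32 value is V8's and is taken as a parameter here rather than restated:

#include <cstdint>
#include <cstring>

bool IsHoleNaN(double x, uint32_t hole_nan_lower32) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);  // Float64ExtractLowWord32
  return static_cast<uint32_t>(bits) == hole_nan_lower32;  // Word32Equal
}
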
case IrOpcode::kPlainPrimitiveToNumber: {
- VisitUnop(node, kMachAnyTagged, kTypeNumber | kRepTagged);
+ VisitUnop(node, UseInfo::AnyTagged(), NodeOutputInfo::NumberTagged());
if (lower()) {
// PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
Operator::Properties properties = node->op()->properties();
@@ -813,119 +1122,128 @@ class RepresentationSelector {
break;
}
case IrOpcode::kReferenceEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) {
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
break;
}
case IrOpcode::kStringEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringEqual(node);
break;
}
case IrOpcode::kStringLessThan: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringLessThan(node);
break;
}
case IrOpcode::kStringLessThanOrEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringLessThanOrEqual(node);
break;
}
case IrOpcode::kAllocate: {
- ProcessInput(node, 0, kMachAnyTagged);
+ ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessRemainingInputs(node, 1);
- SetOutput(node, kMachAnyTagged);
- if (lower()) lowering->DoAllocate(node);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
break;
}
case IrOpcode::kLoadField: {
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessRemainingInputs(node, 1);
- SetOutput(node, access.machine_type);
- if (lower()) lowering->DoLoadField(node);
+ SetOutputFromMachineType(node, access.machine_type);
break;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, access.machine_type);
+ ProcessInput(node, 0, UseInfoForBasePointer(access));
+ ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
+ access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
- SetOutput(node, 0);
- if (lower()) lowering->DoStoreField(node);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kLoadBuffer: {
BufferAccess access = BufferAccessOf(node->op());
- ProcessInput(node, 0, kMachPtr); // buffer
- ProcessInput(node, 1, kMachInt32); // offset
- ProcessInput(node, 2, kMachInt32); // length
+ ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
ProcessRemainingInputs(node, 3);
- // Tagged overrides everything if we have to do a typed array bounds
- // check, because we may need to return undefined then.
- MachineType output_type;
- if (use & kRepTagged) {
- output_type = kMachAnyTagged;
- } else if (use & kRepFloat64) {
- if (access.machine_type() & kRepFloat32) {
- output_type = access.machine_type();
+
+ NodeOutputInfo output_info;
+ if (truncation.TruncatesUndefinedToZeroOrNaN()) {
+ if (truncation.TruncatesNaNToZero()) {
+ // If undefined is truncated to a non-NaN number, we can use
+ // the load's representation.
+ output_info = NodeOutputInfo(access.machine_type().representation(),
+ NodeProperties::GetType(node));
} else {
- output_type = kMachFloat64;
+ // If undefined is truncated to a number, but the use can
+ // observe NaN, we need to output at least the float32
+ // representation.
+ if (access.machine_type().representation() ==
+ MachineRepresentation::kFloat32) {
+ output_info =
+ NodeOutputInfo(access.machine_type().representation(),
+ NodeProperties::GetType(node));
+ } else {
+ output_info = NodeOutputInfo::Float64();
+ }
}
- } else if (use & kRepFloat32) {
- output_type = kMachFloat32;
} else {
- output_type = access.machine_type();
+ // If undefined is not truncated away, we need to have the tagged
+ // representation.
+ output_info = NodeOutputInfo::AnyTagged();
}
- SetOutput(node, output_type);
- if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
+ SetOutput(node, output_info);
+ if (lower())
+ lowering->DoLoadBuffer(node, output_info.representation(), changer_);
break;
}
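
Flattened into one function, the representation choice above reads as follows; the enum stands in for MachineRepresentation and the two booleans for the truncation queries (a sketch, not part of the patch):

enum class Rep { kTagged, kFloat32, kFloat64, kWord32 };

Rep LoadBufferOutputRep(Rep access_rep, bool truncates_undefined,
                        bool truncates_nan_to_zero) {
  if (!truncates_undefined) return Rep::kTagged;  // must still hold undefined
  if (truncates_nan_to_zero) return access_rep;   // NaN can never be observed
  // NaN observable: need a floating-point representation wide enough.
  return access_rep == Rep::kFloat32 ? Rep::kFloat32 : Rep::kFloat64;
}
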
case IrOpcode::kStoreBuffer: {
BufferAccess access = BufferAccessOf(node->op());
- ProcessInput(node, 0, kMachPtr); // buffer
- ProcessInput(node, 1, kMachInt32); // offset
- ProcessInput(node, 2, kMachInt32); // length
- ProcessInput(node, 3, access.machine_type()); // value
+ ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
+ ProcessInput(node, 3,
+ TruncatingUseInfoFromRepresentation(
+ access.machine_type().representation())); // value
ProcessRemainingInputs(node, 4);
- SetOutput(node, 0);
+ SetOutput(node, NodeOutputInfo::None());
if (lower()) lowering->DoStoreBuffer(node);
break;
}
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
- ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, access.machine_type);
- if (lower()) lowering->DoLoadElement(node);
+ SetOutputFromMachineType(node, access.machine_type);
break;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
- ProcessInput(node, 1, kMachInt32); // index
- ProcessInput(node, 2, access.machine_type); // value
+ ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 2,
+ TruncatingUseInfoFromRepresentation(
+ access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
- SetOutput(node, 0);
- if (lower()) lowering->DoStoreElement(node);
+ SetOutput(node, NodeOutputInfo::None());
+ break;
+ }
+ case IrOpcode::kObjectIsNumber: {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
+ if (lower()) lowering->DoObjectIsNumber(node);
break;
}
case IrOpcode::kObjectIsSmi: {
- ProcessInput(node, 0, kMachAnyTagged);
- SetOutput(node, kRepBit | kTypeBool);
- if (lower()) {
- Node* is_tagged = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->IntPtrConstant(kSmiTagMask));
- Node* is_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->IntPtrConstant(kSmiTag));
- DeferReplacement(node, is_smi);
- }
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
+ if (lower()) lowering->DoObjectIsSmi(node);
break;
}
@@ -933,29 +1251,31 @@ class RepresentationSelector {
// Machine-level operators.
//------------------------------------------------------------------
case IrOpcode::kLoad: {
- // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- MachineTypeUnion tBase = kRepTagged | kMachPtr;
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachIntPtr); // index
+ // TODO(jarin) Eventually, we should get rid of all machine loads
+ // from the high-level phases, then this becomes UNREACHABLE.
+ LoadRepresentation rep = LoadRepresentationOf(node->op());
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, rep);
+ SetOutputFromMachineType(node, rep);
break;
}
case IrOpcode::kStore: {
- // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- MachineTypeUnion tBase = kRepTagged | kMachPtr;
- StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
- ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachIntPtr); // index
- ProcessInput(node, 2, rep.machine_type());
+ // TODO(jarin) Eventually, we should get rid of all machine stores
+ // from the high-level phases, then this becomes UNREACHABLE.
+ StoreRepresentation rep = StoreRepresentationOf(node->op());
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // index
+ ProcessInput(node, 2,
+ TruncatingUseInfoFromRepresentation(rep.representation()));
ProcessRemainingInputs(node, 3);
- SetOutput(node, 0);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kWord32Shr:
// We output unsigned int32 for shift right because JavaScript's
// logical shift right (>>>) produces an unsigned result.
- return VisitBinop(node, kMachUint32, kMachUint32);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -964,12 +1284,15 @@ class RepresentationSelector {
// We use signed int32 as the output type for these word32 operations,
// though the machine bits are the same for either signed or unsigned,
// because JavaScript considers the result from these operations signed.
- return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kWord32Equal:
- return VisitBinop(node, kRepWord32, kRepBit);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Bool());
case IrOpcode::kWord32Clz:
- return VisitUnop(node, kMachUint32, kMachUint32);
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
@@ -1013,42 +1336,45 @@ class RepresentationSelector {
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
- return VisitBinop(node, kRepWord64, kRepWord64);
+ return VisitBinop(node, UseInfo::TruncatingWord64(),
+ NodeOutputInfo::Int64());
case IrOpcode::kWord64Equal:
- return VisitBinop(node, kRepWord64, kRepBit);
+ return VisitBinop(node, UseInfo::TruncatingWord64(),
+ NodeOutputInfo::Bool());
case IrOpcode::kChangeInt32ToInt64:
- return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepWord64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kWord64, Type::Signed32()));
case IrOpcode::kChangeUint32ToUint64:
- return VisitUnop(node, kTypeUint32 | kRepWord32,
- kTypeUint32 | kRepWord64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kWord64, Type::Unsigned32()));
case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, kTypeNumber | kRepFloat64,
- kTypeNumber | kRepFloat32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float32());
case IrOpcode::kTruncateFloat64ToInt32:
- return VisitUnop(node, kTypeNumber | kRepFloat64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
case IrOpcode::kTruncateInt64ToInt32:
// TODO(titzer): Is kTypeInt32 correct here?
- return VisitUnop(node, kTypeInt32 | kRepWord64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Word64TruncatingToWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kChangeFloat32ToFloat64:
- return VisitUnop(node, kTypeNumber | kRepFloat32,
- kTypeNumber | kRepFloat64);
+ return VisitUnop(node, UseInfo::Float32(), NodeOutputInfo::Float64());
case IrOpcode::kChangeInt32ToFloat64:
- return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepFloat64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kFloat64, Type::Signed32()));
case IrOpcode::kChangeUint32ToFloat64:
- return VisitUnop(node, kTypeUint32 | kRepWord32,
- kTypeUint32 | kRepFloat64);
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kFloat64,
+ Type::Unsigned32()));
case IrOpcode::kChangeFloat64ToInt32:
- return VisitUnop(node, kTypeInt32 | kRepFloat64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kChangeFloat64ToUint32:
- return VisitUnop(node, kTypeUint32 | kRepFloat64,
- kTypeUint32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
@@ -1062,25 +1388,28 @@ class RepresentationSelector {
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
- return VisitUnop(node, kMachFloat64, kMachFloat64);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64Cmp(node);
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
- return VisitUnop(node, kMachFloat64, kMachInt32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
- return VisitBinop(node, kMachFloat64, kMachInt32, kMachFloat64);
+ return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Float64());
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
- return VisitLeaf(node, kMachPtr);
+ return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kStateValues:
VisitStateValues(node);
break;
default:
VisitInputs(node);
+ // Assume the output is tagged.
+ SetOutput(node, NodeOutputInfo::AnyTagged());
break;
}
}
@@ -1091,7 +1420,7 @@ class RepresentationSelector {
replacement->op()->mnemonic());
if (replacement->id() < count_ &&
- GetInfo(replacement)->output == GetInfo(node)->output) {
+ GetInfo(node)->output_type()->Is(GetInfo(replacement)->output_type())) {
// Replace with a previously existing node eagerly only if the
// replacement's type subsumes this node's type.
node->ReplaceUses(replacement);
@@ -1106,23 +1435,44 @@ class RepresentationSelector {
node->NullAllInputs(); // Node is now dead.
}
- void PrintUseInfo(Node* node) {
- TRACE("#%d:%-20s ", node->id(), node->op()->mnemonic());
- PrintInfo(GetUseInfo(node));
- TRACE("\n");
+ void PrintOutputInfo(NodeInfo* info) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << info->representation() << " (";
+ info->output_type()->PrintTo(os, Type::SEMANTIC_DIM);
+ os << ")";
+ }
}
- void PrintInfo(MachineTypeUnion info) {
+ void PrintRepresentation(MachineRepresentation rep) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << static_cast<MachineType>(info);
+ os << rep;
+ }
+ }
+
+ void PrintTruncation(Truncation truncation) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << truncation.description();
+ }
+ }
+
+ void PrintUseInfo(UseInfo info) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << info.preferred() << ":" << info.truncation().description();
}
}
private:
JSGraph* jsgraph_;
size_t const count_; // number of nodes in the graph
- NodeInfo* info_; // node id -> usage information
+ ZoneVector<NodeInfo> info_; // node id -> usage information
+#ifdef DEBUG
+ ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
+ // requirements on inputs.
+#endif // DEBUG
NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
Phase phase_; // current phase of algorithm
@@ -1134,148 +1484,39 @@ class RepresentationSelector {
// lowering. Once this phase becomes a vanilla reducer, it should get source
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Type* safe_int_additive_range_;
+ TypeCache const& type_cache_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() >= 0);
DCHECK(node->id() < count_);
return &info_[node->id()];
}
-
- MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
};
-Node* SimplifiedLowering::IsTagged(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
- return graph()->NewNode(machine()->WordAnd(), node,
- jsgraph()->Int32Constant(kSmiTagMask));
-}
-
-
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
zone_(zone),
- zero_thirtyone_range_(Type::Range(0, 31, zone)),
+ type_cache_(TypeCache::Get()),
source_positions_(source_positions) {}
void SimplifiedLowering::LowerAllNodes() {
- SimplifiedOperatorBuilder simplified(graph()->zone());
- RepresentationChanger changer(jsgraph(), &simplified, jsgraph()->isolate());
+ RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
RepresentationSelector selector(jsgraph(), zone_, &changer,
source_positions_);
selector.Run(this);
}
-Node* SimplifiedLowering::Untag(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
- return graph()->NewNode(machine()->WordSar(), node, shift_amount);
-}
-
-
-Node* SimplifiedLowering::SmiTag(Node* node) {
- // TODO(titzer): factor this out to a TaggingScheme abstraction.
- Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
- return graph()->NewNode(machine()->WordShl(), node, shift_amount);
-}
-
-
-Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
- return jsgraph()->Int32Constant(offset - kHeapObjectTag);
-}
-
-
-namespace {
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineType representation,
- Type* type) {
- if (type->Is(Type::TaggedSigned())) {
- // Write barriers are only for writes of heap objects.
- return kNoWriteBarrier;
- }
- if (base_is_tagged == kTaggedBase &&
- RepresentationOf(representation) == kRepTagged) {
- // Write barriers are only for writes into heap objects (i.e. tagged base).
- return kFullWriteBarrier;
- }
- return kNoWriteBarrier;
-}
-
-} // namespace
-
-
-void SimplifiedLowering::DoAllocate(Node* node) {
- PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
- AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
- Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
- Operator::Properties props = node->op()->properties();
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
- ExternalReference ref(f, jsgraph()->isolate());
- int32_t flags = AllocateTargetSpace::encode(space);
- node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
- node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
- node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
- node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
- node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
-}
-
-
-void SimplifiedLowering::DoLoadField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
-}
-
-
-void SimplifiedLowering::DoStoreField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(1));
- WriteBarrierKind kind =
- ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type, type);
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(access.machine_type, kind)));
-}
-
-
-Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
- Node* const key) {
- Node* index = key;
- const int element_size_shift = ElementSizeLog2Of(access.machine_type);
- if (element_size_shift) {
- index = graph()->NewNode(machine()->Word32Shl(), index,
- jsgraph()->Int32Constant(element_size_shift));
- }
- const int fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->Int32Add(), index,
- jsgraph()->Int32Constant(fixed_offset));
- }
- if (machine()->Is64()) {
- // TODO(turbofan): This is probably only correct for typed arrays, and only
- // if the typed arrays are at most 2GiB in size, which happens to match
- // exactly our current situation.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
- }
- return index;
-}
-
-
-void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
+void SimplifiedLowering::DoLoadBuffer(Node* node,
+ MachineRepresentation output_rep,
RepresentationChanger* changer) {
DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
- DCHECK_NE(kMachNone, RepresentationOf(output_type));
- MachineType const type = BufferAccessOf(node->op()).machine_type();
- if (output_type != type) {
+ DCHECK_NE(MachineRepresentation::kNone, output_rep);
+ MachineType const access_type = BufferAccessOf(node->op()).machine_type();
+ if (output_rep != access_type.representation()) {
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
@@ -1291,19 +1532,21 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue =
- graph()->NewNode(machine()->Load(type), buffer, index, effect, if_true);
- Node* vtrue = changer->GetRepresentationFor(etrue, type, output_type);
+ Node* etrue = graph()->NewNode(machine()->Load(access_type), buffer, index,
+ effect, if_true);
+ Node* vtrue = changer->GetRepresentationFor(
+ etrue, access_type.representation(), NodeProperties::GetType(node),
+ output_rep, Truncation::None());
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
- if (output_type & kRepTagged) {
+ if (output_rep == MachineRepresentation::kTagged) {
vfalse = jsgraph()->UndefinedConstant();
- } else if (output_type & kRepFloat64) {
+ } else if (output_rep == MachineRepresentation::kFloat64) {
vfalse =
jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
- } else if (output_type & kRepFloat32) {
+ } else if (output_rep == MachineRepresentation::kFloat32) {
vfalse =
jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
} else {
@@ -1321,36 +1564,54 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
node->ReplaceInput(1, vfalse);
node->ReplaceInput(2, merge);
node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, common()->Phi(output_type, 2));
+ NodeProperties::ChangeOp(node, common()->Phi(output_rep, 2));
} else {
- NodeProperties::ChangeOp(node, machine()->CheckedLoad(type));
+ NodeProperties::ChangeOp(node, machine()->CheckedLoad(access_type));
}
}
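
The diamond gives an out-of-bounds access a representation-dependent default instead of a trap; in straight-line form for the float64 case (a sketch):

#include <cmath>

double CheckedLoadFloat64(const double* buffer, unsigned offset,
                          unsigned length) {
  // vtrue: the converted load; vfalse: quiet NaN, as wired up above.
  return offset < length ? buffer[offset] : std::nan("");
}
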
void SimplifiedLowering::DoStoreBuffer(Node* node) {
DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
- MachineType const type = BufferAccessOf(node->op()).machine_type();
- NodeProperties::ChangeOp(node, machine()->CheckedStore(type));
+ MachineRepresentation const rep =
+ BufferAccessOf(node->op()).machine_type().representation();
+ NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
}
-void SimplifiedLowering::DoLoadElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+void SimplifiedLowering::DoObjectIsNumber(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // TODO(bmeurer): Optimize somewhat based on input type.
+ Node* check =
+ graph()->NewNode(machine()->WordEqual(),
+ graph()->NewNode(machine()->WordAnd(), input,
+ jsgraph()->IntPtrConstant(kSmiTagMask)),
+ jsgraph()->IntPtrConstant(kSmiTag));
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->Int32Constant(1);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(
+ machine()->WordEqual(),
+ graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), input,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ graph()->start(), if_false),
+ jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
+ Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ node->ReplaceInput(0, vtrue);
+ node->AppendInput(graph()->zone(), vfalse);
+ node->AppendInput(graph()->zone(), control);
+ NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
}
-void SimplifiedLowering::DoStoreElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(2));
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type,
- ComputeWriteBarrierKind(access.base_is_tagged,
- access.machine_type, type))));
+void SimplifiedLowering::DoObjectIsSmi(Node* node) {
+ node->ReplaceInput(0,
+ graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
+ jsgraph()->IntPtrConstant(kSmiTagMask)));
+ node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
+ NodeProperties::ChangeOp(node, machine()->WordEqual());
}
@@ -1399,7 +1660,8 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
@@ -1476,7 +1738,8 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
@@ -1555,7 +1818,7 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
Diamond d(graph(), common(), check, BranchHint::kFalse);
Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
- return d.Phi(kMachUint32, zero, div);
+ return d.Phi(MachineRepresentation::kWord32, zero, div);
}
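
The diamond exists because the machine instruction traps on a zero divisor while JavaScript does not: ToUint32 of Infinity or NaN is 0, so the false branch substitutes zero. In straight-line form (a sketch):

#include <cstdint>

uint32_t JsTruncatingUint32Div(uint32_t lhs, uint32_t rhs) {
  return rhs == 0 ? 0 : lhs / rhs;  // select 0 instead of trapping
}
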
@@ -1587,7 +1850,8 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
graph()->start());
@@ -1618,10 +1882,10 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
}
-void SimplifiedLowering::DoShift(Node* node, Operator const* op) {
+void SimplifiedLowering::DoShift(Node* node, Operator const* op,
+ Type* rhs_type) {
Node* const rhs = NodeProperties::GetValueInput(node, 1);
- Type* const rhs_type = NodeProperties::GetType(rhs);
- if (!rhs_type->Is(zero_thirtyone_range_)) {
+ if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
jsgraph()->Int32Constant(0x1f)));
}
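
The mask is sound because ECMAScript shift operators consume only the five low bits of the count, and it is skipped whenever rhs_type already proves the range. A standalone sketch of the semantics:

#include <cstdint>
#include <cstdio>

uint32_t JsShl(uint32_t lhs, uint32_t count) { return lhs << (count & 0x1f); }

int main() { std::printf("%u\n", JsShl(1, 33)); }  // 2, matching (1 << 33) in JS
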
diff --git a/chromium/v8/src/compiler/simplified-lowering.h b/chromium/v8/src/compiler/simplified-lowering.h
index 4b9e86b786c..f9410f8b41e 100644
--- a/chromium/v8/src/compiler/simplified-lowering.h
+++ b/chromium/v8/src/compiler/simplified-lowering.h
@@ -12,6 +12,11 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
+
namespace compiler {
// Forward declarations.
@@ -26,18 +31,14 @@ class SimplifiedLowering final {
void LowerAllNodes();
- // TODO(titzer): These are exposed for direct testing. Use a friend class.
- void DoAllocate(Node* node);
- void DoLoadField(Node* node);
- void DoStoreField(Node* node);
- // TODO(turbofan): The output_type can be removed once the result of the
+ // TODO(turbofan): The representation can be removed once the result of the
// representation analysis is stored in the node bounds.
- void DoLoadBuffer(Node* node, MachineType output_type,
+ void DoLoadBuffer(Node* node, MachineRepresentation rep,
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
- void DoLoadElement(Node* node);
- void DoStoreElement(Node* node);
- void DoShift(Node* node, Operator const* op);
+ void DoObjectIsNumber(Node* node);
+ void DoObjectIsSmi(Node* node);
+ void DoShift(Node* node, Operator const* op, Type* rhs_type);
void DoStringEqual(Node* node);
void DoStringLessThan(Node* node);
void DoStringLessThanOrEqual(Node* node);
@@ -45,7 +46,7 @@ class SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
Zone* const zone_;
- Type* const zero_thirtyone_range_;
+ TypeCache const& type_cache_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
@@ -54,11 +55,6 @@ class SimplifiedLowering final {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Node* SmiTag(Node* node);
- Node* IsTagged(Node* node);
- Node* Untag(Node* node);
- Node* OffsetMinusTagConstant(int32_t offset);
- Node* ComputeIndex(const ElementAccess& access, Node* const key);
Node* StringComparison(Node* node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
diff --git a/chromium/v8/src/compiler/simplified-operator-reducer.cc b/chromium/v8/src/compiler/simplified-operator-reducer.cc
index a7f790563eb..120d7926d54 100644
--- a/chromium/v8/src/compiler/simplified-operator-reducer.cc
+++ b/chromium/v8/src/compiler/simplified-operator-reducer.cc
@@ -15,7 +15,7 @@ namespace internal {
namespace compiler {
SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+ : jsgraph_(jsgraph) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
@@ -89,6 +89,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
default:
break;
}
@@ -96,6 +98,23 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
+Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
+ Node* const left = NodeProperties::GetValueInput(node, 0);
+ Node* const right = NodeProperties::GetValueInput(node, 1);
+ HeapObjectMatcher match_left(left);
+ HeapObjectMatcher match_right(right);
+ if (match_left.HasValue() && match_right.HasValue()) {
+ if (match_left.Value().is_identical_to(match_right.Value())) {
+ return Replace(jsgraph()->TrueConstant());
+ } else {
+ return Replace(jsgraph()->FalseConstant());
+ }
+ }
+ return NoChange();
+}
+
+
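
The reduction folds a comparison of two known heap constants to a boolean constant without emitting any code. Conceptually, with pointer identity standing in for handle identity (a standalone sketch):

#include <cstdio>

int main() {
  static int heap_constant;            // stand-in for a single heap object
  const void* left = &heap_constant;
  const void* right = &heap_constant;  // both matchers "have a value"
  std::printf("%s\n", left == right ? "TrueConstant" : "FalseConstant");
}
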
Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
Node* a) {
DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
diff --git a/chromium/v8/src/compiler/simplified-operator-reducer.h b/chromium/v8/src/compiler/simplified-operator-reducer.h
index c302250d266..979a3d03995 100644
--- a/chromium/v8/src/compiler/simplified-operator-reducer.h
+++ b/chromium/v8/src/compiler/simplified-operator-reducer.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -15,6 +14,7 @@ namespace compiler {
// Forward declarations.
class JSGraph;
class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
class SimplifiedOperatorReducer final : public Reducer {
@@ -25,6 +25,8 @@ class SimplifiedOperatorReducer final : public Reducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceReferenceEqual(Node* node);
+
Reduction Change(Node* node, const Operator* op, Node* a);
Reduction ReplaceFloat64(double value);
Reduction ReplaceInt32(int32_t value);
@@ -37,10 +39,9 @@ class SimplifiedOperatorReducer final : public Reducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
MachineOperatorBuilder* machine() const;
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
- SimplifiedOperatorBuilder simplified_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/chromium/v8/src/compiler/simplified-operator.cc b/chromium/v8/src/compiler/simplified-operator.cc
index 8432d21d952..1eaa287fee5 100644
--- a/chromium/v8/src/compiler/simplified-operator.cc
+++ b/chromium/v8/src/compiler/simplified-operator.cc
@@ -29,24 +29,24 @@ MachineType BufferAccess::machine_type() const {
switch (external_array_type_) {
case kExternalUint8Array:
case kExternalUint8ClampedArray:
- return kMachUint8;
+ return MachineType::Uint8();
case kExternalInt8Array:
- return kMachInt8;
+ return MachineType::Int8();
case kExternalUint16Array:
- return kMachUint16;
+ return MachineType::Uint16();
case kExternalInt16Array:
- return kMachInt16;
+ return MachineType::Int16();
case kExternalUint32Array:
- return kMachUint32;
+ return MachineType::Uint32();
case kExternalInt32Array:
- return kMachInt32;
+ return MachineType::Int32();
case kExternalFloat32Array:
- return kMachFloat32;
+ return MachineType::Float32();
case kExternalFloat64Array:
- return kMachFloat64;
+ return MachineType::Float64();
}
UNREACHABLE();
- return kMachNone;
+ return MachineType::None();
}
@@ -168,11 +168,15 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(NumberMultiply, Operator::kCommutative, 2) \
V(NumberDivide, Operator::kNoProperties, 2) \
V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2) \
V(NumberShiftLeft, Operator::kNoProperties, 2) \
V(NumberShiftRight, Operator::kNoProperties, 2) \
V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
V(NumberToInt32, Operator::kNoProperties, 1) \
V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
@@ -182,6 +186,7 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
V(ChangeBoolToBit, Operator::kNoProperties, 1) \
V(ChangeBitToBool, Operator::kNoProperties, 1) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1) \
V(ObjectIsSmi, Operator::kNoProperties, 1)
#define NO_THROW_OP_LIST(V) \
diff --git a/chromium/v8/src/compiler/simplified-operator.h b/chromium/v8/src/compiler/simplified-operator.h
index 53b6b044a1a..3821a6de57d 100644
--- a/chromium/v8/src/compiler/simplified-operator.h
+++ b/chromium/v8/src/compiler/simplified-operator.h
@@ -7,8 +7,8 @@
#include <iosfwd>
-#include "src/compiler/machine-type.h"
#include "src/handles.h"
+#include "src/machine-type.h"
#include "src/objects.h"
namespace v8 {
@@ -125,7 +125,7 @@ ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
// - Bool: a tagged pointer to either the canonical JS #false or
// the canonical JS #true object
// - Bit: an untagged integer 0 or 1, but word-sized
-class SimplifiedOperatorBuilder final {
+class SimplifiedOperatorBuilder final : public ZoneObject {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
@@ -140,11 +140,15 @@ class SimplifiedOperatorBuilder final {
const Operator* NumberMultiply();
const Operator* NumberDivide();
const Operator* NumberModulus();
+ const Operator* NumberBitwiseOr();
+ const Operator* NumberBitwiseXor();
+ const Operator* NumberBitwiseAnd();
const Operator* NumberShiftLeft();
const Operator* NumberShiftRight();
const Operator* NumberShiftRightLogical();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
+ const Operator* NumberIsHoleNaN();
const Operator* PlainPrimitiveToNumber();
@@ -163,6 +167,7 @@ class SimplifiedOperatorBuilder final {
const Operator* ChangeBoolToBit();
const Operator* ChangeBitToBool();
+ const Operator* ObjectIsNumber();
const Operator* ObjectIsSmi();
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/chromium/v8/src/compiler/state-values-utils.cc b/chromium/v8/src/compiler/state-values-utils.cc
index 1c23c8ab88c..77cc227038b 100644
--- a/chromium/v8/src/compiler/state-values-utils.cc
+++ b/chromium/v8/src/compiler/state-values-utils.cc
@@ -271,7 +271,7 @@ Node* StateValuesAccess::iterator::node() {
MachineType StateValuesAccess::iterator::type() {
Node* state = Top()->node;
if (state->opcode() == IrOpcode::kStateValues) {
- return kMachAnyTagged;
+ return MachineType::AnyTagged();
} else {
DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
const ZoneVector<MachineType>* types =
diff --git a/chromium/v8/src/compiler/type-hint-analyzer.cc b/chromium/v8/src/compiler/type-hint-analyzer.cc
new file mode 100644
index 00000000000..42c4627b67b
--- /dev/null
+++ b/chromium/v8/src/compiler/type-hint-analyzer.cc
@@ -0,0 +1,98 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/type-hint-analyzer.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/type-hints.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// TODO(bmeurer): This detour via types is ugly.
+BinaryOperationHints::Hint ToHint(Type* type) {
+ if (type->Is(Type::None())) return BinaryOperationHints::kNone;
+ if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
+ if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
+ if (type->Is(Type::Number())) return BinaryOperationHints::kNumber;
+ if (type->Is(Type::String())) return BinaryOperationHints::kString;
+ return BinaryOperationHints::kAny;
+}
+
+} // namespace
+
+
+bool TypeHintAnalysis::GetBinaryOperationHints(
+ TypeFeedbackId id, BinaryOperationHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
+ *hints = BinaryOperationHints(ToHint(state.GetLeftType()),
+ ToHint(state.GetRightType()),
+ ToHint(state.GetResultType()));
+ return true;
+}
+
+
+bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
+ ToBooleanHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
+ ToBooleanStub stub(code->GetIsolate(), code->extra_ic_state());
+// TODO(bmeurer): Replace ToBooleanStub::Types with ToBooleanHints.
+#define ASSERT_COMPATIBLE(NAME, Name) \
+ STATIC_ASSERT(1 << ToBooleanStub::NAME == \
+ static_cast<int>(ToBooleanHint::k##Name))
+ ASSERT_COMPATIBLE(UNDEFINED, Undefined);
+ ASSERT_COMPATIBLE(BOOLEAN, Boolean);
+ ASSERT_COMPATIBLE(NULL_TYPE, Null);
+ ASSERT_COMPATIBLE(SMI, SmallInteger);
+ ASSERT_COMPATIBLE(SPEC_OBJECT, Receiver);
+ ASSERT_COMPATIBLE(STRING, String);
+ ASSERT_COMPATIBLE(SYMBOL, Symbol);
+ ASSERT_COMPATIBLE(HEAP_NUMBER, HeapNumber);
+ ASSERT_COMPATIBLE(SIMD_VALUE, SimdValue);
+#undef ASSERT_COMPATIBLE
+ *hints = ToBooleanHints(stub.types().ToIntegral());
+ return true;
+}
+
+
+TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
+ DisallowHeapAllocation no_gc;
+ TypeHintAnalysis::Infos infos(zone());
+ Isolate* const isolate = code->GetIsolate();
+ int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ switch (target->kind()) {
+ case Code::BINARY_OP_IC:
+ case Code::TO_BOOLEAN_IC: {
+ // Add this feedback to the {infos}.
+ TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
+ infos.insert(std::make_pair(id, handle(target, isolate)));
+ break;
+ }
+ default:
+ // Ignore the remaining code objects.
+ break;
+ }
+ }
+ return new (zone()) TypeHintAnalysis(infos);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
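
A hypothetical call site for the analyzer; zone, code and id are assumed to come from the surrounding compilation pipeline, and only the members declared in the new header are used:

TypeHintAnalyzer analyzer(zone);
TypeHintAnalysis* analysis = analyzer.Analyze(code);
BinaryOperationHints hints;
if (analysis->GetBinaryOperationHints(id, &hints)) {
  // Recorded feedback: hints.left(), hints.right(), hints.result().
}
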
diff --git a/chromium/v8/src/compiler/type-hint-analyzer.h b/chromium/v8/src/compiler/type-hint-analyzer.h
new file mode 100644
index 00000000000..1a799056335
--- /dev/null
+++ b/chromium/v8/src/compiler/type-hint-analyzer.h
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
+#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
+
+#include "src/compiler/type-hints.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The result of analyzing type hints.
+class TypeHintAnalysis final : public ZoneObject {
+ public:
+ typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
+
+ explicit TypeHintAnalysis(Infos const& infos) : infos_(infos) {}
+
+ bool GetBinaryOperationHints(TypeFeedbackId id,
+ BinaryOperationHints* hints) const;
+ bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
+
+ private:
+ Infos const infos_;
+};
+
+
+// The class that performs type hint analysis on the fullcodegen code object.
+class TypeHintAnalyzer final {
+ public:
+ explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
+
+ TypeHintAnalysis* Analyze(Handle<Code> code);
+
+ private:
+ Zone* zone() const { return zone_; }
+
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_HINT_ANALYZER_H_
diff --git a/chromium/v8/src/compiler/type-hints.cc b/chromium/v8/src/compiler/type-hints.cc
new file mode 100644
index 00000000000..06abad6380e
--- /dev/null
+++ b/chromium/v8/src/compiler/type-hints.cc
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/type-hints.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHints::Hint hint) {
+ switch (hint) {
+ case BinaryOperationHints::kNone:
+ return os << "None";
+ case BinaryOperationHints::kSignedSmall:
+ return os << "SignedSmall";
+ case BinaryOperationHints::kSigned32:
+ return os << "Signed32";
+ case BinaryOperationHints::kNumber:
+ return os << "Number";
+ case BinaryOperationHints::kString:
+ return os << "String";
+ case BinaryOperationHints::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
+ return os << hints.left() << "*" << hints.right() << "->" << hints.result();
+}
+
+
+std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
+ switch (hint) {
+ case ToBooleanHint::kNone:
+ return os << "None";
+ case ToBooleanHint::kUndefined:
+ return os << "Undefined";
+ case ToBooleanHint::kBoolean:
+ return os << "Boolean";
+ case ToBooleanHint::kNull:
+ return os << "Null";
+ case ToBooleanHint::kSmallInteger:
+ return os << "SmallInteger";
+ case ToBooleanHint::kReceiver:
+ return os << "Receiver";
+ case ToBooleanHint::kString:
+ return os << "String";
+ case ToBooleanHint::kSymbol:
+ return os << "Symbol";
+ case ToBooleanHint::kHeapNumber:
+ return os << "HeapNumber";
+ case ToBooleanHint::kSimdValue:
+ return os << "SimdValue";
+ case ToBooleanHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
+ if (hints == ToBooleanHint::kAny) return os << "Any";
+ if (hints == ToBooleanHint::kNone) return os << "None";
+ bool first = true;
+ for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * CHAR_BIT; ++i) {
+ ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
+ if (hints & hint) {
+ if (!first) os << "|";
+ first = false;
+ os << hint;
+ }
+ }
+ return os;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/type-hints.h b/chromium/v8/src/compiler/type-hints.h
new file mode 100644
index 00000000000..f1cc64036c1
--- /dev/null
+++ b/chromium/v8/src/compiler/type-hints.h
@@ -0,0 +1,84 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_HINTS_H_
+#define V8_COMPILER_TYPE_HINTS_H_
+
+#include "src/base/flags.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Type hints for a binary operation.
+class BinaryOperationHints final {
+ public:
+ enum Hint { kNone, kSignedSmall, kSigned32, kNumber, kString, kAny };
+
+ BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
+ BinaryOperationHints(Hint left, Hint right, Hint result)
+ : bit_field_(LeftField::encode(left) | RightField::encode(right) |
+ ResultField::encode(result)) {}
+
+ static BinaryOperationHints Any() {
+ return BinaryOperationHints(kAny, kAny, kAny);
+ }
+
+ Hint left() const { return LeftField::decode(bit_field_); }
+ Hint right() const { return RightField::decode(bit_field_); }
+ Hint result() const { return ResultField::decode(bit_field_); }
+
+ bool operator==(BinaryOperationHints const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(BinaryOperationHints const& that) const {
+ return !(*this == that);
+ }
+
+ friend size_t hash_value(BinaryOperationHints const& hints) {
+ return hints.bit_field_;
+ }
+
+ private:
+ typedef BitField<Hint, 0, 3> LeftField;
+ typedef BitField<Hint, 3, 3> RightField;
+ typedef BitField<Hint, 6, 3> ResultField;
+
+ uint32_t bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
+std::ostream& operator<<(std::ostream&, BinaryOperationHints);
+
+
+// Type hints for the ToBoolean type conversion.
+enum class ToBooleanHint : uint16_t {
+ kNone = 0u,
+ kUndefined = 1u << 0,
+ kBoolean = 1u << 1,
+ kNull = 1u << 2,
+ kSmallInteger = 1u << 3,
+ kReceiver = 1u << 4,
+ kString = 1u << 5,
+ kSymbol = 1u << 6,
+ kHeapNumber = 1u << 7,
+ kSimdValue = 1u << 8,
+ kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
+ kSymbol | kHeapNumber | kSimdValue
+};
+
+std::ostream& operator<<(std::ostream&, ToBooleanHint);
+
+typedef base::Flags<ToBooleanHint, uint16_t> ToBooleanHints;
+
+std::ostream& operator<<(std::ostream&, ToBooleanHints);
+
+DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_HINTS_H_
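
A hypothetical use of the flag set above; DEFINE_OPERATORS_FOR_FLAGS supplies operator|, and base::Flags converts to its mask type so the & test works in a condition:

ToBooleanHints hints = ToBooleanHint::kUndefined | ToBooleanHint::kNull;
if (hints & ToBooleanHint::kNull) {
  // the Null hint was recorded for this site
}
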
diff --git a/chromium/v8/src/compiler/typer.cc b/chromium/v8/src/compiler/typer.cc
index 4707aef1e51..c1f816d34bc 100644
--- a/chromium/v8/src/compiler/typer.cc
+++ b/chromium/v8/src/compiler/typer.cc
@@ -5,8 +5,8 @@
#include "src/compiler/typer.h"
#include "src/base/flags.h"
-#include "src/base/lazy-instance.h"
#include "src/bootstrapper.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -14,19 +14,12 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
-#include "src/zone-type-cache.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-
class Typer::Decorator final : public GraphDecorator {
public:
explicit Decorator(Typer* typer) : typer_(typer) {}
@@ -37,12 +30,16 @@ class Typer::Decorator final : public GraphDecorator {
};
-Typer::Typer(Isolate* isolate, Graph* graph, Type::FunctionType* function_type)
+Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
+ CompilationDependencies* dependencies,
+ Type::FunctionType* function_type)
: isolate_(isolate),
graph_(graph),
+ flags_(flags),
+ dependencies_(dependencies),
function_type_(function_type),
decorator_(nullptr),
- cache_(kCache.Get()) {
+ cache_(TypeCache::Get()) {
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
@@ -57,12 +54,15 @@ Typer::Typer(Isolate* isolate, Graph* graph, Type::FunctionType* function_type)
singleton_false_ = Type::Constant(factory->false_value(), zone);
singleton_true_ = Type::Constant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
falsish_ = Type::Union(
Type::Undetectable(),
- Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
- Type::NullOrUndefined(), zone),
+ Type::Union(
+ Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+ Type::NullOrUndefined(), zone),
+ singleton_the_hole_, zone),
zone);
truish_ = Type::Union(
singleton_true_,
@@ -204,6 +204,10 @@ class Typer::Visitor : public Reducer {
Zone* zone() { return typer_->zone(); }
Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
+ Typer::Flags flags() const { return typer_->flags(); }
+ CompilationDependencies* dependencies() const {
+ return typer_->dependencies();
+ }
void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
bool IsWeakened(NodeId node_id) {
@@ -230,7 +234,11 @@ class Typer::Visitor : public Reducer {
static Type* ToPrimitive(Type*, Typer*);
static Type* ToBoolean(Type*, Typer*);
+ static Type* ToInteger(Type*, Typer*);
+ static Type* ToLength(Type*, Typer*);
+ static Type* ToName(Type*, Typer*);
static Type* ToNumber(Type*, Typer*);
+ static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
static Type* NumberToInt32(Type*, Typer*);
static Type* NumberToUint32(Type*, Typer*);
@@ -247,11 +255,12 @@ class Typer::Visitor : public Reducer {
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- static Type* JSUnaryNotTyper(Type*, Typer*);
static Type* JSTypeOfTyper(Type*, Typer*);
static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
+ static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+
Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
// Widen the type of a previously typed node.
@@ -261,10 +270,10 @@ class Typer::Visitor : public Reducer {
current = Weaken(node, current, previous);
}
- DCHECK(previous->Is(current));
+ CHECK(previous->Is(current));
NodeProperties::SetType(node, current);
- if (!(previous->Is(current) && current->Is(previous))) {
+ if (!current->Is(previous)) {
// If something changed, revisit all uses.
return Changed(node);
}
@@ -399,6 +408,39 @@ Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
}
+// static
+Type* Typer::Visitor::ToInteger(Type* type, Typer* t) {
+ // ES6 section 7.1.4 ToInteger ( argument )
+ type = ToNumber(type, t);
+ if (type->Is(t->cache_.kIntegerOrMinusZero)) return type;
+ return t->cache_.kIntegerOrMinusZero;
+}
+
+
+// static
+Type* Typer::Visitor::ToLength(Type* type, Typer* t) {
+ // ES6 section 7.1.15 ToLength ( argument )
+ type = ToInteger(type, t);
+ double min = type->Min();
+ double max = type->Max();
+ if (min <= 0.0) min = 0.0;
+ if (max > kMaxSafeInteger) max = kMaxSafeInteger;
+ if (max <= min) max = min;
+ return Type::Range(min, max, t->zone());
+}
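
[Editorial note] The ToLength typing above is plain interval arithmetic over the type's numeric bounds. A minimal standalone sketch of the same clamp (plain C++; the Bounds struct is a hypothetical stand-in for V8's range types, not its API):

#include <cstdio>

// Hypothetical stand-in for a range type's bounds; kMaxSafeIntegerValue
// is 2^53 - 1, matching the ES6 ToLength clamp above.
constexpr double kMaxSafeIntegerValue = 9007199254740991.0;

struct Bounds { double min, max; };

Bounds ToLengthBounds(Bounds b) {
  if (b.min <= 0.0) b.min = 0.0;                     // lengths are >= 0
  if (b.max > kMaxSafeIntegerValue) b.max = kMaxSafeIntegerValue;
  if (b.max <= b.min) b.max = b.min;                 // keep min <= max
  return b;
}

int main() {
  Bounds b = ToLengthBounds({-5.0, 1e20});
  std::printf("[%.0f, %.0f]\n", b.min, b.max);  // [0, 9007199254740991]
}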
+
+
+// static
+Type* Typer::Visitor::ToName(Type* type, Typer* t) {
+ // ES6 section 7.1.14 ToPropertyKey ( argument )
+ type = ToPrimitive(type, t);
+ if (type->Is(Type::Name())) return type;
+ if (type->Maybe(Type::Symbol())) return Type::Name();
+ return ToString(type, t);
+}
+
+
+// static
Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
if (type->Is(Type::Number())) return type;
if (type->Is(Type::NullOrUndefined())) {
@@ -421,7 +463,20 @@ Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
}
+// static
+Type* Typer::Visitor::ToObject(Type* type, Typer* t) {
+ // ES6 section 7.1.13 ToObject ( argument )
+ if (type->Is(Type::Receiver())) return type;
+ if (type->Is(Type::Primitive())) return Type::OtherObject();
+ if (!type->Maybe(Type::Undetectable())) return Type::DetectableReceiver();
+ return Type::Receiver();
+}
+
+
+// static
Type* Typer::Visitor::ToString(Type* type, Typer* t) {
+ // ES6 section 7.1.12 ToString ( argument )
+ type = ToPrimitive(type, t);
if (type->Is(Type::String())) return type;
return Type::String();
}
@@ -485,7 +540,7 @@ Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeInt32Constant(Node* node) {
double number = OpParameter<int32_t>(node);
return Type::Intersect(Type::Range(number, number, zone()),
- Type::UntaggedSigned32(), zone());
+ Type::UntaggedIntegral32(), zone());
}
@@ -554,13 +609,20 @@ Type* Typer::Visitor::TypeEffectSet(Node* node) {
}
-Type* Typer::Visitor::TypeValueEffect(Node* node) {
+Type* Typer::Visitor::TypeGuard(Node* node) {
+ Type* input_type = Operand(node, 0);
+ Type* guard_type = OpParameter<Type*>(node);
+ return Type::Intersect(input_type, guard_type, zone());
+}
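
[Editorial note] A Guard node refines what is statically known about its input: the result type is the intersection of the input's type with the type annotation carried on the operator, so typing can only narrow. A small sketch of that narrowing over intervals (Interval is a hypothetical stand-in, not V8's type lattice):

#include <algorithm>
#include <cstdio>

struct Interval { double min, max; };  // hypothetical stand-in for a type

// Intersection can only shrink the interval, mirroring the
// Type::Intersect call in TypeGuard above.
Interval Intersect(Interval a, Interval b) {
  return {std::max(a.min, b.min), std::min(a.max, b.max)};
}

int main() {
  Interval input = {-100.0, 100.0};
  Interval guard = {0.0, 255.0};
  Interval narrowed = Intersect(input, guard);
  std::printf("[%.0f, %.0f]\n", narrowed.min, narrowed.max);  // [0, 100]
}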
+
+
+Type* Typer::Visitor::TypeBeginRegion(Node* node) {
UNREACHABLE();
return nullptr;
}
-Type* Typer::Visitor::TypeFinish(Node* node) { return Operand(node, 0); }
+Type* Typer::Visitor::TypeFinishRegion(Node* node) { return Operand(node, 0); }
Type* Typer::Visitor::TypeFrameState(Node* node) {
@@ -574,6 +636,11 @@ Type* Typer::Visitor::TypeStateValues(Node* node) {
}
+Type* Typer::Visitor::TypeObjectState(Node* node) {
+ return Type::Internal(zone());
+}
+
+
Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
return Type::Internal(zone());
}
@@ -637,6 +704,10 @@ Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
return t->singleton_false_;
}
+ if ((lhs->Is(t->singleton_the_hole_) || rhs->Is(t->singleton_the_hole_)) &&
+ !lhs->Maybe(rhs)) {
+ return t->singleton_false_;
+ }
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not NaN due to the earlier check.
@@ -985,7 +1056,7 @@ Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
(rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
(rhs->Maybe(t->cache_.kSingletonZero) &&
(lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->cache_.kWeakint; // Giving up.
+ if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
(rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
Type* range =
@@ -1076,22 +1147,14 @@ Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
// JS unary operators.
-Type* Typer::Visitor::JSUnaryNotTyper(Type* type, Typer* t) {
- return Invert(ToBoolean(type, t), t);
-}
-
-
-Type* Typer::Visitor::TypeJSUnaryNot(Node* node) {
- return TypeUnaryOp(node, JSUnaryNotTyper);
-}
-
-
Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
Factory* const f = t->isolate()->factory();
if (type->Is(Type::Boolean())) {
return Type::Constant(f->boolean_string(), t->zone());
} else if (type->Is(Type::Number())) {
return Type::Constant(f->number_string(), t->zone());
+ } else if (type->Is(Type::String())) {
+ return Type::Constant(f->string_string(), t->zone());
} else if (type->Is(Type::Symbol())) {
return Type::Constant(f->symbol_string(), t->zone());
} else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
@@ -1099,6 +1162,11 @@ Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
return Type::Constant(f->undefined_string(), t->zone());
} else if (type->Is(Type::Null())) {
return Type::Constant(f->object_string(), t->zone());
+ } else if (type->Is(Type::Function())) {
+ return Type::Constant(f->function_string(), t->zone());
+ } else if (type->IsConstant()) {
+ return Type::Constant(
+ Object::TypeOf(t->isolate(), type->AsConstant()->Value()), t->zone());
}
return Type::InternalizedString();
}
@@ -1127,10 +1195,14 @@ Type* Typer::Visitor::TypeJSToString(Node* node) {
}
-Type* Typer::Visitor::TypeJSToName(Node* node) { return Type::Name(); }
+Type* Typer::Visitor::TypeJSToName(Node* node) {
+ return TypeUnaryOp(node, ToName);
+}
-Type* Typer::Visitor::TypeJSToObject(Node* node) { return Type::Receiver(); }
+Type* Typer::Visitor::TypeJSToObject(Node* node) {
+ return TypeUnaryOp(node, ToObject);
+}
// JS object operators.
@@ -1144,13 +1216,23 @@ Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
}
+Type* Typer::Visitor::TypeJSCreateArray(Node* node) {
+ return Type::OtherObject();
+}
+
+
Type* Typer::Visitor::TypeJSCreateClosure(Node* node) {
+ return Type::Function();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
return Type::OtherObject();
}
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
- return Type::None(), Type::OtherObject();
+ return Type::OtherObject();
}
@@ -1159,6 +1241,11 @@ Type* Typer::Visitor::TypeJSCreateLiteralObject(Node* node) {
}
+Type* Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
+ return Type::OtherObject();
+}
+
+
Type* Typer::Visitor::JSLoadPropertyTyper(Type* object, Type* name, Typer* t) {
// TODO(rossberg): Use range types and sized array types to filter undefined.
if (object->IsArray() && name->Is(Type::Integral32())) {
@@ -1174,7 +1261,37 @@ Type* Typer::Visitor::TypeJSLoadProperty(Node* node) {
}
-Type* Typer::Visitor::TypeJSLoadNamed(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
+ Factory* const f = isolate()->factory();
+ Handle<Name> name = NamedAccessOf(node->op()).name();
+ if (name.is_identical_to(f->prototype_string())) {
+ Type* receiver = Operand(node, 0);
+ if (receiver->Is(Type::None())) return Type::None();
+ if (receiver->IsConstant() &&
+ receiver->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(receiver->AsConstant()->Value());
+ if (function->has_prototype()) {
+ // We need to add a code dependency on the initial map of the {function}
+ // in order to be notified about changes to "prototype" of {function},
+ // so we can only infer a constant type if deoptimization is enabled.
+ if (flags() & kDeoptimizationEnabled) {
+ JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ return Type::Constant(handle(initial_map->prototype(), isolate()),
+ zone());
+ }
+ }
+ } else if (receiver->IsClass() &&
+ receiver->AsClass()->Map()->IsJSFunctionMap()) {
+ Handle<Map> map = receiver->AsClass()->Map();
+ return map->has_non_instance_prototype() ? Type::Primitive(zone())
+ : Type::Receiver(zone());
+ }
+ }
+ return Type::Any();
+}
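
[Editorial note] The block above folds a load of "prototype" from a constant JSFunction to a constant type, but only when deoptimization is enabled, because the fold must be guarded by a registered dependency on the function's initial map. A hedged standalone model of that deopt-guarded-fold pattern (all names hypothetical, not V8 API):

#include <functional>
#include <optional>
#include <vector>

// Hypothetical dependency registry: optimized code records the facts it
// relied on so it can be discarded when one of them is invalidated.
struct DependencyRegistry {
  std::vector<std::function<void()>> invalidation_hooks;
  void Assume(std::function<void()> hook) {
    invalidation_hooks.push_back(std::move(hook));
  }
};

// Fold a cached prototype to a constant only if the fold can be guarded.
std::optional<int> FoldPrototype(bool deopt_enabled, int cached_prototype,
                                 DependencyRegistry* deps) {
  if (!deopt_enabled) return std::nullopt;  // no guard possible: give up
  deps->Assume([] { /* discard the optimized code here */ });
  return cached_prototype;
}

int main() {
  DependencyRegistry deps;
  return FoldPrototype(true, 42, &deps).value_or(-1) == 42 ? 0 : 1;
}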
Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) { return Type::Any(); }
@@ -1298,6 +1415,10 @@ Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
+ ContextAccess const& access = ContextAccessOf(node->op());
+ if (access.index() == Context::EXTENSION_INDEX) {
+ return Type::TaggedPointer();
+ }
// Since contexts are mutable, we just return the top.
return Type::Any();
}
@@ -1309,14 +1430,7 @@ Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
}
-Type* Typer::Visitor::TypeJSLoadDynamicGlobal(Node* node) {
- return Type::Any();
-}
-
-
-Type* Typer::Visitor::TypeJSLoadDynamicContext(Node* node) {
- return Type::Any();
-}
+Type* Typer::Visitor::TypeJSLoadDynamic(Node* node) { return Type::Any(); }
Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
@@ -1373,12 +1487,64 @@ Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
- return fun->IsFunction() ? fun->AsFunction()->Result() : Type::Any();
+ if (fun->IsFunction()) {
+ return fun->AsFunction()->Result();
+ }
+ if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(fun->AsConstant()->Value());
+ if (function->shared()->HasBuiltinFunctionId()) {
+ switch (function->shared()->builtin_function_id()) {
+ case kMathRandom:
+ return Type::OrderedNumber();
+ case kMathFloor:
+ case kMathRound:
+ case kMathCeil:
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+ // Unary math functions.
+ case kMathAbs:
+ case kMathLog:
+ case kMathExp:
+ case kMathSqrt:
+ case kMathCos:
+ case kMathSin:
+ case kMathTan:
+ case kMathAcos:
+ case kMathAsin:
+ case kMathAtan:
+ case kMathFround:
+ return Type::Number();
+ // Binary math functions.
+ case kMathAtan2:
+ case kMathPow:
+ case kMathMax:
+ case kMathMin:
+ return Type::Number();
+ case kMathImul:
+ return Type::Signed32();
+ case kMathClz32:
+ return t->cache_.kZeroToThirtyTwo;
+ // String functions.
+ case kStringCharAt:
+ case kStringFromCharCode:
+ return Type::String();
+ // Array functions.
+ case kArrayIndexOf:
+ case kArrayLastIndexOf:
+ return Type::Number();
+ default:
+ break;
+ }
+ }
+ }
+ return Type::Any();
}
Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
- return TypeUnaryOp(node, JSCallFunctionTyper); // We ignore argument types.
+  // TODO(bmeurer): We could infer better types if we didn't ignore the
+  // argument types for the JSCallFunctionTyper above.
+ return TypeUnaryOp(node, JSCallFunctionTyper);
}
@@ -1391,12 +1557,12 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsMinusZero:
case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
+ case Runtime::kInlineIsJSReceiver:
return Type::Boolean(zone());
case Runtime::kInlineDoubleLo:
case Runtime::kInlineDoubleHi:
return Type::Signed32();
case Runtime::kInlineConstructDouble:
- case Runtime::kInlineDateField:
case Runtime::kInlineMathFloor:
case Runtime::kInlineMathSqrt:
case Runtime::kInlineMathAcos:
@@ -1406,10 +1572,29 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return Type::Number();
case Runtime::kInlineMathClz32:
return Type::Range(0, 32, zone());
- case Runtime::kInlineStringGetLength:
- return Type::Range(0, String::kMaxLength, zone());
+ case Runtime::kInlineCreateIterResultObject:
+ case Runtime::kInlineRegExpConstructResult:
+ return Type::OtherObject();
+ case Runtime::kInlineSubString:
+ return Type::String();
+ case Runtime::kInlineToInteger:
+ return TypeUnaryOp(node, ToInteger);
+ case Runtime::kInlineToLength:
+ return TypeUnaryOp(node, ToLength);
+ case Runtime::kInlineToName:
+ return TypeUnaryOp(node, ToName);
+ case Runtime::kInlineToNumber:
+ return TypeUnaryOp(node, ToNumber);
case Runtime::kInlineToObject:
- return Type::Receiver();
+ return TypeUnaryOp(node, ToObject);
+ case Runtime::kInlineToPrimitive:
+ case Runtime::kInlineToPrimitive_Number:
+ case Runtime::kInlineToPrimitive_String:
+ return TypeUnaryOp(node, ToPrimitive);
+ case Runtime::kInlineToString:
+ return TypeUnaryOp(node, ToString);
+ case Runtime::kHasInPrototypeChain:
+ return Type::Boolean();
default:
break;
}
@@ -1417,6 +1602,11 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
}
+Type* Typer::Visitor::TypeJSConvertReceiver(Node* node) {
+ return Type::Receiver();
+}
+
+
Type* Typer::Visitor::TypeJSForInNext(Node* node) {
return Type::Union(Type::Name(), Type::Undefined(), zone());
}
@@ -1439,6 +1629,15 @@ Type* Typer::Visitor::TypeJSForInStep(Node* node) {
}
+Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
+
+
+Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
@@ -1493,6 +1692,21 @@ Type* Typer::Visitor::TypeNumberModulus(Node* node) {
}
+Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
+Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
+ return Type::Signed32(zone());
+}
+
+
Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
return Type::Signed32(zone());
}
@@ -1518,13 +1732,27 @@ Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
}
+Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
+// static
+Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
+ if (lhs->IsConstant() && rhs->Is(lhs)) {
+ return t->singleton_true_;
+ }
+ return Type::Boolean();
+}
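
[Editorial note] ReferenceEqualTyper folds to the true singleton only when the left type is inhabited by exactly one value and the right type is contained in it; then the two references cannot differ. The same argument with finite sets standing in for types (not V8 code):

#include <algorithm>
#include <cstdio>
#include <set>

// Sets of values stand in for types. If lhs has exactly one inhabitant
// and every inhabitant of rhs is in lhs, the references must be equal.
bool MustBeReferenceEqual(const std::set<int>& lhs, const std::set<int>& rhs) {
  return lhs.size() == 1 && !rhs.empty() &&
         std::includes(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}

int main() {
  std::printf("%d\n", MustBeReferenceEqual({42}, {42}));   // 1: must be true
  std::printf("%d\n", MustBeReferenceEqual({1, 2}, {1}));  // 0: unknown
}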
+
+
Type* Typer::Visitor::TypeReferenceEqual(Node* node) {
- return Type::Boolean(zone());
+ return TypeBinaryOp(node, ReferenceEqualTyper);
}
@@ -1556,14 +1784,14 @@ Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg->Is(Type::Signed32()));
- return ChangeRepresentation(arg, Type::UntaggedSigned32(), zone());
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
- return ChangeRepresentation(arg, Type::UntaggedUnsigned32(), zone());
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
@@ -1614,8 +1842,53 @@ Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
+namespace {
+
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+ if (object_type->IsConstant() &&
+ object_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<Map> object_map(
+ Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+ if (object_map->is_stable()) return object_map;
+ } else if (object_type->IsClass()) {
+ Handle<Map> object_map = object_type->AsClass()->Map();
+ if (object_map->is_stable()) return object_map;
+ }
+ return MaybeHandle<Map>();
+}
+
+} // namespace
+
+
Type* Typer::Visitor::TypeLoadField(Node* node) {
- return FieldAccessOf(node->op()).type;
+ FieldAccess const& access = FieldAccessOf(node->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ // The type of LoadField[Map](o) is Constant(map) if map is stable and
+ // either
+ // (a) o has type Constant(object) and map == object->map, or
+ // (b) o has type Class(map),
+ // and either
+ // (1) map cannot transition further, or
+ // (2) deoptimization is enabled and we can add a code dependency on the
+ // stability of map (to guard the Constant type information).
+ Type* const object = Operand(node, 0);
+ if (object->Is(Type::None())) return Type::None();
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object).ToHandle(&object_map)) {
+ if (object_map->CanTransition()) {
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeMapStable(object_map);
+ } else {
+ return access.type;
+ }
+ }
+ Type* object_map_type = Type::Constant(object_map, zone());
+ DCHECK(object_map_type->Is(access.type));
+ return object_map_type;
+ }
+ }
+ return access.type;
}
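
[Editorial note] The map-load rule above reads as a small decision procedure: a stable map becomes a Constant type, a stable-but-transitionable map additionally needs a stability dependency (hence deoptimization support), and everything else keeps the declared field type. A simplified standalone model, with plain booleans standing in for map state (hypothetical names throughout):

#include <optional>

struct MapInfo { bool stable; bool can_transition; };  // hypothetical

enum class Result { kConstantMap, kGenericFieldType };

Result TypeMapLoad(std::optional<MapInfo> map, bool deopt_enabled,
                   bool* needs_stability_dependency) {
  *needs_stability_dependency = false;
  if (!map || !map->stable) return Result::kGenericFieldType;
  if (map->can_transition) {
    if (!deopt_enabled) return Result::kGenericFieldType;
    *needs_stability_dependency = true;  // guard the constant type
  }
  return Result::kConstantMap;
}

int main() {
  bool needs_dep = false;
  Result r = TypeMapLoad(MapInfo{true, true}, /*deopt_enabled=*/true,
                         &needs_dep);
  return (r == Result::kConstantMap && needs_dep) ? 0 : 1;
}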
@@ -1657,7 +1930,22 @@ Type* Typer::Visitor::TypeStoreElement(Node* node) {
}
-Type* Typer::Visitor::TypeObjectIsSmi(Node* node) { return Type::Boolean(); }
+Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
+ Type* arg = Operand(node, 0);
+ if (arg->Is(Type::None())) return Type::None();
+ if (arg->Is(Type::Number())) return typer_->singleton_true_;
+ if (!arg->Maybe(Type::Number())) return typer_->singleton_false_;
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
+ Type* arg = Operand(node, 0);
+ if (arg->Is(Type::None())) return Type::None();
+ if (arg->Is(Type::TaggedSigned())) return typer_->singleton_true_;
+ if (arg->Is(Type::TaggedPointer())) return typer_->singleton_false_;
+ return Type::Boolean();
+}
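
[Editorial note] Both ObjectIs predicates above fold to a singleton when the input type already decides the answer and fall back to Boolean otherwise. The same three-valued shape in isolation (the boolean inputs model the TaggedSigned/TaggedPointer subtype checks; not V8 code):

#include <cstdio>

enum class Answer { kTrue, kFalse, kUnknown };

Answer ObjectIsSmi(bool always_smi, bool never_smi) {
  if (always_smi) return Answer::kTrue;   // fold to singleton_true_
  if (never_smi) return Answer::kFalse;   // fold to singleton_false_
  return Answer::kUnknown;                // fall back to Type::Boolean()
}

int main() {
  std::printf("%d\n", static_cast<int>(ObjectIsSmi(false, true)));  // 1
}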
// Machine operators.
@@ -1698,6 +1986,14 @@ Type* Typer::Visitor::TypeWord32Equal(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
+Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
+
+
+Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
+ return Type::Integral32();
+}
+
+
Type* Typer::Visitor::TypeWord64And(Node* node) { return Type::Internal(); }
@@ -1719,6 +2015,15 @@ Type* Typer::Visitor::TypeWord64Sar(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeWord64Ror(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeWord64Clz(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
+
+
+Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
+
+
Type* Typer::Visitor::TypeWord64Equal(Node* node) { return Type::Boolean(); }
@@ -1780,9 +2085,19 @@ Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeInt64AddWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
@@ -1820,16 +2135,36 @@ Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
- return Type::Intersect(Type::Unsigned32(), Type::UntaggedUnsigned32(),
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
zone());
}
+Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat64ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat32ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat64ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
}
@@ -1856,12 +2191,32 @@ Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundUint64ToFloat64(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
}
@@ -1968,12 +2323,36 @@ Type* Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
}
+Type* Typer::Visitor::TypeFloat32RoundDown(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat64RoundDown(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Type::Number();
}
+Type* Typer::Visitor::TypeFloat32RoundUp(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundUp(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32RoundTruncate(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Type::Number();
@@ -1986,6 +2365,18 @@ Type* Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
}
+Type* Typer::Visitor::TypeFloat32RoundTiesEven(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundTiesEven(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
return Type::Signed32();
}
@@ -2029,64 +2420,7 @@ Type* Typer::Visitor::TypeCheckedStore(Node* node) {
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
- if (value->IsJSFunction()) {
- if (JSFunction::cast(*value)->shared()->HasBuiltinFunctionId()) {
- switch (JSFunction::cast(*value)->shared()->builtin_function_id()) {
- case kMathRandom:
- return typer_->cache_.kRandomFunc0;
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- return typer_->cache_.kWeakintFunc1;
- // Unary math functions.
- case kMathAbs: // TODO(rossberg): can't express overloading
- case kMathLog:
- case kMathExp:
- case kMathSqrt:
- case kMathCos:
- case kMathSin:
- case kMathTan:
- case kMathAcos:
- case kMathAsin:
- case kMathAtan:
- case kMathFround:
- return typer_->cache_.kNumberFunc1;
- // Binary math functions.
- case kMathAtan2:
- case kMathPow:
- case kMathMax:
- case kMathMin:
- return typer_->cache_.kNumberFunc2;
- case kMathImul:
- return typer_->cache_.kImulFunc;
- case kMathClz32:
- return typer_->cache_.kClz32Func;
- default:
- break;
- }
- }
- int const arity =
- JSFunction::cast(*value)->shared()->internal_formal_parameter_count();
- switch (arity) {
- case SharedFunctionInfo::kDontAdaptArgumentsSentinel:
- // Some smart optimization at work... &%$!&@+$!
- break;
- case 0:
- return typer_->cache_.kAnyFunc0;
- case 1:
- return typer_->cache_.kAnyFunc1;
- case 2:
- return typer_->cache_.kAnyFunc2;
- case 3:
- return typer_->cache_.kAnyFunc3;
- default: {
- DCHECK_LT(3, arity);
- Type** const params = zone()->NewArray<Type*>(arity);
- std::fill(&params[0], &params[arity], Type::Any(zone()));
- return Type::Function(Type::Any(zone()), arity, params, zone());
- }
- }
- } else if (value->IsJSTypedArray()) {
+ if (value->IsJSTypedArray()) {
switch (JSTypedArray::cast(*value)->type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
diff --git a/chromium/v8/src/compiler/typer.h b/chromium/v8/src/compiler/typer.h
index 065262907ba..41770266c81 100644
--- a/chromium/v8/src/compiler/typer.h
+++ b/chromium/v8/src/compiler/typer.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TYPER_H_
#define V8_COMPILER_TYPER_H_
+#include "src/base/flags.h"
#include "src/compiler/graph.h"
#include "src/types.h"
@@ -12,14 +13,23 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class ZoneTypeCache;
+class CompilationDependencies;
+class TypeCache;
namespace compiler {
class Typer {
public:
- Typer(Isolate* isolate, Graph* graph,
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
+ CompilationDependencies* dependencies = nullptr,
Type::FunctionType* function_type = nullptr);
~Typer();
@@ -34,16 +44,21 @@ class Typer {
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
Isolate* isolate() const { return isolate_; }
+ Flags flags() const { return flags_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
Type::FunctionType* function_type() const { return function_type_; }
Isolate* const isolate_;
Graph* const graph_;
+ Flags const flags_;
+ CompilationDependencies* const dependencies_;
Type::FunctionType* function_type_;
Decorator* decorator_;
- ZoneTypeCache const& cache_;
+ TypeCache const& cache_;
Type* singleton_false_;
Type* singleton_true_;
+ Type* singleton_the_hole_;
Type* signed32ish_;
Type* unsigned32ish_;
Type* falsish_;
@@ -52,6 +67,8 @@ class Typer {
DISALLOW_COPY_AND_ASSIGN(Typer);
};
+DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/verifier.cc b/chromium/v8/src/compiler/verifier.cc
index 57bcef16a0d..1a3ef8e7830 100644
--- a/chromium/v8/src/compiler/verifier.cc
+++ b/chromium/v8/src/compiler/verifier.cc
@@ -250,12 +250,12 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
default: {
- UNREACHABLE();
+ V8_Fatal(__FILE__, __LINE__, "Switch #%d illegally used by #%d:%s",
+ node->id(), use->id(), use->op()->mnemonic());
break;
}
}
}
- CHECK_LE(1, count_case);
CHECK_EQ(1, count_default);
CHECK_EQ(node->op()->ControlOutputCount(), count_case + count_default);
// Type is empty.
@@ -413,10 +413,13 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_LT(1, effect_count);
break;
}
- case IrOpcode::kValueEffect:
+ case IrOpcode::kGuard:
+ // TODO(bmeurer): what are the constraints on these?
+ break;
+ case IrOpcode::kBeginRegion:
// TODO(rossberg): what are the constraints on these?
break;
- case IrOpcode::kFinish: {
+ case IrOpcode::kFinishRegion: {
// TODO(rossberg): what are the constraints on these?
// Type must be subsumed by input type.
if (typing == TYPED) {
@@ -433,6 +436,7 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(6, input_count);
break;
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
case IrOpcode::kTypedStateValues:
// TODO(jarin): what are the constraints on these?
break;
@@ -453,7 +457,6 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSUnaryNot:
// Type is Boolean.
CheckUpperIs(node, Type::Boolean());
break;
@@ -508,12 +511,21 @@ void Verifier::Visitor::Check(Node* node) {
// Type is OtherObject.
CheckUpperIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateArray:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
+ CheckUpperIs(node, Type::Function());
+ break;
+ case IrOpcode::kJSCreateIterResultObject:
+ // Type is OtherObject.
CheckUpperIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
CheckUpperIs(node, Type::OtherObject());
break;
@@ -541,8 +553,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSLoadContext:
- case IrOpcode::kJSLoadDynamicGlobal:
- case IrOpcode::kJSLoadDynamicContext:
+ case IrOpcode::kJSLoadDynamic:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -566,6 +577,7 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckUpperIs(node, Type::Receiver());
break;
@@ -599,6 +611,10 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
+ case IrOpcode::kJSLoadMessage:
+ case IrOpcode::kJSStoreMessage:
+ break;
+
case IrOpcode::kJSStackCheck:
// Type is empty.
CheckNotTyped(node);
@@ -635,6 +651,14 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(rossberg): activate once we retype after opcode changes.
// CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kNumberBitwiseOr:
+ case IrOpcode::kNumberBitwiseXor:
+ case IrOpcode::kNumberBitwiseAnd:
+ // (Signed32, Signed32) -> Signed32
+ CheckValueInputIs(node, 0, Type::Signed32());
+ CheckValueInputIs(node, 1, Type::Signed32());
+ CheckUpperIs(node, Type::Signed32());
+ break;
case IrOpcode::kNumberShiftLeft:
case IrOpcode::kNumberShiftRight:
// (Signed32, Unsigned32) -> Signed32
@@ -658,6 +682,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kNumberIsHoleNaN:
+ // Number -> Boolean
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Boolean());
+ break;
case IrOpcode::kPlainPrimitiveToNumber:
// PlainPrimitive -> Number
CheckValueInputIs(node, 0, Type::PlainPrimitive());
@@ -677,6 +706,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckUpperIs(node, Type::Boolean());
break;
}
+ case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsSmi:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
@@ -803,6 +833,8 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
case IrOpcode::kWord32Clz:
+ case IrOpcode::kWord32Ctz:
+ case IrOpcode::kWord32Popcnt:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
case IrOpcode::kWord64Xor:
@@ -810,6 +842,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Ror:
+ case IrOpcode::kWord64Clz:
+ case IrOpcode::kWord64Popcnt:
+ case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
@@ -827,7 +862,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kInt64Add:
+ case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt64Sub:
+ case IrOpcode::kInt64SubWithOverflow:
case IrOpcode::kInt64Mul:
case IrOpcode::kInt64Div:
case IrOpcode::kInt64Mod:
@@ -857,13 +894,23 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Abs:
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat32RoundDown:
case IrOpcode::kFloat64RoundDown:
+ case IrOpcode::kFloat32RoundUp:
+ case IrOpcode::kFloat64RoundUp:
+ case IrOpcode::kFloat32RoundTruncate:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
+ case IrOpcode::kFloat32RoundTiesEven:
+ case IrOpcode::kFloat64RoundTiesEven:
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kRoundInt64ToFloat32:
+ case IrOpcode::kRoundInt64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
case IrOpcode::kBitcastFloat32ToInt32:
@@ -877,6 +924,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
@@ -928,7 +979,7 @@ static bool HasDominatingDef(Schedule* schedule, Node* node,
use_pos--;
}
block = block->dominator();
- if (block == NULL) break;
+ if (block == nullptr) break;
use_pos = static_cast<int>(block->NodeCount()) - 1;
if (node == block->control_input()) return true;
}
@@ -939,7 +990,7 @@ static bool HasDominatingDef(Schedule* schedule, Node* node,
static bool Dominates(Schedule* schedule, Node* dominator, Node* dominatee) {
BasicBlock* dom = schedule->block(dominator);
BasicBlock* sub = schedule->block(dominatee);
- while (sub != NULL) {
+ while (sub != nullptr) {
if (sub == dom) {
return true;
}
@@ -1055,7 +1106,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
{
// Verify the dominance relation.
ZoneVector<BitVector*> dominators(zone);
- dominators.resize(count, NULL);
+ dominators.resize(count, nullptr);
// Compute a set of all the nodes that dominate a given node by using
// a forward fixpoint. O(n^2).
@@ -1068,7 +1119,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
queue.pop();
BitVector* block_doms = dominators[block->id().ToSize()];
BasicBlock* idom = block->dominator();
- if (idom != NULL && !block_doms->Contains(idom->id().ToInt())) {
+ if (idom != nullptr && !block_doms->Contains(idom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
block->rpo_number(), idom->rpo_number());
}
@@ -1076,7 +1127,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BasicBlock* succ = block->SuccessorAt(s);
BitVector* succ_doms = dominators[succ->id().ToSize()];
- if (succ_doms == NULL) {
+ if (succ_doms == nullptr) {
// First time visiting the node. S.doms = B U B.doms
succ_doms = new (zone) BitVector(static_cast<int>(count), zone);
succ_doms->CopyFrom(*block_doms);
@@ -1098,7 +1149,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
b != rpo_order->end(); ++b) {
BasicBlock* block = *b;
BasicBlock* idom = block->dominator();
- if (idom == NULL) continue;
+ if (idom == nullptr) continue;
BitVector* block_doms = dominators[block->id().ToSize()];
for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
@@ -1138,7 +1189,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
// Check inputs to control for this block.
Node* control = block->control_input();
- if (control != NULL) {
+ if (control != nullptr) {
CHECK_EQ(block, schedule->block(control));
CheckInputsDominate(schedule, block, control,
static_cast<int>(block->NodeCount()) - 1);
diff --git a/chromium/v8/src/compiler/verifier.h b/chromium/v8/src/compiler/verifier.h
index cee323e4803..428558d42da 100644
--- a/chromium/v8/src/compiler/verifier.h
+++ b/chromium/v8/src/compiler/verifier.h
@@ -56,8 +56,8 @@ class ScheduleVerifier {
public:
static void Run(Schedule* schedule);
};
-}
-}
-} // namespace v8::internal::compiler
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_VERIFIER_H_
diff --git a/chromium/v8/src/compiler/wasm-compiler.cc b/chromium/v8/src/compiler/wasm-compiler.cc
new file mode 100644
index 00000000000..17065d61b4a
--- /dev/null
+++ b/chromium/v8/src/compiler/wasm-compiler.cc
@@ -0,0 +1,2031 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-compiler.h"
+
+#include "src/isolate-inl.h"
+
+#include "src/base/platform/platform.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/typer.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
+ if (wasm::WasmOpcodes::IsSupported(opcode)) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Unsupported opcode #%d:%s reported as supported", opcode,
+ wasm::WasmOpcodes::OpcodeName(opcode));
+ }
+ V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
+ wasm::WasmOpcodes::OpcodeName(opcode));
+ return nullptr;
+}
+
+
+void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
+ Graph* g = jsgraph->graph();
+ if (g->end()) {
+ NodeProperties::MergeControlToEnd(g, jsgraph->common(), node);
+ } else {
+ g->SetEnd(g->NewNode(jsgraph->common()->End(1), node));
+ }
+}
+
+
+enum TrapReason {
+ kTrapUnreachable,
+ kTrapMemOutOfBounds,
+ kTrapDivByZero,
+ kTrapDivUnrepresentable,
+ kTrapRemByZero,
+ kTrapFloatUnrepresentable,
+ kTrapFuncInvalid,
+ kTrapFuncSigMismatch,
+ kTrapCount
+};
+
+
+static const char* kTrapMessages[] = {
+ "unreachable", "memory access out of bounds",
+ "divide by zero", "divide result unrepresentable",
+ "remainder by zero", "integer result unrepresentable",
+ "invalid function", "function signature mismatch"};
+} // namespace
+
+
+// A helper that handles building graph fragments for trapping.
+// To avoid generating a ton of redundant code that just calls the runtime
+// to trap, we generate a per-trap-reason block of code that all trap sites
+// in this function will branch to.
+class WasmTrapHelper : public ZoneObject {
+ public:
+ explicit WasmTrapHelper(WasmGraphBuilder* builder)
+ : builder_(builder),
+ jsgraph_(builder->jsgraph()),
+ graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
+ for (int i = 0; i < kTrapCount; i++) traps_[i] = nullptr;
+ }
+
+ // Make the current control path trap to unreachable.
+ void Unreachable() { ConnectTrap(kTrapUnreachable); }
+
+ // Add a check that traps if {node} is equal to {val}.
+ Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
+ Int32Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (val == 0) {
+ AddTrapIfFalse(reason, node);
+ } else {
+ AddTrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(val)));
+ }
+ return builder_->Control();
+ }
+
+ // Add a check that traps if {node} is zero.
+ Node* ZeroCheck32(TrapReason reason, Node* node) {
+ return TrapIfEq32(reason, node, 0);
+ }
+
+ // Add a check that traps if {node} is equal to {val}.
+ Node* TrapIfEq64(TrapReason reason, Node* node, int64_t val) {
+ Int64Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ AddTrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
+ jsgraph()->Int64Constant(val)));
+ return builder_->Control();
+ }
+
+ // Add a check that traps if {node} is zero.
+ Node* ZeroCheck64(TrapReason reason, Node* node) {
+ return TrapIfEq64(reason, node, 0);
+ }
+
+ // Add a trap if {cond} is true.
+ void AddTrapIfTrue(TrapReason reason, Node* cond) {
+ AddTrapIf(reason, cond, true);
+ }
+
+ // Add a trap if {cond} is false.
+ void AddTrapIfFalse(TrapReason reason, Node* cond) {
+ AddTrapIf(reason, cond, false);
+ }
+
+ // Add a trap if {cond} is true or false according to {iftrue}.
+ void AddTrapIf(TrapReason reason, Node* cond, bool iftrue) {
+ Node** effect_ptr = builder_->effect_;
+ Node** control_ptr = builder_->control_;
+ Node* before = *effect_ptr;
+ BranchHint hint = iftrue ? BranchHint::kFalse : BranchHint::kTrue;
+ Node* branch = graph()->NewNode(common()->Branch(hint), cond, *control_ptr);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ *control_ptr = iftrue ? if_true : if_false;
+ ConnectTrap(reason);
+ *control_ptr = iftrue ? if_false : if_true;
+ *effect_ptr = before;
+ }
+
+ private:
+ WasmGraphBuilder* builder_;
+ JSGraph* jsgraph_;
+ Graph* graph_;
+ Node* traps_[kTrapCount];
+ Node* effects_[kTrapCount];
+
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
+
+ void ConnectTrap(TrapReason reason) {
+ if (traps_[reason] == nullptr) {
+ // Create trap code for the first time this trap is used.
+ return BuildTrapCode(reason);
+ }
+ // Connect the current control and effect to the existing trap code.
+ builder_->AppendToMerge(traps_[reason], builder_->Control());
+ builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
+ }
+
+ void BuildTrapCode(TrapReason reason) {
+ Node* exception = builder_->String(kTrapMessages[reason]);
+ Node* end;
+ Node** control_ptr = builder_->control_;
+ Node** effect_ptr = builder_->effect_;
+ wasm::ModuleEnv* module = builder_->module_;
+ *control_ptr = traps_[reason] =
+ graph()->NewNode(common()->Merge(1), *control_ptr);
+ *effect_ptr = effects_[reason] =
+ graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
+
+ if (module && !module->context.is_null()) {
+ // Use the module context to call the runtime to throw an exception.
+ Runtime::FunctionId f = Runtime::kThrow;
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ Node* inputs[] = {
+ jsgraph()->CEntryStubConstant(fun->result_size), // C entry
+ exception, // exception
+ jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(fun->nargs), // arity
+ jsgraph()->Constant(module->context), // context
+ *effect_ptr,
+ *control_ptr};
+
+ Node* node = graph()->NewNode(
+ common()->Call(desc), static_cast<int>(arraysize(inputs)), inputs);
+ *control_ptr = node;
+ *effect_ptr = node;
+ }
+ if (false) {
+ // End the control flow with a throw
+ Node* thrw =
+ graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
+ *effect_ptr, *control_ptr);
+ end = thrw;
+ } else {
+      // End the control flow by returning 0xdeadbeef.
+ Node* ret_value;
+ if (builder_->GetFunctionSignature()->return_count() > 0) {
+ switch (builder_->GetFunctionSignature()->GetReturn()) {
+ case wasm::kAstI32:
+ ret_value = jsgraph()->Int32Constant(0xdeadbeef);
+ break;
+ case wasm::kAstI64:
+ ret_value = jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+ break;
+ case wasm::kAstF32:
+ ret_value = jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+ break;
+ case wasm::kAstF64:
+ ret_value = jsgraph()->Float64Constant(
+ bit_cast<double>(0xdeadbeefdeadbeef));
+ break;
+ default:
+ UNREACHABLE();
+ ret_value = nullptr;
+ }
+ } else {
+ ret_value = jsgraph()->Int32Constant(0xdeadbeef);
+ }
+ end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
+ *effect_ptr, *control_ptr);
+ }
+
+ MergeControlToEnd(jsgraph(), end);
+ }
+};
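+
[Editorial note] As the class comment says, at most one trap block is materialized per trap reason; every later trap site for that reason just appends another input to the existing block's merge and effect phi. A compact standalone model of the sharing (a map and ints stand in for the graph; all names hypothetical):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// One shared block per trap reason; each site becomes one more incoming
// edge instead of a fresh copy of the trap code.
struct TrapBlock { std::vector<int> incoming_sites; };

struct TrapCache {
  std::map<std::string, TrapBlock> blocks;
  void Connect(const std::string& reason, int site) {
    blocks[reason].incoming_sites.push_back(site);  // built once, merged after
  }
};

int main() {
  TrapCache cache;
  cache.Connect("divide by zero", /*site=*/1);
  cache.Connect("divide by zero", /*site=*/2);  // reuses the same block
  std::printf("%zu\n", cache.blocks["divide by zero"].incoming_sites.size());
}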
+
+
+WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
+ wasm::FunctionSig* function_signature)
+ : zone_(zone),
+ jsgraph_(jsgraph),
+ module_(nullptr),
+ mem_buffer_(nullptr),
+ mem_size_(nullptr),
+ function_table_(nullptr),
+ control_(nullptr),
+ effect_(nullptr),
+ cur_buffer_(def_buffer_),
+ cur_bufsize_(kDefaultBufferSize),
+ trap_(new (zone) WasmTrapHelper(this)),
+ function_signature_(function_signature) {
+ DCHECK_NOT_NULL(jsgraph_);
+}
+
+
+Node* WasmGraphBuilder::Error() { return jsgraph()->Dead(); }
+
+
+Node* WasmGraphBuilder::Start(unsigned params) {
+ Node* start = graph()->NewNode(jsgraph()->common()->Start(params));
+ graph()->SetStart(start);
+ return start;
+}
+
+
+Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+ return graph()->NewNode(jsgraph()->common()->Parameter(index),
+ graph()->start());
+}
+
+
+Node* WasmGraphBuilder::Loop(Node* entry) {
+ return graph()->NewNode(jsgraph()->common()->Loop(1), entry);
+}
+
+
+Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
+ Node* terminate =
+ graph()->NewNode(jsgraph()->common()->Terminate(), effect, control);
+ MergeControlToEnd(jsgraph(), terminate);
+ return terminate;
+}
+
+
+unsigned WasmGraphBuilder::InputCount(Node* node) {
+ return static_cast<unsigned>(node->InputCount());
+}
+
+
+bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
+ return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
+ NodeProperties::GetControlInput(phi) == merge;
+}
+
+
+void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ merge->AppendInput(jsgraph()->zone(), from);
+ int new_size = merge->InputCount();
+ NodeProperties::ChangeOp(
+ merge, jsgraph()->common()->ResizeMergeOrPhi(merge->op(), new_size));
+}
+
+
+void WasmGraphBuilder::AppendToPhi(Node* merge, Node* phi, Node* from) {
+ DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ int new_size = phi->InputCount();
+ phi->InsertInput(jsgraph()->zone(), phi->InputCount() - 1, from);
+ NodeProperties::ChangeOp(
+ phi, jsgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
+}
+
+
+Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
+ return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
+}
+
+
+Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+ Node* control) {
+ DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
+ Node** buf = Realloc(vals, count);
+ buf = Realloc(buf, count + 1);
+ buf[count] = control;
+ return graph()->NewNode(jsgraph()->common()->Phi(type, count), count + 1,
+ buf);
+}
+
+
+Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
+ Node* control) {
+ DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
+ Node** buf = Realloc(effects, count);
+ buf = Realloc(buf, count + 1);
+ buf[count] = control;
+ return graph()->NewNode(jsgraph()->common()->EffectPhi(count), count + 1,
+ buf);
+}
+
+
+Node* WasmGraphBuilder::Int32Constant(int32_t value) {
+ return jsgraph()->Int32Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Int64Constant(int64_t value) {
+ return jsgraph()->Int64Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
+ Node* right) {
+ const Operator* op;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ switch (opcode) {
+ case wasm::kExprI32Add:
+ op = m->Int32Add();
+ break;
+ case wasm::kExprI32Sub:
+ op = m->Int32Sub();
+ break;
+ case wasm::kExprI32Mul:
+ op = m->Int32Mul();
+ break;
+ case wasm::kExprI32DivS: {
+ trap_->ZeroCheck32(kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
+ jsgraph()->Int32Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq32(kTrapDivUnrepresentable, left, kMinInt);
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
+ denom_is_not_m1, *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ }
+ case wasm::kExprI32DivU:
+ op = m->Uint32Div();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck32(kTrapDivByZero, right));
+ case wasm::kExprI32RemS: {
+ trap_->ZeroCheck32(kTrapRemByZero, right);
+ Diamond d(graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
+ jsgraph()->Int32Constant(-1)));
+
+ Node* rem = graph()->NewNode(m->Int32Mod(), left, right, d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ rem);
+ }
+ case wasm::kExprI32RemU:
+ op = m->Uint32Mod();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck32(kTrapRemByZero, right));
+ case wasm::kExprI32And:
+ op = m->Word32And();
+ break;
+ case wasm::kExprI32Ior:
+ op = m->Word32Or();
+ break;
+ case wasm::kExprI32Xor:
+ op = m->Word32Xor();
+ break;
+ case wasm::kExprI32Shl:
+ op = m->Word32Shl();
+ break;
+ case wasm::kExprI32ShrU:
+ op = m->Word32Shr();
+ break;
+ case wasm::kExprI32ShrS:
+ op = m->Word32Sar();
+ break;
+ case wasm::kExprI32Eq:
+ op = m->Word32Equal();
+ break;
+ case wasm::kExprI32Ne:
+ return Invert(Binop(wasm::kExprI32Eq, left, right));
+ case wasm::kExprI32LtS:
+ op = m->Int32LessThan();
+ break;
+ case wasm::kExprI32LeS:
+ op = m->Int32LessThanOrEqual();
+ break;
+ case wasm::kExprI32LtU:
+ op = m->Uint32LessThan();
+ break;
+ case wasm::kExprI32LeU:
+ op = m->Uint32LessThanOrEqual();
+ break;
+ case wasm::kExprI32GtS:
+ op = m->Int32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GeS:
+ op = m->Int32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GtU:
+ op = m->Uint32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GeU:
+ op = m->Uint32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+#if WASM_64
+ // Opcodes only supported on 64-bit platforms.
+ // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ case wasm::kExprI64Add:
+ op = m->Int64Add();
+ break;
+ case wasm::kExprI64Sub:
+ op = m->Int64Sub();
+ break;
+ case wasm::kExprI64Mul:
+ op = m->Int64Mul();
+ break;
+ case wasm::kExprI64DivS: {
+ trap_->ZeroCheck64(kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq64(kTrapDivUnrepresentable, left,
+ std::numeric_limits<int64_t>::min());
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
+ denom_is_not_m1, *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int64Div(), left, right, *control_);
+ }
+ case wasm::kExprI64DivU:
+ op = m->Uint64Div();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck64(kTrapDivByZero, right));
+ case wasm::kExprI64RemS: {
+ trap_->ZeroCheck64(kTrapRemByZero, right);
+ Diamond d(jsgraph()->graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)));
+
+ Node* rem = graph()->NewNode(m->Int64Mod(), left, right, d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
+ rem);
+ }
+ case wasm::kExprI64RemU:
+ op = m->Uint64Mod();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck64(kTrapRemByZero, right));
+ case wasm::kExprI64And:
+ op = m->Word64And();
+ break;
+ case wasm::kExprI64Ior:
+ op = m->Word64Or();
+ break;
+ case wasm::kExprI64Xor:
+ op = m->Word64Xor();
+ break;
+ case wasm::kExprI64Shl:
+ op = m->Word64Shl();
+ break;
+ case wasm::kExprI64ShrU:
+ op = m->Word64Shr();
+ break;
+ case wasm::kExprI64ShrS:
+ op = m->Word64Sar();
+ break;
+ case wasm::kExprI64Eq:
+ op = m->Word64Equal();
+ break;
+ case wasm::kExprI64Ne:
+ return Invert(Binop(wasm::kExprI64Eq, left, right));
+ case wasm::kExprI64LtS:
+ op = m->Int64LessThan();
+ break;
+ case wasm::kExprI64LeS:
+ op = m->Int64LessThanOrEqual();
+ break;
+ case wasm::kExprI64LtU:
+ op = m->Uint64LessThan();
+ break;
+ case wasm::kExprI64LeU:
+ op = m->Uint64LessThanOrEqual();
+ break;
+ case wasm::kExprI64GtS:
+ op = m->Int64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GeS:
+ op = m->Int64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GtU:
+ op = m->Uint64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GeU:
+ op = m->Uint64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+#endif
+
+ case wasm::kExprF32CopySign:
+ return BuildF32CopySign(left, right);
+ case wasm::kExprF64CopySign:
+ return BuildF64CopySign(left, right);
+ case wasm::kExprF32Add:
+ op = m->Float32Add();
+ break;
+ case wasm::kExprF32Sub:
+ op = m->Float32Sub();
+ break;
+ case wasm::kExprF32Mul:
+ op = m->Float32Mul();
+ break;
+ case wasm::kExprF32Div:
+ op = m->Float32Div();
+ break;
+ case wasm::kExprF32Eq:
+ op = m->Float32Equal();
+ break;
+ case wasm::kExprF32Ne:
+ return Invert(Binop(wasm::kExprF32Eq, left, right));
+ case wasm::kExprF32Lt:
+ op = m->Float32LessThan();
+ break;
+ case wasm::kExprF32Ge:
+ op = m->Float32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Gt:
+ op = m->Float32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Le:
+ op = m->Float32LessThanOrEqual();
+ break;
+ case wasm::kExprF64Add:
+ op = m->Float64Add();
+ break;
+ case wasm::kExprF64Sub:
+ op = m->Float64Sub();
+ break;
+ case wasm::kExprF64Mul:
+ op = m->Float64Mul();
+ break;
+ case wasm::kExprF64Div:
+ op = m->Float64Div();
+ break;
+ case wasm::kExprF64Eq:
+ op = m->Float64Equal();
+ break;
+ case wasm::kExprF64Ne:
+ return Invert(Binop(wasm::kExprF64Eq, left, right));
+ case wasm::kExprF64Lt:
+ op = m->Float64LessThan();
+ break;
+ case wasm::kExprF64Le:
+ op = m->Float64LessThanOrEqual();
+ break;
+ case wasm::kExprF64Gt:
+ op = m->Float64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF64Ge:
+ op = m->Float64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Min:
+ return BuildF32Min(left, right);
+ case wasm::kExprF64Min:
+ return BuildF64Min(left, right);
+ case wasm::kExprF32Max:
+ return BuildF32Max(left, right);
+ case wasm::kExprF64Max:
+ return BuildF64Max(left, right);
+ default:
+ op = UnsupportedOpcode(opcode);
+ }
+ return graph()->NewNode(op, left, right);
+}
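
[Editorial note] Several comparisons above (kExprI32GtS, kExprF64Ge, and friends) have no dedicated machine operator; the builder swaps the operands and emits the mirrored LessThan form instead. The trick in isolation:

#include <cstdio>
#include <utility>

int LessThan(int l, int r) { return l < r; }

// a > b is emitted as b < a, so only the LessThan operator is needed.
int GreaterThan(int l, int r) {
  std::swap(l, r);
  return LessThan(l, r);
}

int main() { std::printf("%d\n", GreaterThan(4, 3)); }  // prints 1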
+
+
+Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
+ const Operator* op;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ switch (opcode) {
+ case wasm::kExprBoolNot:
+ op = m->Word32Equal();
+ return graph()->NewNode(op, input, jsgraph()->Int32Constant(0));
+ case wasm::kExprF32Abs:
+ op = m->Float32Abs();
+ break;
+ case wasm::kExprF32Neg:
+ return BuildF32Neg(input);
+ case wasm::kExprF32Sqrt:
+ op = m->Float32Sqrt();
+ break;
+ case wasm::kExprF64Abs:
+ op = m->Float64Abs();
+ break;
+ case wasm::kExprF64Neg:
+ return BuildF64Neg(input);
+ case wasm::kExprF64Sqrt:
+ op = m->Float64Sqrt();
+ break;
+ case wasm::kExprI32SConvertF64:
+ return BuildI32SConvertF64(input);
+ case wasm::kExprI32UConvertF64:
+ return BuildI32UConvertF64(input);
+ case wasm::kExprF32ConvertF64:
+ op = m->TruncateFloat64ToFloat32();
+ break;
+ case wasm::kExprF64SConvertI32:
+ op = m->ChangeInt32ToFloat64();
+ break;
+ case wasm::kExprF64UConvertI32:
+ op = m->ChangeUint32ToFloat64();
+ break;
+ case wasm::kExprF32SConvertI32:
+ op = m->ChangeInt32ToFloat64(); // TODO(titzer): two conversions
+ input = graph()->NewNode(op, input);
+ op = m->TruncateFloat64ToFloat32();
+ break;
+ case wasm::kExprF32UConvertI32:
+ op = m->ChangeUint32ToFloat64();
+ input = graph()->NewNode(op, input);
+ op = m->TruncateFloat64ToFloat32();
+ break;
+ case wasm::kExprI32SConvertF32:
+ return BuildI32SConvertF32(input);
+ case wasm::kExprI32UConvertF32:
+ return BuildI32UConvertF32(input);
+ case wasm::kExprF64ConvertF32:
+ op = m->ChangeFloat32ToFloat64();
+ break;
+ case wasm::kExprF32ReinterpretI32:
+ op = m->BitcastInt32ToFloat32();
+ break;
+ case wasm::kExprI32ReinterpretF32:
+ op = m->BitcastFloat32ToInt32();
+ break;
+ case wasm::kExprI32Clz:
+ op = m->Word32Clz();
+ break;
+ case wasm::kExprI32Ctz: {
+ if (m->Word32Ctz().IsSupported()) {
+ op = m->Word32Ctz().op();
+ break;
+ } else {
+ return BuildI32Ctz(input);
+ }
+ }
+ case wasm::kExprI32Popcnt: {
+ if (m->Word32Popcnt().IsSupported()) {
+ op = m->Word32Popcnt().op();
+ break;
+ } else {
+ return BuildI32Popcnt(input);
+ }
+ }
+ case wasm::kExprF32Floor: {
+ if (m->Float32RoundDown().IsSupported()) {
+ op = m->Float32RoundDown().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF32Ceil: {
+ if (m->Float32RoundUp().IsSupported()) {
+ op = m->Float32RoundUp().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF32Trunc: {
+ if (m->Float32RoundTruncate().IsSupported()) {
+ op = m->Float32RoundTruncate().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF32NearestInt: {
+ if (m->Float32RoundTiesEven().IsSupported()) {
+ op = m->Float32RoundTiesEven().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64Floor: {
+ if (m->Float64RoundDown().IsSupported()) {
+ op = m->Float64RoundDown().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64Ceil: {
+ if (m->Float64RoundUp().IsSupported()) {
+ op = m->Float64RoundUp().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64Trunc: {
+ if (m->Float64RoundTruncate().IsSupported()) {
+ op = m->Float64RoundTruncate().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+ case wasm::kExprF64NearestInt: {
+ if (m->Float64RoundTiesEven().IsSupported()) {
+ op = m->Float64RoundTiesEven().op();
+ break;
+ } else {
+ op = UnsupportedOpcode(opcode);
+ break;
+ }
+ }
+
+#if WASM_64
+ // Opcodes only supported on 64-bit platforms.
+ // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ case wasm::kExprI32ConvertI64:
+ op = m->TruncateInt64ToInt32();
+ break;
+ case wasm::kExprI64SConvertI32:
+ op = m->ChangeInt32ToInt64();
+ break;
+ case wasm::kExprI64UConvertI32:
+ op = m->ChangeUint32ToUint64();
+ break;
+ case wasm::kExprF32SConvertI64:
+ op = m->RoundInt64ToFloat32();
+ break;
+ case wasm::kExprF32UConvertI64:
+ op = m->RoundUint64ToFloat32();
+ break;
+ case wasm::kExprF64SConvertI64:
+ op = m->RoundInt64ToFloat64();
+ break;
+ case wasm::kExprF64UConvertI64:
+ op = m->RoundUint64ToFloat64();
+ break;
+ case wasm::kExprI64SConvertF32: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToInt64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprI64SConvertF64: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToInt64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprI64UConvertF32: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToUint64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprI64UConvertF64: {
+ Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToUint64(), input);
+ Node* result =
+ graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
+ Node* overflow =
+ graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
+ return result;
+ }
+ case wasm::kExprF64ReinterpretI64:
+ op = m->BitcastInt64ToFloat64();
+ break;
+ case wasm::kExprI64ReinterpretF64:
+ op = m->BitcastFloat64ToInt64();
+ break;
+ case wasm::kExprI64Clz:
+ op = m->Word64Clz();
+ break;
+ case wasm::kExprI64Ctz: {
+ if (m->Word64Ctz().IsSupported()) {
+ op = m->Word64Ctz().op();
+ break;
+ } else {
+ return BuildI64Ctz(input);
+ }
+ }
+ case wasm::kExprI64Popcnt: {
+ if (m->Word64Popcnt().IsSupported()) {
+ op = m->Word64Popcnt().op();
+ break;
+ } else {
+ return BuildI64Popcnt(input);
+ }
+ }
+#endif
+ default:
+ op = UnsupportedOpcode(opcode);
+ }
+ return graph()->NewNode(op, input);
+}
+
+
+Node* WasmGraphBuilder::Float32Constant(float value) {
+ return jsgraph()->Float32Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Float64Constant(double value) {
+ return jsgraph()->Float64Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Constant(Handle<Object> value) {
+ return jsgraph()->Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
+ Node** false_node) {
+ DCHECK_NOT_NULL(cond);
+ DCHECK_NOT_NULL(*control_);
+ Node* branch =
+ graph()->NewNode(jsgraph()->common()->Branch(), cond, *control_);
+ *true_node = graph()->NewNode(jsgraph()->common()->IfTrue(), branch);
+ *false_node = graph()->NewNode(jsgraph()->common()->IfFalse(), branch);
+ return branch;
+}
+
+
+Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
+ return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
+}
+
+
+Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ return graph()->NewNode(jsgraph()->common()->IfValue(value), sw);
+}
+
+
+Node* WasmGraphBuilder::IfDefault(Node* sw) {
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ return graph()->NewNode(jsgraph()->common()->IfDefault(), sw);
+}
+
+
+Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
+ DCHECK_NOT_NULL(*control_);
+ DCHECK_NOT_NULL(*effect_);
+
+ if (count == 0) {
+ // Handle a return of void.
+ vals[0] = jsgraph()->Int32Constant(0);
+ count = 1;
+ }
+
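+  // Append {effect} and {control} as the last two inputs, giving the layout
+  // {val0, ..., val(count-1), effect, control} expected by the Return node.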
+ Node** buf = Realloc(vals, count);
+ buf = Realloc(buf, count + 2);
+ buf[count] = *effect_;
+ buf[count + 1] = *control_;
+  Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, buf);
+
+ MergeControlToEnd(jsgraph(), ret);
+ return ret;
+}
+
+
+Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
+
+
+Node* WasmGraphBuilder::Unreachable() {
+ trap_->Unreachable();
+ return nullptr;
+}
+
+
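+// The negate and copysign builders below operate on the raw bit pattern:
+// the sign is the top bit (bit 31 for f32, bit 63 for f64), so negation
+// XORs it and copysign combines the magnitude bits of {left} with the
+// sign bit of {right}.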
+Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
+ Node* result =
+ Unop(wasm::kExprF32ReinterpretI32,
+ Binop(wasm::kExprI32Xor, Unop(wasm::kExprI32ReinterpretF32, input),
+ jsgraph()->Int32Constant(0x80000000)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
+#if WASM_64
+ Node* result =
+ Unop(wasm::kExprF64ReinterpretI64,
+ Binop(wasm::kExprI64Xor, Unop(wasm::kExprI64ReinterpretF64, input),
+ jsgraph()->Int64Constant(0x8000000000000000)));
+
+ return result;
+#else
+ MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Node* old_high_word = graph()->NewNode(m->Float64ExtractHighWord32(), input);
+ Node* new_high_word = Binop(wasm::kExprI32Xor, old_high_word,
+ jsgraph()->Int32Constant(0x80000000));
+
+ return graph()->NewNode(m->Float64InsertHighWord32(), input, new_high_word);
+#endif
+}
+
+
+Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
+ Node* result = Unop(
+ wasm::kExprF32ReinterpretI32,
+ Binop(wasm::kExprI32Ior,
+ Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
+ jsgraph()->Int32Constant(0x7fffffff)),
+ Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
+ jsgraph()->Int32Constant(0x80000000))));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
+#if WASM_64
+ Node* result = Unop(
+ wasm::kExprF64ReinterpretI64,
+ Binop(wasm::kExprI64Ior,
+ Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
+ jsgraph()->Int64Constant(0x7fffffffffffffff)),
+ Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
+ jsgraph()->Int64Constant(0x8000000000000000))));
+
+ return result;
+#else
+ MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Node* high_word_left = graph()->NewNode(m->Float64ExtractHighWord32(), left);
+ Node* high_word_right =
+ graph()->NewNode(m->Float64ExtractHighWord32(), right);
+
+ Node* new_high_word =
+ Binop(wasm::kExprI32Ior, Binop(wasm::kExprI32And, high_word_left,
+ jsgraph()->Int32Constant(0x7fffffff)),
+ Binop(wasm::kExprI32And, high_word_right,
+ jsgraph()->Int32Constant(0x80000000)));
+
+ return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
+#endif
+}
+
+
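+// The min/max builders below nest three diamonds; for min the result is
+//   left <= right ? left : (right < left ? right : (left == left ? right : left))
+// If neither comparison holds, one operand is NaN, and the innermost diamond
+// selects whichever operand is NaN, so NaN propagates to the result.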
+Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
+ Diamond left_le_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Le, left, right));
+
+ Diamond right_lt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Eq, left, left));
+
+ return left_le_right.Phi(
+ wasm::kAstF32, left,
+ right_lt_left.Phi(wasm::kAstF32, right,
+ left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
+ Diamond left_ge_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Ge, left, right));
+
+ Diamond right_gt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Gt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Eq, left, left));
+
+ return left_ge_right.Phi(
+ wasm::kAstF32, left,
+ right_gt_left.Phi(wasm::kAstF32, right,
+ left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
+ Diamond left_le_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Le, left, right));
+
+ Diamond right_lt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Eq, left, left));
+
+ return left_le_right.Phi(
+ wasm::kAstF64, left,
+ right_lt_left.Phi(wasm::kAstF64, right,
+ left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
+ Diamond left_ge_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Ge, left, right));
+
+ Diamond right_gt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Gt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Eq, left, left));
+
+ return left_ge_right.Phi(
+ wasm::kAstF64, left,
+ right_gt_left.Phi(wasm::kAstF64, right,
+ left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF32Trunc, input);
+ // TODO(titzer): two conversions
+ Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), f64_trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF64Trunc, input);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF32Trunc, input);
+ // TODO(titzer): two conversions
+ Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), f64_trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF64Trunc, input);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = value | (value << 1);
+ // value = value | (value << 2);
+ // value = value | (value << 4);
+ // value = value | (value << 8);
+ // value = value | (value << 16);
+ // return CountPopulation32(0xffffffff XOR value);
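+ // E.g. for input 8 (0b1000), smearing gives 0xfffffff8, the XOR gives
+ // 0b111, and its popcount is 3 == ctz(8). For input 0 the XOR gives
+ // 0xffffffff, yielding 32.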
+
+ Node* result =
+ Binop(wasm::kExprI32Ior, input,
+ Binop(wasm::kExprI32Shl, input, jsgraph()->Int32Constant(1)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(2)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(4)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(8)));
+
+ result =
+ Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(16)));
+
+ result = BuildI32Popcnt(
+ Binop(wasm::kExprI32Xor, jsgraph()->Int32Constant(0xffffffff), result));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = value | (value << 1);
+ // value = value | (value << 2);
+ // value = value | (value << 4);
+ // value = value | (value << 8);
+ // value = value | (value << 16);
+ // value = value | (value << 32);
+ // return CountPopulation64(0xffffffffffffffff XOR value);
+
+ Node* result =
+ Binop(wasm::kExprI64Ior, input,
+ Binop(wasm::kExprI64Shl, input, jsgraph()->Int64Constant(1)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(2)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(4)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(8)));
+
+ result =
+ Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(16)));
+
+ result =
+ Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(32)));
+
+ result = BuildI64Popcnt(Binop(
+ wasm::kExprI64Xor, jsgraph()->Int64Constant(0xffffffffffffffff), result));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+ // value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+ // value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+ // value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+ // value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
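+ // This is the classic SWAR popcount: each step adds adjacent bit groups
+ // in parallel, doubling the group width from 1 up to 16 bits.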
+
+ Node* result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, input, jsgraph()->Int32Constant(1)),
+ jsgraph()->Int32Constant(0x55555555)),
+ Binop(wasm::kExprI32And, input, jsgraph()->Int32Constant(0x55555555)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(2)),
+ jsgraph()->Int32Constant(0x33333333)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x33333333)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(4)),
+ jsgraph()->Int32Constant(0x0f0f0f0f)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0f0f0f0f)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(8)),
+ jsgraph()->Int32Constant(0x00ff00ff)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x00ff00ff)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(16)),
+ jsgraph()->Int32Constant(0x0000ffff)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0000ffff)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ // value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ // value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ // value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ // value = ((value >> 16) & 0x0000ffff0000ffff) + (value &
+ // 0x0000ffff0000ffff);
+ // value = ((value >> 32) & 0x00000000ffffffff) + (value &
+ // 0x00000000ffffffff);
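+ // Same SWAR reduction as in BuildI32Popcnt, extended to 64-bit groups.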
+
+ Node* result =
+ Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And,
+ Binop(wasm::kExprI64ShrU, input, jsgraph()->Int64Constant(1)),
+ jsgraph()->Int64Constant(0x5555555555555555)),
+ Binop(wasm::kExprI64And, input,
+ jsgraph()->Int64Constant(0x5555555555555555)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(2)),
+ jsgraph()->Int64Constant(0x3333333333333333)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x3333333333333333)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(4)),
+ jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(8)),
+ jsgraph()->Int64Constant(0x00ff00ff00ff00ff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x00ff00ff00ff00ff)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(16)),
+ jsgraph()->Int64Constant(0x0000ffff0000ffff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x0000ffff0000ffff)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(32)),
+ jsgraph()->Int64Constant(0x00000000ffffffff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x00000000ffffffff)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
+ const size_t params = sig->parameter_count();
+ const size_t extra = 2; // effect and control inputs.
+ const size_t count = 1 + params + extra;
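+ // Final input layout: {code target, param0..param(n-1), effect, control}.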
+
+ // Reallocate the buffer to make space for extra inputs.
+ args = Realloc(args, count);
+
+ // Add effect and control inputs.
+ args[params + 1] = *effect_;
+ args[params + 2] = *control_;
+
+ const Operator* op = jsgraph()->common()->Call(
+ module_->GetWasmCallDescriptor(jsgraph()->zone(), sig));
+ Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+
+ *effect_ = call;
+ return call;
+}
+
+
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
+ DCHECK_NULL(args[0]);
+
+ // Add code object as constant.
+ args[0] = Constant(module_->GetFunctionCode(index));
+ wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
+
+ return BuildWasmCall(sig, args);
+}
+
+
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
+ DCHECK_NOT_NULL(args[0]);
+
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+
+ // Compute the code object by loading it from the function table.
+ Node* key = args[0];
+ Node* table = FunctionTable();
+
+ // Bounds check the index.
+ int table_size = static_cast<int>(module_->FunctionTableSize());
+ {
+ Node* size = Int32Constant(table_size);
+ Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
+ trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+ }
+
+ // Load signature from the table and check.
+ // The table is a FixedArray; signatures are encoded as SMIs.
+ // [sig1, sig2, sig3, ...., code1, code2, code3 ...]
+ ElementAccess access = AccessBuilder::ForFixedArrayElement();
+ const int fixed_offset = access.header_size - access.tag();
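+ // Signatures occupy the first {table_size} slots and code objects the next
+ // {table_size}, so entry {key}'s code object lives at
+ // fixed_offset + (table_size + key) * kPointerSize.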
+ {
+ Node* load_sig = graph()->NewNode(
+ machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(),
+ graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2)),
+ Int32Constant(fixed_offset)),
+ *effect_, *control_);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(index));
+ trap_->AddTrapIfFalse(kTrapFuncSigMismatch, sig_match);
+ }
+
+ // Load code object from the table.
+ int offset = fixed_offset + kPointerSize * table_size;
+ Node* load_code = graph()->NewNode(
+ machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(),
+ graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2)),
+ Int32Constant(offset)),
+ *effect_, *control_);
+
+ args[0] = load_code;
+ wasm::FunctionSig* sig = module_->GetSignature(index);
+ return BuildWasmCall(sig, args);
+}
+
+
+Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
+ SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+ switch (type) {
+ case wasm::kAstI32:
+ return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ case wasm::kAstI64:
+ // TODO(titzer): i64->JS has no good solution right now. Using lower 32
+ // bits.
+ node =
+ graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), node);
+ return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ case wasm::kAstF32:
+ node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
+ node);
+ return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ case wasm::kAstF64:
+ return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ case wasm::kAstStmt:
+ return jsgraph()->UndefinedConstant();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
+ wasm::LocalType type) {
+ // Do a JavaScript ToNumber.
+ Node* num =
+ graph()->NewNode(jsgraph()->javascript()->ToNumber(), node, context,
+ jsgraph()->EmptyFrameState(), *effect_, *control_);
+ *control_ = num;
+ *effect_ = num;
+
+ // Change representation.
+ SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+ num = graph()->NewNode(simplified.ChangeTaggedToFloat64(), num);
+
+ switch (type) {
+ case wasm::kAstI32: {
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript),
+ num);
+ break;
+ }
+ case wasm::kAstI64:
+ // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript),
+ num);
+ num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
+ break;
+ case wasm::kAstF32:
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
+ num);
+ break;
+ case wasm::kAstF64:
+ break;
+ case wasm::kAstStmt:
+ num = jsgraph()->Int32Constant(0);
+ break;
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+ return num;
+}
+
+
+Node* WasmGraphBuilder::Invert(Node* node) {
+ return Unop(wasm::kExprBoolNot, node);
+}
+
+
+void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
+ wasm::FunctionSig* sig) {
+ int params = static_cast<int>(sig->parameter_count());
+ int count = params + 3;
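+ // Input layout: {wasm code, converted param0..param(n-1), effect, control}.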
+ Node** args = Buffer(count);
+
+ // Build the start and the JS parameter nodes.
+ Node* start = Start(params + 3);
+ *control_ = start;
+ *effect_ = start;
+ // JS context is the last parameter.
+ Node* context = graph()->NewNode(
+ jsgraph()->common()->Parameter(params + 1, "context"), start);
+
+ int pos = 0;
+ args[pos++] = Constant(wasm_code);
+
+ // Convert JS parameters to WASM numbers.
+ for (int i = 0; i < params; i++) {
+ Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+ args[pos++] = FromJS(param, context, sig->GetParam(i));
+ }
+
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ // Call the WASM code.
+ CallDescriptor* desc = module_->GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ Node* jsval =
+ ToJS(call, context,
+ sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+ Node* ret =
+ graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
+
+ MergeControlToEnd(jsgraph(), ret);
+}
+
+
+void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
+ wasm::FunctionSig* sig) {
+ int js_count = function->shared()->internal_formal_parameter_count();
+ int wasm_count = static_cast<int>(sig->parameter_count());
+
+ // Build the start and the parameter nodes.
+ Isolate* isolate = jsgraph()->isolate();
+ CallDescriptor* desc;
+ Node* start = Start(wasm_count + 3);
+ *effect_ = start;
+ *control_ = start;
+ // JS context is the last parameter.
+ Node* context = Constant(Handle<Context>(function->context(), isolate));
+ Node** args = Buffer(wasm_count + 7);
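+ // Both call paths below need at most {wasm_count} + 7 inputs: code target
+ // or new.target, JS function, argument count, receiver, the converted
+ // parameters, context, effect and control.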
+
+ bool arg_count_before_args = false;
+ bool add_new_target_undefined = false;
+
+ int pos = 0;
+ if (js_count == wasm_count) {
+ // Exact arity match; call the function directly.
+ desc = Linkage::GetJSCallDescriptor(graph()->zone(), false, wasm_count + 1,
+ CallDescriptor::kNoFlags);
+ arg_count_before_args = false;
+ add_new_target_undefined = true;
+ } else {
+ // Use the Call builtin.
+ Callable callable = CodeFactory::Call(isolate);
+ args[pos++] = jsgraph()->HeapConstant(callable.code());
+ desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
+ callable.descriptor(), wasm_count + 1,
+ CallDescriptor::kNoFlags);
+ arg_count_before_args = true;
+ }
+
+ args[pos++] = jsgraph()->Constant(function); // JS function.
+ if (arg_count_before_args) {
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ }
+ // JS receiver.
+ Handle<Object> global(function->context()->global_object(), isolate);
+ args[pos++] = jsgraph()->Constant(global);
+
+ // Convert WASM numbers to JS values.
+ for (int i = 0; i < wasm_count; i++) {
+ Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+ args[pos++] = ToJS(param, context, sig->GetParam(i));
+ }
+
+ if (add_new_target_undefined) {
+ args[pos++] = jsgraph()->UndefinedConstant(); // new target
+ }
+
+ if (!arg_count_before_args) {
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ }
+ args[pos++] = context;
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+
+ // Convert the return value back.
+ Node* val =
+ FromJS(call, context,
+ sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+ Node* ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+
+ MergeControlToEnd(jsgraph(), ret);
+}
+
+
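+// MemBuffer and MemSize cache the node for offset 0 (the common case);
+// non-zero offsets get a fresh constant.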
+Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+ if (offset == 0) {
+ if (!mem_buffer_) {
+ mem_buffer_ = jsgraph()->IntPtrConstant(module_->mem_start);
+ }
+ return mem_buffer_;
+ } else {
+ return jsgraph()->IntPtrConstant(module_->mem_start + offset);
+ }
+}
+
+
+Node* WasmGraphBuilder::MemSize(uint32_t offset) {
+ int32_t size = static_cast<int>(module_->mem_end - module_->mem_start);
+ if (offset == 0) {
+ if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
+ return mem_size_;
+ } else {
+ return jsgraph()->Int32Constant(size + offset);
+ }
+}
+
+
+Node* WasmGraphBuilder::FunctionTable() {
+ if (!function_table_) {
+ DCHECK(!module_->function_table.is_null());
+ function_table_ = jsgraph()->Constant(module_->function_table);
+ }
+ return function_table_;
+}
+
+
+Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
+ MachineType mem_type = module_->GetGlobalType(index);
+ Node* addr = jsgraph()->IntPtrConstant(
+ module_->globals_area + module_->module->globals->at(index).offset);
+ const Operator* op = jsgraph()->machine()->Load(mem_type);
+ Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
+ *control_);
+ *effect_ = node;
+ return node;
+}
+
+
+Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
+ MachineType mem_type = module_->GetGlobalType(index);
+ Node* addr = jsgraph()->IntPtrConstant(
+ module_->globals_area + module_->module->globals->at(index).offset);
+ const Operator* op = jsgraph()->machine()->Store(
+ StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
+ Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
+ *effect_, *control_);
+ *effect_ = node;
+ return node;
+}
+
+
+void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+ uint32_t offset) {
+ // TODO(turbofan): fold bounds checks for constant indexes.
+ CHECK_GE(module_->mem_end, module_->mem_start);
+ ptrdiff_t size = module_->mem_end - module_->mem_start;
+ byte memsize = wasm::WasmOpcodes::MemSize(memtype);
+ Node* cond;
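+ // The first condition below also catches unsigned wrap-around in the
+ // uint32 computation of {offset + memsize}.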
+ if (static_cast<ptrdiff_t>(offset) >= size ||
+ static_cast<ptrdiff_t>(offset + memsize) > size) {
+ // The access will always throw.
+ cond = jsgraph()->Int32Constant(0);
+ } else {
+ // Check against the limit.
+ size_t limit = size - offset - memsize;
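+ // E.g. size = 0x10000, offset = 8, memsize = 4 gives limit = 0xfff4;
+ // any index <= limit is in bounds.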
+ CHECK(limit <= kMaxUInt32);
+ cond = graph()->NewNode(
+ jsgraph()->machine()->Uint32LessThanOrEqual(), index,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
+ }
+
+ trap_->AddTrapIfFalse(kTrapMemOutOfBounds, cond);
+}
+
+
+Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+ Node* index, uint32_t offset) {
+ Node* load;
+
+ if (module_ && module_->asm_js) {
+ // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
+ DCHECK_EQ(0, offset);
+ const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
+ load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
+ *control_);
+ } else {
+ // WASM semantics trap on OOB. Introduce an explicit bounds check.
+ BoundsCheckMem(memtype, index, offset);
+ load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
+ }
+
+ *effect_ = load;
+
+ if (type == wasm::kAstI64 &&
+ ElementSizeLog2Of(memtype.representation()) < 3) {
+ // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
+ if (memtype.IsSigned()) {
+ // sign extend
+ load = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
+ } else {
+ // zero extend
+ load =
+ graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), load);
+ }
+ }
+
+ return load;
+}
+
+
+Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
+ uint32_t offset, Node* val) {
+ Node* store;
+ if (module_ && module_->asm_js) {
+ // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+ DCHECK_EQ(0, offset);
+ const Operator* op =
+ jsgraph()->machine()->CheckedStore(memtype.representation());
+ store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val, *effect_,
+ *control_);
+ } else {
+ // WASM semantics trap on OOB. Introduce an explicit bounds check.
+ BoundsCheckMem(memtype, index, offset);
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store =
+ graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
+ }
+ *effect_ = store;
+ return store;
+}
+
+
+void WasmGraphBuilder::PrintDebugName(Node* node) {
+ PrintF("#%d:%s", node->id(), node->op()->mnemonic());
+}
+
+
+Node* WasmGraphBuilder::String(const char* string) {
+ return jsgraph()->Constant(
+ jsgraph()->isolate()->factory()->NewStringFromAsciiChecked(string));
+}
+
+
+Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
+
+
+Handle<JSFunction> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
+ Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
+ wasm::WasmFunction* func = &module->module->functions->at(index);
+
+ //----------------------------------------------------------------------------
+ // Create the JSFunction object.
+ //----------------------------------------------------------------------------
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
+ int params = static_cast<int>(func->sig->parameter_count());
+ shared->set_length(params);
+ shared->set_internal_formal_parameter_count(1 + params);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->wasm_function_map(), name, MaybeHandle<Code>());
+ function->SetInternalField(0, *module_object);
+ function->set_shared(*shared);
+
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ JSOperatorBuilder javascript(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.set_module(module);
+ builder.BuildJSToWasmWrapper(wasm_code, func->sig);
+
+ //----------------------------------------------------------------------------
+ // Run the compilation pipeline.
+ //----------------------------------------------------------------------------
+ {
+ // Changes lowering requires types.
+ Typer typer(isolate, &graph);
+ NodeVector roots(&zone);
+ jsgraph.GetCachedNodes(&roots);
+ typer.Run(roots);
+
+ // Run generic and change lowering.
+ JSGenericLowering generic(true, &jsgraph);
+ ChangeLowering changes(&jsgraph);
+ GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
+ graph_reducer.AddReducer(&changes);
+ graph_reducer.AddReducer(&generic);
+ graph_reducer.ReduceGraph();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ int params = static_cast<int>(
+ module->GetFunctionSignature(index)->parameter_count());
+ CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
+ &zone, false, params + 1, CallDescriptor::kNoFlags);
+ CompilationInfo info("js-to-wasm", isolate, &zone);
+ // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the wrapper code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+ EmbeddedVector<char, 128> buffer;
+ const char* name = "";
+ if (func->name_offset > 0) {
+ const byte* ptr = module->module->module_start + func->name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "JS->WASM function wrapper #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ // Set the JSFunction's machine code.
+ function->set_code(*code);
+ }
+ return function;
+}
+
+
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<JSFunction> function,
+ uint32_t index) {
+ wasm::WasmFunction* func = &module->module->functions->at(index);
+
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ JSOperatorBuilder javascript(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.set_module(module);
+ builder.BuildWasmToJSWrapper(function, func->sig);
+
+ Handle<Code> code = Handle<Code>::null();
+ {
+ // Changes lowering requires types.
+ Typer typer(isolate, &graph);
+ NodeVector roots(&zone);
+ jsgraph.GetCachedNodes(&roots);
+ typer.Run(roots);
+
+ // Run generic and change lowering.
+ JSGenericLowering generic(true, &jsgraph);
+ ChangeLowering changes(&jsgraph);
+ GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
+ graph_reducer.AddReducer(&changes);
+ graph_reducer.AddReducer(&generic);
+ graph_reducer.ReduceGraph();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming = module->GetWasmCallDescriptor(&zone, func->sig);
+ CompilationInfo info("wasm-to-js", isolate, &zone);
+ // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the wrapper code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+ EmbeddedVector<char, 128> buffer;
+ const char* name = "";
+ if (func->name_offset > 0) {
+ const byte* ptr = module->module->module_start + func->name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "WASM->JS function wrapper #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ }
+ return code;
+}
+
+
+// Helper function to compile a single function.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction& function,
+ int index) {
+ if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
+ // TODO(titzer): clean me up a bit.
+ OFStream os(stdout);
+ os << "Compiling WASM function #" << index << ":";
+ if (function.name_offset > 0) {
+ os << module_env->module->GetName(function.name_offset);
+ }
+ os << std::endl;
+ }
+ // Initialize the function environment for decoding.
+ wasm::FunctionEnv env;
+ env.module = module_env;
+ env.sig = function.sig;
+ env.local_int32_count = function.local_int32_count;
+ env.local_int64_count = function.local_int64_count;
+ env.local_float32_count = function.local_float32_count;
+ env.local_float64_count = function.local_float64_count;
+ env.SumLocals();
+
+ // Create a TF graph during decoding.
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
+ wasm::TreeResult result = wasm::BuildTFGraph(
+ &builder, &env, // --
+ module_env->module->module_start, // --
+ module_env->module->module_start + function.code_start_offset, // --
+ module_env->module->module_start + function.code_end_offset); // --
+
+ if (result.failed()) {
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compilation failed: " << result << std::endl;
+ }
+ // Add the function as another context for the exception.
+ EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "Compiling WASM function #%d:%s failed:", index,
+ module_env->module->GetName(function.name_offset));
+ thrower.Failed(buffer.start(), result);
+ return Handle<Code>::null();
+ }
+
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor = const_cast<CallDescriptor*>(
+ module_env->GetWasmCallDescriptor(&zone, function.sig));
+ CompilationInfo info("wasm", isolate, &zone);
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+ EmbeddedVector<char, 128> buffer;
+ const char* name = "";
+ if (function.name_offset > 0) {
+ const byte* ptr = module_env->module->module_start + function.name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "WASM function #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ return code;
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/wasm-compiler.h b/chromium/v8/src/compiler/wasm-compiler.h
new file mode 100644
index 00000000000..1a17a832e4e
--- /dev/null
+++ b/chromium/v8/src/compiler/wasm-compiler.h
@@ -0,0 +1,190 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_WASM_COMPILER_H_
+#define V8_COMPILER_WASM_COMPILER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+// Forward declarations for some compiler data structures.
+class Node;
+class JSGraph;
+class Graph;
+}
+
+namespace wasm {
+// Forward declarations for some WASM data structures.
+struct ModuleEnv;
+struct WasmFunction;
+class ErrorThrower;
+
+// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
+typedef compiler::Node TFNode;
+typedef compiler::JSGraph TFGraph;
+}
+
+namespace compiler {
+// Compiles a single function, producing a code object.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction& function, int index);
+
+// Wraps a JS function, producing a code object that can be called from WASM.
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<JSFunction> function,
+ uint32_t index);
+
+// Wraps a given wasm code object, producing a JSFunction that can be called
+// from JavaScript.
+Handle<JSFunction> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
+ Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
+
+// Abstracts details of building TurboFan graph nodes for WASM to separate
+// the WASM decoder from the internal details of TurboFan.
+class WasmTrapHelper;
+class WasmGraphBuilder {
+ public:
+ WasmGraphBuilder(Zone* z, JSGraph* g, wasm::FunctionSig* function_signature);
+
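+ // Returns the shared scratch buffer, growing it if {count} exceeds the
+ // current capacity. Growing does not preserve contents; use Realloc()
+ // below when existing entries must be copied over.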
+ Node** Buffer(size_t count) {
+ if (count > cur_bufsize_) {
+ size_t new_size = count + cur_bufsize_ + 5;
+ cur_buffer_ =
+ reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
+ cur_bufsize_ = new_size;
+ }
+ return cur_buffer_;
+ }
+
+ //-----------------------------------------------------------------------
+ // Operations independent of {control} or {effect}.
+ //-----------------------------------------------------------------------
+ Node* Error();
+ Node* Start(unsigned params);
+ Node* Param(unsigned index, wasm::LocalType type);
+ Node* Loop(Node* entry);
+ Node* Terminate(Node* effect, Node* control);
+ Node* Merge(unsigned count, Node** controls);
+ Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+ Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* Float32Constant(float value);
+ Node* Float64Constant(double value);
+ Node* Constant(Handle<Object> value);
+ Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right);
+ Node* Unop(wasm::WasmOpcode opcode, Node* input);
+ unsigned InputCount(Node* node);
+ bool IsPhiWithMerge(Node* phi, Node* merge);
+ void AppendToMerge(Node* merge, Node* from);
+ void AppendToPhi(Node* merge, Node* phi, Node* from);
+
+ //-----------------------------------------------------------------------
+ // Operations that read and/or write {control} and {effect}.
+ //-----------------------------------------------------------------------
+ Node* Branch(Node* cond, Node** true_node, Node** false_node);
+ Node* Switch(unsigned count, Node* key);
+ Node* IfValue(int32_t value, Node* sw);
+ Node* IfDefault(Node* sw);
+ Node* Return(unsigned count, Node** vals);
+ Node* ReturnVoid();
+ Node* Unreachable();
+
+ Node* CallDirect(uint32_t index, Node** args);
+ Node* CallIndirect(uint32_t index, Node** args);
+ void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
+ void BuildWasmToJSWrapper(Handle<JSFunction> function,
+ wasm::FunctionSig* sig);
+ Node* ToJS(Node* node, Node* context, wasm::LocalType type);
+ Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+ Node* Invert(Node* node);
+ Node* FunctionTable();
+
+ //-----------------------------------------------------------------------
+ // Operations that concern the linear memory and globals.
+ //-----------------------------------------------------------------------
+ Node* MemSize(uint32_t offset);
+ Node* LoadGlobal(uint32_t index);
+ Node* StoreGlobal(uint32_t index, Node* val);
+ Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+ uint32_t offset);
+ Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val);
+
+ static void PrintDebugName(Node* node);
+
+ Node* Control() { return *control_; }
+ Node* Effect() { return *effect_; }
+
+ void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
+
+ void set_control_ptr(Node** control) { this->control_ = control; }
+
+ void set_effect_ptr(Node** effect) { this->effect_ = effect; }
+
+ wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+
+ private:
+ static const int kDefaultBufferSize = 16;
+ friend class WasmTrapHelper;
+
+ Zone* zone_;
+ JSGraph* jsgraph_;
+ wasm::ModuleEnv* module_;
+ Node* mem_buffer_;
+ Node* mem_size_;
+ Node* function_table_;
+ Node** control_;
+ Node** effect_;
+ Node** cur_buffer_;
+ size_t cur_bufsize_;
+ Node* def_buffer_[kDefaultBufferSize];
+
+ WasmTrapHelper* trap_;
+ wasm::FunctionSig* function_signature_;
+
+ // Internal helper methods.
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph();
+
+ Node* String(const char* string);
+ Node* MemBuffer(uint32_t offset);
+ void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+ Node* BuildF32Neg(Node* input);
+ Node* BuildF64Neg(Node* input);
+ Node* BuildF32CopySign(Node* left, Node* right);
+ Node* BuildF64CopySign(Node* left, Node* right);
+ Node* BuildF32Min(Node* left, Node* right);
+ Node* BuildF32Max(Node* left, Node* right);
+ Node* BuildF64Min(Node* left, Node* right);
+ Node* BuildF64Max(Node* left, Node* right);
+ Node* BuildI32SConvertF32(Node* input);
+ Node* BuildI32SConvertF64(Node* input);
+ Node* BuildI32UConvertF32(Node* input);
+ Node* BuildI32UConvertF64(Node* input);
+ Node* BuildI32Ctz(Node* input);
+ Node* BuildI32Popcnt(Node* input);
+ Node* BuildI64Ctz(Node* input);
+ Node* BuildI64Popcnt(Node* input);
+
+ Node** Realloc(Node** buffer, size_t count) {
+ Node** buf = Buffer(count);
+ if (buf != buffer) memcpy(buf, buffer, count * sizeof(Node*));
+ return buf;
+ }
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_COMPILER_H_
diff --git a/chromium/v8/src/compiler/wasm-linkage.cc b/chromium/v8/src/compiler/wasm-linkage.cc
new file mode 100644
index 00000000000..7419a5c31f9
--- /dev/null
+++ b/chromium/v8/src/compiler/wasm-linkage.cc
@@ -0,0 +1,282 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/wasm/wasm-module.h"
+
+#include "src/compiler/linkage.h"
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+// TODO(titzer): this should not be in the WASM namespace.
+namespace wasm {
+
+using compiler::LocationSignature;
+using compiler::CallDescriptor;
+using compiler::LinkageLocation;
+
+namespace {
+MachineType MachineTypeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return MachineType::Int32();
+ case kAstI64:
+ return MachineType::Int64();
+ case kAstF64:
+ return MachineType::Float64();
+ case kAstF32:
+ return MachineType::Float32();
+ default:
+ UNREACHABLE();
+ return MachineType::AnyTagged();
+ }
+}
+
+
+// Platform-specific configuration for C calling convention.
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+LinkageLocation regloc(DoubleRegister reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+LinkageLocation stackloc(int i) {
+ return LinkageLocation::ForCallerFrameSlot(i);
+}
+
+
+#if V8_TARGET_ARCH_IA32
+// ===========================================================================
+// == ia32 ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_RETURN_REGISTERS eax, edx
+#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
+#define FP_RETURN_REGISTERS xmm1, xmm2
+
+#elif V8_TARGET_ARCH_X64
+// ===========================================================================
+// == x64 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS rax, rdx, rcx, rbx, rsi, rdi
+#define GP_RETURN_REGISTERS rax, rdx
+#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
+#define FP_RETURN_REGISTERS xmm1, xmm2
+
+#elif V8_TARGET_ARCH_X87
+// ===========================================================================
+// == x87 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_RETURN_REGISTERS eax, edx
+#define FP_RETURN_REGISTERS stX_0
+
+#elif V8_TARGET_ARCH_ARM
+// ===========================================================================
+// == arm ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r0, r1, r2, r3
+#define GP_RETURN_REGISTERS r0, r1
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#elif V8_TARGET_ARCH_ARM64
+// ===========================================================================
+// == arm64 ==================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define GP_RETURN_REGISTERS x0, x1
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#elif V8_TARGET_ARCH_MIPS
+// ===========================================================================
+// == mips ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS a0, a1, a2, a3
+#define GP_RETURN_REGISTERS v0, v1
+#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
+#define FP_RETURN_REGISTERS f2, f4
+
+#elif V8_TARGET_ARCH_MIPS64
+// ===========================================================================
+// == mips64 =================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define GP_RETURN_REGISTERS v0, v1
+#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
+#define FP_RETURN_REGISTERS f2, f4
+
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// ===========================================================================
+// == ppc & ppc64 ============================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
+#define GP_RETURN_REGISTERS r3, r4
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#else
+// ===========================================================================
+// == unknown ================================================================
+// ===========================================================================
+// Don't define anything. We'll just always use the stack.
+#endif
+
+
+// Helper for allocating either a GP or FP reg, or the next stack slot.
+struct Allocator {
+ Allocator(const Register* gp, int gpc, const DoubleRegister* fp, int fpc)
+ : gp_count(gpc),
+ gp_offset(0),
+ gp_regs(gp),
+ fp_count(fpc),
+ fp_offset(0),
+ fp_regs(fp),
+ stack_offset(0) {}
+
+ int gp_count;
+ int gp_offset;
+ const Register* gp_regs;
+
+ int fp_count;
+ int fp_offset;
+ const DoubleRegister* fp_regs;
+
+ int stack_offset;
+
+ LinkageLocation Next(LocalType type) {
+ if (IsFloatingPoint(type)) {
+ // Allocate a floating point register/stack location.
+ if (fp_offset < fp_count) {
+ return regloc(fp_regs[fp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += Words(type);
+ return stackloc(offset);
+ }
+ } else {
+ // Allocate a general purpose register/stack location.
+ if (gp_offset < gp_count) {
+ return regloc(gp_regs[gp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += Words(type);
+ return stackloc(offset);
+ }
+ }
+ }
+ bool IsFloatingPoint(LocalType type) {
+ return type == kAstF32 || type == kAstF64;
+ }
+ int Words(LocalType type) {
+ // The code generation for pushing parameters on the stack does not
+ // distinguish between float32 and float64. Therefore float32 also needs
+ // two words.
+ if (kPointerSize < 8 &&
+ (type == kAstI64 || type == kAstF64 || type == kAstF32)) {
+ return 2;
+ }
+ return 1;
+ }
+};
+} // namespace
+
+
+// General code uses the above configuration data.
+CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
+ FunctionSig* fsig) {
+ MachineSignature::Builder msig(zone, fsig->return_count(),
+ fsig->parameter_count());
+ LocationSignature::Builder locations(zone, fsig->return_count(),
+ fsig->parameter_count());
+
+#ifdef GP_RETURN_REGISTERS
+ static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
+ static const int kGPReturnRegistersCount =
+ static_cast<int>(arraysize(kGPReturnRegisters));
+#else
+ static const Register* kGPReturnRegisters = nullptr;
+ static const int kGPReturnRegistersCount = 0;
+#endif
+
+#ifdef FP_RETURN_REGISTERS
+ static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
+ static const int kFPReturnRegistersCount =
+ static_cast<int>(arraysize(kFPReturnRegisters));
+#else
+ static const DoubleRegister* kFPReturnRegisters = nullptr;
+ static const int kFPReturnRegistersCount = 0;
+#endif
+
+ Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
+ kFPReturnRegisters, kFPReturnRegistersCount);
+
+ // Add return location(s).
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ LocalType ret = fsig->GetReturn(i);
+ msig.AddReturn(MachineTypeFor(ret));
+ locations.AddReturn(rets.Next(ret));
+ }
+
+#ifdef GP_PARAM_REGISTERS
+ static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
+ static const int kGPParamRegistersCount =
+ static_cast<int>(arraysize(kGPParamRegisters));
+#else
+ static const Register* kGPParamRegisters = nullptr;
+ static const int kGPParamRegistersCount = 0;
+#endif
+
+#ifdef FP_PARAM_REGISTERS
+ static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
+ static const int kFPParamRegistersCount =
+ static_cast<int>(arraysize(kFPParamRegisters));
+#else
+ static const DoubleRegister* kFPParamRegisters = nullptr;
+ static const int kFPParamRegistersCount = 0;
+#endif
+
+ Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
+ kFPParamRegistersCount);
+
+ // Add register and/or stack parameter(s).
+ const int parameter_count = static_cast<int>(fsig->parameter_count());
+ for (int i = 0; i < parameter_count; i++) {
+ LocalType param = fsig->GetParam(i);
+ msig.AddParam(MachineTypeFor(param));
+ locations.AddParam(params.Next(param));
+ }
+
+ const RegList kCalleeSaveRegisters = 0;
+ const RegList kCalleeSaveFPRegisters = 0;
+
+ // The target for WASM calls is always a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig.Build(), // machine_sig
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kUseNativeStack, // flags
+ "c-call");
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/x64/code-generator-x64.cc b/chromium/v8/src/compiler/x64/code-generator-x64.cc
index 4c991718f8b..be406fbad26 100644
--- a/chromium/v8/src/compiler/x64/code-generator-x64.cc
+++ b/chromium/v8/src/compiler/x64/code-generator-x64.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -49,8 +49,8 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand ToOperand(InstructionOperand* op, int extra = 0) {
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return Operand(offset.from_stack_pointer() ? rsp : rbp,
offset.offset() + extra);
}
@@ -166,7 +166,7 @@ class OutOfLineLoadNaN final : public OutOfLineCode {
OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() final { __ pcmpeqd(result_, result_); }
+ void Generate() final { __ Pcmpeqd(result_, result_); }
private:
XMMRegister const result_;
@@ -181,7 +181,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_);
+ __ Movsd(MemOperand(rsp, 0), input_);
__ SlowTruncateToI(result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
}
@@ -191,6 +191,46 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
XMMRegister const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ leap(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
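
The two `mode_ >` comparisons in OutOfLineRecordWrite::Generate() depend on
the enum order kValueIsMap < kValueIsPointer < kValueIsAny: the more the
instruction selector statically knows about the stored value, the fewer
dynamic filter checks need to be emitted before calling the stub. A toy
sketch of that filtering (stand-in types, not the real API):

#include <cstdio>

enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

void DescribeChecks(RecordWriteMode mode) {
  // The Smi check is emitted only when the value might not be a heap
  // pointer at all (kValueIsAny); a Smi store never needs a barrier.
  bool smi_check = mode > RecordWriteMode::kValueIsPointer;
  // The value-page flag check is elided when the value is known to be a
  // map (kValueIsMap).
  bool page_check = mode > RecordWriteMode::kValueIsMap;
  std::printf("mode=%d smi_check=%d page_check=%d\n",
              static_cast<int>(mode), smi_check, page_check);
}

int main() {
  DescribeChecks(RecordWriteMode::kValueIsMap);      // neither check
  DescribeChecks(RecordWriteMode::kValueIsPointer);  // page check only
  DescribeChecks(RecordWriteMode::kValueIsAny);      // both checks
  return 0;
}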
@@ -335,7 +375,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ pcmpeqd(result_, result_); \
+ __ Pcmpeqd(result_, result_); \
__ cmpl(kScratchRegister, Immediate(length_)); \
__ j(above_equal, exit()); \
__ asm_instr(result_, \
@@ -533,13 +573,25 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
} while (false)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ movq(rsp, rbp);
- __ popq(rbp);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ movq(rbp, MemOperand(rbp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -555,14 +607,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ Call(Operand(reg, entry));
+ __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -571,6 +625,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -582,6 +637,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(equal, kWrongFunctionContext);
}
__ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
}
@@ -592,15 +648,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters);
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -610,6 +678,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -622,12 +692,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -643,12 +716,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
- __ cvttsd2siq(result, input);
+ __ Cvttsd2siq(result, input);
__ cmpq(result, Immediate(1));
__ j(overflow, ool->entry());
__ bind(ool->exit());
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ movp(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -763,6 +854,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Ror:
ASSEMBLE_SHIFT(rorq, 6);
break;
+ case kX64Lzcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kX64Lzcnt32:
if (instr->InputAt(0)->IsRegister()) {
__ Lzcntl(i.OutputRegister(), i.InputRegister(0));
@@ -770,8 +868,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Lzcntl(i.OutputRegister(), i.InputOperand(0));
}
break;
+ case kX64Tzcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Tzcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Popcnt:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Popcntq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Popcntq(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64Popcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Popcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Popcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kSSEFloat32Cmp:
- ASSEMBLE_SSE_BINOP(ucomiss);
+ ASSEMBLE_SSE_BINOP(Ucomiss);
break;
case kSSEFloat32Add:
ASSEMBLE_SSE_BINOP(addss);
@@ -812,10 +938,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SSE_BINOP(minss);
break;
case kSSEFloat32ToFloat64:
- ASSEMBLE_SSE_UNOP(cvtss2sd);
+ ASSEMBLE_SSE_UNOP(Cvtss2sd);
+ break;
+ case kSSEFloat32Round: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
+ }
case kSSEFloat64Cmp:
- ASSEMBLE_SSE_BINOP(ucomisd);
+ ASSEMBLE_SSE_BINOP(Ucomisd);
break;
case kSSEFloat64Add:
ASSEMBLE_SSE_BINOP(addsd);
@@ -830,14 +963,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SSE_BINOP(divsd);
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat64Mod: {
__ subq(rsp, Immediate(kDoubleSize));
// Move values to st(0) and st(1).
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(rsp, 0));
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(rsp, 0));
// Loop while fprem isn't done.
Label mod_loop;
@@ -860,7 +993,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(rsp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+ __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
break;
}
@@ -891,48 +1024,224 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
CpuFeatureScope sse_scope(masm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSEFloat64ToFloat32:
- ASSEMBLE_SSE_UNOP(cvtsd2ss);
+ ASSEMBLE_SSE_UNOP(Cvtsd2ss);
break;
case kSSEFloat64ToInt32:
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat64ToUint32: {
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
- __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
+ __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
}
__ AssertZeroExtended(i.OutputRegister());
break;
}
+ case kSSEFloat32ToInt64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ Label done;
+ Label fail;
+ __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
+ }
+ // If the input is NaN, then the conversion fails.
+ __ j(parity_even, &fail);
+ // If the input is INT64_MIN, then the conversion succeeds.
+ __ j(equal, &done);
+ __ cmpq(i.OutputRegister(0), Immediate(1));
+ // If the conversion results in INT64_MIN, but the input was not
+ // INT64_MIN, then the conversion fails.
+ __ j(no_overflow, &done);
+ __ bind(&fail);
+ __ Set(i.OutputRegister(1), 0);
+ __ bind(&done);
+ }
+ break;
+ case kSSEFloat64ToInt64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
+ }
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ Label done;
+ Label fail;
+ __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
+ }
+ // If the input is NaN, then the conversion fails.
+ __ j(parity_even, &fail);
+ // If the input is INT64_MIN, then the conversion succeeds.
+ __ j(equal, &done);
+ __ cmpq(i.OutputRegister(0), Immediate(1));
+ // If the conversion results in INT64_MIN, but the input was not
+ // INT64_MIN, then the conversion fails.
+ __ j(no_overflow, &done);
+ __ bind(&fail);
+ __ Set(i.OutputRegister(1), 0);
+ __ bind(&done);
+ }
+ break;
+ case kSSEFloat32ToUint64: {
+ Label done;
+ Label success;
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 0);
+ }
+ // There is no Float32ToUint64 instruction, so we have to use the
+ // Float32ToInt64 instruction instead.
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ // If the result of the Float32ToInt64 conversion is positive, we are
+ // already done.
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+ // The result of the first conversion was negative, which means that the
+ // input value was not within the positive int64 range. We subtract 2^63
+ // and convert it again to see if it is within the uint64 range.
+ __ Move(kScratchDoubleReg, -9223372036854775808.0f);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ addss(kScratchDoubleReg, i.InputOperand(0));
+ }
+ __ Cvttss2siq(i.OutputRegister(), kScratchDoubleReg);
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ // The only possible negative value here is 0x8000000000000000, which is
+ // used on x64 to indicate an integer overflow.
+ __ j(negative, &done);
+ // The input value is within the uint64 range and the second conversion
+ // succeeded, but we still have to undo the subtraction we did earlier.
+ __ Set(kScratchRegister, 0x8000000000000000);
+ __ orq(i.OutputRegister(), kScratchRegister);
+ __ bind(&success);
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ }
+ __ bind(&done);
+ break;
+ }
+ case kSSEFloat64ToUint64: {
+ Label done;
+ Label success;
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 0);
+ }
+ // There is no Float64ToUint64 instruction, so we have to use the
+ // Float64ToInt64 instruction instead.
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ // If the result of the Float64ToInt64 conversion is positive, we are
+ // already done.
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+ // The result of the first conversion was negative, which means that the
+ // input value was not within the positive int64 range. We subtract 2^63
+ // and convert it again to see if it is within the uint64 range.
+ __ Move(kScratchDoubleReg, -9223372036854775808.0);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ addsd(kScratchDoubleReg, i.InputOperand(0));
+ }
+ __ Cvttsd2siq(i.OutputRegister(), kScratchDoubleReg);
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ // The only possible negative value here is 0x8000000000000000, which is
+ // used on x64 to indicate an integer overflow.
+ __ j(negative, &done);
+ // The input value is within the uint64 range and the second conversion
+ // succeeded, but we still have to undo the subtraction we did earlier.
+ __ Set(kScratchRegister, 0x8000000000000000);
+ __ orq(i.OutputRegister(), kScratchRegister);
+ __ bind(&success);
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ }
+ __ bind(&done);
+ break;
+ }
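
Both unsigned-truncation cases above use the same bias trick, since x64 can
only truncate a float to a signed 64-bit integer. As a rough standalone
illustration (not V8 code; the static_casts stand in for cvttsd2siq, which
yields INT64_MIN on overflow, whereas a genuinely out-of-range cast is
undefined behavior in portable C++):

#include <cstdint>
#include <cstdio>

// Converts a double to uint64 using only signed truncation. Inputs in
// [2^63, 2^64) are biased down by 2^63 first, then the 2^63 bit is OR-ed
// back in. Returns false on failure (negative, NaN, or out-of-range
// input), like the optional success output of the instruction.
bool DoubleToUint64(double input, uint64_t* out) {
  // First attempt: plain signed truncation, valid for [0, 2^63).
  int64_t result = static_cast<int64_t>(input);  // cvttsd2siq stand-in
  if (result >= 0) {
    *out = static_cast<uint64_t>(result);
    return true;
  }
  // A negative result means the input was not in the positive int64
  // range. Subtract 2^63 and convert again.
  int64_t biased = static_cast<int64_t>(input - 9223372036854775808.0);
  if (biased < 0) return false;  // still out of range (overflow marker)
  // Undo the bias by OR-ing the 2^63 bit back into the result.
  *out = static_cast<uint64_t>(biased) | 0x8000000000000000ull;
  return true;
}

int main() {
  uint64_t v = 0;
  bool ok = DoubleToUint64(1e19, &v);
  std::printf("%d %llu\n", ok, static_cast<unsigned long long>(v));
  // Prints: 1 10000000000000000000
  std::printf("%d\n", DoubleToUint64(-1.0, &v));  // Prints: 0
  return 0;
}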
case kSSEInt32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
- __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEInt64ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
+ case kSSEInt64ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEUint64ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movq(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqui2ss(i.OutputDoubleRegister(), kScratchRegister,
+ i.TempRegister(0));
+ break;
+ case kSSEUint64ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movq(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqui2sd(i.OutputDoubleRegister(), kScratchRegister,
+ i.TempRegister(0));
+ break;
case kSSEUint32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
__ movl(kScratchRegister, i.InputRegister(0));
} else {
__ movl(kScratchRegister, i.InputOperand(0));
}
- __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
+ __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
@@ -958,9 +1267,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kSSEFloat64LoadLowWord32:
if (instr->InputAt(0)->IsRegister()) {
- __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kAVXFloat32Cmp: {
@@ -985,7 +1294,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_AVX_BINOP(vdivss);
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat32Max:
ASSEMBLE_AVX_BINOP(vmaxss);
@@ -1015,7 +1324,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_AVX_BINOP(vdivsd);
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat64Max:
ASSEMBLE_AVX_BINOP(vmaxsd);
@@ -1025,9 +1334,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1039,9 +1348,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1053,9 +1362,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1067,9 +1376,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
CpuFeatureScope avx_scope(masm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
@@ -1164,39 +1473,39 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kX64Movsd:
if (instr->HasOutput()) {
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
+ __ Movsd(operand, i.InputDoubleRegister(index));
}
break;
case kX64BitcastFI:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastDL:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movq(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movq(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastIF:
if (instr->InputAt(0)->IsRegister()) {
- __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64BitcastLD:
if (instr->InputAt(0)->IsRegister()) {
- __ movq(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64Lea32: {
@@ -1245,15 +1554,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Push:
if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else if (instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
__ pushq(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
}
break;
@@ -1266,24 +1579,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kX64StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register value = i.InputRegister(2);
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (HasImmediateInput(instr, 1)) {
- int index = i.InputInt32(1);
- Register scratch = i.TempRegister(1);
- __ movq(Operand(object, index), value);
- __ RecordWriteContextSlot(object, index, value, scratch, mode);
- } else {
- Register index = i.InputRegister(1);
- __ movq(Operand(object, index, times_1, 0), value);
- __ leaq(index, Operand(object, index, times_1, 0));
- __ RecordWrite(object, index, value, mode);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -1303,10 +1598,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(movb);
@@ -1321,10 +1616,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_INTEGER(movq);
break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
break;
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -1510,17 +1805,17 @@ static const int kQuadWordSize = 16;
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1602,10 +1897,10 @@ void CodeGenerator::AssembleReturn() {
__ addp(rsp, Immediate(stack_size));
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ movq(rsp, rbp); // Move stack pointer back to frame pointer.
__ popq(rbp); // Pop caller's frame pointer.
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -1618,14 +1913,14 @@ void CodeGenerator::AssembleReturn() {
}
size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
// Might need rcx for scratch if pop_size is too big.
- DCHECK_EQ(0, descriptor->CalleeSavedRegisters() & rcx.bit());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
__ Ret(static_cast<int>(pop_size), rcx);
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- X64OperandConverter g(this, NULL);
+ X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1720,23 +2015,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(dst, src);
+ __ Movapd(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
- __ movsd(dst, src);
+ __ Movsd(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
Operand src = g.ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ __ Movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(dst, xmm0);
}
} else {
UNREACHABLE();
@@ -1746,16 +2041,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- X64OperandConverter g(this, NULL);
+ X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
- __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
} else if (source->IsRegister() && destination->IsStackSlot()) {
Register src = g.ToRegister(source);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
Operand dst = g.ToOperand(destination);
- __ xchgq(src, dst);
+ __ movq(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() &&
destination->IsDoubleStackSlot())) {
@@ -1764,24 +2068,29 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
__ movq(tmp, dst);
- __ xchgq(tmp, src);
- __ movq(dst, tmp);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ src = g.ToOperand(source);
+ __ movq(src, tmp);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(xmm0, src);
- __ movaps(src, dst);
- __ movaps(dst, xmm0);
+ __ Movapd(xmm0, src);
+ __ Movapd(src, dst);
+ __ Movapd(dst, xmm0);
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(src, dst);
- __ movsd(dst, xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1816,6 +2125,6 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
#undef __
-} // namespace internal
} // namespace compiler
+} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/compiler/x64/instruction-codes-x64.h b/chromium/v8/src/compiler/x64/instruction-codes-x64.h
index 7d3b434d158..8e8e7652c31 100644
--- a/chromium/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/chromium/v8/src/compiler/x64/instruction-codes-x64.h
@@ -46,7 +46,12 @@ namespace compiler {
V(X64Sar32) \
V(X64Ror) \
V(X64Ror32) \
+ V(X64Lzcnt) \
V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -58,6 +63,7 @@ namespace compiler {
V(SSEFloat32Max) \
V(SSEFloat32Min) \
V(SSEFloat32ToFloat64) \
+ V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
@@ -73,7 +79,15 @@ namespace compiler {
V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
@@ -119,7 +133,6 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
- V(X64StoreWriteBarrier) \
V(X64StackCheck)
diff --git a/chromium/v8/src/compiler/x64/instruction-scheduler-x64.cc b/chromium/v8/src/compiler/x64/instruction-scheduler-x64.cc
new file mode 100644
index 00000000000..f8537c879c4
--- /dev/null
+++ b/chromium/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -0,0 +1,182 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kX64Add:
+ case kX64Add32:
+ case kX64And:
+ case kX64And32:
+ case kX64Cmp:
+ case kX64Cmp32:
+ case kX64Test:
+ case kX64Test32:
+ case kX64Or:
+ case kX64Or32:
+ case kX64Xor:
+ case kX64Xor32:
+ case kX64Sub:
+ case kX64Sub32:
+ case kX64Imul:
+ case kX64Imul32:
+ case kX64ImulHigh32:
+ case kX64UmulHigh32:
+ case kX64Idiv:
+ case kX64Idiv32:
+ case kX64Udiv:
+ case kX64Udiv32:
+ case kX64Not:
+ case kX64Not32:
+ case kX64Neg:
+ case kX64Neg32:
+ case kX64Shl:
+ case kX64Shl32:
+ case kX64Shr:
+ case kX64Shr32:
+ case kX64Sar:
+ case kX64Sar32:
+ case kX64Ror:
+ case kX64Ror32:
+ case kX64Lzcnt:
+ case kX64Lzcnt32:
+ case kX64Tzcnt:
+ case kX64Tzcnt32:
+ case kX64Popcnt:
+ case kX64Popcnt32:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Mul:
+ case kSSEFloat32Div:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat32Round:
+ case kSSEFloat32Max:
+ case kSSEFloat32Min:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Mul:
+ case kSSEFloat64Div:
+ case kSSEFloat64Mod:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ case kSSEFloat64Sqrt:
+ case kSSEFloat64Round:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ case kSSEFloat64ToInt64:
+ case kSSEFloat32ToInt64:
+ case kSSEFloat64ToUint64:
+ case kSSEFloat32ToUint64:
+ case kSSEInt32ToFloat64:
+ case kSSEInt64ToFloat32:
+ case kSSEInt64ToFloat64:
+ case kSSEUint64ToFloat32:
+ case kSSEUint64ToFloat64:
+ case kSSEUint32ToFloat64:
+ case kSSEFloat64ExtractLowWord32:
+ case kSSEFloat64ExtractHighWord32:
+ case kSSEFloat64InsertLowWord32:
+ case kSSEFloat64InsertHighWord32:
+ case kSSEFloat64LoadLowWord32:
+ case kAVXFloat32Cmp:
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ case kAVXFloat32Div:
+ case kAVXFloat32Max:
+ case kAVXFloat32Min:
+ case kAVXFloat64Cmp:
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ case kAVXFloat64Div:
+ case kAVXFloat64Max:
+ case kAVXFloat64Min:
+ case kAVXFloat64Abs:
+ case kAVXFloat64Neg:
+ case kAVXFloat32Abs:
+ case kAVXFloat32Neg:
+ case kX64BitcastFI:
+ case kX64BitcastDL:
+ case kX64BitcastIF:
+ case kX64BitcastLD:
+ case kX64Lea32:
+ case kX64Lea:
+ case kX64Dec32:
+ case kX64Inc32:
+ return (instr->addressing_mode() == kMode_None)
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
+
+ case kX64Movsxbl:
+ case kX64Movzxbl:
+ case kX64Movsxwl:
+ case kX64Movzxwl:
+ case kX64Movsxlq:
+ DCHECK(instr->InputCount() >= 1);
+ return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
+ : kIsLoadOperation;
+
+ case kX64Movb:
+ case kX64Movw:
+ return kHasSideEffect;
+
+ case kX64Movl:
+ if (instr->HasOutput()) {
+ DCHECK(instr->InputCount() >= 1);
+ return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
+ : kIsLoadOperation;
+ } else {
+ return kHasSideEffect;
+ }
+
+ case kX64Movq:
+ case kX64Movsd:
+ case kX64Movss:
+ return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
+
+ case kX64StackCheck:
+ return kIsLoadOperation;
+
+ case kX64Push:
+ case kX64Poke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
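
These flags feed the architecture-independent scheduler, which may swap two
instructions only when neither constrains the other. The CanReorder sketch
below illustrates the usual constraints (loads reorder with loads but not
across side effects; side effects keep program order); it is a toy model,
not V8's actual dependency construction:

#include <cstdio>

enum Flags {
  kNoOpcodeFlags = 0,
  kIsLoadOperation = 1,
  kHasSideEffect = 2
};

bool CanReorder(int a, int b) {
  // Two side-effecting instructions (e.g. stores) must stay in order.
  if ((a & kHasSideEffect) && (b & kHasSideEffect)) return false;
  // A load must not cross a side effect that could change what it reads.
  if ((a & kHasSideEffect) && (b & kIsLoadOperation)) return false;
  if ((b & kHasSideEffect) && (a & kIsLoadOperation)) return false;
  // Pure instructions move freely.
  return true;
}

int main() {
  std::printf("%d\n", CanReorder(kNoOpcodeFlags, kHasSideEffect));     // 1
  std::printf("%d\n", CanReorder(kIsLoadOperation, kHasSideEffect));   // 0
  std::printf("%d\n", CanReorder(kIsLoadOperation, kIsLoadOperation)); // 1
  return 0;
}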
diff --git a/chromium/v8/src/compiler/x64/instruction-selector-x64.cc b/chromium/v8/src/compiler/x64/instruction-selector-x64.cc
index 516a9a7691c..c47a42eefe3 100644
--- a/chromium/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/chromium/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -41,12 +41,12 @@ class X64OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
- if (displacement != NULL) {
+ if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
@@ -57,7 +57,7 @@ class X64OperandGenerator final : public OperandGenerator {
mode = kMRn_modes[scale_exponent];
}
} else {
- if (displacement == NULL) {
+ if (displacement == nullptr) {
mode = kMode_MR;
} else {
inputs[(*input_count)++] = UseImmediate(displacement);
@@ -65,10 +65,10 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
} else {
- DCHECK(index != NULL);
+ DCHECK_NOT_NULL(index);
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
- if (displacement != NULL) {
+ if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
@@ -91,7 +91,7 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -108,33 +108,32 @@ class X64OperandGenerator final : public OperandGenerator {
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kX64Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX64Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -156,93 +155,118 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
- InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister()};
- Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
- g.UseImmediate(index), g.UseFixed(value, rcx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
} else {
- InstructionOperand temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
- Emit(kX64StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, rbx),
- g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
}
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kX64Movss;
- break;
- case kRepFloat64:
- opcode = kX64Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kX64Movb;
- break;
- case kRepWord16:
- opcode = kX64Movw;
- break;
- case kRepWord32:
- opcode = kX64Movl;
- break;
- case kRepTagged: // Fall through.
- case kRepWord64:
- opcode = kX64Movq;
- break;
- default:
- UNREACHABLE();
- return;
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kX64Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kX64Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kX64Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX64Movw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX64Movl;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kX64Movq;
+ break;
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+ inputs[input_count++] = value_operand;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- inputs[input_count++] = value_operand;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -266,33 +290,35 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -506,8 +532,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitWord32Shift(this, node, kX64Shl32);
@@ -572,19 +598,49 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitWord32Clz(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
// Try to match the Add to a leal pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
m.displacement());
return;
@@ -600,6 +656,16 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Add, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Add, &cont);
+}
+
+
void InstructionSelector::VisitInt32Sub(Node* node) {
X64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -630,6 +696,16 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Sub, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Sub, &cont);
+}
+
+
namespace {
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
@@ -688,8 +764,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitMul(this, node, kX64Imul32);
@@ -781,6 +857,70 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
+}
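
All four TryTruncate visitors share the same optional-output shape: one
mandatory value output, plus a success flag only when a Projection(1) user
exists. A minimal sketch with mock types (Node, FindProjection and Emit
below are hypothetical stand-ins, not the selector's real API):

#include <cstdio>

// Stand-in node: it either has a user consuming Projection(1), the 0/1
// success flag, or it does not.
struct Node {
  bool has_success_projection;
};

Node* FindProjection(Node* node, int index) {
  return (index == 1 && node->has_success_projection) ? node : nullptr;
}

void Emit(const char* opcode, int output_count) {
  std::printf("%s emitted with %d output(s)\n", opcode, output_count);
}

void VisitTryTruncate(Node* node, const char* opcode) {
  int output_count = 1;  // the truncated value is always produced
  if (FindProjection(node, 1) != nullptr) {
    output_count++;  // add the success flag output only when consumed
  }
  Emit(opcode, output_count);
}

int main() {
  Node with_flag = {true};
  Node without_flag = {false};
  VisitTryTruncate(&with_flag, "kSSEFloat64ToInt64");     // 2 outputs
  VisitTryTruncate(&without_flag, "kSSEFloat64ToInt64");  // 1 output
  return 0;
}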
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -906,6 +1046,34 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1045,11 +1213,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
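The rounding mode rides inside the instruction word via MiscField::encode, so a single kSSEFloat32Round arch opcode serves all four modes. A hedged sketch of that bit-packing idea (the field widths here are illustrative, not V8's actual layout):

    #include <cstdint>

    enum RoundingMode : uint32_t {
      kRoundToNearest = 0, kRoundDown = 1, kRoundUp = 2, kRoundToZero = 3
    };

    constexpr uint32_t kOpcodeBits = 9;  // assumed opcode field width

    constexpr uint32_t Encode(uint32_t arch_opcode, RoundingMode mode) {
      return arch_opcode | (static_cast<uint32_t>(mode) << kOpcodeBits);
    }

    constexpr RoundingMode DecodeMode(uint32_t code) {
      return static_cast<RoundingMode>(code >> kOpcodeBits);
    }

    static_assert(DecodeMode(Encode(42, kRoundUp)) == kRoundUp);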
@@ -1060,20 +1248,20 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
- X64OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
+}
+
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ X64OperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -1082,147 +1270,35 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : g.UseRegister(input);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input))
- ? g.UseRegister(input)
- : g.Use(input);
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
Emit(kX64Push, g.NoOutput(), value);
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- X64OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): Handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
- Emit(kX64Push, g.NoOutput(), value);
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
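EmitPrepareArguments above splits argument setup by calling convention: C calls poke each argument into a pre-allocated slot, while JS calls push in reverse order so the first argument lands closest to the frame. A simplified standalone sketch of the two strategies (hypothetical types, not V8 code):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Param { int value; };

    void PrepareCArguments(const std::vector<Param>& args) {
      // C convention: slots already exist, so write ("poke") by slot index.
      for (std::size_t n = 0; n < args.size(); ++n)
        std::printf("poke slot %zu <- %d\n", n, args[n].value);
    }

    void PrepareJSArguments(const std::vector<Param>& args) {
      // JS convention: grow the stack, pushing in reverse source order.
      for (auto it = args.rbegin(); it != args.rend(); ++it)
        std::printf("push %d\n", it->value);
    }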
namespace {
@@ -1429,12 +1505,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation>) is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -1442,6 +1518,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kInt32SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(this, node, kX64Sub32, &cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Add, &cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Sub, &cont);
default:
break;
}
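The new kInt64AddWithOverflow/kInt64SubWithOverflow cases let the branch consume the CPU overflow flag directly instead of first materializing a boolean. What the fused operation computes, sketched with a GCC/Clang builtin (an assumption; the emitted code tests the flags register directly):

    #include <cstdint>
    #include <cstdio>

    void BranchOnAddOverflow(int64_t a, int64_t b) {
      int64_t result;
      if (__builtin_add_overflow(a, b, &result)) {  // the kOverflow condition
        std::puts("overflow branch taken");
      } else {
        std::printf("result = %lld\n", static_cast<long long>(result));
      }
    }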
@@ -1704,10 +1786,21 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt;
+ }
if (CpuFeatures::IsSupported(SSE4_1)) {
- flags |= MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
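SupportedMachineOperatorFlags accumulates a bitmask, adding POPCNT- and SSE4.1-dependent operators only when the CPU reports those features. The pattern, reduced to a standalone sketch with illustrative flag names:

    #include <cstdint>

    enum Flag : uint32_t {
      kWord32Popcnt     = 1u << 0,
      kFloat32RoundDown = 1u << 1,
      // further operator flags would follow
    };

    uint32_t SupportedFlags(bool has_popcnt, bool has_sse41) {
      uint32_t flags = 0;  // unconditional baseline flags would be OR'd here
      if (has_popcnt) flags |= kWord32Popcnt;
      if (has_sse41)  flags |= kFloat32RoundDown;
      return flags;
    }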
diff --git a/chromium/v8/src/compiler/x87/code-generator-x87.cc b/chromium/v8/src/compiler/x87/code-generator-x87.cc
index 9ca9a3076f8..f4e334dbb26 100644
--- a/chromium/v8/src/compiler/x87/code-generator-x87.cc
+++ b/chromium/v8/src/compiler/x87/code-generator-x87.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/frames-x87.h"
#include "src/x87/macro-assembler-x87.h"
@@ -42,12 +42,18 @@ class X87OperandConverter : public InstructionOperandConverter {
return Operand(ToRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return Operand(offset.from_stack_pointer() ? esp : ebp,
offset.offset() + extra);
}
+ Operand ToMaterializableOperand(int materializable_offset) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ Frame::FPOffsetToSlot(materializable_offset));
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
Operand HighOperand(InstructionOperand* op) {
DCHECK(op->IsDoubleStackSlot());
return ToOperand(op, kPointerSize);
@@ -218,6 +224,46 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
X87Register const input_;
};
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ operand_(operand),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ void Generate() final {
+ if (mode_ > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value_, exit());
+ }
+ if (mode_ > RecordWriteMode::kValueIsMap) {
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ }
+ SaveFPRegsMode const save_fp_mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
+ EMIT_REMEMBERED_SET, save_fp_mode);
+ __ lea(scratch1_, operand_);
+ __ CallStub(&stub);
+ }
+
+ private:
+ Register const object_;
+ Operand const operand_;
+ Register const value_;
+ Register const scratch0_;
+ Register const scratch1_;
+ RecordWriteMode const mode_;
+};
+
} // namespace
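OutOfLineRecordWrite::Generate filters by RecordWriteMode before paying for the stub call: smi values are skipped unless the mode guarantees a pointer, and values on uninteresting pages are skipped unless the mode guarantees a map. A sketch of that filtering logic, assuming the same enum ordering as above:

    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

    bool NeedsRememberedSetEntry(RecordWriteMode mode, bool value_is_smi,
                                 bool value_page_is_interesting) {
      // Smis never need a barrier; only checked when the mode allows smis.
      if (mode > RecordWriteMode::kValueIsPointer && value_is_smi) return false;
      // Maps always count as interesting; other values consult the page flag.
      if (mode > RecordWriteMode::kValueIsMap && !value_page_is_interesting)
        return false;
      return true;  // fall through to the RecordWriteStub call
    }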
@@ -290,13 +336,25 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
} while (false)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(esp, ebp);
- __ pop(ebp);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(esp, Immediate(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ mov(ebp, MemOperand(ebp, 0));
}
+ frame_access_state()->SetFrameAccessToSP();
}
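AssembleDeconstructActivationRecord and AssemblePrepareTailCall bracket a tail call: a positive slot delta releases caller stack before the jump, a negative one reserves extra space first. A worked sketch of the pointer arithmetic (assuming 32-bit slots, i.e. kPointerSize == 4):

    #include <cassert>

    int AdjustSP(int esp, int sp_slot_delta) {
      const int kPointerSize = 4;
      if (sp_slot_delta > 0) esp += sp_slot_delta * kPointerSize;   // release
      if (sp_slot_delta < 0) esp -= -sp_slot_delta * kPointerSize;  // reserve
      return esp;
    }

    int main() {
      assert(AdjustSP(100, 2) == 108);   // two caller slots released
      assert(AdjustSP(100, -1) == 96);   // one extra callee slot reserved
    }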
@@ -312,7 +370,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
- __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(reg);
}
RecordCallPosition(instr);
bool double_result =
@@ -328,10 +387,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ fld1();
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -340,6 +401,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -365,6 +427,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ fld1();
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -374,15 +437,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchLazyBailout: {
+ EnsureSpaceForLazyDeopt();
+ RecordCallPosition(instr);
break;
}
case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -392,6 +467,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -404,12 +481,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ int double_register_param_count = 0;
+ int x87_layout = 0;
+ for (size_t i = 0; i < instr->InputCount(); i++) {
+ if (instr->InputAt(i)->IsDoubleRegister()) {
+ double_register_param_count++;
+ }
+ }
+ // Currently we use only one X87 register. If double_register_param_count
+ // is greater than 1, a duplicated double register has been added as an
+ // input to this instruction.
+ if (double_register_param_count > 0) {
+ x87_layout = (0 << 3) | 1;
+ }
+ // The layout of the x87 register stack is loaded onto the top of the FPU
+ // register stack for deoptimization.
+ __ push(Immediate(x87_layout));
+ __ fild_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kPointerSize));
+
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -431,6 +530,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kArchStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ Register value = i.InputRegister(index);
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
+ scratch0, scratch1, mode);
+ __ mov(operand, value);
+ __ CheckPageFlag(object, scratch0,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ not_zero, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -538,11 +655,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kX87Popcnt:
+ __ Popcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
case kX87LoadFloat64Constant: {
InstructionOperand* source = instr->InputAt(0);
InstructionOperand* destination = instr->Output();
DCHECK(source->IsConstant());
- X87OperandConverter g(this, NULL);
+ X87OperandConverter g(this, nullptr);
Constant src_constant = g.ToConstant(source);
DCHECK_EQ(Constant::kFloat64, src_constant.type());
@@ -625,7 +745,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+
+ // At least one NaN.
+ // Return the second operands if one of the two operands is NaN
+ __ j(parity_even, &return_right, Label::kNear);
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
@@ -639,12 +762,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fadd(1);
__ jmp(&return_left, Label::kNear);
- __ bind(&check_nan_left);
- __ fld(0);
- __ fld(0);
- __ FCmp(); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
-
__ bind(&return_right);
__ fxch();
@@ -662,7 +779,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ // At least one NaN.
+ // Return the right operand if either of the operands is NaN.
+ __ j(parity_even, &return_right, Label::kNear);
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
@@ -689,11 +808,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ pop(eax); // restore esp
__ jmp(&return_left, Label::kNear);
- __ bind(&check_nan_left);
- __ fld(0);
- __ fld(0);
- __ FCmp(); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ fxch();
@@ -717,6 +831,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, kFloatSize));
break;
}
+ case kX87Float32Round: {
+ RoundingMode mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ // Set the correct rounding mode in the x87 control register.
+ __ X87SetRC((mode << 10));
+
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_s(i.InputOperand(0));
+ }
+ __ frndint();
+ __ X87SetRC(0x0000);
+ break;
+ }
case kX87Float64Add: {
__ X87SetFPUCW(0x027F);
__ fstp(0);
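X87SetRC((mode << 10)) works because the x87 control word keeps its rounding-control field in bits 10-11, with the same encoding as the RoundingMode values used here (nearest = 0, down = 1, up = 2, toward zero = 3). A sketch of the control-word update, independent of V8:

    #include <cstdint>

    enum RoundingMode : uint16_t {
      kRoundToNearest = 0, kRoundDown = 1, kRoundUp = 2, kRoundToZero = 3
    };

    uint16_t SetRoundingControl(uint16_t control_word, RoundingMode mode) {
      control_word &= static_cast<uint16_t>(~(0x3 << 10));  // clear RC bits
      control_word |= static_cast<uint16_t>(mode << 10);    // install mode
      return control_word;
    }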
@@ -963,20 +1094,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Sqrt: {
+ __ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ fsqrt();
__ lea(esp, Operand(esp, kDoubleSize));
+ __ X87SetFPUCW(0x037F);
break;
}
case kX87Float64Round: {
RoundingMode mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- if (mode == MiscField::encode(kRoundDown)) {
- __ X87SetRC(0x0400);
- } else {
- __ X87SetRC(0x0c00);
- }
+ // Set the correct rounding mode in the x87 control register.
+ __ X87SetRC((mode << 10));
if (!instr->InputAt(0)->IsDoubleRegister()) {
InstructionOperand* input = instr->InputAt(0);
@@ -1070,24 +1200,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87BitcastFI: {
- __ fstp(0);
__ mov(i.OutputRegister(), MemOperand(esp, 0));
__ lea(esp, Operand(esp, kFloatSize));
break;
}
case kX87BitcastIF: {
+ __ fstp(0);
if (instr->InputAt(0)->IsRegister()) {
__ lea(esp, Operand(esp, -kFloatSize));
__ mov(MemOperand(esp, 0), i.InputRegister(0));
- __ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kFloatSize));
} else {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ mov(MemOperand(esp, 0), i.InputRegister(0));
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
+ __ fld_s(i.InputOperand(0));
}
break;
}
@@ -1129,30 +1254,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.machine_type() == kRepFloat32) {
+ if (allocated.representation() == MachineRepresentation::kFloat32) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_s(Operand(esp, 0));
} else {
- DCHECK(allocated.machine_type() == kRepFloat64);
+ DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(Operand(esp, 0));
}
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (instr->InputAt(0)->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.machine_type() == kRepFloat32) {
+ if (allocated.representation() == MachineRepresentation::kFloat32) {
__ sub(esp, Immediate(kDoubleSize));
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
} else {
- DCHECK(allocated.machine_type() == kRepFloat64);
+ DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
}
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
__ push(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kX87Poke: {
@@ -1186,24 +1315,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNREACHABLE();
}
break;
- case kX87StoreWriteBarrier: {
- Register object = i.InputRegister(0);
- Register value = i.InputRegister(2);
- SaveFPRegsMode mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- if (HasImmediateInput(instr, 1)) {
- int index = i.InputInt32(1);
- Register scratch = i.TempRegister(1);
- __ mov(Operand(object, index), value);
- __ RecordWriteContextSlot(object, index, value, scratch, mode);
- } else {
- Register index = i.InputRegister(1);
- __ mov(Operand(object, index, times_1, 0), value);
- __ lea(index, Operand(object, index, times_1, 0));
- __ RecordWrite(object, index, value, mode);
- }
- break;
- }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1563,20 +1674,20 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
// Assemble a prologue similar to the cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
// TODO(turbofan): this prologue is redundant with OSR, but needed for
// code aging.
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1629,10 +1740,10 @@ void CodeGenerator::AssembleReturn() {
}
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -1653,7 +1764,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- X87OperandConverter g(this, NULL);
+ X87OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1680,11 +1791,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromFrame(src, &offset)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ mov(dst, Operand(ebp, offset));
+ __ mov(dst, g.ToMaterializableOperand(offset));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- __ push(Operand(ebp, offset));
+ __ push(g.ToMaterializableOperand(offset));
__ pop(dst);
}
} else if (destination->IsRegister()) {
@@ -1747,11 +1858,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fst_s(dst);
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fst_d(dst);
break;
default:
@@ -1764,11 +1875,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsDoubleRegister()) {
// Always push only one value onto the x87 stack.
__ fstp(0);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(src);
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(src);
break;
default:
@@ -1776,12 +1887,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
Operand dst = g.ToOperand(destination);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(src);
__ fstp_s(dst);
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(src);
__ fstp_d(dst);
break;
@@ -1797,7 +1908,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- X87OperandConverter g(this, NULL);
+ X87OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
@@ -1810,23 +1921,27 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ xchg(g.ToRegister(source), g.ToOperand(destination));
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- __ push(dst);
- __ push(src);
- __ pop(dst);
- __ pop(src);
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
UNREACHABLE();
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(g.ToOperand(destination));
__ fxch();
__ fstp_s(g.ToOperand(destination));
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(g.ToOperand(destination));
__ fxch();
__ fstp_d(g.ToOperand(destination));
@@ -1836,14 +1951,14 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(g.ToOperand(source));
__ fld_s(g.ToOperand(destination));
__ fstp_s(g.ToOperand(source));
__ fstp_s(g.ToOperand(destination));
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(g.ToOperand(source));
__ fld_d(g.ToOperand(destination));
__ fstp_d(g.ToOperand(source));
diff --git a/chromium/v8/src/compiler/x87/instruction-codes-x87.h b/chromium/v8/src/compiler/x87/instruction-codes-x87.h
index 9408e41724e..b498d9c59cc 100644
--- a/chromium/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/chromium/v8/src/compiler/x87/instruction-codes-x87.h
@@ -33,6 +33,7 @@ namespace compiler {
V(X87Sar) \
V(X87Ror) \
V(X87Lzcnt) \
+ V(X87Popcnt) \
V(X87Float32Cmp) \
V(X87Float32Add) \
V(X87Float32Sub) \
@@ -42,6 +43,7 @@ namespace compiler {
V(X87Float32Min) \
V(X87Float32Abs) \
V(X87Float32Sqrt) \
+ V(X87Float32Round) \
V(X87LoadFloat64Constant) \
V(X87Float64Add) \
V(X87Float64Sub) \
@@ -80,7 +82,6 @@ namespace compiler {
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87Poke) \
- V(X87StoreWriteBarrier) \
V(X87StackCheck)
diff --git a/chromium/v8/src/compiler/x87/instruction-scheduler-x87.cc b/chromium/v8/src/compiler/x87/instruction-scheduler-x87.cc
new file mode 100644
index 00000000000..af86a87ad78
--- /dev/null
+++ b/chromium/v8/src/compiler/x87/instruction-scheduler-x87.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/x87/instruction-selector-x87.cc b/chromium/v8/src/compiler/x87/instruction-selector-x87.cc
index ac868fb932b..cff4aafb278 100644
--- a/chromium/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/chromium/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -54,18 +54,18 @@ class X87OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == NULL)
+ int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
- if (base != NULL) {
+ if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
- base = NULL;
+ base = nullptr;
}
}
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
@@ -88,7 +88,7 @@ class X87OperandGenerator final : public OperandGenerator {
}
} else {
DCHECK(scale >= 0 && scale <= 3);
- if (index != NULL) {
+ if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
@@ -113,7 +113,7 @@ class X87OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -130,29 +130,29 @@ class X87OperandGenerator final : public OperandGenerator {
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kX87Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX87Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kX87Movsxbl : kX87Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX87Movsxbl : kX87Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kX87Movsxwl : kX87Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
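VisitLoad now reads a single LoadRepresentation that carries both width and signedness, replacing the old separate rep/type queries. The mapping it performs, reduced to a standalone sketch (enum values are stand-ins, not V8's):

    enum class Rep { kWord8, kWord16, kWord32, kFloat32, kFloat64 };
    enum Opcode { kMovsxbl, kMovzxbl, kMovsxwl, kMovzxwl, kMovl, kMovss, kMovsd };

    Opcode SelectLoadOpcode(Rep rep, bool is_signed) {
      switch (rep) {
        case Rep::kWord8:   return is_signed ? kMovsxbl : kMovzxbl;  // byte
        case Rep::kWord16:  return is_signed ? kMovsxwl : kMovzxwl;  // word
        case Rep::kWord32:  return kMovl;
        case Rep::kFloat32: return kMovss;
        case Rep::kFloat64: return kMovsd;
      }
      return kMovl;  // unreachable: all representations handled above
    }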
@@ -175,95 +175,123 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = RepresentationOf(store_rep.machine_type());
- if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
- // TODO(dcarney): refactor RecordWrite function to take temp registers
- // and pass them here instead of using fixed regs
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (write_barrier_kind != kNoWriteBarrier) {
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister()};
- Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseImmediate(index), g.UseFixed(value, ecx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
} else {
- InstructionOperand temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
- Emit(kX87StoreWriteBarrier, g.NoOutput(), g.UseFixed(base, ebx),
- g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
- temps);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
+ ? g.UseRegister(value)
+ : g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
+ switch (write_barrier_kind) {
+ case kNoWriteBarrier:
+ UNREACHABLE();
+ break;
+ case kMapWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsMap;
+ break;
+ case kPointerWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsPointer;
+ break;
+ case kFullWriteBarrier:
+ record_write_mode = RecordWriteMode::kValueIsAny;
+ break;
+ }
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ } else {
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kX87Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kX87Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kX87Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX87Movw;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kX87Movl;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
}
- return;
- }
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
- opcode = kX87Movss;
- break;
- case kRepFloat64:
- opcode = kX87Movsd;
- break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = kX87Movb;
- break;
- case kRepWord16:
- opcode = kX87Movw;
- break;
- case kRepTagged: // Fall through.
- case kRepWord32:
- opcode = kX87Movl;
- break;
- default:
- UNREACHABLE();
- return;
- }
+ InstructionOperand val;
+ if (g.CanBeImmediate(value)) {
+ val = g.UseImmediate(value);
+ } else if (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
- InstructionOperand val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ inputs[input_count++] = val;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
}
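The write-barrier branch of VisitStore narrows WriteBarrierKind to a RecordWriteMode, which later tells the out-of-line barrier how much runtime checking it may skip. The mapping, as a small self-contained sketch:

    enum class WriteBarrierKind {
      kNoWriteBarrier, kMapWriteBarrier, kPointerWriteBarrier, kFullWriteBarrier
    };
    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };

    RecordWriteMode ToRecordWriteMode(WriteBarrierKind kind) {
      switch (kind) {
        case WriteBarrierKind::kMapWriteBarrier:
          return RecordWriteMode::kValueIsMap;      // value is known to be a map
        case WriteBarrierKind::kPointerWriteBarrier:
          return RecordWriteMode::kValueIsPointer;  // value is a heap pointer
        default:
          return RecordWriteMode::kValueIsAny;      // must smi-check at runtime
      }
    }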
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
X87OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -283,38 +311,42 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
X87OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
InstructionOperand value_operand =
- g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
- : g.UseRegister(value));
+ g.CanBeImmediate(value) ? g.UseImmediate(value)
+ : ((rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)
+ ? g.UseByteRegister(value)
+ : g.UseRegister(value));
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
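kCheckedStore* (and the checked loads above) guard the memory access with a bounds check against the buffer length; an out-of-range store is simply skipped. The semantics, sketched in plain C++ rather than the emitted code (the real check folds the access size into the length operand):

    #include <cstdint>
    #include <cstring>

    void CheckedStoreWord32(uint8_t* buffer, uint32_t length, uint32_t offset,
                            uint32_t value) {
      if (length >= sizeof(uint32_t) && offset <= length - sizeof(uint32_t)) {
        std::memcpy(buffer + offset, &value, sizeof(value));  // in bounds
      }
      // out of range: the store has no effect
    }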
@@ -485,8 +517,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
VisitShift(this, node, kX87Shl);
@@ -514,13 +546,22 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
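VisitWord32Popcnt maps straight onto the hardware population-count instruction (gated on POPCNT support below). What the operator computes, as a portable one-liner using a GCC/Clang builtin (an assumption; any popcount implementation works):

    #include <cstdint>

    int Word32Popcnt(uint32_t value) {
      return __builtin_popcount(value);  // set-bit count, e.g. 0b1011 -> 3
    }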
void InstructionSelector::VisitInt32Add(Node* node) {
X87OperandGenerator g(this);
// Try to match the Add to a lea pattern.
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
@@ -557,8 +598,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
X87OperandGenerator g(this);
@@ -667,7 +708,7 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, NULL);
+ Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, nullptr);
}
@@ -681,7 +722,7 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -689,7 +730,7 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -697,7 +738,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -705,7 +746,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -713,7 +754,7 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -721,7 +762,7 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -729,7 +770,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -737,7 +778,7 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -754,7 +795,7 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -762,7 +803,7 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -770,7 +811,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -778,35 +819,42 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundDown),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
}
@@ -817,6 +865,27 @@ void InstructionSelector::VisitFloat64RoundDown(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundToZero),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
@@ -829,20 +898,24 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
X87OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+ Emit(kX87Float32Round | MiscField::encode(kRoundToNearest),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
- FrameStateDescriptor* frame_state_descriptor = nullptr;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundToNearest),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
+ X87OperandGenerator g(this);
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
@@ -853,148 +926,35 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
- for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
- if (Node* input = buffer.pushed_nodes[n]) {
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int const slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : g.UseRegister(input);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// TODO(titzer): handle pushing double parameters.
- if (input == nullptr) continue;
+ if (input.node() == nullptr) continue;
InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input))
- ? g.UseRegister(input)
- : g.Use(input);
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
Emit(kX87Push, g.NoOutput(), value);
}
}
-
- // Pass label of exception handler block.
- CallDescriptor::Flags flags = descriptor->flags();
- if (handler) {
- DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
- flags |= CallDescriptor::kHasExceptionHandler;
- buffer.instruction_args.push_back(g.Label(handler));
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallAddress:
- opcode =
- kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
- break;
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
- break;
- default:
- UNREACHABLE();
- return;
- }
-
- // Emit the call instruction.
- size_t const output_count = buffer.outputs.size();
- auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
}
-void InstructionSelector::VisitTailCall(Node* node) {
- X87OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
- DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
-
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, true);
-
- // Push any stack arguments.
- for (Node* input : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): Handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : IsSupported(ATOM) ? g.UseRegister(input) : g.Use(input);
- Emit(kX87Push, g.NoOutput(), value);
- }
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front())->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
- }
-}
+bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
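The refactoring above replaces the per-backend VisitCall/VisitTailCall bodies with a shared driver that calls into EmitPrepareArguments, and IsTailCallAddressImmediate tells the shared code that x87 can tail-call to an immediate address. A minimal sketch of the two argument-lowering strategies the hook chooses between, with Poke/Push as hypothetical stand-ins for the kX87Poke/kX87Push emissions:

    #include <cstddef>
    #include <vector>

    struct Node;                       // stand-in for compiler graph nodes
    void Poke(int slot, Node* value);  // stand-in for Emit(kX87Poke | slot, ...)
    void Push(Node* value);            // stand-in for Emit(kX87Push, ...)

    // C calls store into fixed outgoing slots (poke); JS calls grow the
    // stack from the top down (push, in reverse argument order).
    void LowerArguments(const std::vector<Node*>& args, bool is_c_call) {
      if (is_c_call) {
        for (std::size_t slot = 0; slot < args.size(); ++slot) {
          if (args[slot] != nullptr) Poke(static_cast<int>(slot), args[slot]);
        }
      } else {
        for (auto it = args.rbegin(); it != args.rend(); ++it) {
          if (*it != nullptr) Push(*it);
        }
      }
    }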
namespace {
@@ -1162,12 +1122,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1365,6 +1325,18 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ flags |= MachineOperatorBuilder::kWord32Popcnt;
+ }
+
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
return flags;
}
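With x87 rounding done through the FPU control word, the float rounding flags are now advertised unconditionally, while kWord32Popcnt stays behind a CPU-feature check. A sketch of how a consumer might query the capability mask (the call sites are assumed, not shown in this patch):

    MachineOperatorBuilder::Flags flags =
        InstructionSelector::SupportedMachineOperatorFlags();
    if (flags & MachineOperatorBuilder::kWord32Popcnt) {
      // A single-instruction popcount may be selected on this CPU.
    }
    if (flags & MachineOperatorBuilder::kFloat64RoundTruncate) {
      // Math.trunc can be lowered without a runtime call.
    }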
diff --git a/chromium/v8/src/compiler/zone-pool.h b/chromium/v8/src/compiler/zone-pool.h
index 15866ea3581..aaf9daac464 100644
--- a/chromium/v8/src/compiler/zone-pool.h
+++ b/chromium/v8/src/compiler/zone-pool.h
@@ -19,16 +19,17 @@ class ZonePool final {
public:
class Scope final {
public:
- explicit Scope(ZonePool* zone_pool) : zone_pool_(zone_pool), zone_(NULL) {}
+ explicit Scope(ZonePool* zone_pool)
+ : zone_pool_(zone_pool), zone_(nullptr) {}
~Scope() { Destroy(); }
Zone* zone() {
- if (zone_ == NULL) zone_ = zone_pool_->NewEmptyZone();
+ if (zone_ == nullptr) zone_ = zone_pool_->NewEmptyZone();
return zone_;
}
void Destroy() {
- if (zone_ != NULL) zone_pool_->ReturnZone(zone_);
- zone_ = NULL;
+ if (zone_ != nullptr) zone_pool_->ReturnZone(zone_);
+ zone_ = nullptr;
}
private:
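ZonePool::Scope is a lazily materializing RAII handle: nothing is taken from the pool until zone() is first called, and Destroy() returns the zone on scope exit. A usage sketch (CompilePhase is hypothetical):

    void CompilePhase(ZonePool* pool) {
      ZonePool::Scope scope(pool);  // no zone taken from the pool yet
      Zone* zone = scope.zone();    // first call lazily acquires one
      // ... allocate phase-local data in |zone| ...
    }  // ~Scope() runs Destroy() and returns the zone to the pool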
diff --git a/chromium/v8/src/context-measure.cc b/chromium/v8/src/context-measure.cc
index da4aae498b6..0b87e396141 100644
--- a/chromium/v8/src/context-measure.cc
+++ b/chromium/v8/src/context-measure.cc
@@ -74,5 +74,5 @@ void ContextMeasure::VisitPointers(Object** start, Object** end) {
MeasureObject(HeapObject::cast(*current));
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/context-measure.h b/chromium/v8/src/context-measure.h
index f01c37418f5..665c5479126 100644
--- a/chromium/v8/src/context-measure.h
+++ b/chromium/v8/src/context-measure.h
@@ -5,7 +5,9 @@
#ifndef V8_CONTEXT_MEASURE_H_
#define V8_CONTEXT_MEASURE_H_
-#include "src/snapshot/serialize.h"
+#include "src/address-map.h"
+#include "src/assert-scope.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
@@ -17,7 +19,7 @@ class ContextMeasure : public ObjectVisitor {
int Size() { return size_; }
int Count() { return count_; }
- void VisitPointers(Object** start, Object** end);
+ void VisitPointers(Object** start, Object** end) override;
private:
void MeasureObject(HeapObject* object);
@@ -41,7 +43,7 @@ class ContextMeasure : public ObjectVisitor {
DISALLOW_COPY_AND_ASSIGN(ContextMeasure);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CONTEXT_MEASURE_H_
diff --git a/chromium/v8/src/contexts-inl.h b/chromium/v8/src/contexts-inl.h
index e88cd33ad3b..67257ae0d71 100644
--- a/chromium/v8/src/contexts-inl.h
+++ b/chromium/v8/src/contexts-inl.h
@@ -56,24 +56,28 @@ Context* Context::previous() {
void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
-bool Context::has_extension() { return extension() != nullptr; }
-Object* Context::extension() { return get(EXTENSION_INDEX); }
-void Context::set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+bool Context::has_extension() { return !extension()->IsTheHole(); }
+HeapObject* Context::extension() {
+ return HeapObject::cast(get(EXTENSION_INDEX));
+}
+void Context::set_extension(HeapObject* object) {
+ set(EXTENSION_INDEX, object);
+}
JSModule* Context::module() { return JSModule::cast(get(EXTENSION_INDEX)); }
void Context::set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
-GlobalObject* Context::global_object() {
- Object* result = get(GLOBAL_OBJECT_INDEX);
- DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
- return reinterpret_cast<GlobalObject*>(result);
+Context* Context::native_context() {
+ Object* result = get(NATIVE_CONTEXT_INDEX);
+ DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result));
+ return reinterpret_cast<Context*>(result);
}
-void Context::set_global_object(GlobalObject* object) {
- set(GLOBAL_OBJECT_INDEX, object);
+void Context::set_native_context(Context* context) {
+ set(NATIVE_CONTEXT_INDEX, context);
}
@@ -120,8 +124,8 @@ bool Context::IsScriptContext() {
bool Context::HasSameSecurityTokenAs(Context* that) {
- return this->global_object()->native_context()->security_token() ==
- that->global_object()->native_context()->security_token();
+ return this->native_context()->security_token() ==
+ that->native_context()->security_token();
}
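The extension slot switches its empty sentinel from a raw NULL to the-hole, so the slot always contains a valid tagged HeapObject and has_extension() becomes a hole check instead of a pointer comparison. A sketch of clearing the slot under that convention (the heap accessor name is an assumption):

    void ClearExtension(Context* context, Isolate* isolate) {
      // the_hole_value() is a real HeapObject, so the slot always holds a
      // valid tagged pointer for the GC to visit (accessor name assumed).
      context->set_extension(isolate->heap()->the_hole_value());
      DCHECK(!context->has_extension());
    }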
diff --git a/chromium/v8/src/contexts.cc b/chromium/v8/src/contexts.cc
index a008d49ac37..79a9e926a56 100644
--- a/chromium/v8/src/contexts.cc
+++ b/chromium/v8/src/contexts.cc
@@ -4,10 +4,10 @@
#include "src/contexts.h"
+#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
-#include "src/scopeinfo.h"
namespace v8 {
namespace internal {
@@ -82,8 +82,8 @@ Context* Context::declaration_context() {
JSObject* Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
- Object* object = extension();
- if (object == nullptr) return nullptr;
+ HeapObject* object = extension();
+ if (object->IsTheHole()) return nullptr;
if (IsBlockContext()) {
if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
@@ -103,7 +103,7 @@ JSReceiver* Context::extension_receiver() {
ScopeInfo* Context::scope_info() {
DCHECK(IsModuleContext() || IsScriptContext() || IsBlockContext());
- Object* object = extension();
+ HeapObject* object = extension();
if (object->IsSloppyBlockWithEvalContextExtension()) {
DCHECK(IsBlockContext());
object = SloppyBlockWithEvalContextExtension::cast(object)->scope_info();
@@ -118,14 +118,8 @@ String* Context::catch_name() {
}
-JSBuiltinsObject* Context::builtins() {
- GlobalObject* object = global_object();
- if (object->IsJSGlobalObject()) {
- return JSGlobalObject::cast(object)->builtins();
- } else {
- DCHECK(object->IsJSBuiltinsObject());
- return JSBuiltinsObject::cast(object);
- }
+JSGlobalObject* Context::global_object() {
+ return JSGlobalObject::cast(native_context()->extension());
}
@@ -138,17 +132,6 @@ Context* Context::script_context() {
}
-Context* Context::native_context() {
- // Fast case: the receiver context is already a native context.
- if (IsNativeContext()) return this;
- // The global object has a direct pointer to the native context. If the
- // following DCHECK fails, the native context is probably being accessed
- // indirectly during bootstrapping. This is unsupported.
- DCHECK(global_object()->IsGlobalObject());
- return global_object()->native_context();
-}
-
-
JSObject* Context::global_proxy() {
return native_context()->global_proxy_object();
}
@@ -163,30 +146,24 @@ void Context::set_global_proxy(JSObject* object) {
 * Looks up a property in an object environment, taking the unscopables into
 * account. This is used for the HasBinding spec algorithm for ObjectEnvironments.
*/
-static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
+static Maybe<bool> UnscopableLookup(LookupIterator* it) {
Isolate* isolate = it->isolate();
- Maybe<PropertyAttributes> attrs = JSReceiver::GetPropertyAttributes(it);
- DCHECK(attrs.IsJust() || isolate->has_pending_exception());
- if (!attrs.IsJust() || attrs.FromJust() == ABSENT) return attrs;
+ Maybe<bool> found = JSReceiver::HasProperty(it);
+ if (!found.IsJust() || !found.FromJust()) return found;
- Handle<Symbol> unscopables_symbol = isolate->factory()->unscopables_symbol();
- Handle<Object> receiver = it->GetReceiver();
Handle<Object> unscopables;
- MaybeHandle<Object> maybe_unscopables =
- Object::GetProperty(receiver, unscopables_symbol);
- if (!maybe_unscopables.ToHandle(&unscopables)) {
- return Nothing<PropertyAttributes>();
- }
- if (!unscopables->IsSpecObject()) return attrs;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, unscopables,
+ Object::GetProperty(it->GetReceiver(),
+ isolate->factory()->unscopables_symbol()),
+ Nothing<bool>());
+ if (!unscopables->IsJSReceiver()) return Just(true);
Handle<Object> blacklist;
- MaybeHandle<Object> maybe_blacklist =
- Object::GetProperty(unscopables, it->name());
- if (!maybe_blacklist.ToHandle(&blacklist)) {
- DCHECK(isolate->has_pending_exception());
- return Nothing<PropertyAttributes>();
- }
- return blacklist->BooleanValue() ? Just(ABSENT) : attrs;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, blacklist,
+ Object::GetProperty(unscopables, it->name()),
+ Nothing<bool>());
+ return Just(!blacklist->BooleanValue());
}
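UnscopableLookup now answers HasBinding directly: Just(true) when the property exists and is not hidden by @@unscopables, Just(false) otherwise, Nothing<bool>() on exception. A worked example of the hiding case, following the steps above:

    // Worked example: with (obj) { x }
    //   obj = { x: 1, [Symbol.unscopables]: { x: true } }
    // HasProperty(obj, "x")                 -> Just(true), keep going
    // Get(obj, @@unscopables)               -> a JSReceiver
    // Get(unscopables, "x")->BooleanValue() -> true
    // => UnscopableLookup returns Just(false): |x| is invisible here and
    //    the context lookup continues in the enclosing scope.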
static void GetAttributesAndBindingFlags(VariableMode mode,
@@ -264,7 +241,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 1. Check global objects, subjects of with, and extension objects.
- if ((context->IsNativeContext() || context->IsWithContext() ||
+ if ((context->IsNativeContext() ||
+ (context->IsWithContext() && ((flags & SKIP_WITH_CONTEXT) == 0)) ||
context->IsFunctionContext() || context->IsBlockContext()) &&
context->extension_receiver() != nullptr) {
Handle<JSReceiver> object(context->extension_receiver());
@@ -305,7 +283,15 @@ Handle<Object> Context::Lookup(Handle<String> name,
maybe = Just(ABSENT);
} else {
LookupIterator it(object, name);
- maybe = UnscopableLookup(&it);
+ Maybe<bool> found = UnscopableLookup(&it);
+ if (found.IsNothing()) {
+ maybe = Nothing<PropertyAttributes>();
+ } else {
+ // Luckily, consumers of |maybe| only care whether the property
+ // was absent or not, so we can return a dummy |NONE| value
+ // for its attributes when it was present.
+ maybe = Just(found.FromJust() ? NONE : ABSENT);
+ }
}
} else {
maybe = JSReceiver::GetPropertyAttributes(object, name);
@@ -384,7 +370,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 3. Prepare to continue with the previous (next outermost) context.
- if (context->IsNativeContext()) {
+ if (context->IsNativeContext() ||
+ ((flags & STOP_AT_DECLARATION_SCOPE) != 0 &&
+ context->is_declaration_context())) {
follow_context_chain = false;
} else {
context = Handle<Context>(context->previous(), isolate);
@@ -565,6 +553,15 @@ bool Context::IsJSBuiltin(Handle<Context> native_context,
#ifdef DEBUG
+
+bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object* object) {
+  // During bootstrapping we allow all objects to pass as native
+  // contexts. This is necessary to fix circular dependencies.
+ return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
+ isolate->bootstrapper()->IsActive() || object->IsNativeContext();
+}
+
+
bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
@@ -576,15 +573,18 @@ bool Context::IsBootstrappingOrValidParentContext(
context->IsModuleContext() || !child->IsModuleContext();
}
+#endif
-bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
- // During bootstrapping we allow all objects to pass as global
- // objects. This is necessary to fix circular dependencies.
- return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
- isolate->bootstrapper()->IsActive() ||
- object->IsGlobalObject();
+
+void Context::IncrementErrorsThrown() {
+ DCHECK(IsNativeContext());
+
+ int previous_value = errors_thrown()->value();
+ set_errors_thrown(Smi::FromInt(previous_value + 1));
}
-#endif
+
+
+int Context::GetErrorsThrown() { return errors_thrown()->value(); }
} // namespace internal
} // namespace v8
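The new counter pairs with the V8.ErrorsThrownPerContext histogram added to counters.h later in this patch. A sketch of the intended flow; both call sites are assumptions, not part of this hunk:

    // On the throw path (assumed call site):
    isolate->native_context()->IncrementErrorsThrown();

    // When the context is torn down (assumed call site):
    isolate->counters()->errors_thrown_per_context()->AddSample(
        native_context->GetErrorsThrown());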
diff --git a/chromium/v8/src/contexts.h b/chromium/v8/src/contexts.h
index 1ca572576ee..6c9e1950750 100644
--- a/chromium/v8/src/contexts.h
+++ b/chromium/v8/src/contexts.h
@@ -13,11 +13,15 @@ namespace internal {
enum ContextLookupFlags {
- FOLLOW_CONTEXT_CHAIN = 1,
- FOLLOW_PROTOTYPE_CHAIN = 2,
+ FOLLOW_CONTEXT_CHAIN = 1 << 0,
+ FOLLOW_PROTOTYPE_CHAIN = 1 << 1,
+ STOP_AT_DECLARATION_SCOPE = 1 << 2,
+ SKIP_WITH_CONTEXT = 1 << 3,
DONT_FOLLOW_CHAINS = 0,
- FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN
+ FOLLOW_CHAINS = FOLLOW_CONTEXT_CHAIN | FOLLOW_PROTOTYPE_CHAIN,
+ LEXICAL_TEST =
+ FOLLOW_CONTEXT_CHAIN | STOP_AT_DECLARATION_SCOPE | SKIP_WITH_CONTEXT,
};
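Rewriting the enum as explicit bit positions makes room for the two new flags, and LEXICAL_TEST composes exactly what a lexical-binding check needs: follow the context chain, stop at the nearest declaration scope, skip 'with' scopes. A sketch of a lookup under these flags (the call shape is an assumption; out-parameters elided):

    Handle<Object> holder =
        context->Lookup(name, LEXICAL_TEST, &index, &attributes, &binding_flags);
    // |holder| is non-empty only if |name| is already lexically bound before
    // the nearest declaration scope, ignoring any intervening 'with' objects.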
@@ -74,26 +78,27 @@ enum BindingFlags {
// Factory::NewContext.
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
V(CONCAT_ITERABLE_TO_ARRAY_INDEX, JSFunction, concat_iterable_to_array) \
V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(NON_NUMBER_TO_NUMBER_INDEX, JSFunction, non_number_to_number) \
+ V(OBJECT_FREEZE, JSFunction, object_freeze) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun)
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)
-#define NATIVE_CONTEXT_JS_BUILTINS(V) \
- V(APPLY_PREPARE_BUILTIN_INDEX, JSFunction, apply_prepare_builtin) \
- V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
- concat_iterable_to_array_builtin) \
- V(REFLECT_APPLY_PREPARE_BUILTIN_INDEX, JSFunction, \
- reflect_apply_prepare_builtin) \
- V(REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX, JSFunction, \
- reflect_construct_prepare_builtin)
+#define NATIVE_CONTEXT_JS_BUILTINS(V) \
+ V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
+ concat_iterable_to_array_builtin)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
@@ -105,10 +110,7 @@ enum BindingFlags {
V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
- V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -116,7 +118,6 @@ enum BindingFlags {
V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter) \
V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function) \
V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(MAP_FROM_ARRAY_INDEX, JSFunction, map_from_array) \
V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
@@ -127,13 +128,10 @@ enum BindingFlags {
V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
native_object_notifier_perform_change) \
V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe) \
- V(NO_SIDE_EFFECT_TO_STRING_FUN_INDEX, JSFunction, \
- no_side_effect_to_string_fun) \
+ V(NO_SIDE_EFFECTS_TO_STRING_FUN_INDEX, JSFunction, \
+ no_side_effects_to_string_fun) \
V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(OBJECT_DEFINE_OWN_PROPERTY_INDEX, JSFunction, object_define_own_property) \
- V(OBJECT_GET_OWN_PROPERTY_DESCROPTOR_INDEX, JSFunction, \
- object_get_own_property_descriptor) \
V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice) \
V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
@@ -141,6 +139,7 @@ enum BindingFlags {
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
promise_has_user_defined_reject_handler) \
V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
@@ -151,13 +150,9 @@ enum BindingFlags {
V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(SET_FROM_ARRAY_INDEX, JSFunction, set_from_array) \
V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
- to_complete_property_descriptor) \
- V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
NATIVE_CONTEXT_JS_BUILTINS(V)
@@ -183,8 +178,10 @@ enum BindingFlags {
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
+ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
+ V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \
V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \
V(EXTRAS_UTILS_OBJECT_INDEX, JSObject, extras_utils_object) \
V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
@@ -192,6 +189,9 @@ enum BindingFlags {
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache) \
+ V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
+ V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
+ generator_function_function) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
@@ -203,13 +203,36 @@ enum BindingFlags {
V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
- V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
- V(JS_ARRAY_STRONG_MAPS_INDEX, Object, js_array_strong_maps) \
+ V(JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_smi_elements_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_holey_smi_elements_map_index) \
+ V(JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, Map, js_array_fast_elements_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_holey_elements_map_index) \
+ V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_double_elements_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_holey_double_elements_map_index) \
+ V(JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_smi_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_holey_smi_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_holey_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_double_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_holey_double_elements_strong_map_index) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
+ V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
+ V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
@@ -220,12 +243,17 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
+ V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
+ V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
+ V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
+ V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map) \
+ V(PROXY_MAP_INDEX, Map, proxy_map) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
- V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
+ V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
@@ -234,6 +262,7 @@ enum BindingFlags {
sloppy_function_without_prototype_map) \
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_with_readonly_prototype_map) \
+ V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
@@ -328,7 +357,7 @@ class ScriptContextTable : public FixedArray {
// function contexts, and non-NULL for 'with' contexts.
// Used to implement the 'with' statement.
//
-// [ extension ] A pointer to an extension JSObject, or NULL. Used to
+// [ extension ] A pointer to an extension JSObject, or "the hole". Used to
// implement 'with' statements and dynamic declarations
// (through 'eval'). The object in a 'with' statement is
// stored in the extension slot of a 'with' context.
@@ -367,12 +396,12 @@ class Context: public FixedArray {
// These slots are in all contexts.
CLOSURE_INDEX,
PREVIOUS_INDEX,
- // The extension slot is used for either the global object (in global
+ // The extension slot is used for either the global object (in native
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts), the serialized
// scope info (block contexts), or the module instance (module contexts).
EXTENSION_INDEX,
- GLOBAL_OBJECT_INDEX,
+ NATIVE_CONTEXT_INDEX,
// These slots are only in native contexts.
#define NATIVE_CONTEXT_SLOT(index, type, name) index,
@@ -389,12 +418,18 @@ class Context: public FixedArray {
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
+ FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX,
+ FIRST_JS_ARRAY_STRONG_MAP_SLOT =
+ JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX,
MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
};
+ void IncrementErrorsThrown();
+ int GetErrorsThrown();
+
// Direct slot access.
inline JSFunction* closure();
inline void set_closure(JSFunction* closure);
@@ -403,8 +438,8 @@ class Context: public FixedArray {
inline void set_previous(Context* context);
inline bool has_extension();
- inline Object* extension();
- inline void set_extension(Object* object);
+ inline HeapObject* extension();
+ inline void set_extension(HeapObject* object);
JSObject* extension_object();
JSReceiver* extension_receiver();
ScopeInfo* scope_info();
@@ -418,21 +453,19 @@ class Context: public FixedArray {
Context* declaration_context();
bool is_declaration_context();
- inline GlobalObject* global_object();
- inline void set_global_object(GlobalObject* object);
-
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
void set_global_proxy(JSObject* global);
- // The builtins object.
- JSBuiltinsObject* builtins();
+ // Get the JSGlobalObject object.
+ JSGlobalObject* global_object();
// Get the script context by traversing the context chain.
Context* script_context();
- // Compute the native context by traversing the context chain.
- Context* native_context();
+ // Compute the native context.
+ inline Context* native_context();
+ inline void set_native_context(Context* context);
// Predicates for context types. IsNativeContext is also defined on Object
// because we frequently have to know if arbitrary objects are natives
@@ -532,6 +565,13 @@ class Context: public FixedArray {
: SLOPPY_FUNCTION_MAP_INDEX;
}
+ static int ArrayMapIndex(ElementsKind elements_kind,
+ Strength strength = Strength::WEAK) {
+ DCHECK(IsFastElementsKind(elements_kind));
+ return elements_kind + (is_strong(strength) ? FIRST_JS_ARRAY_STRONG_MAP_SLOT
+ : FIRST_JS_ARRAY_MAP_SLOT);
+ }
+
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
static const int kNotFound = -1;
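ArrayMapIndex depends on the per-ElementsKind slots above being declared contiguously in fast-kind order, so the map index is plain offset arithmetic. A worked example, assuming the conventional kind numbering (FAST_SMI_ELEMENTS = 0 through FAST_HOLEY_DOUBLE_ELEMENTS = 5):

    // Weak map for holey FAST elements (kind 3 in that ordering):
    //   index = FAST_HOLEY_ELEMENTS + FIRST_JS_ARRAY_MAP_SLOT
    //         = 3 + JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX
    int index = Context::ArrayMapIndex(FAST_HOLEY_ELEMENTS, Strength::WEAK);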
@@ -547,14 +587,15 @@ class Context: public FixedArray {
private:
#ifdef DEBUG
// Bootstrapping-aware type checks.
+ static bool IsBootstrappingOrNativeContext(Isolate* isolate, Object* object);
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
- static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
STATIC_ASSERT(kHeaderSize == Internals::kContextHeaderSize);
STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CONTEXTS_H_
diff --git a/chromium/v8/src/conversions-inl.h b/chromium/v8/src/conversions-inl.h
index 4b3ac27cf1d..3e56799bc9b 100644
--- a/chromium/v8/src/conversions-inl.h
+++ b/chromium/v8/src/conversions-inl.h
@@ -20,7 +20,6 @@
#include "src/conversions.h"
#include "src/double.h"
#include "src/objects-inl.h"
-#include "src/scanner.h"
#include "src/strtod.h"
namespace v8 {
@@ -70,7 +69,7 @@ inline unsigned int FastD2UI(double x) {
inline float DoubleToFloat32(double x) {
- // TODO(yanggou): This static_cast is implementation-defined behaviour in C++,
+ // TODO(yangguo): This static_cast is implementation-defined behaviour in C++,
// so we may need to do the conversion manually instead to match the spec.
volatile float f = static_cast<float>(x);
return f;
@@ -295,7 +294,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
}
-
+// ES6 18.2.5 parseInt(string, radix)
template <class Iterator, class EndMark>
double InternalStringToInt(UnicodeCache* unicode_cache,
Iterator current,
@@ -758,6 +757,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
return (sign == NEGATIVE) ? -converted : converted;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_CONVERSIONS_INL_H_
diff --git a/chromium/v8/src/conversions.cc b/chromium/v8/src/conversions.cc
index 2ce1d70fe69..7867719968d 100644
--- a/chromium/v8/src/conversions.cc
+++ b/chromium/v8/src/conversions.cc
@@ -10,6 +10,7 @@
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
+#include "src/codegen.h"
#include "src/conversions-inl.h"
#include "src/dtoa.h"
#include "src/factory.h"
@@ -440,7 +441,7 @@ char* DoubleToRadixCString(double value, int radix) {
// at least one digit.
int integer_pos = kBufferSize - 2;
do {
- double remainder = std::fmod(integer_part, radix);
+ double remainder = modulo(integer_part, radix);
integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
integer_part -= remainder;
integer_part /= radix;
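The switch from std::fmod to modulo() keeps digit extraction consistent with V8's platform-independent floating-point modulo. A short trace of the loop for value = 255, radix = 16:

    // remainder = modulo(255, 16) = 15 -> digit 'f'; integer_part = (255 - 15) / 16 = 15
    // remainder = modulo(15, 16)  = 15 -> digit 'f'; integer_part = 0
    // The do/while exits; the buffer holds "ff". At least one digit is
    // always written, so an input of 0 still yields "0".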
@@ -483,6 +484,7 @@ char* DoubleToRadixCString(double value, int radix) {
}
+// ES6 18.2.4 parseFloat(string)
double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
int flags, double empty_string_val) {
Handle<String> flattened = String::Flatten(string);
@@ -490,7 +492,6 @@ double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
DisallowHeapAllocation no_gc;
String::FlatContent flat = flattened->GetFlatContent();
DCHECK(flat.IsFlat());
- // ECMA-262 section 15.1.2.3, empty string is NaN
if (flat.IsOneByte()) {
return StringToDouble(unicode_cache, flat.ToOneByteVector(), flags,
empty_string_val);
diff --git a/chromium/v8/src/counters.h b/chromium/v8/src/counters.h
index 740b2a86e48..d8a3f091f84 100644
--- a/chromium/v8/src/counters.h
+++ b/chromium/v8/src/counters.h
@@ -483,16 +483,25 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
HR(gc_idle_time_limit_overshot, V8.GCIdleTimeLimit.Overshot, 0, 10000, 101) \
HR(gc_idle_time_limit_undershot, V8.GCIdleTimeLimit.Undershot, 0, 10000, \
101) \
- HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6)
+ HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
+ HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
+ HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
+ HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
+ HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
+ MILLISECOND) \
HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
HT(gc_context, V8.GCContext, 10000, \
MILLISECOND) /* GC context cleanup time */ \
HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
+ HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
+ MILLISECOND) \
+ HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
+ MILLISECOND) \
HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
MILLISECOND) \
/* Parsing timers. */ \
@@ -705,7 +714,11 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
- SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
+ SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed) \
+ SC(turbo_escape_allocs_replaced, V8.TurboEscapeAllocsReplaced) \
+ SC(crankshaft_escape_allocs_replaced, V8.CrankshaftEscapeAllocsReplaced) \
+ SC(turbo_escape_loads_replaced, V8.TurboEscapeLoadsReplaced) \
+ SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced)
// This file contains all the v8 counters that are in use.
@@ -886,6 +899,7 @@ class Counters {
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_COUNTERS_H_
diff --git a/chromium/v8/src/crankshaft/OWNERS b/chromium/v8/src/crankshaft/OWNERS
new file mode 100644
index 00000000000..2918dddc4c9
--- /dev/null
+++ b/chromium/v8/src/crankshaft/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+bmeurer@chromium.org
+danno@chromium.org
+jarin@chromium.org
+jkummerow@chromium.org
+verwaest@chromium.org
diff --git a/chromium/v8/src/crankshaft/arm/OWNERS b/chromium/v8/src/crankshaft/arm/OWNERS
new file mode 100644
index 00000000000..906a5ce6418
--- /dev/null
+++ b/chromium/v8/src/crankshaft/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/chromium/v8/src/arm/lithium-arm.cc b/chromium/v8/src/crankshaft/arm/lithium-arm.cc
index 4ccb0209956..cd736ecd8f0 100644
--- a/chromium/v8/src/arm/lithium-arm.cc
+++ b/chromium/v8/src/crankshaft/arm/lithium-arm.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/lithium-arm.h"
+#include "src/crankshaft/arm/lithium-arm.h"
#include <sstream>
-#include "src/arm/lithium-codegen-arm.h"
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -298,13 +298,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -323,11 +316,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -346,12 +334,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -444,14 +426,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1000,7 +981,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1225,14 +1208,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
@@ -1850,14 +1825,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2141,15 +2108,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2226,7 +2184,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2234,7 +2192,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2299,7 +2259,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2311,7 +2271,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2433,19 +2394,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);
@@ -2486,13 +2434,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2587,12 +2528,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/arm/lithium-arm.h b/chromium/v8/src/crankshaft/arm/lithium-arm.h
index 8954710e53c..6329f36fb2e 100644
--- a/chromium/v8/src/arm/lithium-arm.h
+++ b/chromium/v8/src/crankshaft/arm/lithium-arm.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_LITHIUM_ARM_H_
-#define V8_ARM_LITHIUM_ARM_H_
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -100,7 +97,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -132,7 +128,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -143,7 +138,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1379,25 +1373,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1607,15 +1582,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
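LLoadKeyed (and LStoreKeyed below) gain a backing_store_owner input for accesses to external backing stores. The operand emits no code; as the UseAny() call sites in lithium-arm.cc above suggest, it only keeps the owning object alive across the access in the register allocator's view. A sketch of the contrast:

    // In-heap elements: no owner needed, the extra input is nullptr.
    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
    // External backing store: UseAny() pins the owner's lifetime across
    // the access without constraining register allocation.
    result = DefineAsRegister(new (zone()) LLoadKeyed(
        backing_store, key, UseAny(instr->backing_store_owner())));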
@@ -1672,22 +1649,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1911,25 +1872,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2189,34 +2131,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2225,6 +2147,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2552,19 +2475,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2609,19 +2519,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2885,6 +2782,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM_LITHIUM_ARM_H_
+#endif // V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.cc b/chromium/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index d958405e82c..2bd07882326 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.cc
+++ b/chromium/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/lithium-codegen-arm.h"
-#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
@@ -72,7 +73,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ __ vstr(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -88,8 +89,8 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
+ __ vldr(DoubleRegister::from_code(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
@@ -114,24 +115,6 @@ bool LCodeGen::GeneratePrologue() {
// pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ ldr(r2, MemOperand(sp, receiver_offset));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(ne, &ok);
-
- __ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-
- __ str(r2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -139,10 +122,9 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -187,7 +169,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(r1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -196,7 +178,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -215,7 +197,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
if (need_write_barrier) {
@@ -405,13 +387,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
- return DwVfpRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
+ return DwVfpRegister::from_code(code);
}
@@ -892,60 +874,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1882,40 +1810,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(r0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand(stamp));
- __ ldr(scratch, MemOperand(scratch));
- __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch, scratch0());
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2258,7 +2152,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ b(ge, instr->TrueLabel(chunk_));
}
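FIRST_SPEC_OBJECT_TYPE gives way to FIRST_JS_RECEIVER_TYPE here and in the typeof and for-in hunks below. The receiver range still ends the instance-type enum (the STATIC_ASSERT in EmitTypeofIs below relies on exactly that), so a single unsigned compare still classifies an object. A self-contained sketch of the invariant:

    // Sketch of the range check the generated code performs:
    bool IsJSReceiverInstanceType(InstanceType type) {
      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
      return type >= FIRST_JS_RECEIVER_TYPE;  // receivers occupy the enum tail
    }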
@@ -2610,39 +2504,20 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- __ b(eq, is_true);
- __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ b(eq, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(gt, is_false);
+ __ b(eq, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
Register instance_type = ip;
__ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
__ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
- if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
+ if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
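EmitClassOfTest now issues one CompareObjectType against JS_FUNCTION_TYPE and lets both branches reuse the resulting flags, replacing the old two-compare range trick. The control flow, sketched as comments:

    // Sketch of the simplified test (temp2 receives the instance type):
    //   CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE)
    //   class_name == "Function": eq -> is_true   (only functions match)
    //   any other class_name:     eq -> is_false  (functions never match)
    //   afterwards the map's constructor decides "Object" vs. the rest.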
@@ -2701,6 +2576,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = ip;
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2716,6 +2592,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ ldrb(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+
__ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, eq);
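DoHasInPrototypeChainAndBranch grows two bailouts before each step of the walk. A C++ sketch of the loop's semantics, with helper names assumed for illustration:

    // Sketch of what the emitted loop checks per map on the chain:
    while (true) {
      if (map->is_access_check_needed()) Deopt(Deoptimizer::kAccessCheck);
      if (map->instance_type() == JS_PROXY_TYPE) Deopt(Deoptimizer::kProxy);
      Object* proto = map->prototype();
      if (proto == *prototype) return true;   // found the prototype
      if (proto->IsNull()) return false;      // reached the end of the chain
      map = HeapObject::cast(proto)->map();   // advance to the next link
    }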
@@ -2754,14 +2640,13 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(r0);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
- no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
if (instr->has_constant_parameter_count()) {
@@ -2779,10 +2664,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(lr);
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
}
@@ -2798,7 +2679,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2812,7 +2693,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2833,28 +2714,10 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, instr->slot_index()));
+ __ ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
@@ -2871,7 +2734,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3327,15 +3190,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
__ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ldr(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3392,7 +3254,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3436,7 +3299,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3456,7 +3319,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize r0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ mov(r0, Operand(arity));
// Invoke function.
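CallKnownFunction (and DoCallJSFunction below) now seed r3 with undefined before entering the callee, which implies the JS calling convention has grown a new.target register alongside the argument count. The assumed register assignments:

    // Sketch of the assumed ARM JS calling convention after this change:
    //   r0 : number of actual arguments
    //   r1 : target function
    //   r3 : new.target (undefined for ordinary, non-construct calls)
    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);  // not a construct call
    __ mov(r0, Operand(arity));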
@@ -3767,7 +3631,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3838,11 +3702,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r1));
DCHECK(ToRegister(instr->result()).is(r0));
- __ mov(r0, Operand(instr->arity()));
-
// Change context.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ mov(r0, Operand(instr->arity()));
+
// Load the code entry address
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
@@ -3857,7 +3723,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3871,32 +3737,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Move(vector_register, vector);
__ mov(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ mov(r0, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
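DoCallFunction swaps CallFunctionStub and its flags for a ConvertReceiverMode that is threaded either into the call IC or straight into the generic Call builtin. A sketch of the non-IC path under that reading (the enumerator shown is an assumption):

    // Sketch: receiver conversion is chosen by builtin variant, not stub flags.
    __ mov(r0, Operand(arity));
    CallCode(isolate()->builtins()->Call(ConvertReceiverMode::kAny),
             RelocInfo::CODE_TARGET, instr);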
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- __ mov(r0, Operand(instr->arity()));
- // No cell in r2 for construct type feedback in optimized code
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r1));
@@ -3919,7 +3769,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3934,17 +3784,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
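All the array-constructor stub calls drop the special CONSTRUCT_CALL reloc mode. With new.target materialized in a register (see the calling-convention note above), a construct site presumably no longer needs to be discoverable through relocation info, so an ordinary code-target call suffices:

    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);  // was CONSTRUCT_CALL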
@@ -4076,30 +3926,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
@@ -4570,7 +4396,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5311,11 +5138,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
@@ -5389,50 +5213,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // r6 = literals array.
- // r1 = regexp literal.
- // r0 = regexp literal clone.
- // r2-5 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ Move(r6, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r6, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r0.
- __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r4, Operand(instr->hydrogen()->pattern()));
- __ mov(r3, Operand(instr->hydrogen()->flags()));
- __ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(r1, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r3));
DCHECK(ToRegister(instr->result()).is(r0));
@@ -5513,8 +5293,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
__ b(lt, false_label);
// Check for callable or undetectable objects => false.
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
@@ -5541,30 +5321,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5697,8 +5453,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ SmiTst(r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5712,7 +5468,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
@@ -5840,7 +5596,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/arm/lithium-codegen-arm.h b/chromium/v8/src/crankshaft/arm/lithium-codegen-arm.h
index dc58479047d..24a083ff2f2 100644
--- a/chromium/v8/src/arm/lithium-codegen-arm.h
+++ b/chromium/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
-#include "src/arm/lithium-arm.h"
-
-#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/ast/scopes.h"
+#include "src/crankshaft/arm/lithium-arm.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -25,13 +24,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -208,6 +203,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
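This new overload explains the argument-count drops at the call sites above (kTraceExit, kDeclareGlobals, kGetPropertyNamesFast, kPushBlockContext): the count now comes from the runtime function table instead of being repeated by hand. Usage sketch:

    // Before: the caller had to restate the argument count.
    CallRuntime(Runtime::kDeclareGlobals, 2, instr);
    // After: nargs is looked up via Runtime::FunctionForId(id).
    CallRuntime(Runtime::kDeclareGlobals, instr);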
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -238,9 +238,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
@@ -293,10 +290,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -318,13 +311,9 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -393,6 +382,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#endif // V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
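From here on the diff is dominated by the mechanical move of the Lithium/Hydrogen ports from src/&lt;arch&gt;/ into src/crankshaft/&lt;arch&gt;/: file renames, include-path rewrites, matching header-guard renames, and the switch to two-line namespace closers. The pattern, in one line:

    #include "src/crankshaft/arm/lithium-arm.h"  // was: src/arm/lithium-arm.h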
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc b/chromium/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc
index e1bd47b2ec0..066db7dc54d 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/chromium/v8/src/crankshaft/arm/lithium-gap-resolver-arm.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/lithium-codegen-arm.h"
-#include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/arm/lithium-gap-resolver-arm.h b/chromium/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h
index 88f1a7bb67f..59413c57725 100644
--- a/chromium/v8/src/arm/lithium-gap-resolver-arm.h
+++ b/chromium/v8/src/crankshaft/arm/lithium-gap-resolver-arm.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -57,6 +57,7 @@ class LGapResolver final BASE_EMBEDDED {
bool need_to_restore_root_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#endif // V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/chromium/v8/src/crankshaft/arm64/OWNERS b/chromium/v8/src/crankshaft/arm64/OWNERS
new file mode 100644
index 00000000000..906a5ce6418
--- /dev/null
+++ b/chromium/v8/src/crankshaft/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/chromium/v8/src/arm64/delayed-masm-arm64-inl.h b/chromium/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h
index 2c446303716..503fd88ded6 100644
--- a/chromium/v8/src/arm64/delayed-masm-arm64-inl.h
+++ b/chromium/v8/src/crankshaft/arm64/delayed-masm-arm64-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DELAYED_MASM_ARM64_INL_H_
-#define V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
+#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
-#include "src/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
namespace v8 {
namespace internal {
@@ -50,6 +50,7 @@ void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
#undef __
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
diff --git a/chromium/v8/src/arm64/delayed-masm-arm64.cc b/chromium/v8/src/crankshaft/arm64/delayed-masm-arm64.cc
index e86f10262fd..6124706cb3c 100644
--- a/chromium/v8/src/arm64/delayed-masm-arm64.cc
+++ b/chromium/v8/src/crankshaft/arm64/delayed-masm-arm64.cc
@@ -4,8 +4,8 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/delayed-masm-arm64.h"
-#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/arm64/delayed-masm-arm64.h b/chromium/v8/src/crankshaft/arm64/delayed-masm-arm64.h
index 76227a38983..5da2b729037 100644
--- a/chromium/v8/src/arm64/delayed-masm-arm64.h
+++ b/chromium/v8/src/crankshaft/arm64/delayed-masm-arm64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DELAYED_MASM_ARM64_H_
-#define V8_ARM64_DELAYED_MASM_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -159,6 +159,7 @@ class DelayedMasm BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_DELAYED_MASM_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
diff --git a/chromium/v8/src/arm64/lithium-arm64.cc b/chromium/v8/src/crankshaft/arm64/lithium-arm64.cc
index e623718a1a3..3f43338585e 100644
--- a/chromium/v8/src/arm64/lithium-arm64.cc
+++ b/chromium/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/lithium-arm64.h"
+#include "src/crankshaft/arm64/lithium-arm64.h"
#include <sstream>
-#include "src/arm64/lithium-codegen-arm64.h"
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -82,13 +82,6 @@ void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -289,11 +282,6 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -313,12 +301,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
left()->PrintTo(stream);
@@ -375,14 +357,13 @@ const char* LArithmeticT::Mnemonic() const {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1088,15 +1069,6 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The call to CallConstructStub will expect the constructor to be in x1.
- LOperand* constructor = UseFixed(instr->constructor(), x1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The call to ArrayConstructCode will expect the constructor to be in x1.
@@ -1396,13 +1368,6 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), x0);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
return new(zone()) LDebugBreak();
}
@@ -1585,8 +1550,11 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- LOperand* scratch = TempRegister();
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, scratch);
+ LOperand* scratch1 = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LHasInPrototypeChainAndBranch* result = new (zone())
+ LHasInPrototypeChainAndBranch(object, prototype, scratch1, scratch2);
+ return AssignEnvironment(result);
}
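The arm64 builder mirrors the ARM change above: the instruction acquires a second temp and, because it can now deoptimize (access checks, proxies), an environment. In Lithium, any instruction that may deopt must carry one:

    // Sketch: instructions that may deoptimize must be assigned an environment.
    LHasInPrototypeChainAndBranch* result = new (zone())
        LHasInPrototypeChainAndBranch(object, prototype, scratch1, scratch2);
    return AssignEnvironment(result);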
@@ -1599,12 +1567,6 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
LOperand* value = UseRegister(instr->value());
@@ -1688,15 +1650,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
@@ -1735,8 +1688,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
IsDoubleOrFloatElementsKind(instr->elements_kind())));
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LLoadKeyedExternal(elements, key, temp));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ LInstruction* result = DefineAsRegister(new (zone()) LLoadKeyedExternal(
+ elements, key, backing_store_owner, temp));
if (elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32)) {
result = AssignEnvironment(result);
@@ -2061,13 +2015,6 @@ LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
HValue* value = instr->value();
DCHECK(value->representation().IsDouble());
@@ -2362,7 +2309,9 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
DCHECK(instr->elements()->representation().IsExternal());
- return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone())
+ LStoreKeyedExternal(elements, key, val, backing_store_owner, temp);
} else if (instr->value()->representation().IsDouble()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2449,19 +2398,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);
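The arm64 keyed load/store builders (and the LLoadKeyed/LStoreKeyed templates in the header below) gain a backing_store_owner input for external-memory accesses. The plausible intent is GC liveness: keeping the object that owns the external backing store alive, as an operand, for the duration of the access. UseAny suffices because the value only needs to stay live, not sit in a particular location:

    // Sketch of the builder-side pattern (rationale assumed in comments):
    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
    LInstruction* result = DefineAsRegister(new (zone()) LLoadKeyedExternal(
        elements, key, backing_store_owner, temp));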
diff --git a/chromium/v8/src/arm64/lithium-arm64.h b/chromium/v8/src/crankshaft/arm64/lithium-arm64.h
index a77a6da38fd..1b627d13f85 100644
--- a/chromium/v8/src/arm64/lithium-arm64.h
+++ b/chromium/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_LITHIUM_ARM64_H_
-#define V8_ARM64_LITHIUM_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -35,7 +35,6 @@ class LCodeGen;
V(Branch) \
V(CallFunction) \
V(CallJSFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -65,7 +64,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -92,7 +90,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsSmiAndBranch) \
V(IsStringAndBranch) \
V(IsUndetectableAndBranch) \
@@ -102,7 +99,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
V(LoadKeyedFixedDouble) \
@@ -140,7 +136,6 @@ class LCodeGen;
V(Prologue) \
V(PreparePushArguments) \
V(PushArguments) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -152,7 +147,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -853,25 +847,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -1267,23 +1242,6 @@ class LContext final : public LTemplateInstruction<1, 0, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
@@ -1492,18 +1450,20 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
};
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
- LOperand* scratch) {
+ LOperand* scratch1, LOperand* scratch2) {
inputs_[0] = object;
inputs_[1] = prototype;
- temps_[0] = scratch;
+ temps_[0] = scratch1;
+ temps_[1] = scratch2;
}
LOperand* object() const { return inputs_[0]; }
LOperand* prototype() const { return inputs_[1]; }
- LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch1() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
"has-in-prototype-chain-and-branch")
@@ -1585,21 +1545,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 2> {
- public:
- LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1650,22 +1595,6 @@ class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1732,16 +1661,18 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-template<int T>
-class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
+template <int T>
+class LLoadKeyed : public LTemplateInstruction<1, 3, T> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
this->inputs_[0] = elements;
this->inputs_[1] = key;
+ this->inputs_[2] = backing_store_owner;
}
LOperand* elements() { return this->inputs_[0]; }
LOperand* key() { return this->inputs_[1]; }
+ LOperand* backing_store_owner() { return this->inputs_[2]; }
ElementsKind elements_kind() const {
return this->hydrogen()->elements_kind();
}
@@ -1774,8 +1705,9 @@ class LLoadKeyed : public LTemplateInstruction<1, 2, T> {
class LLoadKeyedExternal: public LLoadKeyed<1> {
public:
- LLoadKeyedExternal(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
+ LLoadKeyedExternal(LOperand* elements, LOperand* key,
+ LOperand* backing_store_owner, LOperand* temp)
+ : LLoadKeyed<1>(elements, key, backing_store_owner) {
temps_[0] = temp;
}
@@ -1787,8 +1719,8 @@ class LLoadKeyedExternal: public LLoadKeyed<1> {
class LLoadKeyedFixed: public LLoadKeyed<1> {
public:
- LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
+ LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key, nullptr) {
temps_[0] = temp;
}
@@ -1800,8 +1732,8 @@ class LLoadKeyedFixed: public LLoadKeyed<1> {
class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
public:
- LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp) :
- LLoadKeyed<1>(elements, key) {
+ LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp)
+ : LLoadKeyed<1>(elements, key, nullptr) {
temps_[0] = temp;
}
@@ -2307,19 +2239,6 @@ class LPushArguments final : public LTemplateResultInstruction<0> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LReturn final : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
@@ -2435,35 +2354,15 @@ class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-template<int T>
-class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
+template <int T>
+class LStoreKeyed : public LTemplateInstruction<0, 4, T> {
public:
- LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
this->inputs_[0] = elements;
this->inputs_[1] = key;
this->inputs_[2] = value;
+ this->inputs_[3] = backing_store_owner;
}
bool is_external() const { return this->hydrogen()->is_external(); }
@@ -2476,6 +2375,7 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
LOperand* elements() { return this->inputs_[0]; }
LOperand* key() { return this->inputs_[1]; }
LOperand* value() { return this->inputs_[2]; }
+ LOperand* backing_store_owner() { return this->inputs_[3]; }
ElementsKind elements_kind() const {
return this->hydrogen()->elements_kind();
}
@@ -2515,8 +2415,8 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
class LStoreKeyedExternal final : public LStoreKeyed<1> {
public:
LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
+ LOperand* backing_store_owner, LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value, backing_store_owner) {
temps_[0] = temp;
}
@@ -2529,8 +2429,8 @@ class LStoreKeyedExternal final : public LStoreKeyed<1> {
class LStoreKeyedFixed final : public LStoreKeyed<1> {
public:
LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value, nullptr) {
temps_[0] = temp;
}
@@ -2543,8 +2443,8 @@ class LStoreKeyedFixed final : public LStoreKeyed<1> {
class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
public:
LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
- LOperand* temp) :
- LStoreKeyed<1>(elements, key, value) {
+ LOperand* temp)
+ : LStoreKeyed<1>(elements, key, value, nullptr) {
temps_[0] = temp;
}
@@ -3254,6 +3154,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_LITHIUM_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
diff --git a/chromium/v8/src/arm64/lithium-codegen-arm64.cc b/chromium/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 108698a9ad4..571bc154afc 100644
--- a/chromium/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/chromium/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+
#include "src/arm64/frames-arm64.h"
-#include "src/arm64/lithium-codegen-arm64.h"
-#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
@@ -370,7 +371,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -384,37 +385,17 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Mov(vector_register, vector);
__ Mov(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Mov(x0, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToRegister(instr->constructor()).is(x1));
-
- __ Mov(x0, instr->arity());
- // No cell in x2 for construct type feedback in optimized code.
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-
- DCHECK(ToRegister(instr->result()).is(x0));
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->context()).is(cp));
@@ -439,7 +420,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -453,17 +434,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ B(&done);
__ Bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -593,7 +574,7 @@ void LCodeGen::SaveCallerDoubles() {
while (!iterator.Done()) {
// TODO(all): Is this supposed to save just the callee-saved doubles? It
// looks like it's saving all of them.
- FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ FPRegister value = FPRegister::from_code(iterator.Current());
__ Poke(value, count * kDoubleSize);
iterator.Advance();
count++;
@@ -611,7 +592,7 @@ void LCodeGen::RestoreCallerDoubles() {
while (!iterator.Done()) {
// TODO(all): Is this supposed to restore just the callee-saved doubles? It
// looks like it's restoring all of them.
- FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
+ FPRegister value = FPRegister::from_code(iterator.Current());
__ Peek(value, count * kDoubleSize);
iterator.Advance();
count++;
@@ -625,23 +606,12 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
- // TODO(all): Add support for stop_t FLAG in DEBUG mode.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
- __ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info()->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ Debug("stop-at", __LINE__, BREAK);
}
+#endif
}
DCHECK(__ StackPointer().Is(jssp));
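GeneratePrologue on arm64 drops the sloppy-receiver fixup (undefined receiver replaced with the global proxy), presumably because the Call builtin's receiver conversion now covers it, and gains the long-promised FLAG_stop_at debug hook. A usage sketch of the hook, with the flag spelling assumed:

    // Usage sketch (DEBUG builds only):
    //   d8 --stop_at=MyFunction script.js
    // emits a simulator/debugger break on entry to MyFunction.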
@@ -650,10 +620,9 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -682,7 +651,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
__ Push(x1, x10);
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -691,7 +660,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
// Context is returned in x0. It replaces the context passed to us. It's
@@ -914,62 +883,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
-
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
-
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::DeoptimizeBranch(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
BranchType branch_type, Register reg, int bit,
@@ -1158,7 +1071,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
Register LCodeGen::ToRegister(LOperand* op) const {
// TODO(all): support zero register results, as ToRegister32.
DCHECK((op != NULL) && op->IsRegister());
- return Register::FromAllocationIndex(op->index());
+ return Register::from_code(op->index());
}
@@ -1182,7 +1095,7 @@ Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
DCHECK((op != NULL) && op->IsDoubleRegister());
- return DoubleRegister::FromAllocationIndex(op->index());
+ return DoubleRegister::from_code(op->index());
}
@@ -1558,11 +1471,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
- } else {
- __ B(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
} else {
Register size = ToRegister32(instr->size());
__ Sxtw(size.X(), size);
@@ -1572,20 +1482,20 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ Bind(deferred->exit());
if (instr->hydrogen()->MustPrefillWithFiller()) {
- Register filler_count = temp1;
- Register filler = temp2;
- Register untagged_result = ToRegister(instr->temp3());
+ Register start = temp1;
+ Register end = temp2;
+ Register filler = ToRegister(instr->temp3());
+
+ __ Sub(start, result, kHeapObjectTag);
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Mov(filler_count, size / kPointerSize);
+ __ Add(end, start, size);
} else {
- __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
+ __ Add(end, start, ToRegister(instr->size()));
}
-
- __ Sub(untagged_result, result, kHeapObjectTag);
- __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
- __ FillFields(untagged_result, filler_count, filler);
+ __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(start, end, filler);
} else {
DCHECK(instr->temp3() == NULL);
}
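The prefill path switches from a word count consumed by FillFields to explicit [start, end) byte bounds consumed by InitializeFieldsWithFiller. A hedged sketch of the loop semantics the new bounds imply (simplified; not the actual macro-assembler output):

// start = result - kHeapObjectTag;            // untagged object start
// end   = start + size_in_bytes;              // one past the last field
// for (p = start; p < end; p += kPointerSize)
//   *p = one_pointer_filler_map;              // loaded via LoadRoot above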
@@ -1668,7 +1578,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in argc (receiver) which is x0, as
// expected by InvokeFunction.
ParameterCount actual(argc);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -1919,7 +1830,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
__ B(ge, true_label);
}
@@ -1996,7 +1907,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize x0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
__ Mov(arity_reg, arity);
// Invoke function.
@@ -2064,11 +1976,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->function()).is(x1));
- __ Mov(x0, Operand(instr->arity()));
-
// Change context.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+ __ Mov(x0, instr->arity());
+
// Load the code entry address
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
__ Call(x10);
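This LoadRoot of undefined into x3, like the one added to CallKnownFunction above, reflects the calling convention these sites now establish before every JS call; the register roles below are read off the surrounding hunks (x3 as new.target is an inference, not stated in this diff):

// x0 : number of actual arguments (arity)
// x1 : target JSFunction
// x3 : new.target -- undefined for ordinary [[Call]] invocations,
//      so call sites now initialize it explicitly every time.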
@@ -2345,27 +2259,13 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
__ JumpIfSmi(input, false_label);
Register map = scratch2;
+ __ CompareObjectType(input, map, scratch1, JS_FUNCTION_TYPE);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- // We expect CompareObjectType to load the object instance type in scratch1.
- __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, false_label);
- __ B(eq, true_label);
- __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
__ B(eq, true_label);
} else {
- __ IsObjectJSObjectType(input, map, scratch1, false_label);
+ __ B(eq, false_label);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
{
UseScratchRegisterScope temps(masm());
@@ -2616,40 +2516,6 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register temp1 = x10;
- Register temp2 = x11;
- Smi* index = instr->index();
-
- DCHECK(object.is(result) && object.Is(x0));
- DCHECK(instr->IsMarkedAsCall());
-
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(temp1, Operand(stamp));
- __ Ldr(temp1, MemOperand(temp1));
- __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(temp1, temp2);
- __ B(ne, &runtime);
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ Bind(&done);
- }
-}
-
-
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
Deoptimizer::BailoutType type = instr->hydrogen()->type();
// TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
@@ -2856,8 +2722,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(object, x1, x1, JS_PROXY_TYPE);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
Label use_cache, call_runtime;
@@ -2870,7 +2736,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
@@ -2989,7 +2855,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
- Register const object_map = ToRegister(instr->scratch());
+ Register const object_map = ToRegister(instr->scratch1());
+ Register const object_instance_type = ToRegister(instr->scratch2());
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -3004,6 +2871,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ Bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ Ldrb(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+
__ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ Cmp(object_prototype, prototype);
__ B(eq, instr->TrueLabel(chunk_));
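A hedged reading of the walk emitted above, with the two new deopt points; the loop tail (null check and back edge) is outside the visible hunk and therefore assumed:

// map = object->map();
// loop:
//   if (map->is_access_check_needed()) deopt(kAccessCheck);
//   if (map->instance_type() == JS_PROXY_TYPE) deopt(kProxy);
//   proto = map->prototype();
//   if (proto == prototype) goto true_label;
//   // assumed, outside this hunk: if (proto == null) goto false_label;
//   map = proto->map(); goto loop;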
@@ -3037,7 +2914,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3047,29 +2924,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- // Get the frame pointer for the calling frame.
- __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-
- EmitCompareAndBranch(
- instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -3203,7 +3057,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
@@ -3217,7 +3071,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
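Both IC-register helpers follow the same shape after the FeedbackVectorICSlot -> FeedbackVectorSlot rename; only the slot type changes, the materialized values do not:

// vector_register <- Handle<TypeFeedbackVector>   (embedded constant)
// slot_register   <- Smi::FromInt(vector->GetIndex(slot))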
@@ -3237,24 +3091,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(x0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
Register key,
Register base,
@@ -4617,18 +4453,16 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ Push(x0);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
Register stack_pointer = masm()->StackPointer();
__ Mov(stack_pointer, fp);
- no_frame_start = masm_->pc_offset();
__ Pop(fp, lr);
}
@@ -4641,10 +4475,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ DropBySMI(parameter_count);
}
__ Ret();
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -4893,7 +4723,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(scratch1, scratch2);
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -5371,30 +5201,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).Is(x1));
@@ -5493,7 +5299,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTagAndPush(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(x0, result);
}
@@ -5641,48 +5448,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // x7 = literals array.
- // x1 = regexp literal.
- // x0 = regexp literal clone.
- // x10-x12 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ LoadObject(x7, instr->hydrogen()->literals());
- __ Ldr(x1, FieldMemOperand(x7, literal_offset));
- __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in x0.
- __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ Mov(x11, Operand(instr->hydrogen()->pattern()));
- __ Mov(x10, Operand(instr->hydrogen()->flags()));
- __ Push(x7, x12, x11, x10);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ Mov(x1, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x0, Smi::FromInt(size));
- __ Push(x1, x0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ Pop(x1);
-
- __ Bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
-}
-
-
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object = ToRegister(instr->object());
@@ -5838,8 +5603,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ JumpIfSmi(value, false_label);
__ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(value, map, scratch, FIRST_SPEC_OBJECT_TYPE,
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
false_label, lt);
// Check for callable or undetectable objects => false.
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
@@ -5912,14 +5677,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
- __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
__ B(ge, &copy_receiver);
Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
__ B(&done);
__ Bind(&copy_receiver);
@@ -6014,7 +5779,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ Push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/arm64/lithium-codegen-arm64.h b/chromium/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index 20e572c65c7..18856da1548 100644
--- a/chromium/v8/src/arm64/lithium-codegen-arm64.h
+++ b/chromium/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
-#include "src/arm64/lithium-arm64.h"
+#include "src/crankshaft/arm64/lithium-arm64.h"
-#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/ast/scopes.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -26,13 +26,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -201,9 +197,6 @@ class LCodeGen: public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
- void PopulateDeoptimizationData(Handle<Code> code);
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
MemOperand BuildSeqStringOperand(Register string,
Register temp,
LOperand* index,
@@ -314,6 +307,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -342,13 +340,9 @@ class LCodeGen: public LCodeGenBase {
void EnsureSpaceForLazyDeopt(int space_needed) override;
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table itself is
@@ -460,6 +454,7 @@ class BranchGenerator BASE_EMBEDDED {
LCodeGen* codegen_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
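The CallRuntime(Runtime::FunctionId, LInstruction*) overload added above is what lets the .cc call sites earlier in this diff (kGetPropertyNamesFast, kTraceExit, kDeclareGlobals, kPushBlockContext) drop their explicit argument counts: the arity now comes from the runtime function table instead of being repeated at every call site. Roughly:

// CallRuntime(Runtime::kDeclareGlobals, instr);
// ...expands, per the overload above, to:
// const Runtime::Function* f =
//     Runtime::FunctionForId(Runtime::kDeclareGlobals);
// CallRuntime(f, f->nargs, instr);  // nargs lives in one place, the table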
diff --git a/chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc b/chromium/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
index 1520fa18884..3ef9f63ab59 100644
--- a/chromium/v8/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/chromium/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/delayed-masm-arm64-inl.h"
-#include "src/arm64/lithium-codegen-arm64.h"
-#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64-inl.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/arm64/lithium-gap-resolver-arm64.h b/chromium/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
index 8866db4c947..4f5eb223d42 100644
--- a/chromium/v8/src/arm64/lithium-gap-resolver-arm64.h
+++ b/chromium/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
-#include "src/arm64/delayed-masm-arm64.h"
-#include "src/lithium.h"
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -92,6 +92,7 @@ class LGapResolver BASE_EMBEDDED {
LOperand* saved_destination_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#endif // V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
diff --git a/chromium/v8/src/hydrogen-alias-analysis.h b/chromium/v8/src/crankshaft/hydrogen-alias-analysis.h
index 368dd5f020d..de8d0bdbe5a 100644
--- a/chromium/v8/src/hydrogen-alias-analysis.h
+++ b/chromium/v8/src/crankshaft/hydrogen-alias-analysis.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_ALIAS_ANALYSIS_H_
-#define V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -68,6 +68,7 @@ class HAliasAnalyzer : public ZoneObject {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_ALIAS_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
diff --git a/chromium/v8/src/hydrogen-bce.cc b/chromium/v8/src/crankshaft/hydrogen-bce.cc
index 30c218f82a0..d00d8ce25cc 100644
--- a/chromium/v8/src/hydrogen-bce.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-bce.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-bce.h"
+#include "src/crankshaft/hydrogen-bce.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-bce.h b/chromium/v8/src/crankshaft/hydrogen-bce.h
index 70c0a07d066..e819ffc403c 100644
--- a/chromium/v8/src/hydrogen-bce.h
+++ b/chromium/v8/src/crankshaft/hydrogen-bce.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_BCE_H_
-#define V8_HYDROGEN_BCE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_BCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_BCE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -46,6 +46,7 @@ class HBoundsCheckEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_BCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_BCE_H_
diff --git a/chromium/v8/src/hydrogen-bch.cc b/chromium/v8/src/crankshaft/hydrogen-bch.cc
index a4c0ae4e253..060e0bcdabb 100644
--- a/chromium/v8/src/hydrogen-bch.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-bch.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-bch.h"
+#include "src/crankshaft/hydrogen-bch.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-bch.h b/chromium/v8/src/crankshaft/hydrogen-bch.h
index 852c264c4f1..cdcd407a090 100644
--- a/chromium/v8/src/hydrogen-bch.h
+++ b/chromium/v8/src/crankshaft/hydrogen-bch.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_BCH_H_
-#define V8_HYDROGEN_BCH_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_BCH_H_
+#define V8_CRANKSHAFT_HYDROGEN_BCH_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -27,6 +27,7 @@ class HBoundsCheckHoistingPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_BCE_H_
+#endif  // V8_CRANKSHAFT_HYDROGEN_BCH_H_
diff --git a/chromium/v8/src/hydrogen-canonicalize.cc b/chromium/v8/src/crankshaft/hydrogen-canonicalize.cc
index 25911eb353a..4a07357d58d 100644
--- a/chromium/v8/src/hydrogen-canonicalize.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-canonicalize.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-canonicalize.h"
-#include "src/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-canonicalize.h"
+
+#include "src/crankshaft/hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-canonicalize.h b/chromium/v8/src/crankshaft/hydrogen-canonicalize.h
index eb230332fdd..a17557ac8b0 100644
--- a/chromium/v8/src/hydrogen-canonicalize.h
+++ b/chromium/v8/src/crankshaft/hydrogen-canonicalize.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_CANONICALIZE_H_
-#define V8_HYDROGEN_CANONICALIZE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
+#define V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class HCanonicalizePhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_CANONICALIZE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
diff --git a/chromium/v8/src/hydrogen-check-elimination.cc b/chromium/v8/src/crankshaft/hydrogen-check-elimination.cc
index 74be2e42f4a..548e4cd8bdb 100644
--- a/chromium/v8/src/hydrogen-check-elimination.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-check-elimination.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-check-elimination.h"
+#include "src/crankshaft/hydrogen-check-elimination.h"
-#include "src/hydrogen-alias-analysis.h"
-#include "src/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen-flow-engine.h"
#define GLOBAL 1
diff --git a/chromium/v8/src/hydrogen-check-elimination.h b/chromium/v8/src/crankshaft/hydrogen-check-elimination.h
index 7102a439f3b..d6339df34cf 100644
--- a/chromium/v8/src/hydrogen-check-elimination.h
+++ b/chromium/v8/src/crankshaft/hydrogen-check-elimination.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_
-#define V8_HYDROGEN_CHECK_ELIMINATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
-#include "src/hydrogen.h"
-#include "src/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
@@ -68,6 +68,7 @@ class HCheckEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_CHECK_ELIMINATION_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/chromium/v8/src/hydrogen-dce.cc b/chromium/v8/src/crankshaft/hydrogen-dce.cc
index 50a300bd940..3cb9cf4a075 100644
--- a/chromium/v8/src/hydrogen-dce.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-dce.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-dce.h"
+#include "src/crankshaft/hydrogen-dce.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-dce.h b/chromium/v8/src/crankshaft/hydrogen-dce.h
index af3679d9d39..f620a3cfa85 100644
--- a/chromium/v8/src/hydrogen-dce.h
+++ b/chromium/v8/src/crankshaft/hydrogen-dce.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_DCE_H_
-#define V8_HYDROGEN_DCE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_DCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_DCE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -29,6 +29,7 @@ class HDeadCodeEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_DCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_DCE_H_
diff --git a/chromium/v8/src/hydrogen-dehoist.cc b/chromium/v8/src/crankshaft/hydrogen-dehoist.cc
index e521c25cdae..34de94afc58 100644
--- a/chromium/v8/src/hydrogen-dehoist.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-dehoist.cc
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-dehoist.h"
+#include "src/crankshaft/hydrogen-dehoist.h"
+
#include "src/base/safe_math.h"
namespace v8 {
diff --git a/chromium/v8/src/hydrogen-dehoist.h b/chromium/v8/src/crankshaft/hydrogen-dehoist.h
index 4aab30fafa1..d68f62cf7bc 100644
--- a/chromium/v8/src/hydrogen-dehoist.h
+++ b/chromium/v8/src/crankshaft/hydrogen-dehoist.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_DEHOIST_H_
-#define V8_HYDROGEN_DEHOIST_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
+#define V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class HDehoistIndexComputationsPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_DEHOIST_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
diff --git a/chromium/v8/src/hydrogen-environment-liveness.cc b/chromium/v8/src/crankshaft/hydrogen-environment-liveness.cc
index 7cc4dc04a84..ae0bd088376 100644
--- a/chromium/v8/src/hydrogen-environment-liveness.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-environment-liveness.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
-#include "src/hydrogen-environment-liveness.h"
+#include "src/crankshaft/hydrogen-environment-liveness.h"
namespace v8 {
diff --git a/chromium/v8/src/hydrogen-environment-liveness.h b/chromium/v8/src/crankshaft/hydrogen-environment-liveness.h
index e595927f9d4..d9e156b7e98 100644
--- a/chromium/v8/src/hydrogen-environment-liveness.h
+++ b/chromium/v8/src/crankshaft/hydrogen-environment-liveness.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-#define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
-
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -63,6 +62,7 @@ class HEnvironmentLivenessAnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif /* V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ */
+#endif // V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
diff --git a/chromium/v8/src/hydrogen-escape-analysis.cc b/chromium/v8/src/crankshaft/hydrogen-escape-analysis.cc
index 36137371926..ab3bff2edcd 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-escape-analysis.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-escape-analysis.h"
+#include "src/crankshaft/hydrogen-escape-analysis.h"
namespace v8 {
namespace internal {
@@ -142,6 +142,7 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
// necessary.
HValue* HEscapeAnalysisPhase::NewLoadReplacement(
HLoadNamedField* load, HValue* load_value) {
+ isolate()->counters()->crankshaft_escape_loads_replaced()->Increment();
HValue* replacement = load_value;
Representation representation = load->representation();
if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
@@ -319,6 +320,8 @@ void HEscapeAnalysisPhase::Run() {
for (int i = 0; i < max_fixpoint_iteration_count; i++) {
CollectCapturedValues();
if (captured_.is_empty()) break;
+ isolate()->counters()->crankshaft_escape_allocs_replaced()->Increment(
+ captured_.length());
PerformScalarReplacement();
captured_.Rewind(0);
}
diff --git a/chromium/v8/src/hydrogen-escape-analysis.h b/chromium/v8/src/crankshaft/hydrogen-escape-analysis.h
index 0726b8edbe4..7dac6debe0e 100644
--- a/chromium/v8/src/hydrogen-escape-analysis.h
+++ b/chromium/v8/src/crankshaft/hydrogen-escape-analysis.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_ESCAPE_ANALYSIS_H_
-#define V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
#include "src/allocation.h"
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -65,6 +65,7 @@ class HEscapeAnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_ESCAPE_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
diff --git a/chromium/v8/src/hydrogen-flow-engine.h b/chromium/v8/src/crankshaft/hydrogen-flow-engine.h
index 257ab466a1e..3a488ddc18a 100644
--- a/chromium/v8/src/hydrogen-flow-engine.h
+++ b/chromium/v8/src/crankshaft/hydrogen-flow-engine.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_FLOW_ENGINE_H_
-#define V8_HYDROGEN_FLOW_ENGINE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
+#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
-#include "src/hydrogen.h"
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
namespace v8 {
@@ -214,6 +214,7 @@ class HFlowEngine {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_FLOW_ENGINE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
diff --git a/chromium/v8/src/hydrogen-gvn.cc b/chromium/v8/src/crankshaft/hydrogen-gvn.cc
index 31a2cd68a55..07bfabc79ac 100644
--- a/chromium/v8/src/hydrogen-gvn.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-gvn.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen.h"
-#include "src/hydrogen-gvn.h"
+#include "src/crankshaft/hydrogen-gvn.h"
+
+#include "src/crankshaft/hydrogen.h"
#include "src/v8.h"
namespace v8 {
diff --git a/chromium/v8/src/hydrogen-gvn.h b/chromium/v8/src/crankshaft/hydrogen-gvn.h
index fc7f27368e2..a5e2168603d 100644
--- a/chromium/v8/src/hydrogen-gvn.h
+++ b/chromium/v8/src/crankshaft/hydrogen-gvn.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_GVN_H_
-#define V8_HYDROGEN_GVN_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_GVN_H_
+#define V8_CRANKSHAFT_HYDROGEN_GVN_H_
#include <iosfwd>
-#include "src/hydrogen.h"
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
namespace v8 {
@@ -148,6 +148,7 @@ class HGlobalValueNumberingPhase final : public HPhase {
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_GVN_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_GVN_H_
diff --git a/chromium/v8/src/hydrogen-infer-representation.cc b/chromium/v8/src/crankshaft/hydrogen-infer-representation.cc
index 6687aefed8f..74f264e17a2 100644
--- a/chromium/v8/src/hydrogen-infer-representation.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-infer-representation.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-infer-representation.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-infer-representation.h b/chromium/v8/src/crankshaft/hydrogen-infer-representation.h
index d07f89d973f..92f2bc8c597 100644
--- a/chromium/v8/src/hydrogen-infer-representation.h
+++ b/chromium/v8/src/crankshaft/hydrogen-infer-representation.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_INFER_REPRESENTATION_H_
-#define V8_HYDROGEN_INFER_REPRESENTATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -29,6 +29,7 @@ class HInferRepresentationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_INFER_REPRESENTATION_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
diff --git a/chromium/v8/src/hydrogen-infer-types.cc b/chromium/v8/src/crankshaft/hydrogen-infer-types.cc
index ea69662b40b..bfd3dd22814 100644
--- a/chromium/v8/src/hydrogen-infer-types.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-infer-types.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-infer-types.h"
+#include "src/crankshaft/hydrogen-infer-types.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-infer-types.h b/chromium/v8/src/crankshaft/hydrogen-infer-types.h
index 41337ac5c0d..8acfeabd60d 100644
--- a/chromium/v8/src/hydrogen-infer-types.h
+++ b/chromium/v8/src/crankshaft/hydrogen-infer-types.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_INFER_TYPES_H_
-#define V8_HYDROGEN_INFER_TYPES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
+#define V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -31,6 +31,7 @@ class HInferTypesPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_INFER_TYPES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
diff --git a/chromium/v8/src/hydrogen-instructions.cc b/chromium/v8/src/crankshaft/hydrogen-instructions.cc
index 4482155fbea..e2e026fb5f0 100644
--- a/chromium/v8/src/hydrogen-instructions.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-instructions.cc
@@ -2,36 +2,35 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-instructions.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/base/bits.h"
+#include "src/base/safe_math.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
#include "src/double.h"
#include "src/elements.h"
#include "src/factory.h"
-#include "src/hydrogen-infer-representation.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
-#include "src/base/safe_math.h"
-
namespace v8 {
namespace internal {
@@ -777,7 +776,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kBlockEntry:
case HValue::kBoundsCheckBaseIndexInformation:
case HValue::kCallFunction:
- case HValue::kCallNew:
case HValue::kCallNewArray:
case HValue::kCallStub:
case HValue::kCapturedObject:
@@ -804,15 +802,12 @@ bool HInstruction::CanDeoptimize() {
case HValue::kHasInstanceTypeAndBranch:
case HValue::kInnerAllocatedObject:
case HValue::kInstanceOf:
- case HValue::kIsConstructCallAndBranch:
- case HValue::kHasInPrototypeChainAndBranch:
case HValue::kIsSmiAndBranch:
case HValue::kIsStringAndBranch:
case HValue::kIsUndetectableAndBranch:
case HValue::kLeaveInlined:
case HValue::kLoadFieldByIndex:
case HValue::kLoadGlobalGeneric:
- case HValue::kLoadGlobalViaContext:
case HValue::kLoadNamedField:
case HValue::kLoadNamedGeneric:
case HValue::kLoadRoot:
@@ -821,12 +816,10 @@ bool HInstruction::CanDeoptimize() {
case HValue::kParameter:
case HValue::kPhi:
case HValue::kPushArguments:
- case HValue::kRegExpLiteral:
case HValue::kReturn:
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
case HValue::kStoreFrameContext:
- case HValue::kStoreGlobalViaContext:
case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
@@ -856,11 +849,11 @@ bool HInstruction::CanDeoptimize() {
case HValue::kCheckSmi:
case HValue::kCheckValue:
case HValue::kClampToUint8:
- case HValue::kDateField:
case HValue::kDeoptimize:
case HValue::kDiv:
case HValue::kForInCacheArray:
case HValue::kForInPrepareMap:
+ case HValue::kHasInPrototypeChainAndBranch:
case HValue::kInvokeFunction:
case HValue::kLoadContextSlot:
case HValue::kLoadFunctionPrototype:
@@ -952,6 +945,7 @@ std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const { // NOLINT
if (HasVectorAndSlot()) {
os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
}
+ os << " (convert mode " << convert_mode() << ")";
return os;
}
@@ -1528,7 +1522,7 @@ HValue* HChange::Canonicalize() {
HValue* HWrapReceiver::Canonicalize() {
if (HasNoUses()) return NULL;
- if (receiver()->type().IsJSObject()) {
+ if (receiver()->type().IsJSReceiver()) {
return receiver();
}
return this;
@@ -1584,9 +1578,10 @@ HValue* HUnaryMathOperation::Canonicalize() {
HDiv* hdiv = HDiv::cast(value());
HValue* left = hdiv->left();
- if (left->representation().IsInteger32()) {
+ if (left->representation().IsInteger32() && !left->CheckFlag(kUint32)) {
// A value with an integer representation does not need to be transformed.
- } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32()) {
+ } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32() &&
+ !HChange::cast(left)->value()->CheckFlag(kUint32)) {
// A change from an integer32 can be replaced by the integer32 value.
left = HChange::cast(left)->value();
} else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
@@ -1600,10 +1595,12 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (right->IsInteger32Constant()) {
right = Prepend(HConstant::cast(right)->CopyToRepresentation(
Representation::Integer32(), right->block()->zone()));
- } else if (right->representation().IsInteger32()) {
+ } else if (right->representation().IsInteger32() &&
+ !right->CheckFlag(kUint32)) {
// A value with an integer representation does not need to be transformed.
} else if (right->IsChange() &&
- HChange::cast(right)->from().IsInteger32()) {
+ HChange::cast(right)->from().IsInteger32() &&
+ !HChange::cast(right)->value()->CheckFlag(kUint32)) {
// A change from an integer32 can be replaced by the integer32 value.
right = HChange::cast(right)->value();
} else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
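The added !CheckFlag(kUint32) guards keep this flooring-division canonicalization from reinterpreting unsigned inputs as signed when stripping the change-to-double. A standalone worked example of the failure they prevent (not from the source):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t u = 0x80000000u;             // 2147483648 as uint32
  int32_t s = static_cast<int32_t>(u);  // -2147483648 as int32 (same bits)
  assert(u / 2 == 1073741824u);         // unsigned flooring division
  assert(s / 2 == -1073741824);         // same bit pattern, opposite sign
  return 0;
}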
@@ -1621,7 +1618,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
HValue* HCheckInstanceType::Canonicalize() {
- if ((check_ == IS_SPEC_OBJECT && value()->type().IsJSObject()) ||
+ if ((check_ == IS_JS_RECEIVER && value()->type().IsJSReceiver()) ||
(check_ == IS_JS_ARRAY && value()->type().IsJSArray()) ||
(check_ == IS_STRING && value()->type().IsString())) {
return value();
@@ -1640,9 +1637,9 @@ void HCheckInstanceType::GetCheckInterval(InstanceType* first,
InstanceType* last) {
DCHECK(is_interval_check());
switch (check_) {
- case IS_SPEC_OBJECT:
- *first = FIRST_SPEC_OBJECT_TYPE;
- *last = LAST_SPEC_OBJECT_TYPE;
+ case IS_JS_RECEIVER:
+ *first = FIRST_JS_RECEIVER_TYPE;
+ *last = LAST_JS_RECEIVER_TYPE;
return;
case IS_JS_ARRAY:
*first = *last = JS_ARRAY_TYPE;
@@ -1717,7 +1714,7 @@ HValue* HCheckValue::Canonicalize() {
const char* HCheckInstanceType::GetCheckName() const {
switch (check_) {
- case IS_SPEC_OBJECT: return "object";
+ case IS_JS_RECEIVER: return "object";
case IS_JS_ARRAY: return "array";
case IS_JS_DATE:
return "date";
@@ -3277,7 +3274,7 @@ bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
value()->type().IsNull() ||
value()->type().IsBoolean() ||
value()->type().IsUndefined() ||
- value()->type().IsJSObject()) {
+ value()->type().IsJSReceiver()) {
*block = SecondSuccessor();
return true;
}
@@ -3545,7 +3542,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
HInstruction* index = HLoadKeyed::New(
block()->graph()->isolate(), block()->graph()->zone(),
block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
- key_load->key(), key_load->elements_kind());
+ key_load->key(), nullptr, key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
return Prepend(new(block()->zone()) HLoadFieldByIndex(
@@ -3566,13 +3563,6 @@ std::ostream& HStoreNamedGeneric::PrintDataTo(
}
-std::ostream& HStoreGlobalViaContext::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << " depth:" << depth() << " slot:" << slot_index() << " = "
- << NameOf(value());
-}
-
-
std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(object()) << access_ << " = " << NameOf(value());
if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3623,12 +3613,6 @@ std::ostream& HLoadGlobalGeneric::PrintDataTo(
}
-std::ostream& HLoadGlobalViaContext::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << "depth:" << depth() << " slot:" << slot_index();
-}
-
-
std::ostream& HInnerAllocatedObject::PrintDataTo(
std::ostream& os) const { // NOLINT
os << NameOf(base_object()) << " offset ";
@@ -4093,11 +4077,13 @@ HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
}
switch (op) {
case kMathExp:
- return H_CONSTANT_DOUBLE(fast_exp(d));
+ lazily_initialize_fast_exp(isolate);
+ return H_CONSTANT_DOUBLE(fast_exp(d, isolate));
case kMathLog:
return H_CONSTANT_DOUBLE(std::log(d));
case kMathSqrt:
- return H_CONSTANT_DOUBLE(fast_sqrt(d));
+ lazily_initialize_fast_sqrt(isolate);
+ return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate));
case kMathPowHalf:
return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
case kMathAbs:
@@ -4169,8 +4155,8 @@ HInstruction* HPower::New(Isolate* isolate, Zone* zone, HValue* context,
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
- double result = power_helper(c_left->DoubleValue(),
- c_right->DoubleValue());
+ double result =
+ power_helper(isolate, c_left->DoubleValue(), c_right->DoubleValue());
return H_CONSTANT_DOUBLE(std::isnan(result)
? std::numeric_limits<double>::quiet_NaN()
: result);
@@ -4693,13 +4679,13 @@ std::ostream& operator<<(std::ostream& os, const HObjectAccess& access) {
break;
case HObjectAccess::kDouble: // fall through
case HObjectAccess::kInobject:
- if (!access.name().is_null()) {
+ if (!access.name().is_null() && access.name()->IsString()) {
os << Handle<String>::cast(access.name())->ToCString().get();
}
os << "[in-object]";
break;
case HObjectAccess::kBackingStore:
- if (!access.name().is_null()) {
+ if (!access.name().is_null() && access.name()->IsString()) {
os << Handle<String>::cast(access.name())->ToCString().get();
}
os << "[backing-store]";
diff --git a/chromium/v8/src/hydrogen-instructions.h b/chromium/v8/src/crankshaft/hydrogen-instructions.h
index 9f5bc2099c2..13ada8c606c 100644
--- a/chromium/v8/src/hydrogen-instructions.h
+++ b/chromium/v8/src/crankshaft/hydrogen-instructions.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
-#define V8_HYDROGEN_INSTRUCTIONS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
+#define V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
#include <cstring>
#include <iosfwd>
@@ -13,10 +13,10 @@
#include "src/bit-vector.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
+#include "src/crankshaft/hydrogen-types.h"
+#include "src/crankshaft/unique.h"
#include "src/deoptimizer.h"
-#include "src/hydrogen-types.h"
#include "src/small-pointer-list.h"
-#include "src/unique.h"
#include "src/utils.h"
#include "src/zone.h"
@@ -62,7 +62,6 @@ class LChunkBuilder;
V(CallWithDescriptor) \
V(CallJSFunction) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -86,7 +85,6 @@ class LChunkBuilder;
V(Constant) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -105,7 +103,6 @@ class LChunkBuilder;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -115,7 +112,6 @@ class LChunkBuilder;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -132,7 +128,6 @@ class LChunkBuilder;
V(Power) \
V(Prologue) \
V(PushArguments) \
- V(RegExpLiteral) \
V(Return) \
V(Ror) \
V(Sar) \
@@ -145,7 +140,6 @@ class LChunkBuilder;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -780,7 +774,7 @@ class HValue : public ZoneObject {
bool ToStringOrToNumberCanBeObserved() const {
if (type().IsTaggedPrimitive()) return false;
- if (type().IsJSObject()) return true;
+ if (type().IsJSReceiver()) return true;
return !representation().IsSmiOrInteger32() && !representation().IsDouble();
}
@@ -931,6 +925,12 @@ std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
return new (zone) I(p1, p2, p3, p4, p5, p6); \
}
+#define DECLARE_INSTRUCTION_FACTORY_P7(I, P1, P2, P3, P4, P5, P6, P7) \
+ static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+ P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { \
+ return new (zone) I(p1, p2, p3, p4, p5, p6, p7); \
+ }
+
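DECLARE_INSTRUCTION_FACTORY_P7 extends the existing factory family by one arity step; like its siblings it uses the zone for placement-new and ignores isolate and context. An illustrative expansion for a hypothetical HFoo (HFoo and its parameter types are placeholders):

// DECLARE_INSTRUCTION_FACTORY_P7(HFoo, HValue*, HValue*, HValue*, HValue*,
//                                int, bool, ElementsKind)
// ...expands to:
// static HFoo* New(Isolate* isolate, Zone* zone, HValue* context,
//                  HValue* p1, HValue* p2, HValue* p3, HValue* p4,
//                  int p5, bool p6, ElementsKind p7) {
//   return new (zone) HFoo(p1, p2, p3, p4, p5, p6, p7);
// }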
#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
return new (zone) I(context); \
@@ -2396,21 +2396,20 @@ class HInvokeFunction final : public HBinaryCall {
class HCallFunction final : public HBinaryCall {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
- HCallFunction, HValue*, int, CallFunctionFlags);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallFunction, HValue*, int,
+ ConvertReceiverMode);
HValue* context() const { return first(); }
HValue* function() const { return second(); }
- CallFunctionFlags function_flags() const { return function_flags_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ ConvertReceiverMode convert_mode() const { return convert_mode_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -2423,28 +2422,12 @@ class HCallFunction final : public HBinaryCall {
private:
HCallFunction(HValue* context, HValue* function, int argument_count,
- CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS)
+ ConvertReceiverMode convert_mode)
: HBinaryCall(context, function, argument_count),
- function_flags_(flags),
- slot_(FeedbackVectorICSlot::Invalid()) {}
- CallFunctionFlags function_flags_;
+ convert_mode_(convert_mode) {}
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
-};
-
-
-class HCallNew final : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
-
- HValue* context() { return first(); }
- HValue* constructor() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew)
-
- private:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {}
+ FeedbackVectorSlot slot_;
+ ConvertReceiverMode convert_mode_;
};
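With CallFunctionFlags gone, the receiver-coercion decision travels as an explicit enum on the node. A hedged construction sketch, assuming the ConvertReceiverMode enumerators of this era (kNullOrUndefined, kNotNullOrUndefined, kAny):

// HCallFunction* call = HCallFunction::New(
//     isolate, zone, context, function, argument_count,
//     ConvertReceiverMode::kAny);  // the builder states how the receiver
//                                  // must be coerced; no flag bag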
@@ -2883,7 +2866,7 @@ class HCheckValue final : public HUnaryOperation {
class HCheckInstanceType final : public HUnaryOperation {
public:
enum Check {
- IS_SPEC_OBJECT,
+ IS_JS_RECEIVER,
IS_JS_ARRAY,
IS_JS_DATE,
IS_STRING,
@@ -2901,10 +2884,9 @@ class HCheckInstanceType final : public HUnaryOperation {
HType CalculateInferredType() override {
switch (check_) {
- case IS_SPEC_OBJECT: return HType::JSObject();
+ case IS_JS_RECEIVER: return HType::JSReceiver();
case IS_JS_ARRAY: return HType::JSArray();
- case IS_JS_DATE:
- return HType::JSObject();
+ case IS_JS_DATE: return HType::JSObject();
case IS_STRING: return HType::String();
case IS_INTERNALIZED_STRING: return HType::String();
}
@@ -3272,7 +3254,7 @@ class HPhi final : public HValue {
Representation RepresentationFromInputs() override;
Range* InferRange(Zone* zone) override;
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
}
@@ -3828,15 +3810,15 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return observed_input_representation_[index - 1];
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
? Representation::Integer32() : new_rep;
HValue::UpdateRepresentation(rep, h_infer, reason);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RepresentationFromInputs() override;
Representation RepresentationFromOutput();
void AssumeRepresentation(Representation r) override;
@@ -4067,7 +4049,7 @@ class HBoundsCheck final : public HTemplateInstruction<2> {
}
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
HValue* index() const { return OperandAt(0); }
HValue* length() const { return OperandAt(1); }
@@ -4161,9 +4143,9 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
// We only generate either int32 or generic tagged bitwise operations.
if (new_rep.IsDouble()) new_rep = Representation::Integer32();
HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
@@ -4175,8 +4157,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
return r;
}
- virtual void initialize_output_representation(
- Representation observed) override {
+ void initialize_output_representation(Representation observed) override {
if (observed.IsDouble()) observed = Representation::Integer32();
HBinaryOperation::initialize_output_representation(observed);
}
@@ -4310,7 +4291,7 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
observed_input_representation_[1] = right;
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
@@ -4358,7 +4339,7 @@ class HCompareHoleAndBranch final : public HUnaryControlInstruction {
DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
HBasicBlock*, HBasicBlock*);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
@@ -4381,7 +4362,7 @@ class HCompareMinusZeroAndBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RequiredInputRepresentation(int index) override {
return representation();
@@ -4564,20 +4545,6 @@ class HStringCompareAndBranch final : public HTemplateControlInstruction<2, 3> {
};
-class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
- private:
- HIsConstructCallAndBranch() {}
-};
-
-
class HHasInstanceTypeAndBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(
@@ -4954,9 +4921,9 @@ class HMul final : public HArithmeticBinaryOperation {
// Only commutative if it is certain that not two objects are multiplied.
bool IsCommutative() const override { return !representation().IsTagged(); }
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -4985,9 +4952,9 @@ class HMod final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5017,9 +4984,9 @@ class HDiv final : public HArithmeticBinaryOperation {
HValue* Canonicalize() override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5051,7 +5018,7 @@ class HMathMinMax final : public HArithmeticBinaryOperation {
return RequiredInputRepresentation(index);
}
- virtual void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+ void InferRepresentation(HInferRepresentationPhase* h_infer) override;
Representation RepresentationFromInputs() override {
Representation left_rep = left()->representation();
@@ -5151,9 +5118,9 @@ class HShl final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi() &&
!(right()->IsInteger32Constant() &&
right()->GetInteger32Constant() >= 0)) {
@@ -5193,9 +5160,9 @@ class HShr final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5231,9 +5198,9 @@ class HSar final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5257,9 +5224,9 @@ class HRor final : public HBitwiseBinaryOperation {
return new (zone) HRor(context, left, right, strength);
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentationPhase* h_infer,
- const char* reason) override {
+ void UpdateRepresentation(Representation new_rep,
+ HInferRepresentationPhase* h_infer,
+ const char* reason) override {
if (new_rep.IsSmi()) new_rep = Representation::Integer32();
HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
}
@@ -5415,13 +5382,13 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
HValue* global_object() { return OperandAt(1); }
Handle<String> name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -5437,9 +5404,7 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
private:
HLoadGlobalGeneric(HValue* context, HValue* global_object,
Handle<String> name, TypeofMode typeof_mode)
- : name_(name),
- typeof_mode_(typeof_mode),
- slot_(FeedbackVectorICSlot::Invalid()) {
+ : name_(name), typeof_mode_(typeof_mode) {
SetOperandAt(0, context);
SetOperandAt(1, global_object);
set_representation(Representation::Tagged());
@@ -5449,36 +5414,7 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
Handle<String> name_;
TypeofMode typeof_mode_;
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
-};
-
-
-class HLoadGlobalViaContext final : public HTemplateInstruction<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadGlobalViaContext, int, int);
-
- HValue* context() { return OperandAt(0); }
- int depth() const { return depth_; }
- int slot_index() const { return slot_index_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext)
-
- private:
- HLoadGlobalViaContext(HValue* context, int depth, int slot_index)
- : depth_(depth), slot_index_(slot_index) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- int const depth_;
- int const slot_index_;
+ FeedbackVectorSlot slot_;
};
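
The slot hunks above swap FeedbackVectorICSlot for plain FeedbackVectorSlot and, in the constructor, drop the explicit slot_(FeedbackVectorICSlot::Invalid()) initializer. That is only sound if the unified type default-constructs to an invalid slot; a hedged sketch of that contract, with simplified stand-in classes rather than the real V8 declarations:

    #include <cassert>

    // Stand-in for the unified slot type, not the real V8 declaration.
    class FeedbackVectorSlot {
     public:
      FeedbackVectorSlot() : id_(kInvalid) {}  // default == invalid
      explicit FeedbackVectorSlot(int id) : id_(id) {}
      bool IsInvalid() const { return id_ == kInvalid; }

     private:
      static constexpr int kInvalid = -1;
      int id_;
    };

    class LoadGenericLike {
     public:
      // No slot_ entry in the initializer list, matching the diff:
      // the default constructor already yields an invalid slot.
      FeedbackVectorSlot slot() const { return slot_; }
      void SetSlot(FeedbackVectorSlot slot) { slot_ = slot; }

     private:
      FeedbackVectorSlot slot_;
    };

    int main() {
      LoadGenericLike load;
      assert(load.slot().IsInvalid());
      load.SetSlot(FeedbackVectorSlot(2));
      assert(!load.slot().IsInvalid());
    }
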
@@ -5555,8 +5491,8 @@ class HAllocate final : public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override;
+ bool HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) override;
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
@@ -5745,15 +5681,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
return false;
}
- // Stores to old space allocations require no write barriers if the value is
- // an old space allocation.
- while (value->IsInnerAllocatedObject()) {
- value = HInnerAllocatedObject::cast(value)->base_object();
- }
- if (value->IsAllocate() &&
- !HAllocate::cast(value)->IsNewSpaceAllocation()) {
- return false;
- }
}
return true;
}
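
The deleted block removed the shortcut that skipped write barriers when the stored value could be traced back to an old-space allocation; after this hunk, only the constant-known-not-in-new-space case still elides the barrier. A compact sketch of the narrowed predicate (stand-in struct; the real function also inspects the receiver and its allocation):

    #include <cassert>

    struct ValueLike {
      bool is_constant_not_in_new_space;
    };

    // The old-space-allocation walk is gone, so only provably safe
    // constants may skip the write barrier now.
    bool NeedsWriteBarrier(const ValueLike& v) {
      return !v.is_constant_not_in_new_space;
    }

    int main() {
      assert(!NeedsWriteBarrier(ValueLike{true}));
      assert(NeedsWriteBarrier(ValueLike{false}));
    }
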
@@ -6226,8 +6153,16 @@ class HObjectAccess final {
JSArrayBufferView::kByteLengthOffset);
}
- static HObjectAccess ForGlobalObjectNativeContext() {
- return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
+ static HObjectAccess ForJSGlobalObjectNativeContext() {
+ return HObjectAccess(kInobject, JSGlobalObject::kNativeContextOffset);
+ }
+
+ static HObjectAccess ForJSRegExpFlags() {
+ return HObjectAccess(kInobject, JSRegExp::kFlagsOffset);
+ }
+
+ static HObjectAccess ForJSRegExpSource() {
+ return HObjectAccess(kInobject, JSRegExp::kSourceOffset);
}
static HObjectAccess ForJSCollectionTable() {
@@ -6475,13 +6410,13 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
InlineCacheState initialization_state() const {
return initialization_state_;
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6501,7 +6436,6 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
- slot_(FeedbackVectorICSlot::Invalid()),
language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, context);
@@ -6512,7 +6446,7 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -6569,14 +6503,14 @@ enum LoadKeyedHoleMode {
};
-class HLoadKeyed final : public HTemplateInstruction<3>,
+class HLoadKeyed final : public HTemplateInstruction<4>,
public ArrayInstructionInterface {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
+ DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
ElementsKind);
- DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*,
+ DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode);
- DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*,
+ DECLARE_INSTRUCTION_FACTORY_P7(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode, int);
bool is_fixed_typed_array() const {
@@ -6589,6 +6523,11 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
return OperandAt(2);
}
bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
+ HValue* backing_store_owner() const {
+ DCHECK(HasBackingStoreOwner());
+ return OperandAt(3);
+ }
+ bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); }
bool TryIncreaseBaseOffset(uint32_t increase_by_value) override;
HValue* GetKey() override { return key(); }
@@ -6619,7 +6558,12 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
return ArrayInstructionInterface::KeyedAccessIndexRequirement(
OperandAt(1)->representation());
}
- return Representation::None();
+ if (index == 2) {
+ return Representation::None();
+ }
+ DCHECK_EQ(3, index);
+ return HasBackingStoreOwner() ? Representation::Tagged()
+ : Representation::None();
}
Representation observed_input_representation(int index) override {
@@ -6647,7 +6591,7 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
private:
HLoadKeyed(HValue* obj, HValue* key, HValue* dependency,
- ElementsKind elements_kind,
+ HValue* backing_store_owner, ElementsKind elements_kind,
LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
int offset = kDefaultKeyedHeaderOffsetSentinel)
: bit_field_(0) {
@@ -6660,7 +6604,9 @@ class HLoadKeyed final : public HTemplateInstruction<3>,
SetOperandAt(0, obj);
SetOperandAt(1, key);
- SetOperandAt(2, dependency != NULL ? dependency : obj);
+ SetOperandAt(2, dependency != nullptr ? dependency : obj);
+ SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
+ DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
if (!is_fixed_typed_array()) {
// I can detect the case between storing double (holey and fast) and
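
HLoadKeyed grows from three operands to four and encodes an absent backing-store owner by aliasing operand 3 to operand 0, the same trick the dependency operand in slot 2 already uses. A minimal compilable sketch of the pattern, with a hypothetical Node type standing in for the instruction classes:

    #include <cassert>

    // Sketch: a fixed-arity node encoding optional operands by pointing
    // them at operand 0 when absent, mirroring HLoadKeyed above.
    struct Node {
      Node* operands[4];

      Node(Node* obj, Node* key, Node* dependency, Node* backing_store_owner) {
        operands[0] = obj;
        operands[1] = key;
        operands[2] = dependency != nullptr ? dependency : obj;
        operands[3] = backing_store_owner != nullptr ? backing_store_owner : obj;
      }

      bool HasDependency() const { return operands[0] != operands[2]; }
      bool HasBackingStoreOwner() const { return operands[0] != operands[3]; }

      Node* backing_store_owner() const {
        assert(HasBackingStoreOwner());
        return operands[3];
      }
    };

    int main() {
      Node obj{nullptr, nullptr, nullptr, nullptr};
      Node key{nullptr, nullptr, nullptr, nullptr};
      Node load(&obj, &key, /*dependency=*/nullptr, /*backing_store_owner=*/nullptr);
      assert(!load.HasDependency() && !load.HasBackingStoreOwner());
    }

One consequence of the encoding is that an object can never be its own explicit owner; the DCHECK_EQ against is_fixed_typed_array() in the constructor keeps the two states from being confused.
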
@@ -6753,7 +6699,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
InlineCacheState initialization_state() const {
return initialization_state_;
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
@@ -6762,7 +6708,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
return !feedback_vector_.is_null();
}
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6784,8 +6730,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
LanguageMode language_mode,
InlineCacheState initialization_state)
- : slot_(FeedbackVectorICSlot::Invalid()),
- initialization_state_(initialization_state),
+ : initialization_state_(initialization_state),
language_mode_(language_mode) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
@@ -6795,7 +6740,7 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
}
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
InlineCacheState initialization_state_;
LanguageMode language_mode_;
};
@@ -6850,8 +6795,8 @@ class HStoreNamedField final : public HTemplateInstruction<3> {
}
return Representation::Tagged();
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override {
+ bool HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) override {
DCHECK(side_effect == kNewSpacePromotion);
if (!FLAG_use_write_barrier_elimination) return false;
dominator_ = dominator;
@@ -6980,13 +6925,13 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
return Representation::Tagged();
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return FLAG_vector_stores; }
+ bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -6998,7 +6943,6 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
HValue* value, LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
- slot_(FeedbackVectorICSlot::Invalid()),
language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, object);
@@ -7009,54 +6953,22 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
-class HStoreGlobalViaContext final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreGlobalViaContext, HValue*,
- int, int, LanguageMode);
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
- int depth() const { return depth_; }
- int slot_index() const { return slot_index_; }
- LanguageMode language_mode() const { return language_mode_; }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext)
-
- private:
- HStoreGlobalViaContext(HValue* context, HValue* value, int depth,
- int slot_index, LanguageMode language_mode)
- : depth_(depth), slot_index_(slot_index), language_mode_(language_mode) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- SetAllSideEffects();
- }
-
- int const depth_;
- int const slot_index_;
- LanguageMode const language_mode_;
-};
-
-
-class HStoreKeyed final : public HTemplateInstruction<3>,
+class HStoreKeyed final : public HTemplateInstruction<4>,
public ArrayInstructionInterface {
public:
- DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
- ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
- ElementsKind, StoreFieldOrKeyedMode);
+ HValue*, ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
- ElementsKind, StoreFieldOrKeyedMode, int);
+ HValue*, ElementsKind, StoreFieldOrKeyedMode);
+ DECLARE_INSTRUCTION_FACTORY_P7(HStoreKeyed, HValue*, HValue*, HValue*,
+ HValue*, ElementsKind, StoreFieldOrKeyedMode,
+ int);
Representation RequiredInputRepresentation(int index) override {
// kind_fast: tagged[int32] = tagged
@@ -7070,10 +6982,13 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
} else if (index == 1) {
return ArrayInstructionInterface::KeyedAccessIndexRequirement(
OperandAt(1)->representation());
+ } else if (index == 2) {
+ return RequiredValueRepresentation(elements_kind(), store_mode());
}
- DCHECK_EQ(index, 2);
- return RequiredValueRepresentation(elements_kind(), store_mode());
+ DCHECK_EQ(3, index);
+ return HasBackingStoreOwner() ? Representation::Tagged()
+ : Representation::None();
}
static Representation RequiredValueRepresentation(
@@ -7102,7 +7017,7 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
}
Representation observed_input_representation(int index) override {
- if (index < 2) return RequiredInputRepresentation(index);
+ if (index != 2) return RequiredInputRepresentation(index);
if (IsUninitialized()) {
return Representation::None();
}
@@ -7116,6 +7031,11 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
HValue* elements() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* value() const { return OperandAt(2); }
+ HValue* backing_store_owner() const {
+ DCHECK(HasBackingStoreOwner());
+ return OperandAt(3);
+ }
+ bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
bool value_is_smi() const { return IsFastSmiElementsKind(elements_kind()); }
StoreFieldOrKeyedMode store_mode() const {
return StoreModeField::decode(bit_field_);
@@ -7142,8 +7062,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
}
- virtual bool HandleSideEffectDominator(GVNFlag side_effect,
- HValue* dominator) override {
+ bool HandleSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) override {
DCHECK(side_effect == kNewSpacePromotion);
dominator_ = dominator;
return false;
@@ -7171,7 +7091,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
private:
- HStoreKeyed(HValue* obj, HValue* key, HValue* val, ElementsKind elements_kind,
+ HStoreKeyed(HValue* obj, HValue* key, HValue* val,
+ HValue* backing_store_owner, ElementsKind elements_kind,
StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
int offset = kDefaultKeyedHeaderOffsetSentinel)
: base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
@@ -7185,6 +7106,8 @@ class HStoreKeyed final : public HTemplateInstruction<3>,
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
+ SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
+ DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
if (IsFastObjectElementsKind(elements_kind)) {
SetFlag(kTrackSideEffectDominators);
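
HStoreKeyed picks up the same optional fourth operand, and both constructors now assert that an owner is present exactly when the access targets a fixed typed array. That invariant, pulled out as a stand-alone check:

    #include <cassert>

    // Owner present <=> fixed typed array: external backing stores need
    // their owning object kept alive; ordinary elements must not carry one.
    void CheckOwnerInvariant(bool has_owner, bool is_fixed_typed_array) {
      assert(has_owner == is_fixed_typed_array);
    }

    int main() {
      CheckOwnerInvariant(true, true);    // typed-array store
      CheckOwnerInvariant(false, false);  // ordinary elements store
    }
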
@@ -7239,17 +7162,15 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
return Representation::Tagged();
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
bool HasVectorAndSlot() const {
- DCHECK(!(FLAG_vector_stores && initialization_state_ != MEGAMORPHIC) ||
- !feedback_vector_.is_null());
return !feedback_vector_.is_null();
}
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
feedback_vector_ = vector;
slot_ = slot;
}
@@ -7262,8 +7183,7 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
HValue* value, LanguageMode language_mode,
InlineCacheState initialization_state)
- : slot_(FeedbackVectorICSlot::Invalid()),
- language_mode_(language_mode),
+ : language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, object);
SetOperandAt(1, key);
@@ -7273,7 +7193,7 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
}
Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -7485,75 +7405,6 @@ class HStringCharFromCode final : public HTemplateInstruction<2> {
};
-template <int V>
-class HMaterializedLiteral : public HTemplateInstruction<V> {
- public:
- HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
- : literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
- this->set_representation(Representation::Tagged());
- }
-
- HMaterializedLiteral<V>(int index, int depth)
- : literal_index_(index), depth_(depth),
- allocation_site_mode_(DONT_TRACK_ALLOCATION_SITE) {
- this->set_representation(Representation::Tagged());
- }
-
- int literal_index() const { return literal_index_; }
- int depth() const { return depth_; }
- AllocationSiteMode allocation_site_mode() const {
- return allocation_site_mode_;
- }
-
- private:
- bool IsDeletable() const final { return true; }
-
- int literal_index_;
- int depth_;
- AllocationSiteMode allocation_site_mode_;
-};
-
-
-class HRegExpLiteral final : public HMaterializedLiteral<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
- Handle<FixedArray>,
- Handle<String>,
- Handle<String>,
- int);
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> literals() { return literals_; }
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
- HRegExpLiteral(HValue* context,
- Handle<FixedArray> literals,
- Handle<String> pattern,
- Handle<String> flags,
- int literal_index)
- : HMaterializedLiteral<1>(literal_index, 0),
- literals_(literals),
- pattern_(pattern),
- flags_(flags) {
- SetOperandAt(0, context);
- SetAllSideEffects();
- set_type(HType::JSObject());
- }
-
- Handle<FixedArray> literals_;
- Handle<String> pattern_;
- Handle<String> flags_;
-};
-
-
class HTypeof final : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
@@ -7680,28 +7531,6 @@ class HToFastProperties final : public HUnaryOperation {
};
-class HDateField final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
-
- Smi* index() const { return index_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField)
-
- private:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
- }
-
- Smi* index_;
-};
-
-
class HSeqStringGetChar final : public HTemplateInstruction<2> {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
@@ -7983,6 +7812,7 @@ class HAllocateBlockContext: public HTemplateInstruction<2> {
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_INSTRUCTIONS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
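
A convention change repeated in every file of this patch: the combined '} }  // namespace v8::internal' closer becomes one closing brace per namespace, each with its own comment. The target shape:

    namespace v8 {
    namespace internal {

    // ... declarations ...

    }  // namespace internal
    }  // namespace v8
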
diff --git a/chromium/v8/src/hydrogen-load-elimination.cc b/chromium/v8/src/crankshaft/hydrogen-load-elimination.cc
index a4536fd750a..da8d1864a6d 100644
--- a/chromium/v8/src/hydrogen-load-elimination.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-load-elimination.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-alias-analysis.h"
-#include "src/hydrogen-flow-engine.h"
-#include "src/hydrogen-instructions.h"
-#include "src/hydrogen-load-elimination.h"
+#include "src/crankshaft/hydrogen-load-elimination.h"
+
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-instructions.h"
namespace v8 {
namespace internal {
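
Besides the path updates, the .cc hunk above reorders the includes: the file's own header moves to the top, separated by a blank line from the remaining includes, which stay alphabetical. Including the primary header first ensures it compiles standalone. The resulting shape:

    // File: src/crankshaft/hydrogen-load-elimination.cc
    #include "src/crankshaft/hydrogen-load-elimination.h"  // own header first

    #include "src/crankshaft/hydrogen-alias-analysis.h"  // then the rest,
    #include "src/crankshaft/hydrogen-flow-engine.h"     // alphabetically
    #include "src/crankshaft/hydrogen-instructions.h"
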
diff --git a/chromium/v8/src/hydrogen-load-elimination.h b/chromium/v8/src/crankshaft/hydrogen-load-elimination.h
index e6b432c6aca..e5656459c95 100644
--- a/chromium/v8/src/hydrogen-load-elimination.h
+++ b/chromium/v8/src/crankshaft/hydrogen-load-elimination.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
-#define V8_HYDROGEN_LOAD_ELIMINATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -22,6 +22,7 @@ class HLoadEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
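
The guard renames simply track the file moves: the macro mirrors the include path, so a header relocating under src/crankshaft/ renames its guard along with the comment on the closing #endif. The pattern:

    // File: src/crankshaft/hydrogen-load-elimination.h
    #ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
    #define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_

    // ... declarations ...

    #endif  // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
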
diff --git a/chromium/v8/src/hydrogen-mark-deoptimize.cc b/chromium/v8/src/crankshaft/hydrogen-mark-deoptimize.cc
index fe7a88614cb..a706d913232 100644
--- a/chromium/v8/src/hydrogen-mark-deoptimize.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-mark-deoptimize.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-mark-deoptimize.h"
+#include "src/crankshaft/hydrogen-mark-deoptimize.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-mark-deoptimize.h b/chromium/v8/src/crankshaft/hydrogen-mark-deoptimize.h
index 52a6ef96c9e..45d40acd953 100644
--- a/chromium/v8/src/hydrogen-mark-deoptimize.h
+++ b/chromium/v8/src/crankshaft/hydrogen-mark-deoptimize.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_MARK_DEOPTIMIZE_H_
-#define V8_HYDROGEN_MARK_DEOPTIMIZE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
+#define V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -47,6 +47,7 @@ class HComputeChangeUndefinedToNaN : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_MARK_DEOPTIMIZE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
diff --git a/chromium/v8/src/hydrogen-mark-unreachable.cc b/chromium/v8/src/crankshaft/hydrogen-mark-unreachable.cc
index affe7ce2054..4e1dd689eea 100644
--- a/chromium/v8/src/hydrogen-mark-unreachable.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-mark-unreachable.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-mark-unreachable.h"
+#include "src/crankshaft/hydrogen-mark-unreachable.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-mark-unreachable.h b/chromium/v8/src/crankshaft/hydrogen-mark-unreachable.h
index d43d22bbba0..1243b1fcbe9 100644
--- a/chromium/v8/src/hydrogen-mark-unreachable.h
+++ b/chromium/v8/src/crankshaft/hydrogen-mark-unreachable.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_
-#define V8_HYDROGEN_MARK_UNREACHABLE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
+#define V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -25,6 +25,7 @@ class HMarkUnreachableBlocksPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_MARK_UNREACHABLE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/chromium/v8/src/hydrogen-osr.cc b/chromium/v8/src/crankshaft/hydrogen-osr.cc
index 8a4780c3d77..c98bbf627f9 100644
--- a/chromium/v8/src/hydrogen-osr.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-osr.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-osr.h b/chromium/v8/src/crankshaft/hydrogen-osr.h
index 433548c1a8e..e2cfd304282 100644
--- a/chromium/v8/src/hydrogen-osr.h
+++ b/chromium/v8/src/crankshaft/hydrogen-osr.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_OSR_H_
-#define V8_HYDROGEN_OSR_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_OSR_H_
+#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
-#include "src/hydrogen.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/crankshaft/hydrogen.h"
#include "src/zone.h"
namespace v8 {
@@ -49,6 +49,7 @@ class HOsrBuilder : public ZoneObject {
ZoneList<HUnknownOSRValue*>* osr_values_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_OSR_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_OSR_H_
diff --git a/chromium/v8/src/hydrogen-range-analysis.cc b/chromium/v8/src/crankshaft/hydrogen-range-analysis.cc
index c81dc1d3659..f5eba5e571e 100644
--- a/chromium/v8/src/hydrogen-range-analysis.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-range-analysis.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-range-analysis.h"
+#include "src/crankshaft/hydrogen-range-analysis.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-range-analysis.h b/chromium/v8/src/crankshaft/hydrogen-range-analysis.h
index 1269ec7529c..cff7026e14a 100644
--- a/chromium/v8/src/hydrogen-range-analysis.h
+++ b/chromium/v8/src/crankshaft/hydrogen-range-analysis.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_RANGE_ANALYSIS_H_
-#define V8_HYDROGEN_RANGE_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -45,6 +45,7 @@ class HRangeAnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_RANGE_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
diff --git a/chromium/v8/src/hydrogen-redundant-phi.cc b/chromium/v8/src/crankshaft/hydrogen-redundant-phi.cc
index 1b3c94a3db0..ef8b29159d4 100644
--- a/chromium/v8/src/hydrogen-redundant-phi.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-redundant-phi.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-redundant-phi.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-redundant-phi.h b/chromium/v8/src/crankshaft/hydrogen-redundant-phi.h
index 7f5ec4e52dd..e8735c82d39 100644
--- a/chromium/v8/src/hydrogen-redundant-phi.h
+++ b/chromium/v8/src/crankshaft/hydrogen-redundant-phi.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_REDUNDANT_PHI_H_
-#define V8_HYDROGEN_REDUNDANT_PHI_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
+#define V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -28,6 +28,7 @@ class HRedundantPhiEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_REDUNDANT_PHI_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
diff --git a/chromium/v8/src/hydrogen-removable-simulates.cc b/chromium/v8/src/crankshaft/hydrogen-removable-simulates.cc
index eb13cb28bd4..ceef7430eba 100644
--- a/chromium/v8/src/hydrogen-removable-simulates.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-removable-simulates.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-flow-engine.h"
-#include "src/hydrogen-instructions.h"
-#include "src/hydrogen-removable-simulates.h"
+#include "src/crankshaft/hydrogen-removable-simulates.h"
+
+#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-instructions.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-removable-simulates.h b/chromium/v8/src/crankshaft/hydrogen-removable-simulates.h
index 9bd25056bdf..34500012cbf 100644
--- a/chromium/v8/src/hydrogen-removable-simulates.h
+++ b/chromium/v8/src/crankshaft/hydrogen-removable-simulates.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_REMOVABLE_SIMULATES_H_
-#define V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
+#define V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class HMergeRemovableSimulatesPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_REMOVABLE_SIMULATES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/chromium/v8/src/hydrogen-representation-changes.cc b/chromium/v8/src/crankshaft/hydrogen-representation-changes.cc
index 4af4e01a5b8..32b614c56c3 100644
--- a/chromium/v8/src/hydrogen-representation-changes.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-representation-changes.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-representation-changes.h"
+#include "src/crankshaft/hydrogen-representation-changes.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-representation-changes.h b/chromium/v8/src/crankshaft/hydrogen-representation-changes.h
index 2f5958a70f3..d8403947c35 100644
--- a/chromium/v8/src/hydrogen-representation-changes.h
+++ b/chromium/v8/src/crankshaft/hydrogen-representation-changes.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_REPRESENTATION_CHANGES_H_
-#define V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
+#define V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -27,6 +27,7 @@ class HRepresentationChangesPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_REPRESENTATION_CHANGES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
diff --git a/chromium/v8/src/hydrogen-sce.cc b/chromium/v8/src/crankshaft/hydrogen-sce.cc
index 235a94142de..91e91d20334 100644
--- a/chromium/v8/src/hydrogen-sce.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-sce.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-sce.h"
+#include "src/crankshaft/hydrogen-sce.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-sce.h b/chromium/v8/src/crankshaft/hydrogen-sce.h
index 276d3486764..bb896bad6b2 100644
--- a/chromium/v8/src/hydrogen-sce.h
+++ b/chromium/v8/src/crankshaft/hydrogen-sce.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_SCE_H_
-#define V8_HYDROGEN_SCE_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_SCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_SCE_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -20,6 +20,7 @@ class HStackCheckEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_SCE_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_SCE_H_
diff --git a/chromium/v8/src/hydrogen-store-elimination.cc b/chromium/v8/src/crankshaft/hydrogen-store-elimination.cc
index f04ec44e44b..ba32c8ad6b4 100644
--- a/chromium/v8/src/hydrogen-store-elimination.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-store-elimination.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-instructions.h"
-#include "src/hydrogen-store-elimination.h"
+#include "src/crankshaft/hydrogen-store-elimination.h"
+
+#include "src/crankshaft/hydrogen-instructions.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-store-elimination.h b/chromium/v8/src/crankshaft/hydrogen-store-elimination.h
index 35a23a26602..2a9e0c1488e 100644
--- a/chromium/v8/src/hydrogen-store-elimination.h
+++ b/chromium/v8/src/crankshaft/hydrogen-store-elimination.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_STORE_ELIMINATION_H_
-#define V8_HYDROGEN_STORE_ELIMINATION_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
-#include "src/hydrogen.h"
-#include "src/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
@@ -29,6 +29,7 @@ class HStoreEliminationPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif
+#endif // V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
diff --git a/chromium/v8/src/hydrogen-types.cc b/chromium/v8/src/crankshaft/hydrogen-types.cc
index 7c50a1d8876..9c5e34194ee 100644
--- a/chromium/v8/src/hydrogen-types.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-types.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-types.h"
+#include "src/crankshaft/hydrogen-types.h"
#include "src/ostreams.h"
#include "src/types-inl.h"
diff --git a/chromium/v8/src/hydrogen-types.h b/chromium/v8/src/crankshaft/hydrogen-types.h
index fe13345f76a..87148ee4cd8 100644
--- a/chromium/v8/src/hydrogen-types.h
+++ b/chromium/v8/src/crankshaft/hydrogen-types.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef HYDROGEN_TYPES_H_
-#define HYDROGEN_TYPES_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_TYPES_H_
+#define V8_CRANKSHAFT_HYDROGEN_TYPES_H_
#include <climits>
#include <iosfwd>
@@ -86,6 +86,7 @@ class HType final {
std::ostream& operator<<(std::ostream& os, const HType& t);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // HYDROGEN_TYPES_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_TYPES_H_
diff --git a/chromium/v8/src/hydrogen-uint32-analysis.cc b/chromium/v8/src/crankshaft/hydrogen-uint32-analysis.cc
index c6cbc9bc357..ac4a63f8f23 100644
--- a/chromium/v8/src/hydrogen-uint32-analysis.cc
+++ b/chromium/v8/src/crankshaft/hydrogen-uint32-analysis.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen-uint32-analysis.h"
+#include "src/crankshaft/hydrogen-uint32-analysis.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/hydrogen-uint32-analysis.h b/chromium/v8/src/crankshaft/hydrogen-uint32-analysis.h
index 4d2797fa3a1..0d959b59531 100644
--- a/chromium/v8/src/hydrogen-uint32-analysis.h
+++ b/chromium/v8/src/crankshaft/hydrogen-uint32-analysis.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_UINT32_ANALYSIS_H_
-#define V8_HYDROGEN_UINT32_ANALYSIS_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
@@ -31,6 +31,7 @@ class HUint32AnalysisPhase : public HPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_UINT32_ANALYSIS_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
diff --git a/chromium/v8/src/hydrogen.cc b/chromium/v8/src/crankshaft/hydrogen.cc
index 901e10721d3..98337be052e 100644
--- a/chromium/v8/src/hydrogen.cc
+++ b/chromium/v8/src/crankshaft/hydrogen.cc
@@ -2,63 +2,63 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
#include <sstream>
#include "src/allocation-site-scopes.h"
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopeinfo.h"
#include "src/code-factory.h"
+#include "src/crankshaft/hydrogen-bce.h"
+#include "src/crankshaft/hydrogen-bch.h"
+#include "src/crankshaft/hydrogen-canonicalize.h"
+#include "src/crankshaft/hydrogen-check-elimination.h"
+#include "src/crankshaft/hydrogen-dce.h"
+#include "src/crankshaft/hydrogen-dehoist.h"
+#include "src/crankshaft/hydrogen-environment-liveness.h"
+#include "src/crankshaft/hydrogen-escape-analysis.h"
+#include "src/crankshaft/hydrogen-gvn.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
+#include "src/crankshaft/hydrogen-infer-types.h"
+#include "src/crankshaft/hydrogen-load-elimination.h"
+#include "src/crankshaft/hydrogen-mark-deoptimize.h"
+#include "src/crankshaft/hydrogen-mark-unreachable.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-range-analysis.h"
+#include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-removable-simulates.h"
+#include "src/crankshaft/hydrogen-representation-changes.h"
+#include "src/crankshaft/hydrogen-sce.h"
+#include "src/crankshaft/hydrogen-store-elimination.h"
+#include "src/crankshaft/hydrogen-uint32-analysis.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/crankshaft/typing.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/hydrogen-bce.h"
-#include "src/hydrogen-bch.h"
-#include "src/hydrogen-canonicalize.h"
-#include "src/hydrogen-check-elimination.h"
-#include "src/hydrogen-dce.h"
-#include "src/hydrogen-dehoist.h"
-#include "src/hydrogen-environment-liveness.h"
-#include "src/hydrogen-escape-analysis.h"
-#include "src/hydrogen-gvn.h"
-#include "src/hydrogen-infer-representation.h"
-#include "src/hydrogen-infer-types.h"
-#include "src/hydrogen-load-elimination.h"
-#include "src/hydrogen-mark-deoptimize.h"
-#include "src/hydrogen-mark-unreachable.h"
-#include "src/hydrogen-osr.h"
-#include "src/hydrogen-range-analysis.h"
-#include "src/hydrogen-redundant-phi.h"
-#include "src/hydrogen-removable-simulates.h"
-#include "src/hydrogen-representation-changes.h"
-#include "src/hydrogen-sce.h"
-#include "src/hydrogen-store-elimination.h"
-#include "src/hydrogen-uint32-analysis.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
// GetRootConstructor
#include "src/ic/ic-inl.h"
#include "src/isolate-inl.h"
-#include "src/lithium-allocator.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
-#include "src/scopeinfo.h"
-#include "src/typing.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-codegen-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
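
The include ladder above only changes paths, but the idiom it preserves is worth noting: exactly one lithium-codegen backend header is selected per target architecture, and an unrecognized target is a hard compile error rather than a silent fallback. Reduced to a skeleton with hypothetical macros and headers:

    #if defined(TARGET_ARCH_A)
    #include "backend_a.h"  // hypothetical backend header
    #elif defined(TARGET_ARCH_B)
    #include "backend_b.h"
    #else
    #error Unsupported target architecture.
    #endif
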
@@ -1372,7 +1372,8 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(
HValue* checked_elements = environment()->Top();
// Write zero to ensure that the new element is initialized with some smi.
- Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), kind);
+ Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), nullptr,
+ kind);
}
length_checker.Else();
@@ -1594,18 +1595,11 @@ void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
- STATIC_ASSERT(JS_BUILTINS_OBJECT_TYPE == JS_GLOBAL_OBJECT_TYPE + 1);
- HValue* min_global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
- HValue* max_global_type = Add<HConstant>(JS_BUILTINS_OBJECT_TYPE);
+ HValue* global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
IfBuilder if_global_object(this);
- if_global_object.If<HCompareNumericAndBranch>(instance_type,
- max_global_type,
- Token::LTE);
- if_global_object.And();
- if_global_object.If<HCompareNumericAndBranch>(instance_type,
- min_global_type,
- Token::GTE);
+ if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
+ Token::EQ);
if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
if_global_object.End();
}
@@ -1683,7 +1677,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
LanguageMode language_mode) {
HValue* capacity =
Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
- nullptr, FAST_ELEMENTS);
+ nullptr, nullptr, FAST_ELEMENTS);
HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
mask->ChangeRepresentation(Representation::Integer32());
@@ -1714,7 +1708,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
key_index->ClearFlag(HValue::kCanOverflow);
HValue* candidate_key =
- Add<HLoadKeyed>(elements, key_index, nullptr, FAST_ELEMENTS);
+ Add<HLoadKeyed>(elements, key_index, nullptr, nullptr, FAST_ELEMENTS);
IfBuilder if_undefined(this);
if_undefined.If<HCompareObjectEqAndBranch>(candidate_key,
graph()->GetConstantUndefined());
@@ -1757,7 +1751,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
if_update_with_internalized.Then();
// Replace a key that is a non-internalized string by the equivalent
// internalized string for faster further lookups.
- Add<HStoreKeyed>(elements, key_index, key, FAST_ELEMENTS);
+ Add<HStoreKeyed>(elements, key_index, key, nullptr, FAST_ELEMENTS);
if_update_with_internalized.Else();
if_update_with_internalized.JoinContinuation(&found_key_match_continuation);
@@ -1770,8 +1764,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
HValue* details_index =
AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2));
details_index->ClearFlag(HValue::kCanOverflow);
- HValue* details =
- Add<HLoadKeyed>(elements, details_index, nullptr, FAST_ELEMENTS);
+ HValue* details = Add<HLoadKeyed>(elements, details_index, nullptr, nullptr,
+ FAST_ELEMENTS);
int details_mask = PropertyDetails::TypeField::kMask;
details = AddUncasted<HBitwise>(Token::BIT_AND, details,
Add<HConstant>(details_mask));
@@ -1782,7 +1776,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
HValue* result_index =
AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
result_index->ClearFlag(HValue::kCanOverflow);
- Push(Add<HLoadKeyed>(elements, result_index, nullptr, FAST_ELEMENTS));
+ Push(Add<HLoadKeyed>(elements, result_index, nullptr, nullptr,
+ FAST_ELEMENTS));
details_compare.Else();
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
@@ -1852,7 +1847,7 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HValue* index,
HValue* input) {
NoObservableSideEffectsScope scope(this);
- HConstant* max_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
Add<HBoundsCheck>(length, max_length);
// Generate size calculation code here in order to make it dominate
@@ -1866,11 +1861,9 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
NOT_TENURED, JS_ARRAY_TYPE);
// Initialize the JSRegExpResult header.
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HValue* native_context = Add<HLoadNamedField>(
- global_object, nullptr, HObjectAccess::ForGlobalObjectNativeContext());
+ context(), nullptr,
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
@@ -1954,7 +1947,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Load the key.
HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Check if object == key.
IfBuilder if_objectiskey(this);
@@ -1989,8 +1982,9 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Load the key.
HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
- HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+ HValue* key =
+ Add<HLoadKeyed>(number_string_cache, key_index, nullptr, nullptr,
+ FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Check if the key is a heap number and compare it with the object.
IfBuilder if_keyisnotsmi(this);
@@ -2042,7 +2036,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Load the value in case of cache hit.
HValue* key_index = Pop();
HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
- Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr,
+ Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr, nullptr,
FAST_ELEMENTS, ALLOW_RETURN_HOLE));
}
if_found.Else();
@@ -2088,7 +2082,7 @@ HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
// First check whether {receiver} is already a spec object (fast case).
IfBuilder receiver_is_not_spec_object(this);
receiver_is_not_spec_object.If<HCompareNumericAndBranch>(
- receiver_instance_type, Add<HConstant>(FIRST_SPEC_OBJECT_TYPE),
+ receiver_instance_type, Add<HConstant>(FIRST_JS_RECEIVER_TYPE),
Token::LT);
receiver_is_not_spec_object.Then();
{
@@ -2126,7 +2120,7 @@ HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
// Determine the initial map for the global constructor.
HValue* constructor = Add<HLoadKeyed>(native_context, constructor_index,
- nullptr, FAST_ELEMENTS);
+ nullptr, nullptr, FAST_ELEMENTS);
HValue* constructor_initial_map = Add<HLoadNamedField>(
constructor, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
// Allocate and initialize a JSValue wrapper.
@@ -2411,55 +2405,65 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
- // Allocate the string object. HAllocate does not care whether we pass
- // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
- HAllocate* result = BuildAllocate(
- size, HType::String(), STRING_TYPE, allocation_mode);
- Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
-
- // Initialize the string fields.
- Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
- Add<HConstant>(String::kEmptyHashField));
- Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
-
- // Copy characters to the result string.
- IfBuilder if_twobyte(this);
- if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
- if_twobyte.Then();
+ IfBuilder if_size(this);
+ if_size.If<HCompareNumericAndBranch>(
+ size, Add<HConstant>(Page::kMaxRegularHeapObjectSize), Token::LT);
+ if_size.Then();
{
- // Copy characters from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- result, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- left_length);
-
- // Copy characters from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING,
- result, left_length, String::TWO_BYTE_ENCODING,
- right_length);
- }
- if_twobyte.Else();
- {
- // Copy characters from the left string.
- BuildCopySeqStringChars(
- left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- result, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- left_length);
-
- // Copy characters from the right string.
- BuildCopySeqStringChars(
- right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING,
- result, left_length, String::ONE_BYTE_ENCODING,
- right_length);
- }
- if_twobyte.End();
+ // Allocate the string object. HAllocate does not care whether we pass
+ // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
+ HAllocate* result =
+ BuildAllocate(size, HType::String(), STRING_TYPE, allocation_mode);
+ Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
+
+ // Initialize the string fields.
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+ Add<HConstant>(String::kEmptyHashField));
+ Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+
+ // Copy characters to the result string.
+ IfBuilder if_twobyte(this);
+ if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
+ if_twobyte.Then();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
+ graph()->GetConstant0(), String::TWO_BYTE_ENCODING, left_length);
+
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
+ left_length, String::TWO_BYTE_ENCODING, right_length);
+ }
+ if_twobyte.Else();
+ {
+ // Copy characters from the left string.
+ BuildCopySeqStringChars(
+ left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
+ graph()->GetConstant0(), String::ONE_BYTE_ENCODING, left_length);
+
+ // Copy characters from the right string.
+ BuildCopySeqStringChars(
+ right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
+ left_length, String::ONE_BYTE_ENCODING, right_length);
+ }
+ if_twobyte.End();
- // Count the native string addition.
- AddIncrementCounter(isolate()->counters()->string_add_native());
+ // Count the native string addition.
+ AddIncrementCounter(isolate()->counters()->string_add_native());
- // Return the sequential string.
- Push(result);
+ // Return the sequential string.
+ Push(result);
+ }
+ if_size.Else();
+ {
+    // Fall back to the runtime to add the two strings. The string has to be
+ // allocated in LO space.
+ Add<HPushArguments>(left, right);
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
+ }
+ if_size.End();
}
if_sameencodingandsequential.Else();
{
@@ -2587,7 +2591,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
key, graph()->GetConstant0(), Token::GTE);
negative_checker.Then();
HInstruction* result = AddElementAccess(
- backing_store, key, val, bounds_check, elements_kind, access_type);
+ backing_store, key, val, bounds_check, checked_object->ActualValue(),
+ elements_kind, access_type);
negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
negative_checker.End();
length_checker.End();
@@ -2595,9 +2600,9 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
} else {
DCHECK(store_mode == STANDARD_STORE);
checked_key = Add<HBoundsCheck>(key, length);
- return AddElementAccess(
- backing_store, checked_key, val,
- checked_object, elements_kind, access_type);
+ return AddElementAccess(backing_store, checked_key, val, checked_object,
+ checked_object->ActualValue(), elements_kind,
+ access_type);
}
}
DCHECK(fast_smi_only_elements ||
@@ -2636,7 +2641,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
}
}
- return AddElementAccess(elements, checked_key, val, checked_object,
+ return AddElementAccess(elements, checked_key, val, checked_object, nullptr,
elements_kind, access_type, load_mode);
}
@@ -2658,7 +2663,7 @@ HValue* HGraphBuilder::BuildAllocateArrayFromLength(
HValue* constant_zero = graph()->GetConstant0();
HConstant* max_alloc_length =
- Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+ Add<HConstant>(JSArray::kInitialMaxFastElementArray);
HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
max_alloc_length);
IfBuilder if_builder(this);
@@ -2799,26 +2804,23 @@ void HGraphBuilder::BuildJSArrayHeader(HValue* array,
HInstruction* HGraphBuilder::AddElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- PropertyAccessType access_type,
- LoadKeyedHoleMode load_mode) {
+ HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
+ HValue* backing_store_owner, ElementsKind elements_kind,
+ PropertyAccessType access_type, LoadKeyedHoleMode load_mode) {
if (access_type == STORE) {
DCHECK(val != NULL);
if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
val = Add<HClampToUint8>(val);
}
- return Add<HStoreKeyed>(elements, checked_key, val, elements_kind,
- STORE_TO_INITIALIZED_ENTRY);
+ return Add<HStoreKeyed>(elements, checked_key, val, backing_store_owner,
+ elements_kind, STORE_TO_INITIALIZED_ENTRY);
}
DCHECK(access_type == LOAD);
DCHECK(val == NULL);
- HLoadKeyed* load = Add<HLoadKeyed>(
- elements, checked_key, dependency, elements_kind, load_mode);
+ HLoadKeyed* load =
+ Add<HLoadKeyed>(elements, checked_key, dependency, backing_store_owner,
+ elements_kind, load_mode);
if (elements_kind == UINT32_ELEMENTS) {
graph()->RecordUint32Instruction(load);
}
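
Throughout this file, HLoadKeyed and HStoreKeyed now take an extra backing_store_owner operand (nullptr where there is no separate owner), and AddElementAccess above simply threads it through. A hedged sketch of why such an operand helps, using illustrative names rather than the V8 API:

    #include <cstddef>
    #include <memory>
    #include <vector>

    // For typed arrays the elements pointer is external memory owned by a
    // buffer object. Keeping the owner itself live across the access
    // ensures the backing store cannot be reclaimed mid-access.
    struct ArrayBuffer {
      std::vector<double> storage;
    };

    double LoadElement(const std::shared_ptr<ArrayBuffer>& owner,
                       std::size_t index) {
      // Holding `owner` here plays the role of the backing_store_owner
      // operand: it pins the storage for the duration of the load.
      return owner->storage.at(index);
    }
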
@@ -2919,7 +2921,7 @@ void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
if (initial_capacity >= 0) {
for (int i = 0; i < initial_capacity; i++) {
HInstruction* key = Add<HConstant>(i);
- Add<HStoreKeyed>(elements, key, value, elements_kind);
+ Add<HStoreKeyed>(elements, key, value, nullptr, elements_kind);
}
} else {
// Carefully loop backwards so that the "from" remains live through the loop
@@ -2933,7 +2935,7 @@ void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1());
adjusted_key->ClearFlag(HValue::kCanOverflow);
- Add<HStoreKeyed>(elements, adjusted_key, value, elements_kind);
+ Add<HStoreKeyed>(elements, adjusted_key, value, nullptr, elements_kind);
builder.EndBody();
}
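
The fill loop touched here runs backwards (see the comment above) so that only the decreasing key needs to stay live across iterations. The same shape as plain C++, a sketch only:

    #include <vector>

    // Precondition: elements.size() >= static_cast<std::size_t>(count).
    void FillBackwards(std::vector<int>& elements, int count, int value) {
      for (int key = count; key > 0;) {
        --key;                  // adjusted_key = key - 1, cannot overflow
        elements[key] = value;  // StoreKeyed(elements, adjusted_key, value)
      }
    }
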
@@ -2976,9 +2978,10 @@ void HGraphBuilder::BuildCopyProperties(HValue* from_properties,
key = AddUncasted<HSub>(key, graph()->GetConstant1());
key->ClearFlag(HValue::kCanOverflow);
- HValue* element = Add<HLoadKeyed>(from_properties, key, nullptr, kind);
+ HValue* element =
+ Add<HLoadKeyed>(from_properties, key, nullptr, nullptr, kind);
- Add<HStoreKeyed>(to_properties, key, element, kind);
+ Add<HStoreKeyed>(to_properties, key, element, nullptr, kind);
builder.EndBody();
}
@@ -3015,9 +3018,10 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
// Unroll the loop for small elements kinds.
for (int i = 0; i < constant_capacity; i++) {
HValue* key_constant = Add<HConstant>(i);
- HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant,
- nullptr, from_elements_kind);
- Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind);
+ HInstruction* value = Add<HLoadKeyed>(
+ from_elements, key_constant, nullptr, nullptr, from_elements_kind);
+ Add<HStoreKeyed>(to_elements, key_constant, value, nullptr,
+ to_elements_kind);
}
} else {
if (!pre_fill_with_holes &&
@@ -3034,7 +3038,7 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
key = AddUncasted<HSub>(key, graph()->GetConstant1());
key->ClearFlag(HValue::kCanOverflow);
- HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr,
+ HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr, nullptr,
from_elements_kind, ALLOW_RETURN_HOLE);
ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
@@ -3049,13 +3053,15 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
? Add<HConstant>(HConstant::kHoleNaN)
: graph()->GetConstantHole();
- Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
+ Add<HStoreKeyed>(to_elements, key, hole_constant, nullptr, kind);
if_hole.Else();
- HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ HStoreKeyed* store =
+ Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
if_hole.End();
} else {
- HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+ HStoreKeyed* store =
+ Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
@@ -3128,10 +3134,10 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
// This function implicitly relies on the fact that the
// FastCloneShallowArrayStub is called only for literals shorter than
- // JSObject::kInitialMaxFastElementArray.
+ // JSArray::kInitialMaxFastElementArray.
// Can't add HBoundsCheck here because otherwise the stub will eagerly require a frame.
HConstant* size_upper_bound = EstablishElementsAllocationSize(
- kind, JSObject::kInitialMaxFastElementArray);
+ kind, JSArray::kInitialMaxFastElementArray);
elements->set_size_upper_bound(size_upper_bound);
Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
@@ -3257,13 +3263,9 @@ void HGraphBuilder::BuildCreateAllocationMemento(
HInstruction* HGraphBuilder::BuildGetNativeContext() {
- // Get the global object, then the native context
- HValue* global_object = Add<HLoadNamedField>(
+ return Add<HLoadNamedField>(
context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- return Add<HLoadNamedField>(global_object, nullptr,
- HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset));
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
}
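
Both BuildGetNativeContext overloads (and AddLoadJSBuiltin further down) lose a hop: instead of loading the global object and then its native-context field, the native context is read straight from a dedicated context slot. Schematically, with illustrative types:

    struct NativeContext {};

    struct Context {
      // Slot corresponding to Context::NATIVE_CONTEXT_INDEX.
      NativeContext* native_context;
    };

    // Before: context -> global object -> native context (two loads).
    // After: a single field load on the context itself.
    NativeContext* GetNativeContext(const Context* context) {
      return context->native_context;
    }
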
@@ -3271,12 +3273,9 @@ HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
// Get the global object, then the native context
HInstruction* context = Add<HLoadNamedField>(
closure, nullptr, HObjectAccess::ForFunctionContextPointer());
- HInstruction* global_object = Add<HLoadNamedField>(
+ return Add<HLoadNamedField>(
context, nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset);
- return Add<HLoadNamedField>(global_object, nullptr, access);
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
}
@@ -3342,7 +3341,8 @@ HInstruction* HGraphBuilder::BuildGetArrayFunction() {
HInstruction* native_context = BuildGetNativeContext();
HInstruction* index =
Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
- return Add<HLoadKeyed>(native_context, index, nullptr, FAST_ELEMENTS);
+ return Add<HLoadKeyed>(native_context, index, nullptr, nullptr,
+ FAST_ELEMENTS);
}
@@ -3427,16 +3427,9 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
? builder()->BuildGetNativeContext(constructor_function_)
: builder()->BuildGetNativeContext();
- HInstruction* index = builder()->Add<HConstant>(
- static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
-
- HInstruction* map_array =
- builder()->Add<HLoadKeyed>(native_context, index, nullptr, FAST_ELEMENTS);
-
- HInstruction* kind_index = builder()->Add<HConstant>(kind_);
-
- return builder()->Add<HLoadKeyed>(map_array, kind_index, nullptr,
- FAST_ELEMENTS);
+ HObjectAccess access =
+ HObjectAccess::ForContextSlot(Context::ArrayMapIndex(kind_));
+ return builder()->Add<HLoadNamedField>(native_context, nullptr, access);
}
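
EmitMapCode above similarly shrinks from two keyed loads (the JS_ARRAY_MAPS array, then an index by elements kind) to one named load from a slot computed by Context::ArrayMapIndex. A hypothetical mirror of that helper, assuming the per-kind array maps occupy consecutive context slots:

    // Illustrative base slot; the real constant lives in Context.
    constexpr int kFirstJSArrayMapSlot = 0;

    constexpr int ArrayMapIndex(int elements_kind) {
      return kFirstJSArrayMapSlot + elements_kind;
    }
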
@@ -3504,6 +3497,11 @@ HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* elements_size =
builder()->BuildCalculateElementsSize(kind_, capacity);
+ // Bail out for large objects.
+ HValue* max_regular_heap_object_size =
+ builder()->Add<HConstant>(Page::kMaxRegularHeapObjectSize);
+ builder()->Add<HBoundsCheck>(elements_size, max_regular_heap_object_size);
+
// Allocate (dealing with failure appropriately)
HAllocate* array_object = builder()->AllocateJSArrayObject(mode_);
@@ -3542,12 +3540,7 @@ HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kNativeContextOffset);
- HValue* native_context = Add<HLoadNamedField>(global_object, nullptr, access);
+ HValue* native_context = BuildGetNativeContext();
HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
return Add<HLoadNamedField>(native_context, nullptr, function_access);
}
@@ -3566,7 +3559,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_ = &initial_function_state_;
- InitializeAstVisitor(info->isolate(), info->zone());
+ InitializeAstVisitor(info->isolate());
if (top_info()->is_tracking_positions()) {
SetSourcePosition(info->shared_info()->start_position());
}
@@ -3679,7 +3672,7 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
- start_environment_->set_ast_id(BailoutId::Prologue());
+ start_environment_->set_ast_id(BailoutId::FunctionContext());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
}
@@ -4414,7 +4407,7 @@ void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, expr, true_block, false_block);
+ TestContext for_control(this, expr, true_block, false_block);
Visit(expr);
}
@@ -4690,6 +4683,12 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
environment()->Bind(scope->arguments(), graph()->GetArgumentsObject());
}
+ int rest_index;
+ Variable* rest = scope->rest_parameter(&rest_index);
+ if (rest) {
+ return Bailout(kRestParameter);
+ }
+
if (scope->this_function_var() != nullptr ||
scope->new_target_var() != nullptr) {
return Bailout(kSuperReference);
@@ -4969,8 +4968,8 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
New<HHasInstanceTypeAndBranch>(return_value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ FIRST_JS_RECEIVER_TYPE,
+ LAST_JS_RECEIVER_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
@@ -5050,7 +5049,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Generate a compare and branch.
- CHECK_ALIVE(VisitForValue(clause->label()));
+ CHECK_BAILOUT(VisitForValue(clause->label()));
+ if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
HValue* label_value = Pop();
Type* label_type = clause->label()->bounds().lower;
@@ -5384,7 +5384,7 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
HValue* key =
Add<HLoadKeyed>(environment()->ExpressionStackAt(2), // Enum cache.
- index, index, FAST_ELEMENTS);
+ index, index, nullptr, FAST_ELEMENTS);
if (fast) {
// Check if the expected map still matches that of the enumerable.
@@ -5520,6 +5520,14 @@ void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
}
+void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
+ DCHECK(!HasStackOverflow());
+ DCHECK(current_block() != NULL);
+ DCHECK(current_block()->HasPredecessor());
+ return Bailout(kDoExpression);
+}
+
+
void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
@@ -5618,7 +5626,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
- Handle<GlobalObject> global(current_info()->global_object());
+ Handle<JSGlobalObject> global(current_info()->global_object());
// Lookup in script contexts.
{
@@ -5696,20 +5704,10 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
instr->SetDependsOnFlag(kGlobalVars);
return ast_context()->ReturnInstruction(instr, expr->id());
}
- } else if (variable->IsGlobalSlot()) {
- DCHECK(variable->index() > 0);
- DCHECK(variable->IsStaticGlobalObjectProperty());
- int slot_index = variable->index();
- int depth = scope()->ContextChainLength(variable->scope());
-
- HLoadGlobalViaContext* instr =
- New<HLoadGlobalViaContext>(depth, slot_index);
- return ast_context()->ReturnInstruction(instr, expr->id());
-
} else {
HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ BuildGetNativeContext(), nullptr,
+ HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
global_object, variable->name(), ast_context()->typeof_mode());
instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
@@ -5768,12 +5766,14 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- Handle<LiteralsArray> literals(closure->literals());
- HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
- expr->pattern(),
- expr->flags(),
- expr->literal_index());
+ Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ HValue* values[] = {
+ context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
+ Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
+ HConstant* stub_value = Add<HConstant>(callable.code());
+ HInstruction* instr = New<HCallWithDescriptor>(
+ stub_value, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)), NORMAL_CALL);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5884,13 +5884,11 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
site_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
- Handle<LiteralsArray> closure_literals(closure->literals(), isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
- Add<HPushArguments>(Add<HConstant>(closure_literals),
- Add<HConstant>(literal_index),
+ Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
Add<HConstant>(constant_properties),
Add<HConstant>(flags));
@@ -5924,7 +5922,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Map> map = property->GetReceiverType();
Handle<String> name = key->AsPropertyName();
HValue* store;
- FeedbackVectorICSlot slot = property->GetSlot();
+ FeedbackVectorSlot slot = property->GetSlot();
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
@@ -5989,7 +5987,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- expr->BuildConstantElements(isolate());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
HInstruction* literal;
@@ -6054,10 +6051,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
- Add<HPushArguments>(Add<HConstant>(literals),
- Add<HConstant>(literal_index),
- Add<HConstant>(constants),
- Add<HConstant>(flags));
+ Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
+ Add<HConstant>(constants), Add<HConstant>(flags));
Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
@@ -6069,8 +6064,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
- // The literal index is on the stack, too.
- Push(Add<HConstant>(expr->literal_index()));
HInstruction* elements = NULL;
@@ -6099,7 +6092,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
case FAST_HOLEY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
- HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value,
+ HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value, nullptr,
boilerplate_elements_kind);
instr->SetUninitialized(uninitialized);
break;
@@ -6112,7 +6105,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Add<HSimulate>(expr->GetIdForElement(i));
}
- Drop(1); // array literal index
return ast_context()->ReturnValue(Pop());
}
@@ -6498,9 +6490,12 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() {
- JSFunction* ctor = IC::GetRootConstructor(
- *map_, current_info()->closure()->context()->native_context());
- if (ctor != NULL) return handle(ctor->initial_map());
+ Handle<JSFunction> ctor;
+ if (Map::GetConstructorFunction(
+ map_, handle(current_info()->closure()->context()->native_context()))
+ .ToHandle(&ctor)) {
+ return handle(ctor->initial_map());
+ }
return map_;
}
@@ -6581,7 +6576,8 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
if (info->NeedsWrappingFor(info->accessor())) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
- return New<HCallFunction>(function, argument_count, WRAP_AND_CALL);
+ return New<HCallFunction>(function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined);
} else if (FLAG_inline_accessors && can_inline_accessor) {
bool success = info->IsLoad()
? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
@@ -6604,9 +6600,9 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
- SmallMapList* maps, Handle<String> name) {
+ SmallMapList* maps, Handle<Name> name) {
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
@@ -6774,7 +6770,7 @@ static bool AreStringTypes(SmallMapList* maps) {
void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
- FeedbackVectorICSlot slot,
+ FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id,
bool is_uninitialized) {
if (!prop->key()->IsPropertyName()) {
@@ -6834,9 +6830,8 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
- Variable* var, HValue* value, FeedbackVectorICSlot ic_slot,
- BailoutId ast_id) {
- Handle<GlobalObject> global(current_info()->global_object());
+ Variable* var, HValue* value, FeedbackVectorSlot slot, BailoutId ast_id) {
+ Handle<JSGlobalObject> global(current_info()->global_object());
// Lookup in script contexts.
{
@@ -6923,30 +6918,16 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
- } else if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int slot_index = var->index();
- int depth = scope()->ContextChainLength(var->scope());
-
- HStoreGlobalViaContext* instr = Add<HStoreGlobalViaContext>(
- value, depth, slot_index, function_language_mode());
- USE(instr);
- DCHECK(instr->HasObservableSideEffects());
- Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
-
} else {
HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ BuildGetNativeContext(), nullptr,
+ HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
HStoreNamedGeneric* instr =
Add<HStoreNamedGeneric>(global_object, var->name(), value,
function_language_mode(), PREMONOMORPHIC);
- if (FLAG_vector_stores) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- instr->SetVectorAndSlot(vector, ic_slot);
- }
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ instr->SetVectorAndSlot(vector, slot);
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7063,6 +7044,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
VariableProxy* proxy = expr->target()->AsVariableProxy();
Property* prop = expr->target()->AsProperty();
DCHECK(proxy == NULL || prop == NULL);
@@ -7078,11 +7060,11 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
- if (expr->op() != Token::INIT_CONST) {
+ if (expr->op() != Token::INIT) {
return Bailout(kNonInitializerAssignmentToConst);
}
} else if (var->mode() == CONST_LEGACY) {
- if (expr->op() != Token::INIT_CONST_LEGACY) {
+ if (expr->op() != Token::INIT) {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
@@ -7156,14 +7138,13 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
default:
mode = HStoreContextSlot::kNoCheck;
}
- } else if (expr->op() == Token::INIT_VAR ||
- expr->op() == Token::INIT_LET ||
- expr->op() == Token::INIT_CONST) {
- mode = HStoreContextSlot::kNoCheck;
} else {
- DCHECK(expr->op() == Token::INIT_CONST_LEGACY);
-
- mode = HStoreContextSlot::kCheckIgnoreAssignment;
+ DCHECK_EQ(Token::INIT, expr->op());
+ if (var->mode() == CONST_LEGACY) {
+ mode = HStoreContextSlot::kCheckIgnoreAssignment;
+ } else {
+ mode = HStoreContextSlot::kNoCheck;
+ }
}
HValue* context = BuildContextChainWalk(var);
@@ -7248,7 +7229,7 @@ HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) {
HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
@@ -7276,9 +7257,8 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
result->SetVectorAndSlot(vector, slot);
return result;
} else {
- if (FLAG_vector_stores &&
- current_feedback_vector()->GetKind(slot) ==
- FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ if (current_feedback_vector()->GetKind(slot) ==
+ FeedbackVectorSlotKind::KEYED_STORE_IC) {
// It's possible that a keyed store of a constant string was converted
// to a named store. Here, at the last minute, we need to make sure to
// use a generic Keyed Store if we are using the type vector, because
@@ -7294,18 +7274,16 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
object, name, value, function_language_mode(), PREMONOMORPHIC);
- if (FLAG_vector_stores) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
- }
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
return result;
}
}
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
- PropertyAccessType access_type, Expression* expr, FeedbackVectorICSlot slot,
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value) {
if (access_type == LOAD) {
InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
@@ -7323,11 +7301,9 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
} else {
HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
object, key, value, function_language_mode(), PREMONOMORPHIC);
- if (FLAG_vector_stores) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
- }
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
return result;
}
}
@@ -7373,6 +7349,8 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
PrototypeIterator iter(map);
JSObject* holder = NULL;
while (!iter.IsAtEnd()) {
+ // JSProxies can't occur here because we wouldn't have installed a
+ // non-generic IC if there were any.
holder = *PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
}
@@ -7499,7 +7477,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
- Expression* expr, FeedbackVectorICSlot slot, HValue* object, HValue* key,
+ Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
HValue* val, SmallMapList* maps, PropertyAccessType access_type,
KeyedAccessStoreMode store_mode, bool* has_side_effects) {
*has_side_effects = false;
@@ -7632,21 +7610,44 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* obj, HValue* key, HValue* val, Expression* expr,
- FeedbackVectorICSlot slot, BailoutId ast_id, BailoutId return_id,
+ FeedbackVectorSlot slot, BailoutId ast_id, BailoutId return_id,
PropertyAccessType access_type, bool* has_side_effects) {
- if (key->ActualValue()->IsConstant()) {
+ // A keyed name access with type feedback may contain the name.
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ HValue* expected_key = key;
+ if (!key->ActualValue()->IsConstant()) {
+ Name* name = nullptr;
+ if (access_type == LOAD) {
+ KeyedLoadICNexus nexus(vector, slot);
+ name = nexus.FindFirstName();
+ } else {
+ KeyedStoreICNexus nexus(vector, slot);
+ name = nexus.FindFirstName();
+ }
+ if (name != nullptr) {
+ Handle<Name> handle_name(name);
+ expected_key = Add<HConstant>(handle_name);
+ // We need a check against the key.
+ bool in_new_space = isolate()->heap()->InNewSpace(*handle_name);
+ Unique<Name> unique_name = Unique<Name>::CreateUninitialized(handle_name);
+ Add<HCheckValue>(key, unique_name, in_new_space);
+ }
+ }
+ if (expected_key->ActualValue()->IsConstant()) {
Handle<Object> constant =
- HConstant::cast(key->ActualValue())->handle(isolate());
+ HConstant::cast(expected_key->ActualValue())->handle(isolate());
uint32_t array_index;
- if (constant->IsString() &&
- !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) {
+ if ((constant->IsString() &&
+ !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) ||
+ constant->IsSymbol()) {
if (!constant->IsUniqueName()) {
constant = isolate()->factory()->InternalizeString(
Handle<String>::cast(constant));
}
HValue* access =
BuildNamedAccess(access_type, ast_id, return_id, expr, slot, obj,
- Handle<String>::cast(constant), val, false);
+ Handle<Name>::cast(constant), val, false);
if (access == NULL || access->IsPhi() ||
HInstruction::cast(access)->IsLinked()) {
*has_side_effects = false;
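
The block added above mines the keyed-IC feedback: when a non-constant key was always the same name at runtime, the access is specialized into a named access guarded by an HCheckValue on the key. A self-contained sketch of the idea (FindFirstName and the map type are stand-ins for the FeedbackNexus and the receiver):

    #include <optional>
    #include <string>
    #include <unordered_map>

    using Object = std::unordered_map<std::string, int>;

    // Stand-in for KeyedLoadICNexus::FindFirstName(): the single name the
    // IC observed, if any.
    std::optional<std::string> FindFirstName() {
      return std::string("length");
    }

    int KeyedLoad(Object& obj, const std::string& key) {
      if (auto name = FindFirstName()) {
        // The guard corresponding to Add<HCheckValue>(key, ...): the real
        // code deoptimizes on mismatch; here we just fall through.
        if (key == *name) {
          return obj[*name];  // specialized named access
        }
      }
      return obj[key];  // generic keyed access
    }
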
@@ -7817,8 +7818,8 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
HValue* HOptimizedGraphBuilder::BuildNamedAccess(
PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
- Expression* expr, FeedbackVectorICSlot slot, HValue* object,
- Handle<String> name, HValue* value, bool is_uninitialized) {
+ Expression* expr, FeedbackVectorSlot slot, HValue* object,
+ Handle<Name> name, HValue* value, bool is_uninitialized) {
SmallMapList* maps;
ComputeReceiverTypes(expr, object, &maps, zone());
DCHECK(maps != NULL);
@@ -7863,7 +7864,7 @@ void HOptimizedGraphBuilder::PushLoad(Property* expr,
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
BailoutId ast_id) {
HInstruction* instr = NULL;
- if (expr->IsStringAccess()) {
+ if (expr->IsStringAccess() && expr->GetKeyType() == ELEMENT) {
HValue* index = Pop();
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
@@ -7964,16 +7965,15 @@ HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
HValue* fun, HValue* context,
int argument_count, HValue* expected_param_count) {
- ArgumentAdaptorDescriptor descriptor(isolate());
+ HValue* new_target = graph()->GetConstantUndefined();
HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = { context, fun, arity, expected_param_count };
+ HValue* op_vals[] = {context, fun, new_target, arity, expected_param_count};
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- HConstant* adaptor_value = Add<HConstant>(adaptor);
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ HConstant* stub = Add<HConstant>(callable.code());
- return New<HCallWithDescriptor>(adaptor_value, argument_count, descriptor,
+ return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
Vector<HValue*>(op_vals, arraysize(op_vals)));
}
@@ -8149,10 +8149,11 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// use the regular CallFunctionStub for method calls to wrap the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
- HInstruction* call = needs_wrapping
- ? NewUncasted<HCallFunction>(
- function, argument_count, WRAP_AND_CALL)
- : BuildCallConstantFunction(target, argument_count);
+ HInstruction* call =
+ needs_wrapping ? NewUncasted<HCallFunction>(
+ function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined)
+ : BuildCallConstantFunction(target, argument_count);
PushArgumentsFromEnvironment(argument_count);
AddInstruction(call);
Drop(1); // Drop the function.
@@ -8181,10 +8182,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
environment()->SetExpressionStackAt(0, receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- CallFunctionFlags flags = receiver->type().IsJSObject()
- ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
HInstruction* call = New<HCallFunction>(
- function, argument_count, flags);
+ function, argument_count, ConvertReceiverMode::kNotNullOrUndefined);
PushArgumentsFromEnvironment(argument_count);
@@ -8247,7 +8246,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (target_shared->force_inline()) {
return 0;
}
- if (target->IsBuiltin()) {
+ if (target->shared()->IsBuiltin()) {
return kNotInlinable;
}
@@ -8265,11 +8264,12 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
// Target must be inlineable.
- if (!target_shared->IsInlineable()) {
+ BailoutReason noopt_reason = target_shared->disable_optimization_reason();
+ if (!target_shared->IsInlineable() && noopt_reason != kHydrogenFilter) {
TraceInline(target, caller, "target not inlineable");
return kNotInlinable;
}
- if (target_shared->disable_optimization_reason() != kNoReason) {
+ if (noopt_reason != kNoReason && noopt_reason != kHydrogenFilter) {
TraceInline(target, caller, "target contains unsupported syntax [early]");
return kNotInlinable;
}
@@ -8340,6 +8340,11 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
CompilationInfo target_info(&parse_info);
Handle<SharedFunctionInfo> target_shared(target->shared());
+
+ if (IsClassConstructor(target_shared->kind())) {
+ TraceInline(target, caller, "target is classConstructor");
+ return false;
+ }
if (target_shared->HasDebugInfo()) {
TraceInline(target, caller, "target is being debugged");
return false;
@@ -8358,6 +8363,14 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TraceInline(target, caller, "target has context-allocated variables");
return false;
}
+
+ int rest_index;
+ Variable* rest = target_info.scope()->rest_parameter(&rest_index);
+ if (rest) {
+ TraceInline(target, caller, "target uses rest parameters");
+ return false;
+ }
+
FunctionLiteral* function = target_info.literal();
// The following conditions must be checked again after re-parsing, because
@@ -8382,6 +8395,13 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
}
+ // Unsupported variable references present.
+ if (function->scope()->this_function_var() != nullptr ||
+ function->scope()->new_target_var() != nullptr) {
+ TraceInline(target, caller, "target uses new target or this function");
+ return false;
+ }
+
// All declarations must be inlineable.
ZoneList<Declaration*>* decls = target_info.scope()->declarations();
int decl_count = decls->length();
@@ -8392,13 +8412,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
}
- // Generate the deoptimization data for the unoptimized version of
- // the target function if we don't already have it.
- if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
- TraceInline(target, caller, "could not generate deoptimization info");
- return false;
- }
-
// In strong mode it is an error to call a function with too few arguments.
// In that case do not inline because then the arity check would be skipped.
if (is_strong(function->language_mode()) &&
@@ -8408,6 +8421,17 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
return false;
}
+ // Generate the deoptimization data for the unoptimized version of
+ // the target function if we don't already have it.
+ if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
+ TraceInline(target, caller, "could not generate deoptimization info");
+ return false;
+ }
+ // Remember that we inlined this function. This needs to be called right
+ // after the EnsureDeoptimizationSupport call so that the code flusher
+ // does not remove the code with the deoptimization support.
+ top_info()->AddInlinedFunction(target_info.shared_info());
+
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
@@ -8679,7 +8703,7 @@ bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
// static
bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
Handle<Map> receiver_map) {
- return !receiver_map.is_null() &&
+ return !receiver_map.is_null() && receiver_map->prototype()->IsJSObject() &&
receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
!receiver_map->is_dictionary_map() && !receiver_map->is_observed() &&
@@ -8840,16 +8864,16 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
elements_kind, length);
}
reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
- result = AddElementAccess(elements, reduced_length, NULL,
- bounds_check, elements_kind, LOAD);
+ result = AddElementAccess(elements, reduced_length, nullptr,
+ bounds_check, nullptr, elements_kind, LOAD);
HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
? graph()->GetConstantHole()
: Add<HConstant>(HConstant::kHoleNaN);
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
}
- AddElementAccess(
- elements, reduced_length, hole, bounds_check, elements_kind, STORE);
+ AddElementAccess(elements, reduced_length, hole, bounds_check, nullptr,
+ elements_kind, STORE);
Add<HStoreNamedField>(
checked_object, HObjectAccess::ForArrayLength(elements_kind),
reduced_length, STORE_TO_INITIALIZED_ENTRY);
@@ -8974,8 +8998,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
{
// Remember the result.
if (!ast_context()->IsEffect()) {
- Push(AddElementAccess(elements, graph()->GetConstant0(), NULL,
- lengthiszero, kind, LOAD));
+ Push(AddElementAccess(elements, graph()->GetConstant0(), nullptr,
+ lengthiszero, nullptr, kind, LOAD));
}
// Compute the new length.
@@ -8992,10 +9016,11 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
key->ClearFlag(HValue::kCanOverflow);
ElementsKind copy_kind =
kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
- HValue* element = AddUncasted<HLoadKeyed>(
- elements, key, lengthiszero, copy_kind, ALLOW_RETURN_HOLE);
- HStoreKeyed* store =
- Add<HStoreKeyed>(elements, new_key, element, copy_kind);
+ HValue* element =
+ AddUncasted<HLoadKeyed>(elements, key, lengthiszero, nullptr,
+ copy_kind, ALLOW_RETURN_HOLE);
+ HStoreKeyed* store = Add<HStoreKeyed>(elements, new_key, element,
+ nullptr, copy_kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
loop.EndBody();
@@ -9005,8 +9030,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
? graph()->GetConstantHole()
: Add<HConstant>(HConstant::kHoleNaN);
if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
- Add<HStoreKeyed>(
- elements, new_length, hole, kind, INITIALIZING_STORE);
+ Add<HStoreKeyed>(elements, new_length, hole, nullptr, kind,
+ INITIALIZING_STORE);
// Remember new length.
Add<HStoreNamedField>(
@@ -9332,6 +9357,7 @@ bool HOptimizedGraphBuilder::TryIndirectCall(Call* expr) {
}
+// f.apply(...)
void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
CHECK_ALIVE(VisitForValue(args->at(0)));
@@ -9467,8 +9493,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr, kind,
- ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HCompareNumericAndBranch>(element, search_element,
Token::EQ_STRICT);
@@ -9489,8 +9515,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
- kind, ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HIsStringAndBranch>(element);
if_issame.AndIf<HStringCompareAndBranch>(
@@ -9519,8 +9545,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
- kind, ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_element_isnumber(this);
if_element_isnumber.If<HIsSmiAndBranch>(element);
@@ -9551,8 +9577,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
LoopBuilder loop(this, context(), direction);
{
HValue* index = loop.BeginBody(initial, terminating, token);
- HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
- kind, ALLOW_RETURN_HOLE);
+ HValue* element = AddUncasted<HLoadKeyed>(
+ elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
IfBuilder if_issame(this);
if_issame.If<HCompareObjectEqAndBranch>(
element, search_element);
@@ -9682,8 +9708,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// the receiver.
// TODO(verwaest): Support creation of value wrappers directly in
// HWrapReceiver.
- call = New<HCallFunction>(
- function, argument_count, WRAP_AND_CALL);
+ call = New<HCallFunction>(function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined);
} else if (TryInlineCall(expr)) {
return;
} else {
@@ -9706,9 +9732,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
- CallFunctionFlags flags = receiver->type().IsJSObject()
- ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
- call = New<HCallFunction>(function, argument_count, flags);
+ call = New<HCallFunction>(function, argument_count,
+ ConvertReceiverMode::kNotNullOrUndefined);
}
PushArgumentsFromEnvironment(argument_count);
@@ -9733,7 +9758,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitExpressions(expr->arguments()));
- if (expr->IsMonomorphic()) {
+ if (expr->IsMonomorphic() &&
+ !IsClassConstructor(expr->target()->shared()->kind())) {
Add<HCheckValue>(function, expr->target());
// Patch the global object on the stack by the expected receiver.
@@ -9757,8 +9783,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
PushArgumentsFromEnvironment(argument_count);
- HCallFunction* call_function =
- New<HCallFunction>(function, argument_count);
+ HCallFunction* call_function = New<HCallFunction>(
+ function, argument_count, ConvertReceiverMode::kNullOrUndefined);
call = call_function;
if (expr->is_uninitialized() &&
expr->IsUsingCallFeedbackICSlot(isolate())) {
@@ -9766,7 +9792,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// through the type vector.
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
+ FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
call_function->SetVectorAndSlot(vector, slot);
}
}
@@ -9826,6 +9852,7 @@ void HOptimizedGraphBuilder::BuildInlinedCallArray(
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
+ !IsClassConstructor(constructor->shared()->kind()) &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
constructor->initial_map()->instance_size() <
HAllocate::kMaxInlineSize;
@@ -9902,13 +9929,16 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
Handle<JSFunction> constructor = expr->target();
+ DCHECK(
+ constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric) ||
+ constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubApi));
HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
- if (constructor->IsInobjectSlackTrackingInProgress()) {
- constructor->CompleteInobjectSlackTracking();
- }
+ constructor->CompleteInobjectSlackTrackingIfActive();
// Calculate instance size from initial map of constructor.
DCHECK(constructor->has_initial_map());
@@ -9965,18 +9995,21 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
instr = prev_instr;
} while (instr != check);
environment()->SetExpressionStackAt(receiver_index, function);
- HInstruction* call =
- PreProcessCall(New<HCallNew>(function, argument_count));
- return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
if (TryHandleArrayCallNew(expr, function)) return;
-
- HInstruction* call =
- PreProcessCall(New<HCallNew>(function, argument_count));
- return ast_context()->ReturnInstruction(call, expr->id());
}
+
+ HValue* arity = Add<HConstant>(argument_count - 1);
+ HValue* op_vals[] = {context(), function, function, arity};
+ Callable callable = CodeFactory::Construct(isolate());
+ HConstant* stub = Add<HConstant>(callable.code());
+ PushArgumentsFromEnvironment(argument_count);
+ HInstruction* construct =
+ New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals)));
+ return ast_context()->ReturnInstruction(construct, expr->id());
}
@@ -10005,11 +10038,7 @@ HValue* HGraphBuilder::BuildAllocateEmptyArrayBuffer(HValue* byte_length) {
BuildAllocate(Add<HConstant>(JSArrayBuffer::kSizeWithInternalFields),
HType::JSObject(), JS_ARRAY_BUFFER_TYPE, HAllocationMode());
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HValue* native_context = Add<HLoadNamedField>(
- global_object, nullptr, HObjectAccess::ForGlobalObjectNativeContext());
+ HValue* native_context = BuildGetNativeContext();
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
@@ -10101,25 +10130,6 @@ void HOptimizedGraphBuilder::GenerateDataViewInitialize(
}
-static Handle<Map> TypedArrayMap(Isolate* isolate,
- ExternalArrayType array_type,
- ElementsKind target_kind) {
- Handle<Context> native_context = isolate->native_context();
- Handle<JSFunction> fun;
- switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- fun = Handle<JSFunction>(native_context->type##_array_fun()); \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- }
- Handle<Map> map(fun->initial_map());
- return Map::AsElementsKind(map, target_kind);
-}
-
-
HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
ExternalArrayType array_type,
bool is_zero_byte_offset,
@@ -10222,7 +10232,7 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
HValue* key = builder.BeginBody(
Add<HConstant>(static_cast<int32_t>(0)),
length, Token::LT);
- Add<HStoreKeyed>(backing_store, key, filler, fixed_elements_kind);
+ Add<HStoreKeyed>(backing_store, key, filler, elements, fixed_elements_kind);
builder.EndBody();
}
@@ -10333,9 +10343,6 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
if (buffer != NULL) {
elements = BuildAllocateExternalElements(
array_type, is_zero_byte_offset, buffer, byte_offset, length);
- Handle<Map> obj_map =
- TypedArrayMap(isolate(), array_type, fixed_elements_kind);
- AddStoreMapConstant(obj, obj_map);
} else {
DCHECK(is_zero_byte_offset);
elements = BuildAllocateFixedTypedArray(array_type, element_size,
@@ -10623,9 +10630,8 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
void HOptimizedGraphBuilder::BuildStoreForEffect(
- Expression* expr, Property* prop, FeedbackVectorICSlot slot,
- BailoutId ast_id, BailoutId return_id, HValue* object, HValue* key,
- HValue* value) {
+ Expression* expr, Property* prop, FeedbackVectorSlot slot, BailoutId ast_id,
+ BailoutId return_id, HValue* object, HValue* key, HValue* value) {
EffectContext for_effect(this);
Push(object);
if (key != NULL) Push(key);
@@ -11466,7 +11472,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
- if (!constructor->map()->has_non_instance_prototype()) {
+ if (constructor->IsConstructor() &&
+ !constructor->map()->has_non_instance_prototype()) {
JSFunction::EnsureHasInitialMap(constructor);
DCHECK(constructor->has_initial_map());
Handle<Map> initial_map(constructor->initial_map(), isolate());
@@ -11549,7 +11556,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
} else {
BuildCheckHeapObject(operand_to_check);
Add<HCheckInstanceType>(operand_to_check,
- HCheckInstanceType::IS_SPEC_OBJECT);
+ HCheckInstanceType::IS_JS_RECEIVER);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return result;
@@ -11715,6 +11722,11 @@ void HOptimizedGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
}
+HValue* HOptimizedGraphBuilder::AddThisFunction() {
+ return AddInstruction(BuildThisFunction());
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
@@ -11963,10 +11975,11 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
int elements_length = elements->length();
for (int i = 0; i < elements_length; i++) {
HValue* key_constant = Add<HConstant>(i);
- HInstruction* value_instruction = Add<HLoadKeyed>(
- boilerplate_elements, key_constant, nullptr, kind, ALLOW_RETURN_HOLE);
+ HInstruction* value_instruction =
+ Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
+ kind, ALLOW_RETURN_HOLE);
HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
- value_instruction, kind);
+ value_instruction, nullptr, kind);
store->SetFlag(HValue::kAllowUndefinedAsNaN);
}
}
@@ -11989,15 +12002,15 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
HInstruction* result =
BuildFastLiteral(value_object, site_context);
site_context->ExitScope(current_site, value_object);
- Add<HStoreKeyed>(object_elements, key_constant, result, kind);
+ Add<HStoreKeyed>(object_elements, key_constant, result, nullptr, kind);
} else {
ElementsKind copy_kind =
kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
HInstruction* value_instruction =
- Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr,
+ Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
copy_kind, ALLOW_RETURN_HOLE);
Add<HStoreKeyed>(object_elements, key_constant, value_instruction,
- copy_kind);
+ nullptr, copy_kind);
}
}
}
@@ -12136,6 +12149,12 @@ void HOptimizedGraphBuilder::VisitExportDeclaration(
}
+void HOptimizedGraphBuilder::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ CHECK_ALIVE(Visit(node->expression()));
+}
+
+
// Generators for inline runtime functions.
// Support for types.
void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
@@ -12147,14 +12166,14 @@ void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsJSReceiver(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
New<HHasInstanceTypeAndBranch>(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ FIRST_JS_RECEIVER_TYPE,
+ LAST_JS_RECEIVER_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -12163,8 +12182,8 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE);
+ HHasInstanceTypeAndBranch* result = New<HHasInstanceTypeAndBranch>(
+ value, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -12272,6 +12291,38 @@ void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateToLength(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ Callable callable = CodeFactory::ToLength(isolate());
+ HValue* input = Pop();
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateToNumber(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ Callable callable = CodeFactory::ToNumber(isolate());
+ HValue* input = Pop();
+ if (input->type().IsTaggedNumber()) {
+ return ast_context()->ReturnValue(input);
+ } else {
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+ }
+}
+
+
void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
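
The GenerateToNumber intrinsic added in the hunk above short-circuits when the operand is already known to be a tagged number and only otherwise emits the ToNumber stub call. The same shape in standalone form (Value and CallToNumberStub are illustrative):

    #include <string>
    #include <variant>

    using Value = std::variant<double, std::string>;

    // Stand-in for calling the CodeFactory::ToNumber stub.
    double CallToNumberStub(const Value& v) {
      return std::holds_alternative<double>(v)
                 ? std::get<double>(v)
                 : std::stod(std::get<std::string>(v));
    }

    double ToNumber(const Value& v) {
      // Fast path: already a number, so no stub call is needed.
      if (std::holds_alternative<double>(v)) return std::get<double>(v);
      return CallToNumberStub(v);
    }
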
@@ -12285,10 +12336,7 @@ void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
if_proxy.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(FIRST_JS_PROXY_TYPE), Token::GTE);
- if_proxy.And();
- if_proxy.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(LAST_JS_PROXY_TYPE), Token::LTE);
+ instance_type, Add<HConstant>(JS_PROXY_TYPE), Token::EQ);
if_proxy.CaptureContinuation(&continuation);
return ast_context()->ReturnContinuation(&continuation, call->id());
@@ -12324,22 +12372,6 @@ void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
}
-// Support for construct call checks.
-void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 0);
- if (function_state()->outer() != NULL) {
- // We are generating graph for inlined function.
- HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- return ast_context()->ReturnValue(value);
- } else {
- return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(),
- call->id());
- }
-}
-
-
// Support for arguments.length and arguments[?].
void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
DCHECK(call->arguments()->length() == 0);
@@ -12431,25 +12463,6 @@ void HOptimizedGraphBuilder::GenerateIsDate(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateThrowNotDateError(CallRuntime* call) {
- DCHECK_EQ(0, call->arguments()->length());
- Add<HDeoptimize>(Deoptimizer::kNotADateObject, Deoptimizer::EAGER);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* date = Pop();
- HDateField* result = New<HDateField>(date, index);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
DCHECK(call->arguments()->length() == 3);
@@ -12568,18 +12581,6 @@ void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
}
-// Fast support for StringAdd.
-void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* result = NewUncasted<HStringAdd>(left, right);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
DCHECK_EQ(3, call->arguments()->length());
@@ -12590,15 +12591,6 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateStringGetLength(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* string = Pop();
- HInstruction* result = BuildLoadStringLength(string);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
DCHECK_EQ(4, call->arguments()->length());
@@ -12609,6 +12601,26 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateRegExpFlags(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ HValue* regexp = Pop();
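+ // Read the flags field directly off the JSRegExp object.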
+ HInstruction* result =
+ New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpFlags());
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateRegExpSource(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ HValue* regexp = Pop();
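+ // Likewise, read the source field straight from the JSRegExp object.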
+ HInstruction* result =
+ New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpSource());
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12679,52 +12691,6 @@ void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
}
-// Fast call for custom callbacks.
-void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
- // 1 ~ The function to call is not itself an argument to the call.
- int arg_count = call->arguments()->length() - 1;
- DCHECK(arg_count >= 1); // There's always at least a receiver.
-
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- // The function is the last argument
- HValue* function = Pop();
- // Push the arguments to the stack
- PushArgumentsFromEnvironment(arg_count);
-
- IfBuilder if_is_jsfunction(this);
- if_is_jsfunction.If<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE);
-
- if_is_jsfunction.Then();
- {
- HInstruction* invoke_result =
- Add<HInvokeFunction>(function, arg_count);
- if (!ast_context()->IsEffect()) {
- Push(invoke_result);
- }
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
-
- if_is_jsfunction.Else();
- {
- HInstruction* call_result =
- Add<HCallFunction>(function, arg_count);
- if (!ast_context()->IsEffect()) {
- Push(call_result);
- }
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_is_jsfunction.End();
-
- if (ast_context()->IsEffect()) {
- // EffectContext::ReturnValue ignores the value, so we can just pass
- // 'undefined' (as we do not have the call result anymore).
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
- } else {
- return ast_context()->ReturnValue(Pop());
- }
-}
-
-
// Fast call to math functions.
void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
DCHECK_EQ(2, call->arguments()->length());
@@ -12773,29 +12739,6 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateLikely(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- Visit(call->arguments()->at(0));
-}
-
-
-void HOptimizedGraphBuilder::GenerateUnlikely(CallRuntime* call) {
- return GenerateLikely(call);
-}
-
-
-void HOptimizedGraphBuilder::GenerateHasInPrototypeChain(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* prototype = Pop();
- HValue* object = Pop();
- HHasInPrototypeChainAndBranch* result =
- New<HHasInPrototypeChainAndBranch>(object, prototype);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12803,7 +12746,7 @@ void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
HValue* index = Pop();
HValue* object = Pop();
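+ // Keyed loads take a new backing-store-owner input; plain fixed arrays
+ // pass nullptr.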
HInstruction* result = New<HLoadKeyed>(
- object, index, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+ object, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12817,7 +12760,7 @@ void HOptimizedGraphBuilder::GenerateFixedArraySet(CallRuntime* call) {
HValue* index = Pop();
HValue* object = Pop();
NoObservableSideEffectsScope no_effects(this);
- Add<HStoreKeyed>(object, index, value, FAST_HOLEY_ELEMENTS);
+ Add<HStoreKeyed>(object, index, value, nullptr, FAST_HOLEY_ELEMENTS);
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -13510,10 +13453,10 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type,
int assigned_reg = op->index();
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
- DoubleRegister::AllocationIndexToString(assigned_reg));
+ DoubleRegister::from_code(assigned_reg).ToString());
} else {
DCHECK(op->IsRegister());
- trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
+ trace_.Add(" \"%s\"", Register::from_code(assigned_reg).ToString());
}
} else if (range->IsSpilled()) {
LOperand* op = range->TopLevel()->GetSpillOperand();
diff --git a/chromium/v8/src/hydrogen.h b/chromium/v8/src/crankshaft/hydrogen.h
index c1215a33ba8..40a18347bee 100644
--- a/chromium/v8/src/hydrogen.h
+++ b/chromium/v8/src/crankshaft/hydrogen.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_H_
-#define V8_HYDROGEN_H_
+#ifndef V8_CRANKSHAFT_HYDROGEN_H_
+#define V8_CRANKSHAFT_HYDROGEN_H_
#include "src/accessors.h"
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/compiler.h"
-#include "src/hydrogen-instructions.h"
-#include "src/scopes.h"
+#include "src/crankshaft/hydrogen-instructions.h"
#include "src/zone.h"
namespace v8 {
@@ -788,15 +788,13 @@ class EffectContext final : public AstContext {
explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
- virtual ~EffectContext();
+ ~EffectContext() override;
void ReturnValue(HValue* value) override;
- virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
+ void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+ void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+ void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) override;
};
@@ -805,15 +803,13 @@ class ValueContext final : public AstContext {
ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
- virtual ~ValueContext();
+ ~ValueContext() override;
void ReturnValue(HValue* value) override;
- virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
+ void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+ void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+ void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) override;
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -835,12 +831,10 @@ class TestContext final : public AstContext {
}
void ReturnValue(HValue* value) override;
- virtual void ReturnInstruction(HInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnControl(HControlInstruction* instr,
- BailoutId ast_id) override;
- virtual void ReturnContinuation(HIfContinuation* continuation,
- BailoutId ast_id) override;
+ void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+ void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+ void ReturnContinuation(HIfContinuation* continuation,
+ BailoutId ast_id) override;
static TestContext* cast(AstContext* context) {
DCHECK(context->IsTest());
@@ -1409,11 +1403,8 @@ class HGraphBuilder {
KeyedAccessStoreMode store_mode);
HInstruction* AddElementAccess(
- HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
+ HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
+ HValue* backing_store_owner, ElementsKind elements_kind,
PropertyAccessType access_type,
LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
@@ -2208,16 +2199,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsTypedArray) \
F(IsRegExp) \
F(IsJSProxy) \
- F(IsConstructCall) \
F(Call) \
- F(CallFunction) \
F(ArgumentsLength) \
F(Arguments) \
F(ValueOf) \
F(SetValueOf) \
F(IsDate) \
- F(DateField) \
- F(ThrowNotDateError) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
@@ -2226,8 +2213,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(ToInteger) \
F(ToObject) \
F(ToString) \
+ F(ToLength) \
+ F(ToNumber) \
F(IsFunction) \
- F(IsSpecObject) \
+ F(IsJSReceiver) \
F(MathPow) \
F(IsMinusZero) \
F(HasCachedArrayIndex) \
@@ -2235,15 +2224,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(FastOneByteArrayJoin) \
F(DebugBreakInOptimizedCode) \
F(StringCharCodeAt) \
- F(StringAdd) \
F(SubString) \
F(RegExpExec) \
F(RegExpConstructResult) \
+ F(RegExpFlags) \
+ F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
- F(Likely) \
- F(Unlikely) \
- F(HasInPrototypeChain) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
F(DataViewInitialize) \
@@ -2276,8 +2263,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(CreateIterResultObject) \
/* Arrays */ \
F(HasFastPackedElements) \
- /* Strings */ \
- F(StringGetLength) \
/* JSValue */ \
F(JSValueGetValue)
@@ -2390,7 +2375,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void SetUpScope(Scope* scope);
void VisitStatements(ZoneList<Statement*>* statements) override;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -2483,15 +2468,15 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
const char* failure_reason);
void HandleGlobalVariableAssignment(Variable* var, HValue* value,
- FeedbackVectorICSlot ic_slot,
+ FeedbackVectorSlot slot,
BailoutId ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicNamedFieldAccess(
- PropertyAccessType access_type, Expression* expr,
- FeedbackVectorICSlot slot, BailoutId ast_id, BailoutId return_id,
- HValue* object, HValue* value, SmallMapList* types, Handle<String> name);
+ PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+ BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
+ SmallMapList* types, Handle<Name> name);
HValue* BuildAllocateExternalElements(
ExternalArrayType array_type,
@@ -2740,8 +2725,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
BailoutId return_id, Expression* expr,
- FeedbackVectorICSlot slot, HValue* object,
- Handle<String> name, HValue* value,
+ FeedbackVectorSlot slot, HValue* object,
+ Handle<Name> name, HValue* value,
bool is_uninitialized = false);
void HandlePolymorphicCallNamed(Call* expr,
@@ -2777,7 +2762,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
- Expression* expr, FeedbackVectorICSlot slot,
+ Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
@@ -2796,18 +2781,18 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
KeyedAccessStoreMode store_mode);
HValue* HandlePolymorphicElementAccess(
- Expression* expr, FeedbackVectorICSlot slot, HValue* object, HValue* key,
+ Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
HValue* val, SmallMapList* maps, PropertyAccessType access_type,
KeyedAccessStoreMode store_mode, bool* has_side_effects);
HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
- Expression* expr, FeedbackVectorICSlot slot,
+ Expression* expr, FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id,
PropertyAccessType access_type,
bool* has_side_effects);
HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
- FeedbackVectorICSlot slot, HValue* object,
+ FeedbackVectorSlot slot, HValue* object,
Handle<Name> name, HValue* value,
bool is_uninitialized = false);
@@ -2820,12 +2805,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* key);
void BuildStoreForEffect(Expression* expression, Property* prop,
- FeedbackVectorICSlot slot, BailoutId ast_id,
+ FeedbackVectorSlot slot, BailoutId ast_id,
BailoutId return_id, HValue* object, HValue* key,
HValue* value);
void BuildStore(Expression* expression, Property* prop,
- FeedbackVectorICSlot slot, BailoutId ast_id,
+ FeedbackVectorSlot slot, BailoutId ast_id,
BailoutId return_id, bool is_uninitialized = false);
HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
@@ -2836,6 +2821,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* BuildContextChainWalk(Variable* var);
+ HValue* AddThisFunction();
HInstruction* BuildThisFunction();
HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
@@ -3073,6 +3059,7 @@ class NoObservableSideEffectsScope final {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_HYDROGEN_H_
+#endif // V8_CRANKSHAFT_HYDROGEN_H_
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.cc b/chromium/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 850c182144f..4ec33ab1467 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/chromium/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -4,14 +4,15 @@
#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
-#include "src/hydrogen-osr.h"
#include "src/ia32/frames-ia32.h"
-#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
@@ -101,7 +102,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
__ movsd(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ XMMRegister::from_code(save_iterator.Current()));
save_iterator.Advance();
count++;
}
@@ -116,8 +117,8 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
+ __ movsd(XMMRegister::from_code(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
@@ -137,26 +138,6 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
- }
-
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ Move(edx, Immediate(kNoAlignmentPadding));
@@ -190,9 +171,8 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
- info()->AddNoFrameRange(0, masm_->pc_offset());
}
if (info()->IsOptimizing() &&
@@ -265,7 +245,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(edi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -274,7 +254,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -515,13 +495,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
+XMMRegister LCodeGen::ToDoubleRegister(int code) const {
+ return XMMRegister::from_code(code);
}
@@ -900,60 +880,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1749,37 +1675,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(eax));
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2164,7 +2059,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
@@ -2515,29 +2410,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(equal, is_false);
}
// At this point the object is known not to be a JSFunction.
@@ -2617,6 +2494,15 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, equal);
@@ -2693,7 +2579,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) RestoreCallerDoubles();
if (dynamic_frame_alignment_) {
@@ -2701,11 +2587,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
- no_frame_start = masm_->pc_offset();
}
if (dynamic_frame_alignment_) {
Label no_padding;
@@ -2717,9 +2601,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
EmitReturn(instr, false);
- if (no_frame_start != -1) {
- info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2734,7 +2615,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -2748,7 +2629,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -2769,24 +2650,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3225,16 +3088,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
- const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(receiver, Operand(receiver, global_offset));
- const int proxy_offset = GlobalObject::kGlobalProxyOffset;
- __ mov(receiver, FieldOperand(receiver, proxy_offset));
+ __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
__ bind(&receiver_ok);
}
@@ -3275,7 +3136,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3316,7 +3178,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3334,7 +3196,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize eax to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
// Invoke function directly.
@@ -3397,11 +3260,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(eax, instr->arity());
-
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
+ __ mov(eax, instr->arity());
+
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
@@ -3782,7 +3647,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3797,7 +3662,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3811,32 +3676,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ mov(vector_register, vector);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
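+ // Without a feedback vector slot, go through the generic Call builtin,
+ // which expects the argument count in eax.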
+ __ Set(eax, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->constructor()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // No cell in ebx for construct type feedback in optimized code
- __ mov(ebx, isolate()->factory()->undefined_value());
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ Move(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->constructor()).is(edi));
@@ -3860,7 +3709,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3875,17 +3724,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4026,30 +3875,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4465,7 +4290,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5193,11 +5019,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
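+ // Oversized constant allocations must fit in a regular page now; there is
+ // no longer a fallback jump to the deferred (runtime) path.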
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
@@ -5272,58 +5095,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- Label materialized;
- // Registers will be used as follows:
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- // esi = context.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(ecx, instr->hydrogen()->literals());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated, Label::kNear);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5406,8 +5177,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ JumpIfSmi(input, false_label, false_distance);
__ cmp(input, factory()->null_value());
__ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -5432,32 +5203,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp);
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5586,8 +5331,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ test(eax, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5599,7 +5344,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -5719,7 +5464,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.h b/chromium/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index a26903a9ace..06a3e10bf26 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.h
+++ b/chromium/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "src/ia32/lithium-ia32.h"
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
+#include "src/ast/scopes.h"
#include "src/base/logging.h"
+#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
+#include "src/crankshaft/ia32/lithium-ia32.h"
+#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
-#include "src/ia32/lithium-gap-resolver-ia32.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -27,15 +26,11 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -182,6 +177,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, argc, instr);
}
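+ // Overload that derives the argument count from the runtime function's
+ // declared arity, so callers need not pass argc explicitly.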
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
@@ -217,9 +217,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
@@ -278,10 +275,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -319,15 +312,11 @@ class LCodeGen: public LCodeGenBase {
void MakeSureStackPagesMapped(int offset);
#endif
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -398,6 +387,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_IA32_LITHIUM_CODEGEN_IA32_H_
+#endif // V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc b/chromium/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
index 0926a0f21a1..c3284df8827 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/chromium/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ia32/lithium-gap-resolver-ia32.h"
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
@@ -165,10 +166,14 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
+ int skip_index = reg.is(no_reg) ? -1 : reg.code();
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
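+ // Iterate only the registers Crankshaft can allocate, mapping each
+ // allocation index to its architecture register code.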
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
+ code != skip_index) {
+ return Register::from_code(code);
}
}
return no_reg;
@@ -178,10 +183,12 @@ Register LGapResolver::GetFreeRegisterNot(Register reg) {
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] != 0) return false;
+ if (destination_uses_[code] != 0) return false;
}
return true;
}
@@ -204,7 +211,7 @@ void LGapResolver::Verify() {
void LGapResolver::Finish() {
if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
moves_.Rewind(0);
@@ -213,7 +220,7 @@ void LGapResolver::Finish() {
void LGapResolver::EnsureRestored(LOperand* operand) {
if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
}
@@ -222,7 +229,7 @@ void LGapResolver::EnsureRestored(LOperand* operand) {
Register LGapResolver::EnsureTempRegister() {
// 1. We may have already spilled to create a temp register.
if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
+ return Register::from_code(spilled_register_);
}
// 2. We may have a free register that we can use without spilling.
@@ -231,19 +238,22 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
+ Register scratch = Register::from_code(code);
__ push(scratch);
- spilled_register_ = i;
+ spilled_register_ = code;
return scratch;
}
}
// 4. Use an arbitrary register; the first allocatable one is as good as any.
- Register scratch = Register::FromAllocationIndex(0);
+ spilled_register_ = config->GetAllocatableGeneralCode(0);
+ Register scratch = Register::from_code(spilled_register_);
__ push(scratch);
- spilled_register_ = 0;
return scratch;
}
@@ -359,7 +369,9 @@ void LGapResolver::EmitSwap(int index) {
// Register-register.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
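+ // Swap the registers by routing one value through the stack.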
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
diff --git a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h b/chromium/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
index d36e78b5f66..687087feb3a 100644
--- a/chromium/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/chromium/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -72,14 +72,15 @@ class LGapResolver final BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kMaxNumAllocatableRegisters];
- int destination_uses_[Register::kMaxNumAllocatableRegisters];
+ int source_uses_[Register::kNumRegisters];
+ int destination_uses_[Register::kNumRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#endif // V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/chromium/v8/src/ia32/lithium-ia32.cc b/chromium/v8/src/crankshaft/ia32/lithium-ia32.cc
index 884067b776e..a0cb93975f9 100644
--- a/chromium/v8/src/ia32/lithium-ia32.cc
+++ b/chromium/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ia32/lithium-ia32.h"
+#include "src/crankshaft/ia32/lithium-ia32.h"
#include <sstream>
#if V8_TARGET_ARCH_IA32
-#include "src/hydrogen-osr.h"
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/lithium-inl.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+#include "src/crankshaft/lithium-inl.h"
namespace v8 {
namespace internal {
@@ -319,15 +319,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
context()->PrintTo(stream);
@@ -372,11 +363,6 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -395,12 +381,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -482,14 +462,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1041,7 +1020,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
LOperand* temp = TempRegister();
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ return AssignEnvironment(result);
}
@@ -1264,14 +1245,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
@@ -1813,14 +1786,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2137,15 +2102,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2224,7 +2180,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2232,7 +2188,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2297,7 +2255,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = NULL;
val = UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
} else {
DCHECK(instr->value()->representation().IsSmiOrTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2312,7 +2270,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
- return new(zone()) LStoreKeyed(obj, key, val);
+ return new (zone()) LStoreKeyed(obj, key, val, nullptr);
}
}
@@ -2325,13 +2283,14 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
DCHECK(instr->elements()->representation().IsExternal());
LOperand* backing_store = UseRegister(instr->elements());
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
LOperand* val = GetStoreKeyedValueOperand(instr);
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2478,19 +2437,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
@@ -2531,13 +2477,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2645,12 +2584,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/ia32/lithium-ia32.h b/chromium/v8/src/crankshaft/ia32/lithium-ia32.h
index 9e4b885c48f..ab7a4b55167 100644
--- a/chromium/v8/src/ia32/lithium-ia32.h
+++ b/chromium/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_LITHIUM_IA32_H_
-#define V8_IA32_LITHIUM_IA32_H_
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -37,7 +37,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -65,7 +64,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -93,7 +91,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -103,7 +100,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -134,7 +130,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -145,7 +140,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1115,19 +1109,6 @@ class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1369,27 +1350,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index)
- : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- Smi* index() const { return index_; }
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1608,14 +1568,16 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1687,22 +1649,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1924,25 +1870,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2195,34 +2122,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
+ LOperand* backing_store_owner) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2231,6 +2138,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2556,19 +2464,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2885,6 +2780,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_IA32_LITHIUM_IA32_H_
+#endif // V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
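
Note: the recurring arity bumps in this header (LLoadKeyed going from `<1, 2, 0>` to `<1, 3, 0>`, LStoreKeyed from `<0, 3, 0>` to `<0, 4, 0>`) follow from how Lithium stores operands: the template parameters size the fixed `inputs_` array, so threading the new `backing_store_owner` operand through means growing the arity everywhere the instruction is constructed. A minimal self-contained sketch of that pattern, with the `*Sketch` names standing in for the V8-internal classes:

```cpp
// Hypothetical stand-ins for illustration; the real classes live in
// src/crankshaft/lithium.h and the per-arch lithium-*.h headers.
struct LOperand {};

// The template arity sizes the operand storage, so adding an input means
// changing a template argument, not just the constructor.
template <int Results, int Inputs, int Temps>
class LTemplateInstructionSketch {
 protected:
  LOperand* inputs_[Inputs];
};

// Mirrors the LLoadKeyed change: <1, 2, 0> became <1, 3, 0> to make room
// for backing_store_owner as inputs_[2].
class LLoadKeyedSketch : public LTemplateInstructionSketch<1, 3, 0> {
 public:
  LLoadKeyedSketch(LOperand* elements, LOperand* key,
                   LOperand* backing_store_owner) {
    inputs_[0] = elements;
    inputs_[1] = key;
    inputs_[2] = backing_store_owner;
  }
  LOperand* elements() { return inputs_[0]; }
  LOperand* key() { return inputs_[1]; }
  LOperand* backing_store_owner() { return inputs_[2]; }
};

int main() {
  LOperand elements, key, owner;
  LLoadKeyedSketch load(&elements, &key, &owner);
  return load.backing_store_owner() == &owner ? 0 : 1;
}
```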
diff --git a/chromium/v8/src/lithium-allocator-inl.h b/chromium/v8/src/crankshaft/lithium-allocator-inl.h
index 98923ae3aa7..22611b5efb6 100644
--- a/chromium/v8/src/lithium-allocator-inl.h
+++ b/chromium/v8/src/crankshaft/lithium-allocator-inl.h
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_ALLOCATOR_INL_H_
-#define V8_LITHIUM_ALLOCATOR_INL_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
+#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/lithium-allocator.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -54,6 +54,7 @@ void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_ALLOCATOR_INL_H_
+#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
diff --git a/chromium/v8/src/lithium-allocator.cc b/chromium/v8/src/crankshaft/lithium-allocator.cc
index 36a12e75b3b..5d052926420 100644
--- a/chromium/v8/src/lithium-allocator.cc
+++ b/chromium/v8/src/crankshaft/lithium-allocator.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/lithium-allocator.h"
-#include "src/hydrogen.h"
-#include "src/lithium-inl.h"
-#include "src/lithium-allocator-inl.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/lithium-allocator-inl.h"
+#include "src/register-configuration.h"
#include "src/string-stream.h"
namespace v8 {
@@ -585,7 +586,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kMaxNumAllocatableRegisters;
+ return -index - 1 - Register::kNumRegisters;
}
@@ -617,7 +618,7 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- DCHECK(index < Register::kMaxNumAllocatableRegisters);
+ DCHECK(index < Register::kNumRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
@@ -631,7 +632,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < DoubleRegister::NumAllocatableRegisters());
+ DCHECK(index < DoubleRegister::kMaxNumRegisters);
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
@@ -939,25 +940,27 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
if (instr->ClobbersRegisters()) {
- for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
- if (output == NULL || !output->IsRegister() ||
- output->index() != i) {
- LiveRange* range = FixedLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(),
- zone());
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if (Register::from_code(i).IsAllocatable()) {
+ if (output == NULL || !output->IsRegister() ||
+ output->index() != i) {
+ LiveRange* range = FixedLiveRangeFor(i);
+ range->AddUseInterval(curr_position,
+ curr_position.InstructionEnd(), zone());
+ }
}
}
}
if (instr->ClobbersDoubleRegisters(isolate())) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- if (output == NULL || !output->IsDoubleRegister() ||
- output->index() != i) {
- LiveRange* range = FixedDoubleLiveRangeFor(i);
- range->AddUseInterval(curr_position,
- curr_position.InstructionEnd(),
- zone());
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ if (DoubleRegister::from_code(i).IsAllocatable()) {
+ if (output == NULL || !output->IsDoubleRegister() ||
+ output->index() != i) {
+ LiveRange* range = FixedDoubleLiveRangeFor(i);
+ range->AddUseInterval(curr_position,
+ curr_position.InstructionEnd(), zone());
+ }
}
}
}
@@ -1069,11 +1072,9 @@ bool LAllocator::Allocate(LChunk* chunk) {
DCHECK(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
assigned_registers_ =
- new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
- chunk->zone());
- assigned_double_registers_ =
- new(chunk->zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
- chunk->zone());
+ new (chunk->zone()) BitVector(Register::kNumRegisters, chunk->zone());
+ assigned_double_registers_ = new (chunk->zone())
+ BitVector(DoubleRegister::kMaxNumRegisters, chunk->zone());
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1460,7 +1461,12 @@ void LAllocator::PopulatePointerMaps() {
void LAllocator::AllocateGeneralRegisters() {
LAllocatorPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::NumAllocatableRegisters();
+ num_registers_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->num_allocatable_general_registers();
+ allocatable_register_codes_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_general_codes();
mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
@@ -1468,7 +1474,12 @@ void LAllocator::AllocateGeneralRegisters() {
void LAllocator::AllocateDoubleRegisters() {
LAllocatorPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::NumAllocatableRegisters();
+ num_registers_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->num_allocatable_double_registers();
+ allocatable_register_codes_ =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->allocatable_double_codes();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1492,7 +1503,7 @@ void LAllocator::AllocateRegisters() {
DCHECK(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);
@@ -1586,9 +1597,9 @@ void LAllocator::AllocateRegisters() {
const char* LAllocator::RegisterName(int allocation_index) {
if (mode_ == GENERAL_REGISTERS) {
- return Register::AllocationIndexToString(allocation_index);
+ return Register::from_code(allocation_index).ToString();
} else {
- return DoubleRegister::AllocationIndexToString(allocation_index);
+ return DoubleRegister::from_code(allocation_index).ToString();
}
}
@@ -1750,16 +1761,12 @@ void LAllocator::InactiveToActive(LiveRange* range) {
}
-// TryAllocateFreeReg and AllocateBlockedReg assume this
-// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
- Register::kMaxNumAllocatableRegisters);
-
-
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ DCHECK(DoubleRegister::kMaxNumRegisters >= Register::kNumRegisters);
+
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumRegisters];
- for (int i = 0; i < num_registers_; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1800,10 +1807,11 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
}
// Find the register which stays free for the longest time.
- int reg = 0;
+ int reg = allocatable_register_codes_[0];
for (int i = 1; i < RegisterCount(); ++i) {
- if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
- reg = i;
+ int code = allocatable_register_codes_[i];
+ if (free_until_pos[code].Value() > free_until_pos[reg].Value()) {
+ reg = code;
}
}
@@ -1845,10 +1853,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
- LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition use_pos[DoubleRegister::kMaxNumRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumRegisters];
- for (int i = 0; i < num_registers_; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1883,10 +1891,11 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- int reg = 0;
+ int reg = allocatable_register_codes_[0];
for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[reg].Value()) {
- reg = i;
+ int code = allocatable_register_codes_[i];
+ if (use_pos[code].Value() > use_pos[reg].Value()) {
+ reg = code;
}
}
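
The allocator hunks above switch from dense allocation indices (0 .. NumAllocatableRegisters() - 1) to architecture register codes: the position arrays are now sized by `Register::kNumRegisters` / `DoubleRegister::kMaxNumRegisters` and indexed by code, and the search walks the `allocatable_register_codes_` table supplied by `RegisterConfiguration`, skipping non-allocatable codes. A compilable sketch of the selection loop under those assumptions (the `*Sketch` names are illustrative, not the V8 API):

```cpp
#include <cstdio>

// Stand-in for v8::internal::LifetimePosition.
struct LifetimePositionSketch {
  int value;
  int Value() const { return value; }
};

// After the change, free_until_pos is indexed by register *code*, and only
// codes listed in the allocatable table are considered -- mirroring the
// rewritten loop in LAllocator::TryAllocateFreeReg.
int PickLongestFreeRegister(const int* allocatable_codes, int count,
                            const LifetimePositionSketch* free_until_pos) {
  int reg = allocatable_codes[0];
  for (int i = 1; i < count; ++i) {
    int code = allocatable_codes[i];
    if (free_until_pos[code].Value() > free_until_pos[reg].Value()) {
      reg = code;
    }
  }
  return reg;
}

int main() {
  // Suppose only codes 0, 2 and 5 are allocatable on this architecture.
  const int codes[] = {0, 2, 5};
  LifetimePositionSketch free_until[6] = {{10}, {0}, {40}, {0}, {0}, {25}};
  std::printf("picked r%d\n", PickLongestFreeRegister(codes, 3, free_until));
  return 0;  // prints "picked r2"
}
```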
diff --git a/chromium/v8/src/lithium-allocator.h b/chromium/v8/src/crankshaft/lithium-allocator.h
index 7c947724509..46289e0fbbc 100644
--- a/chromium/v8/src/lithium-allocator.h
+++ b/chromium/v8/src/crankshaft/lithium-allocator.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_ALLOCATOR_H_
-#define V8_LITHIUM_ALLOCATOR_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
+#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#include "src/allocation.h"
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
#include "src/zone.h"
namespace v8 {
@@ -520,9 +520,8 @@ class LAllocator BASE_EMBEDDED {
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
- fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, Register::kNumRegisters> fixed_live_ranges_;
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
@@ -536,6 +535,7 @@ class LAllocator BASE_EMBEDDED {
RegisterKind mode_;
int num_registers_;
+ const int* allocatable_register_codes_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
@@ -568,6 +568,7 @@ class LAllocatorPhase : public CompilationPhase {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_ALLOCATOR_H_
+#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
diff --git a/chromium/v8/src/lithium-codegen.cc b/chromium/v8/src/crankshaft/lithium-codegen.cc
index 267df58ccd0..5bd1e6a9b87 100644
--- a/chromium/v8/src/lithium-codegen.cc
+++ b/chromium/v8/src/crankshaft/lithium-codegen.cc
@@ -2,34 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lithium-codegen.h"
+#include "src/crankshaft/lithium-codegen.h"
#include <sstream>
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
-#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
-#include "src/x64/lithium-codegen-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
-#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
-#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
-#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
-#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
-#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
-#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
@@ -53,8 +53,12 @@ LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
+ deoptimizations_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- last_lazy_deopt_pc_(0) {}
+ translations_(info->zone()),
+ inlined_function_count_(0),
+ last_lazy_deopt_pc_(0),
+ osr_pc_offset_(-1) {}
bool LCodeGenBase::GenerateBody() {
@@ -280,6 +284,68 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
}
+void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+ data->SetWeakCellCache(Smi::FromInt(0));
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ {
+ AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
+ }
+ inlined_function_count_ = deoptimization_literals_.length();
+
+ // Define deoptimization literals for all unoptimized code objects of inlined
+ // functions. This ensures unoptimized code is kept alive by optimized code.
+ AllowDeferredHandleDereference allow_shared_function_info_dereference;
+ for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(handle(function->code()));
+ }
+}
+
+
Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
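
This hunk hoists `PopulateDeoptimizationData` and `PopulateDeoptimizationLiteralsWithInlinedFunctions` into the shared `LCodeGenBase`; the per-backend copies (for example the MIPS one removed further down in lithium-codegen-mips.cc) previously duplicated the same bookkeeping. A self-contained sketch of the hoisting pattern, using illustrative stand-in types rather than the V8-internal `DeoptimizationInputData` machinery:

```cpp
#include <cstdio>
#include <vector>

// Illustrative stand-in for one registered deopt environment.
struct DeoptEntrySketch {
  int ast_id;
  int translation_index;
  int pc_offset;
};

// The per-entry loop each backend used to own now lives once in the base.
class LCodeGenBaseSketch {
 public:
  void RegisterDeoptimization(DeoptEntrySketch e) {
    deoptimizations_.push_back(e);
  }
  void PopulateDeoptimizationData() {
    if (deoptimizations_.empty()) return;  // same early-out as the real code
    for (const DeoptEntrySketch& e : deoptimizations_) {
      // In V8 these fields are written into the code object's deopt table.
      std::printf("ast=%d translation=%d pc=%d\n", e.ast_id,
                  e.translation_index, e.pc_offset);
    }
  }

 private:
  std::vector<DeoptEntrySketch> deoptimizations_;
};

int main() {
  LCodeGenBaseSketch base;
  base.RegisterDeoptimization({1, 0, 64});
  base.PopulateDeoptimizationData();
  return 0;
}
```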
diff --git a/chromium/v8/src/lithium-codegen.h b/chromium/v8/src/crankshaft/lithium-codegen.h
index ce04da90060..b1f7dac2e59 100644
--- a/chromium/v8/src/lithium-codegen.h
+++ b/chromium/v8/src/crankshaft/lithium-codegen.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_CODEGEN_H_
-#define V8_LITHIUM_CODEGEN_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
+#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
#include "src/bailout-reason.h"
#include "src/compiler.h"
@@ -53,6 +53,9 @@ class LCodeGenBase BASE_EMBEDDED {
Translation* translation);
int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
// Check that an environment assigned via AssignEnvironment is actually being
// used. Redundant assignments keep things alive longer than necessary, and
// consequently lead to worse code, so it's important to minimize this.
@@ -74,8 +77,12 @@ class LCodeGenBase BASE_EMBEDDED {
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
+ ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ TranslationBuffer translations_;
+ int inlined_function_count_;
int last_lazy_deopt_pc_;
+ int osr_pc_offset_;
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
@@ -91,6 +98,7 @@ class LCodeGenBase BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_CODEGEN_H_
+#endif // V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
diff --git a/chromium/v8/src/lithium-inl.h b/chromium/v8/src/crankshaft/lithium-inl.h
index 1a10773390a..9044b4ca7af 100644
--- a/chromium/v8/src/lithium-inl.h
+++ b/chromium/v8/src/crankshaft/lithium-inl.h
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_INL_H_
-#define V8_LITHIUM_INL_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_INL_H_
+#define V8_CRANKSHAFT_LITHIUM_INL_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -108,7 +108,7 @@ LOperand* UseIterator::Current() {
void UseIterator::Advance() {
input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_INL_H_
+#endif // V8_CRANKSHAFT_LITHIUM_INL_H_
diff --git a/chromium/v8/src/lithium.cc b/chromium/v8/src/crankshaft/lithium.cc
index 7d37532ace7..82ad6962be4 100644
--- a/chromium/v8/src/lithium.cc
+++ b/chromium/v8/src/crankshaft/lithium.cc
@@ -2,34 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
-#include "src/scopes.h"
+#include "src/ast/scopes.h"
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h" // NOLINT
-#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h" // NOLINT
-#include "src/x64/lithium-codegen-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h" // NOLINT
-#include "src/arm/lithium-codegen-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/lithium-ppc.h" // NOLINT
-#include "src/ppc/lithium-codegen-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h" // NOLINT
-#include "src/mips/lithium-codegen-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h" // NOLINT
-#include "src/arm64/lithium-codegen-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/lithium-mips64.h" // NOLINT
-#include "src/mips64/lithium-codegen-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h" // NOLINT
-#include "src/x87/lithium-codegen-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
@@ -56,24 +56,22 @@ void LOperand::PrintTo(StringStream* stream) {
break;
case LUnallocated::FIXED_REGISTER: {
int reg_index = unalloc->fixed_register_index();
- if (reg_index < 0 ||
- reg_index >= Register::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d)", reg_index);
} else {
const char* register_name =
- Register::AllocationIndexToString(reg_index);
+ Register::from_code(reg_index).ToString();
stream->Add("(=%s)", register_name);
}
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
int reg_index = unalloc->fixed_register_index();
- if (reg_index < 0 ||
- reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d)", reg_index);
} else {
const char* double_register_name =
- DoubleRegister::AllocationIndexToString(reg_index);
+ DoubleRegister::from_code(reg_index).ToString();
stream->Add("(=%s)", double_register_name);
}
break;
@@ -106,21 +104,19 @@ void LOperand::PrintTo(StringStream* stream) {
break;
case REGISTER: {
int reg_index = index();
- if (reg_index < 0 || reg_index >= Register::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d|R)", reg_index);
} else {
- stream->Add("[%s|R]", Register::AllocationIndexToString(reg_index));
+ stream->Add("[%s|R]", Register::from_code(reg_index).ToString());
}
break;
}
case DOUBLE_REGISTER: {
int reg_index = index();
- if (reg_index < 0 ||
- reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+ if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d|R)", reg_index);
} else {
- stream->Add("[%s|R]",
- DoubleRegister::AllocationIndexToString(reg_index));
+ stream->Add("[%s|R]", DoubleRegister::from_code(reg_index).ToString());
}
break;
}
@@ -410,54 +406,6 @@ Representation LChunk::LookupLiteralRepresentation(
}
-static void AddWeakObjectToCodeDependency(Isolate* isolate,
- Handle<HeapObject> object,
- Handle<Code> code) {
- Handle<WeakCell> cell = Code::WeakCellFor(code);
- Heap* heap = isolate->heap();
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
- dep = DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
- heap->AddWeakObjectToCodeDependency(object, dep);
-}
-
-
-void LChunk::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const {
- DCHECK(code->is_optimized_code());
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<HeapObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::CELL &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
- objects.Add(Handle<HeapObject>(it.rinfo()->target_cell()), zone());
- } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else {
- Handle<HeapObject> object(
- HeapObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
- for (int i = 0; i < maps.length(); i++) {
- if (maps.at(i)->dependent_code()->number_of_entries(
- DependentCode::kWeakCodeGroup) == 0) {
- isolate()->heap()->AddRetainedMap(maps.at(i));
- }
- Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate(), objects.at(i), code);
- }
- code->set_can_have_weak_objects(true);
-}
-
-
void LChunk::CommitDependencies(Handle<Code> code) const {
if (!code->is_optimized_code()) return;
HandleScope scope(isolate());
@@ -475,7 +423,6 @@ void LChunk::CommitDependencies(Handle<Code> code) const {
}
info_->dependencies()->Commit(code);
- RegisterWeakObjectsInOptimizedCode(code);
}
@@ -507,7 +454,8 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
Handle<Code> LChunk::Codegen() {
- MacroAssembler assembler(info()->isolate(), NULL, 0);
+ MacroAssembler assembler(info()->isolate(), NULL, 0,
+ CodeObjectRequired::kYes);
LOG_CODE_EVENT(info()->isolate(),
CodeStartLinePosInfoRecordEvent(
assembler.positions_recorder()));
diff --git a/chromium/v8/src/lithium.h b/chromium/v8/src/crankshaft/lithium.h
index 046de19fd07..10e980e9836 100644
--- a/chromium/v8/src/lithium.h
+++ b/chromium/v8/src/crankshaft/lithium.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LITHIUM_H_
-#define V8_LITHIUM_H_
+#ifndef V8_CRANKSHAFT_LITHIUM_H_
+#define V8_CRANKSHAFT_LITHIUM_H_
#include <set>
#include "src/allocation.h"
#include "src/bailout-reason.h"
-#include "src/hydrogen.h"
+#include "src/crankshaft/hydrogen.h"
#include "src/safepoint-table.h"
#include "src/zone-allocator.h"
@@ -690,7 +690,6 @@ class LChunk : public ZoneObject {
int spill_slot_count_;
private:
- void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const;
void CommitDependencies(Handle<Code> code) const;
CompilationInfo* info_;
@@ -835,6 +834,7 @@ class UseIterator BASE_EMBEDDED {
class LInstruction;
class LCodeGen;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_LITHIUM_H_
+#endif // V8_CRANKSHAFT_LITHIUM_H_
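
Alongside the src/ to src/crankshaft/ moves, every touched file also switches from the condensed `} }  // namespace v8::internal` closer to one commented closer per namespace. A sketch of the convention as applied throughout this patch (style illustration only):

```cpp
namespace v8 {
namespace internal {

// ... declarations ...

}  // namespace internal
}  // namespace v8
```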
diff --git a/chromium/v8/src/crankshaft/mips/OWNERS b/chromium/v8/src/crankshaft/mips/OWNERS
new file mode 100644
index 00000000000..89455a4fbd7
--- /dev/null
+++ b/chromium/v8/src/crankshaft/mips/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.cc b/chromium/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index bf158b4c43a..2414f0d61cb 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.cc
+++ b/chromium/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -25,14 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/mips/lithium-codegen-mips.h"
-#include "src/mips/lithium-gap-resolver-mips.h"
#include "src/profiler/cpu-profiler.h"
@@ -96,7 +97,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -112,7 +113,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -137,24 +138,6 @@ bool LCodeGen::GeneratePrologue() {
// cp: Callee's context.
// fp: Caller's frame pointer.
// lr: Caller's pc.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ lw(a2, MemOperand(sp, receiver_offset));
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ lw(a2, GlobalObjectOperand());
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sw(a2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -162,10 +145,9 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -207,7 +189,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(a1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -216,7 +198,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -235,7 +217,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sw(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
@@ -403,12 +385,12 @@ bool LCodeGen::GenerateSafepointTable() {
Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+ return Register::from_code(index);
}
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+ return DoubleRegister::from_code(index);
}
@@ -859,60 +841,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1448,9 +1376,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ Label no_overflow;
+ __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow);
} else {
__ Subu(result, zero_reg, left);
}
@@ -1676,21 +1605,19 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
}
} else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
+ Register scratch = scratch0();
+ Label no_overflow_label;
if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, scratch);
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
+ __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
+ Operand(right_reg), &no_overflow_label);
} else {
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
+ __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1708,20 +1635,6 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
-#if V8_HOST_ARCH_IA32
- // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
- // builds.
- uint64_t bits = instr->bits();
- if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
- V8_UINT64_C(0x7FF0000000000000)) {
- uint32_t lo = static_cast<uint32_t>(bits);
- uint32_t hi = static_cast<uint32_t>(bits >> 32);
- __ li(at, Operand(lo));
- __ li(scratch0(), Operand(hi));
- __ Move(result, at, scratch0());
- return;
- }
-#endif
double v = instr->value();
__ Move(result, v);
}
@@ -1746,39 +1659,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch, Operand(stamp));
- __ lw(scratch, MemOperand(scratch));
- __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch, Operand(scratch0()));
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ li(a1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1872,21 +1752,19 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
}
} else { // can_overflow.
- Register overflow = scratch0();
Register scratch = scratch1();
+ Label no_overflow_label;
if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, scratch);
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
+ __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
+ Operand(right_reg), &no_overflow_label);
} else {
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
+ __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -2171,7 +2049,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2537,31 +2415,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
-
+ __ GetObjectType(input, temp, temp2);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- __ GetObjectType(input, temp, temp2);
- __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ GetObjectType(input, temp, temp2);
- __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
Register instance_type = scratch1();
DCHECK(!instance_type.is(temp));
@@ -2627,6 +2487,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = scratch1();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2637,10 +2498,25 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ SmiTst(object, at);
EmitFalseBranch(instr, eq, at, Operand(zero_reg));
}
+
// Loop through the {object}s prototype chain looking for the {prototype}.
__ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(object_instance_type, object_instance_type,
+ Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ Operand(zero_reg));
+ // Deoptimize for proxies.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
__ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -2681,15 +2557,13 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(v0);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
- no_frame_start = masm_->pc_offset();
__ Pop(ra, fp);
}
if (instr->has_constant_parameter_count()) {
@@ -2708,10 +2582,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(ra);
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2726,7 +2596,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2740,7 +2610,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2761,29 +2631,11 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(context, instr->slot_index()));
+ __ lw(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2803,7 +2655,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3278,15 +3130,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
__ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ lw(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3345,7 +3195,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3388,7 +3239,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3407,7 +3258,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize a0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
// Invoke function.
@@ -3765,7 +3617,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3815,11 +3667,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(a0, Operand(instr->arity()));
-
// Change context.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ li(a0, Operand(instr->arity()));
+
// Load the code entry address
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -3834,7 +3688,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3848,32 +3702,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ li(vector_register, vector);
__ li(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ li(a0, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
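CallFunctionFlags and CallFunctionStub give way to ConvertReceiverMode plus the generic Call builtin: the call site now loads a0 with the arity itself and jumps to one shared builtin specialized per receiver mode, rather than instantiating a stub per call site. A sketch of the enum as it stood around this revision; the exact enumerator set is recalled from V8 sources of this era rather than shown by this diff:

  enum class ConvertReceiverMode : unsigned {
    kNullOrUndefined,     // receiver is statically known to be null or undefined
    kNotNullOrUndefined,  // receiver is statically known to be neither
    kAny                  // no static information; convert at run time
  };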
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- // No cell in a2 for construct type feedback in optimized code
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -3896,7 +3734,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3910,17 +3748,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4053,30 +3891,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand(0);
@@ -4560,7 +4374,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -4586,9 +4401,7 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- FPURegister dbl_scratch = double_scratch0();
- __ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
+ __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
}
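Cvt_d_uw's signature evidently changed to take the integer source as a GPR, folding the mtc1 move into the macro. Before and after, per this hunk:

  // before
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
  // after
  __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);

(The mips64 counterpart later in this diff changes differently: it keeps the mtc1 and merely drops the f22 scratch argument.)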
@@ -4673,8 +4486,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
__ mtc1(src, dbl_scratch);
__ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ __ Cvt_d_uw(dbl_scratch, src, f22);
}
if (FLAG_inline_new) {
@@ -5325,11 +5137,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
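A behavioral note on the allocation hunk above: a constant size above Page::kMaxRegularHeapObjectSize used to fall through to the deferred runtime path at run time; it now trips a CHECK at code-generation time, so earlier compiler passes are evidently expected to have filtered such sizes out.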
@@ -5405,59 +5214,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // t3 = literals array.
- // a1 = regexp literal.
- // a0 = regexp literal clone.
- // a2 and t0-t2 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ li(t3, instr->hydrogen()->literals());
- __ lw(a1, FieldMemOperand(t3, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a1, Operand(at));
-
- // Create regexp literal using runtime function
- // Result will be in v0.
- __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(t1, Operand(instr->hydrogen()->pattern()));
- __ li(t0, Operand(instr->hydrogen()->flags()));
- __ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(a1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(a1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ lw(a3, FieldMemOperand(a1, i));
- __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
- __ sw(a3, FieldMemOperand(v0, i));
- __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
- __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5565,9 +5321,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
@@ -5599,34 +5355,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
-
- EmitBranch(instr, eq, temp1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, temp2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5757,10 +5485,10 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ And(at, object, kSmiTagMask);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
__ GetObjectType(object, a1, a1);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(LAST_JS_PROXY_TYPE));
+ Operand(JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
@@ -5774,7 +5502,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
@@ -5898,7 +5626,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ li(at, scope_info);
__ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.h b/chromium/v8/src/crankshaft/mips/lithium-codegen-mips.h
index 858c7f12bcb..160ab9a665b 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.h
+++ b/chromium/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+#include "src/crankshaft/mips/lithium-mips.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
-#include "src/mips/lithium-gap-resolver-mips.h"
-#include "src/mips/lithium-mips.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -24,13 +24,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -201,6 +197,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
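This new overload is what enables the argument-count deletions scattered through the .cc diffs: nargs now comes from the runtime function table instead of being repeated by hand at every call site. For example:

  // before: the count is passed manually and can drift out of sync
  CallRuntime(Runtime::kDeclareGlobals, 2, instr);
  // after: the count is taken from Runtime::FunctionForId(id)->nargs
  CallRuntime(Runtime::kDeclareGlobals, instr);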
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -235,9 +236,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
@@ -310,10 +308,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -346,13 +340,9 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -426,6 +416,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc b/chromium/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc
index cdaf2463a0c..e25a32dffda 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/chromium/v8/src/crankshaft/mips/lithium-gap-resolver-mips.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips/lithium-codegen-mips.h"
-#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.h b/chromium/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h
index 7374da7727f..6c5fd037a34 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.h
+++ b/chromium/v8/src/crankshaft/mips/lithium-gap-resolver-mips.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -53,6 +53,7 @@ class LGapResolver final BASE_EMBEDDED {
LOperand* saved_destination_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/chromium/v8/src/mips/lithium-mips.cc b/chromium/v8/src/crankshaft/mips/lithium-mips.cc
index 42ecab4b8e3..a9978e1068c 100644
--- a/chromium/v8/src/mips/lithium-mips.cc
+++ b/chromium/v8/src/crankshaft/mips/lithium-mips.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips/lithium-mips.h"
+#include "src/crankshaft/mips/lithium-mips.h"
#include <sstream>
#if V8_TARGET_ARCH_MIPS
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/mips/lithium-codegen-mips.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
@@ -305,13 +305,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -330,11 +323,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -353,12 +341,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -451,14 +433,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
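Register allocation indices disappear here: LUnallocated now carries the architectural register code directly (reg.code()), matching the Register::from_code()/DoubleRegister::from_code() conversions in the codegen changes elsewhere in this diff.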
@@ -1005,7 +986,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1233,14 +1216,6 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1797,14 +1772,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2088,15 +2055,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2173,7 +2131,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2181,7 +2139,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2246,7 +2206,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2258,7 +2218,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2380,19 +2341,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
@@ -2433,13 +2381,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2534,12 +2475,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/mips/lithium-mips.h b/chromium/v8/src/crankshaft/mips/lithium-mips.h
index ed3332ca651..880d243312a 100644
--- a/chromium/v8/src/mips/lithium-mips.h
+++ b/chromium/v8/src/crankshaft/mips/lithium-mips.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_MIPS_H_
-#define V8_MIPS_LITHIUM_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -100,7 +97,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -131,7 +127,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -142,7 +137,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1342,25 +1336,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1570,15 +1545,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
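LLoadKeyed (and LStoreKeyed below) grow a backing_store_owner input. Judging from the chunk-builder changes earlier in this diff, it is wired up with UseAny, presumably so the owning object is kept live while the raw backing-store pointer is in use:

  LOperand* backing_store = UseRegister(instr->elements());
  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
  result = DefineAsRegister(
      new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));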
@@ -1635,22 +1612,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1874,25 +1835,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2152,34 +2094,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2188,6 +2110,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2511,19 +2434,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2568,19 +2478,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2843,6 +2740,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
diff --git a/chromium/v8/src/crankshaft/mips64/OWNERS b/chromium/v8/src/crankshaft/mips64/OWNERS
new file mode 100644
index 00000000000..89455a4fbd7
--- /dev/null
+++ b/chromium/v8/src/crankshaft/mips64/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/mips64/lithium-codegen-mips64.cc b/chromium/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index a26d099a629..29d19ee809e 100644
--- a/chromium/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/chromium/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
+
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/mips64/lithium-codegen-mips64.h"
-#include "src/mips64/lithium-gap-resolver-mips64.h"
#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -71,7 +72,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -87,7 +88,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -112,24 +113,6 @@ bool LCodeGen::GeneratePrologue() {
// cp: Callee's context.
// fp: Caller's frame pointer.
// lr: Caller's pc.
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ ld(a2, MemOperand(sp, receiver_offset));
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ ld(a2, GlobalObjectOperand());
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sd(a2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -137,10 +120,9 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
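Two things vanish from the prologue above: the sloppy-mode fixup that swapped an undefined receiver for the global proxy, and the no-frame-range bookkeeping. The receiver conversion presumably now happens inside the generic Call builtin, keyed on ConvertReceiverMode (see DoCallFunction below); that is an inference from this diff, not something it states. The removed fixup, paraphrased:

  // formerly emitted into every sloppy-mode optimized prologue:
  // if (receiver == undefined) receiver = native_context->global_proxy();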
@@ -182,7 +164,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(a1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -191,7 +173,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -210,7 +192,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sd(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
@@ -386,12 +368,12 @@ bool LCodeGen::GenerateSafepointTable() {
Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+ return Register::from_code(index);
}
DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+ return DoubleRegister::from_code(index);
}
@@ -848,60 +830,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1828,39 +1756,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch, Operand(stamp));
- __ ld(scratch, MemOperand(scratch));
- __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch, Operand(scratch0()));
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ li(a1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2272,7 +2167,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2642,27 +2537,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ GetObjectType(input, temp, temp2);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- __ GetObjectType(input, temp, temp2);
- __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ GetObjectType(input, temp, temp2);
- __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
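With the SPEC_OBJECT range constants gone, the callable/non-callable range arithmetic that the deleted STATIC_ASSERTs guaranteed no longer holds, so the "Function" class check collapses to a single equality compare against JS_FUNCTION_TYPE. The range comment still trailing this hunk is a leftover from the old scheme.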
@@ -2733,6 +2612,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = scratch1();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2748,6 +2628,19 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(object_instance_type, object_instance_type,
+ Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ Operand(zero_reg));
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
__ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
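Two deoptimization guards are new inside the prototype walk, matching the AssignEnvironment now attached to LHasInPrototypeChainAndBranch in the chunk builder earlier in this diff. Per iteration, the emitted code effectively does the following (a pseudo-C++ paraphrase of the checks above, not literal V8 API):

  if (map->is_access_check_needed()) Deoptimize(Deoptimizer::kAccessCheck);
  if (map->instance_type() == JS_PROXY_TYPE) Deoptimize(Deoptimizer::kProxy);
  object_prototype = map->prototype();  // then compared against `prototype`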
@@ -2789,15 +2682,13 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(v0);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(sp, fp);
- no_frame_start = masm_->pc_offset();
__ Pop(ra, fp);
}
if (instr->has_constant_parameter_count()) {
@@ -2816,10 +2707,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ Jump(ra);
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2834,7 +2721,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2848,7 +2735,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ li(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ li(slot_register, Operand(Smi::FromInt(index)));
}
@@ -2869,29 +2756,11 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ld(result, ContextOperand(context, instr->slot_index()));
+ __ ld(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2911,7 +2780,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3447,15 +3316,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
__ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ld(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ld(result,
- FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3514,7 +3381,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  // The number of arguments is stored in receiver, which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3557,7 +3425,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3576,7 +3444,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize a0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
// Invoke function.
@@ -3953,7 +3822,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4003,11 +3872,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(a0, Operand(instr->arity()));
-
// Change context.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ li(a0, Operand(instr->arity()));
+
// Load the code entry address
__ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -4022,7 +3893,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -4036,32 +3907,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ li(vector_register, vector);
__ li(slot_register, Operand(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ li(a0, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- // No cell in a2 for construct type feedback in optimized code
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4084,7 +3939,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4098,17 +3953,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4250,30 +4105,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
Operand operand((int64_t)0);
@@ -4777,7 +4608,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -4805,7 +4637,7 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
FPURegister dbl_scratch = double_scratch0();
__ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22?
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
}
@@ -4862,7 +4694,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
__ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
__ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch);
}
if (FLAG_inline_new) {
@@ -5508,11 +5340,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
@@ -5590,59 +5419,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // a7 = literals array.
- // a1 = regexp literal.
- // a0 = regexp literal clone.
- // a2 and a4-a6 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ li(a7, instr->hydrogen()->literals());
- __ ld(a1, FieldMemOperand(a7, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a1, Operand(at));
-
- // Create regexp literal using runtime function
- // Result will be in v0.
- __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a5, Operand(instr->hydrogen()->pattern()));
- __ li(a4, Operand(instr->hydrogen()->flags()));
- __ Push(a7, a6, a5, a4);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(a1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(a1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ld(a3, FieldMemOperand(a1, i));
- __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
- __ sd(a3, FieldMemOperand(v0, i));
- __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
- __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5750,9 +5526,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
@@ -5785,34 +5561,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
-
- EmitBranch(instr, eq, temp1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, temp2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5944,10 +5692,10 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ And(at, object, kSmiTagMask);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
__ GetObjectType(object, a1, a1);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(LAST_JS_PROXY_TYPE));
+ Operand(JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
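Note: the asserts above track the instance-type renumbering in this patch: the old FIRST_SPEC_OBJECT_TYPE..LAST_SPEC_OBJECT_TYPE range becomes FIRST_JS_RECEIVER_TYPE..LAST_JS_RECEIVER_TYPE, and the former FIRST_JS_PROXY_TYPE..LAST_JS_PROXY_TYPE sub-range collapses to the single JS_PROXY_TYPE at the bottom of the receiver range. A sketch of the predicate the deopt encodes, with the enum ordering assumed from the asserts:

    // Deoptimize unless the object is a JS receiver other than a proxy.
    // Assumes: JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE <= type <= LAST_TYPE.
    bool is_usable_receiver = type > JS_PROXY_TYPE;  // the 'le' branch deopts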
@@ -5961,7 +5709,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
@@ -6083,7 +5831,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ li(at, scope_info);
__ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/mips64/lithium-codegen-mips64.h b/chromium/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index b08de167bea..efadb0f26bc 100644
--- a/chromium/v8/src/mips64/lithium-codegen-mips64.h
+++ b/chromium/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+#include "src/crankshaft/mips64/lithium-mips64.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
-#include "src/mips64/lithium-gap-resolver-mips64.h"
-#include "src/mips64/lithium-mips64.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -24,13 +24,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -203,6 +199,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
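Note: the arity-free CallRuntime overload added above reads the argument count from the runtime function descriptor, which is why call sites throughout this patch drop their literal counts. A minimal before/after sketch at one call site, assuming the descriptor lookup matches this header:

    // Before: arity passed by hand and kept in sync manually.
    CallRuntime(Runtime::kPushBlockContext, 2, instr);
    // After: nargs comes from Runtime::FunctionForId(id)->nargs.
    CallRuntime(Runtime::kPushBlockContext, instr);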
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -237,9 +238,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
@@ -313,10 +311,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -349,13 +343,9 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -429,6 +419,7 @@ class LDeferredCode : public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
diff --git a/chromium/v8/src/mips64/lithium-gap-resolver-mips64.cc b/chromium/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
index 9e3114bc34c..0374cbc7bb4 100644
--- a/chromium/v8/src/mips64/lithium-gap-resolver-mips64.cc
+++ b/chromium/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips64/lithium-codegen-mips64.h"
-#include "src/mips64/lithium-gap-resolver-mips64.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/mips64/lithium-gap-resolver-mips64.h b/chromium/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
index 7374da7727f..85d8e2920c6 100644
--- a/chromium/v8/src/mips64/lithium-gap-resolver-mips64.h
+++ b/chromium/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -53,6 +53,7 @@ class LGapResolver final BASE_EMBEDDED {
LOperand* saved_destination_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/chromium/v8/src/mips64/lithium-mips64.cc b/chromium/v8/src/crankshaft/mips64/lithium-mips64.cc
index 4f2f1615241..129f61587f5 100644
--- a/chromium/v8/src/mips64/lithium-mips64.cc
+++ b/chromium/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips64/lithium-mips64.h"
+#include "src/crankshaft/mips64/lithium-mips64.h"
#include <sstream>
#if V8_TARGET_ARCH_MIPS64
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
namespace v8 {
namespace internal {
@@ -305,13 +305,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -330,11 +323,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -353,12 +341,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -451,14 +433,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
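Note: fixed-register constraints now use the architecture register code directly; the separate To/FromAllocationIndex mapping is gone, so code() and from_code() are assumed to round-trip with no remapping table in between:

    Register r = Register::from_code(3);  // code -> register
    DCHECK_EQ(3, r.code());               // register -> the same code back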
@@ -1005,7 +986,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
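Note: HasInPrototypeChainAndBranch now needs an environment because the emitted prototype walk can deoptimize midway; the ppc codegen hunk later in this patch bails out for access-checked objects and for proxies. The builder rule this follows, sketched with names from this file:

    // Any instruction that may reach DeoptimizeIf must carry a deopt environment.
    return AssignEnvironment(
        new (zone()) LHasInPrototypeChainAndBranch(object, prototype));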
@@ -1233,14 +1216,6 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1803,14 +1778,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2091,15 +2058,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2177,7 +2135,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
instr->representation().IsInteger32());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2185,7 +2143,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
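Note: on the off-heap (typed-array) path the instruction now also carries the object that owns the backing store. UseAny suffices because the generated code never reads the operand; keeping it as an input is assumed to be purely for liveness, so the owner stays alive across the load:

    LOperand* backing_store       = UseRegister(instr->elements());
    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());  // liveness only
    result = DefineAsRegister(
        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));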
@@ -2251,7 +2211,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2263,7 +2223,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2385,19 +2346,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), a1);
@@ -2438,13 +2386,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2539,12 +2480,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/mips64/lithium-mips64.h b/chromium/v8/src/crankshaft/mips64/lithium-mips64.h
index 01463c9d63d..01dc234c5af 100644
--- a/chromium/v8/src/mips64/lithium-mips64.h
+++ b/chromium/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_LITHIUM_MIPS_H_
-#define V8_MIPS_LITHIUM_MIPS_H_
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -35,7 +35,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -63,7 +62,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -91,7 +89,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -102,7 +99,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -133,7 +129,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -144,7 +139,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1374,25 +1368,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1632,15 +1607,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
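Note: the LTemplateInstruction<R, I, T> arguments count results, inputs, and temps, so adding the owner operand moves LLoadKeyed from <1, 2, 0> to <1, 3, 0>. A sketch of the convention with a hypothetical instruction (LExample is illustrative only):

    // LTemplateInstruction<R, I, T>: R results, I inputs, T temps.
    class LExample final : public LTemplateInstruction<1, 3, 0> {
     public:
      LExample(LOperand* a, LOperand* b, LOperand* c) {
        inputs_[0] = a;  // inputs_ provides exactly I = 3 slots
        inputs_[1] = b;
        inputs_[2] = c;
      }
    };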
@@ -1697,22 +1674,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1936,25 +1897,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2198,34 +2140,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2234,6 +2156,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2557,19 +2480,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2614,19 +2524,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2889,6 +2786,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_MIPS_LITHIUM_MIPS_H_
+#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
diff --git a/chromium/v8/src/crankshaft/ppc/OWNERS b/chromium/v8/src/crankshaft/ppc/OWNERS
new file mode 100644
index 00000000000..eb007cb9081
--- /dev/null
+++ b/chromium/v8/src/crankshaft/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/chromium/v8/src/ppc/lithium-codegen-ppc.cc b/chromium/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index ad6d8db13d3..936b8a76efb 100644
--- a/chromium/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/chromium/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/ppc/lithium-codegen-ppc.h"
-#include "src/ppc/lithium-gap-resolver-ppc.h"
#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -73,7 +74,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ stfd(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -89,7 +90,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ __ lfd(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -116,24 +117,6 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
// ip: Our own function entry (required by the prologue)
-
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
- __ LoadP(r5, MemOperand(sp, receiver_offset));
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bne(&ok);
-
- __ LoadP(r5, GlobalObjectOperand());
- __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
-
- __ StoreP(r5, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
- }
}
int prologue_offset = masm_->pc_offset();
@@ -147,12 +130,11 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(prologue_offset);
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue(prologue_offset);
+ __ StubPrologue(ip, prologue_offset);
} else {
- __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
+ __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
}
frame_is_built_ = true;
- info_->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -193,7 +175,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(r4);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -202,7 +184,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(r4);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -221,7 +203,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ LoadP(r3, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ StoreP(r3, target, r0);
// Update the write barrier. This clobbers r6 and r3.
if (need_write_barrier) {
@@ -395,13 +377,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
+ return DoubleRegister::from_code(code);
}
@@ -821,61 +803,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- {
- AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1886,41 +1813,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(r3));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand(stamp));
- __ LoadP(scratch, MemOperand(scratch));
- __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch, scratch0());
- __ bne(&runtime);
- __ LoadP(result,
- FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ b(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ LoadSmiLiteral(r4, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
String::Encoding encoding) {
if (index->IsConstantOperand()) {
@@ -2295,7 +2187,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ bge(instr->TrueLabel(chunk_));
}
@@ -2683,39 +2575,20 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
__ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ blt(is_false);
- __ beq(is_true);
- __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ beq(is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ bgt(is_false);
+ __ beq(is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
Register instance_type = ip;
__ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
__ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
- if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
+ if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
__ bne(is_true);
} else {
__ bne(is_false);
@@ -2774,6 +2647,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = ip;
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2789,6 +2663,15 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbz(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
@@ -2840,17 +2723,16 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(r3);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (instr->has_constant_parameter_count()) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
int32_t sp_delta = (parameter_count + 1) * kPointerSize;
if (NeedsEagerFrame()) {
- no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
} else if (sp_delta != 0) {
__ addi(sp, sp, Operand(sp_delta));
}
@@ -2859,17 +2741,13 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
if (NeedsEagerFrame()) {
- no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+ masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
}
__ SmiToPtrArrayOffset(r0, reg);
__ add(sp, sp, r0);
}
__ blr();
-
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2884,7 +2762,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}
@@ -2898,7 +2776,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}
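Note: FeedbackVectorICSlot and FeedbackVectorSlot appear to have been unified into the single FeedbackVectorSlot type, so IC and non-IC slots share one index lookup. Sketch with names from this hunk:

    FeedbackVectorSlot slot = instr->hydrogen()->slot();
    int index = vector->GetIndex(slot);  // assumed uniform for IC and non-IC slots
    __ LoadSmiLiteral(slot_register, Smi::FromInt(index));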
@@ -2919,28 +2797,10 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadP(result, ContextOperand(context, instr->slot_index()));
+ __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2968,7 +2828,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3483,28 +3343,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
if (!instr->hydrogen()->known_function()) {
// Do not transform the receiver to object for strict mode
- // functions.
+ // functions or builtins.
__ LoadP(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ lwz(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&result_in_receiver, cr0);
-
- // Do not transform the receiver to object for builtins.
- __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
+ __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
__ bne(&result_in_receiver, cr0);
}
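Note: with the compiler hints renamed to plain bit indices (kStrictModeBit, kNativeBit), the two per-bit TestBit sequences collapse into a single masked test; receiver wrapping is skipped when either bit is set. The combined check, sketched:

    uint32_t mask = (1 << SharedFunctionInfo::kStrictModeBit) |
                    (1 << SharedFunctionInfo::kNativeBit);
    bool skip_wrap = (compiler_hints & mask) != 0;  // one test instead of two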
@@ -3519,14 +3364,15 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
__ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+ __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
if (result.is(receiver)) {
__ bind(&result_in_receiver);
} else {
@@ -3583,7 +3429,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r3, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3625,7 +3472,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3645,7 +3492,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize r3 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ mov(r3, Operand(arity));
bool is_self_call = function.is_identical_to(info()->closure());
@@ -4009,7 +3857,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4059,11 +3907,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(ToRegister(instr->result()).is(r3));
- __ mov(r3, Operand(instr->arity()));
-
// Change context.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ mov(r3, Operand(instr->arity()));
+
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
@@ -4089,7 +3939,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -4103,32 +3953,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Move(vector_register, vector);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ mov(r3, Operand(arity));
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
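Note: untyped calls now dispatch through the Call builtin parameterized by ConvertReceiverMode rather than CallFunctionStub, with the arity placed in r3; the sloppy-mode receiver fix-up removed from GeneratePrologue earlier in this patch is assumed to live behind that mode now. The shape of the dispatch, using names from this hunk:

    ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
    __ mov(r3, Operand(arity));  // actual argument count
    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);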
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- __ mov(r3, Operand(instr->arity()));
- // No cell in r5 for construct type feedback in optimized code
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r4));
@@ -4151,7 +3985,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4165,17 +3999,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ElementsKind holey_kind = GetHoleyElementsKind(kind);
ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ b(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4321,29 +4155,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode()).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4852,7 +4663,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(r3, result);
}
@@ -5604,11 +5416,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
- } else {
- __ b(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
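Note: constant-size allocations above the regular-page limit no longer branch to the deferred runtime path here; the CHECK assumes the graph builder now caps constant sizes at Page::kMaxRegularHeapObjectSize before codegen:

    CHECK(size <= Page::kMaxRegularHeapObjectSize);  // builder-enforced bound (assumed)
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);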
@@ -5686,50 +5495,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // r10 = literals array.
- // r4 = regexp literal.
- // r3 = regexp literal clone.
- // r5 and r7-r9 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ Move(r10, instr->hydrogen()->literals());
- __ LoadP(r4, FieldMemOperand(r10, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r4, ip);
- __ bne(&materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r3.
- __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ mov(r8, Operand(instr->hydrogen()->pattern()));
- __ mov(r7, Operand(instr->hydrogen()->flags()));
- __ Push(r10, r9, r8, r7);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mr(r4, r3);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
- __ b(&allocated);
-
- __ bind(&runtime_allocate);
- __ LoadSmiLiteral(r3, Smi::FromInt(size));
- __ Push(r4, r3);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r4);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r6));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -5808,8 +5573,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ beq(true_label);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
__ blt(false_label);
// Check for callable or undetectable objects => false.
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
@@ -5837,33 +5602,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&check_frame_marker);
- __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5992,8 +5730,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ TestIfSmi(r3, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -6007,7 +5745,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r3);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
@@ -6128,7 +5866,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/ppc/lithium-codegen-ppc.h b/chromium/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index 117dc574d54..b0f016d3091 100644
--- a/chromium/v8/src/ppc/lithium-codegen-ppc.h
+++ b/chromium/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_LITHIUM_CODEGEN_PPC_H_
-#define V8_PPC_LITHIUM_CODEGEN_PPC_H_
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
-#include "src/ppc/lithium-ppc.h"
-
-#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/ppc/lithium-ppc.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -25,13 +24,9 @@ class LCodeGen : public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -137,7 +132,7 @@ class LCodeGen : public LCodeGenBase {
Scope* scope() const { return scope_; }
- Register scratch0() { return r11; }
+ Register scratch0() { return kLithiumScratch; }
DoubleRegister double_scratch0() { return kScratchDoubleReg; }
LInstruction* GetNextInstruction();
@@ -184,6 +179,11 @@ class LCodeGen : public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
@@ -209,9 +209,6 @@ class LCodeGen : public LCodeGenBase {
LOperand* op, bool is_tagged, bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
@@ -260,10 +257,6 @@ class LCodeGen : public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
@@ -282,13 +275,9 @@ class LCodeGen : public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -358,7 +347,7 @@ class LDeferredCode : public ZoneObject {
Label* external_exit_;
int instruction_index_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PPC_LITHIUM_CODEGEN_PPC_H_
+#endif // V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
diff --git a/chromium/v8/src/ppc/lithium-gap-resolver-ppc.cc b/chromium/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
index 16fb665dda8..4e249808f76 100644
--- a/chromium/v8/src/ppc/lithium-gap-resolver-ppc.cc
+++ b/chromium/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/lithium-codegen-ppc.h"
-#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/ppc/lithium-gap-resolver-ppc.h b/chromium/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
index 7741080e553..6eeea5eee58 100644
--- a/chromium/v8/src/ppc/lithium-gap-resolver-ppc.h
+++ b/chromium/v8/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-#define V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -52,7 +52,7 @@ class LGapResolver final BASE_EMBEDDED {
bool in_cycle_;
LOperand* saved_destination_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#endif // V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
diff --git a/chromium/v8/src/ppc/lithium-ppc.cc b/chromium/v8/src/crankshaft/ppc/lithium-ppc.cc
index 767c771fb31..63aead7a3c8 100644
--- a/chromium/v8/src/ppc/lithium-ppc.cc
+++ b/chromium/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/lithium-ppc.h"
+#include "src/crankshaft/ppc/lithium-ppc.h"
#include <sstream>
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/ppc/lithium-codegen-ppc.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
namespace v8 {
namespace internal {
@@ -311,13 +311,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -336,11 +329,6 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -359,12 +347,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -457,14 +439,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1014,7 +995,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
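Annotation: DoHasInPrototypeChainAndBranch now wraps its result in AssignEnvironment because the matching code generator (see the x64 hunk further down) gains DeoptimizeIf calls for access-checked objects and proxies, and an instruction may only deoptimize if it carries an environment describing how to rebuild unoptimized frame state. A toy sketch of that invariant, with stand-in types that are not the real Crankshaft API:

#include <cassert>
#include <memory>

struct LEnvironment {};  // stand-in: frame reconstruction data

struct LInstruction {
  std::unique_ptr<LEnvironment> env;
  bool CanDeoptimize() const { return env != nullptr; }
};

LInstruction* AssignEnvironment(LInstruction* instr) {
  instr->env = std::make_unique<LEnvironment>();
  return instr;
}

int main() {
  LInstruction branch;
  assert(!branch.CanDeoptimize());
  AssignEnvironment(&branch);  // mirrors the change in this hunk
  assert(branch.CanDeoptimize());
}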
@@ -1234,14 +1217,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r4);
- LCallNew* result = new (zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r4);
@@ -1813,14 +1788,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), r3);
- LDateField* result =
- new (zone()) LDateField(object, FixedTemp(r4), instr->index());
- return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2102,15 +2069,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2186,14 +2144,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
} else {
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK((instr->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new (zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2256,7 +2216,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new (zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK((instr->value()->representation().IsInteger32() &&
@@ -2267,7 +2227,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new (zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
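Annotation: both keyed instructions grow a backing_store_owner input on the external (typed array) path. The operand is allocated with UseAny, which suggests it exists purely to keep the owning object live across the access rather than to be read by generated code. A hedged sketch with hypothetical simplified types:

// Hypothetical simplified shapes, not the real Lithium templates.
struct LOperand {};

template <int kInputs>
struct LTemplateInstruction {
  LOperand* inputs_[kInputs];
};

struct LLoadKeyed : LTemplateInstruction<3> {
  LLoadKeyed(LOperand* elements, LOperand* key,
             LOperand* backing_store_owner) {
    inputs_[0] = elements;
    inputs_[1] = key;
    // Liveness-only input: never read by emitted code, but it pins the
    // owning object for the register allocator and GC.
    inputs_[2] = backing_store_owner;
  }
  LOperand* backing_store_owner() { return inputs_[2]; }
};

int main() {
  LOperand elements, key, owner;
  LLoadKeyed load(&elements, &key, &owner);
  return load.backing_store_owner() == &owner ? 0 : 1;
}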
@@ -2387,19 +2348,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r4);
@@ -2439,13 +2387,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LRegExpLiteral(context), r3),
- instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2540,12 +2481,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new (zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/ppc/lithium-ppc.h b/chromium/v8/src/crankshaft/ppc/lithium-ppc.h
index e862a11f638..e86edc9afc9 100644
--- a/chromium/v8/src/ppc/lithium-ppc.h
+++ b/chromium/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_LITHIUM_PPC_H_
-#define V8_PPC_LITHIUM_PPC_H_
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -100,7 +97,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -132,7 +128,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -143,7 +138,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1330,25 +1324,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1549,15 +1524,17 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
@@ -1612,22 +1589,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -1843,25 +1804,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2104,34 +2046,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2140,6 +2062,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
@@ -2448,17 +2371,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
@@ -2499,17 +2411,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) { temps_[0] = temp; }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2760,7 +2661,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PPC_LITHIUM_PPC_H_
+#endif // V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
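Annotation: the V(...) lists edited above are X-macros, so removing a single V(CallNew) or V(DateField) line drops the instruction from every expansion site (opcode enum, dispatch, mnemonic table) at once. A self-contained illustration of the pattern:

#include <cstdio>

// Two-entry stand-in for the real instruction list.
#define INSTRUCTION_LIST(V) \
  V(CallNewArray)           \
  V(CallRuntime)

enum class Opcode {
#define DECLARE_OPCODE(type) k##type,
  INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

const char* Name(Opcode op) {
  switch (op) {
#define DECLARE_NAME(type) \
  case Opcode::k##type:    \
    return #type;
    INSTRUCTION_LIST(DECLARE_NAME)
#undef DECLARE_NAME
  }
  return "?";
}

int main() { std::printf("%s\n", Name(Opcode::kCallNewArray)); }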
diff --git a/chromium/v8/src/typing.cc b/chromium/v8/src/crankshaft/typing.cc
index bd5114e89a4..df50f81167c 100644
--- a/chromium/v8/src/typing.cc
+++ b/chromium/v8/src/crankshaft/typing.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/typing.h"
+#include "src/crankshaft/typing.h"
+#include "src/ast/scopes.h"
#include "src/frames.h"
#include "src/frames-inl.h"
#include "src/ostreams.h"
-#include "src/parser.h" // for CompileTimeValue; TODO(rossberg): should move
-#include "src/scopes.h"
+#include "src/parsing/parser.h" // for CompileTimeValue; TODO(rossberg): move
#include "src/splay-tree-inl.h"
namespace v8 {
@@ -17,7 +17,9 @@ namespace internal {
AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
- : closure_(closure),
+ : isolate_(isolate),
+ zone_(zone),
+ closure_(closure),
scope_(scope),
osr_ast_id_(osr_ast_id),
root_(root),
@@ -25,7 +27,7 @@ AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
handle(closure->shared()->feedback_vector()),
handle(closure->context()->native_context())),
store_(zone) {
- InitializeAstVisitor(isolate, zone);
+ InitializeAstVisitor(isolate);
}
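Annotation: AstTyper now stores isolate_ and zone_ itself because InitializeAstVisitor no longer takes a Zone, and later methods in this file switch from the inherited isolate() accessor to the cached member. A simplified model of the new ownership, with stand-in types:

// Stand-in types; the real classes live in src/ast and the Isolate code.
struct Isolate {};
struct Zone {};

class AstVisitor {
 protected:
  void InitializeAstVisitor(Isolate* isolate) { visitor_isolate_ = isolate; }
  Isolate* visitor_isolate_ = nullptr;
};

class AstTyper : public AstVisitor {
 public:
  AstTyper(Isolate* isolate, Zone* zone) : isolate_(isolate), zone_(zone) {
    InitializeAstVisitor(isolate);  // the Zone is no longer passed down
  }

 private:
  Isolate* isolate_;  // used directly, e.g. by ObserveTypesAtOsrEntry
  Zone* zone_;        // exposed through the new private zone() accessor
};

int main() {
  Isolate isolate;
  Zone zone;
  AstTyper typer(&isolate, &zone);
  (void)typer;
}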
@@ -51,7 +53,7 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
if (stmt->OsrEntryId() != osr_ast_id_) return;
DisallowHeapAllocation no_gc;
- JavaScriptFrameIterator it(isolate());
+ JavaScriptFrameIterator it(isolate_);
JavaScriptFrame* frame = it.frame();
// Assert that the frame on the stack belongs to the function we want to OSR.
@@ -348,6 +350,13 @@ void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
+void AstTyper::VisitDoExpression(DoExpression* expr) {
+ RECURSE(VisitBlock(expr->block()));
+ RECURSE(VisitVariableProxy(expr->result()));
+ NarrowType(expr, expr->result()->bounds());
+}
+
+
void AstTyper::VisitConditional(Conditional* expr) {
// Collect type feedback.
expr->condition()->RecordToBooleanTypeFeedback(oracle());
@@ -401,14 +410,9 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->key()->AsLiteral()->value()->IsInternalizedString() &&
prop->emit_store()) {
// Record type feed back for the property.
- TypeFeedbackId id = prop->key()->AsLiteral()->LiteralFeedbackId();
- FeedbackVectorICSlot slot = prop->GetSlot();
+ FeedbackVectorSlot slot = prop->GetSlot();
SmallMapList maps;
- if (FLAG_vector_stores) {
- oracle()->CollectReceiverTypes(slot, &maps);
- } else {
- oracle()->CollectReceiverTypes(id, &maps);
- }
+ oracle()->CollectReceiverTypes(slot, &maps);
prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
: Handle<Map>::null());
}
@@ -436,32 +440,20 @@ void AstTyper::VisitAssignment(Assignment* expr) {
// Collect type feedback.
Property* prop = expr->target()->AsProperty();
if (prop != NULL) {
- TypeFeedbackId id = expr->AssignmentFeedbackId();
- FeedbackVectorICSlot slot = expr->AssignmentSlot();
- expr->set_is_uninitialized(FLAG_vector_stores
- ? oracle()->StoreIsUninitialized(slot)
- : oracle()->StoreIsUninitialized(id));
+ FeedbackVectorSlot slot = expr->AssignmentSlot();
+ expr->set_is_uninitialized(oracle()->StoreIsUninitialized(slot));
if (!expr->IsUninitialized()) {
SmallMapList* receiver_types = expr->GetReceiverTypes();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
DCHECK(lit_key != NULL && lit_key->value()->IsString());
Handle<String> name = Handle<String>::cast(lit_key->value());
- if (FLAG_vector_stores) {
- oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
- } else {
- oracle()->AssignmentReceiverTypes(id, name, receiver_types);
- }
+ oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
} else {
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
- if (FLAG_vector_stores) {
- oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
- &store_mode, &key_type);
- } else {
- oracle()->KeyedAssignmentReceiverTypes(id, receiver_types,
- &store_mode, &key_type);
- }
+ oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
+ &store_mode, &key_type);
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
}
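Annotation: with FLAG_vector_stores gone, every feedback lookup in this file collapses from a two-way branch (TypeFeedbackId versus FeedbackVectorICSlot) to a single FeedbackVectorSlot query. A toy sketch of the surviving path, using stand-in types rather than the real oracle:

#include <iostream>

struct FeedbackVectorSlot {
  int index;
};

struct Oracle {
  // Toy rule standing in for the real type-feedback query.
  bool StoreIsUninitialized(FeedbackVectorSlot slot) const {
    return slot.index < 0;
  }
};

int main() {
  Oracle oracle;
  FeedbackVectorSlot slot{2};
  // One unconditional path, as in the rewritten VisitAssignment.
  std::cout << std::boolalpha << oracle.StoreIsUninitialized(slot) << "\n";
}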
@@ -499,8 +491,7 @@ void AstTyper::VisitThrow(Throw* expr) {
void AstTyper::VisitProperty(Property* expr) {
// Collect type feedback.
- FeedbackVectorICSlot slot(FeedbackVectorICSlot::Invalid());
- slot = expr->PropertyFeedbackSlot();
+ FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
if (!expr->IsUninitialized()) {
@@ -530,8 +521,8 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
bool is_uninitialized = true;
- if (expr->IsUsingCallFeedbackICSlot(isolate())) {
- FeedbackVectorICSlot slot = expr->CallFeedbackICSlot();
+ if (expr->IsUsingCallFeedbackICSlot(isolate_)) {
+ FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
is_uninitialized = oracle()->CallIsUninitialized(slot);
if (!expr->expression()->IsProperty() &&
oracle()->CallIsMonomorphic(slot)) {
@@ -550,7 +541,7 @@ void AstTyper::VisitCall(Call* expr) {
}
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate_)) {
store_.Forget(); // Eval could do whatever to local variables.
}
@@ -621,17 +612,11 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
- TypeFeedbackId store_id = expr->CountStoreFeedbackId();
- FeedbackVectorICSlot slot = expr->CountSlot();
+ FeedbackVectorSlot slot = expr->CountSlot();
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
- if (FLAG_vector_stores) {
- oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
- oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
- } else {
- oracle()->GetStoreModeAndKeyType(store_id, &store_mode, &key_type);
- oracle()->CountReceiverTypes(store_id, expr->GetReceiverTypes());
- }
+ oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
+ oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
@@ -785,6 +770,12 @@ void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
+void AstTyper::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
for (int i = 0; i < decls->length(); ++i) {
Declaration* decl = decls->at(i);
diff --git a/chromium/v8/src/typing.h b/chromium/v8/src/crankshaft/typing.h
index 8b3e97b67ca..40b538aef33 100644
--- a/chromium/v8/src/typing.h
+++ b/chromium/v8/src/crankshaft/typing.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TYPING_H_
-#define V8_TYPING_H_
+#ifndef V8_CRANKSHAFT_TYPING_H_
+#define V8_CRANKSHAFT_TYPING_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/effects.h"
-#include "src/scopes.h"
#include "src/type-info.h"
#include "src/types.h"
#include "src/zone.h"
@@ -33,6 +33,8 @@ class AstTyper: public AstVisitor {
typedef v8::internal::Effects<int, kNoVar> Effects;
typedef v8::internal::NestedEffects<int, kNoVar> Store;
+ Isolate* isolate_;
+ Zone* zone_;
Handle<JSFunction> closure_;
Scope* scope_;
BailoutId osr_ast_id_;
@@ -40,6 +42,7 @@ class AstTyper: public AstVisitor {
TypeFeedbackOracle oracle_;
Store store_;
+ Zone* zone() const { return zone_; }
TypeFeedbackOracle* oracle() { return &oracle_; }
void NarrowType(Expression* e, Bounds b) {
@@ -69,13 +72,14 @@ class AstTyper: public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
void VisitStatements(ZoneList<Statement*>* statements) override;
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(AstTyper);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_TYPING_H_
+#endif // V8_CRANKSHAFT_TYPING_H_
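Annotation: the DECLARE_VISIT change drops the redundant virtual keyword, since override already requires that the function override a virtual declaration in a base class; stating both is noise. A minimal stand-alone illustration:

struct AstVisitor {
  virtual ~AstVisitor() = default;
  virtual void VisitBlock() = 0;
};

struct AstTyper : AstVisitor {
  // `override` alone both documents and enforces the override; adding
  // `virtual` again, as the old macro did, changes nothing.
  void VisitBlock() override {}
};

int main() {
  AstTyper typer;
  typer.VisitBlock();
}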
diff --git a/chromium/v8/src/unique.h b/chromium/v8/src/crankshaft/unique.h
index 8805218b1ff..54abfa77106 100644
--- a/chromium/v8/src/unique.h
+++ b/chromium/v8/src/crankshaft/unique.h
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNIQUE_H_
-#define V8_UNIQUE_H_
+#ifndef V8_CRANKSHAFT_UNIQUE_H_
+#define V8_CRANKSHAFT_UNIQUE_H_
#include <ostream> // NOLINT(readability/streams)
+#include "src/assert-scope.h"
#include "src/base/functional.h"
#include "src/handles.h"
#include "src/utils.h"
@@ -355,6 +356,7 @@ class UniqueSet final : public ZoneObject {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_UNIQUE_H_
+#endif // V8_CRANKSHAFT_UNIQUE_H_
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.cc b/chromium/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index dbdd146a1eb..3f7e9ba825c 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.cc
+++ b/chromium/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -4,14 +4,15 @@
#if V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -88,8 +89,8 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ __ Movsd(MemOperand(rsp, count * kDoubleSize),
+ XMMRegister::from_code(save_iterator.Current()));
save_iterator.Advance();
count++;
}
@@ -104,7 +105,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ __ Movsd(XMMRegister::from_code(save_iterator.Current()),
MemOperand(rsp, count * kDoubleSize));
save_iterator.Advance();
count++;
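Annotation: two independent changes meet in these hunks. Allocation indices give way to plain register codes (XMMRegister::from_code), and the lowercase SSE mnemonics give way to capitalized MacroAssembler wrappers. The wrappers most likely exist so a single call site can emit either the AVX or the legacy SSE encoding depending on CPU support; a hedged sketch of that idea with a hypothetical simplified assembler:

#include <cstdio>

// Hypothetical simplified assembler; the real wrappers live in
// MacroAssembler and test CPU features at emit time.
struct MacroAssembler {
  bool avx_supported;
  void movsd() { std::puts("movsd  (legacy SSE2 encoding)"); }
  void vmovsd() { std::puts("vmovsd (AVX encoding)"); }
  void Movsd() { avx_supported ? vmovsd() : movsd(); }
};

int main() {
  MacroAssembler masm{true};
  masm.Movsd();  // one call site, two possible encodings
}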
@@ -124,24 +125,6 @@ bool LCodeGen::GeneratePrologue() {
__ int3();
}
#endif
-
- // Sloppy mode functions need to replace the receiver with the global proxy
- // when called as functions (without an explicit receiver object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- StackArgumentsAccessor args(rsp, scope()->num_parameters());
- __ movp(rcx, args.GetReceiverOperand());
-
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &ok, Label::kNear);
-
- __ movp(rcx, GlobalObjectOperand());
- __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
-
- __ movp(args.GetReceiverOperand(), rcx);
-
- __ bind(&ok);
- }
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -151,9 +134,8 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
- info()->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
@@ -202,7 +184,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -211,7 +193,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ Push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
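Annotation: CallRuntime loses its explicit argument count here and in DoReturn and DoDeclareGlobals below, presumably because the declared arity can be read from the runtime function's descriptor instead of being repeated at every call site. A toy model of that table lookup (hypothetical names, not the real Runtime API):

#include <cassert>

enum class RuntimeId { kNewScriptContext, kNewFunctionContext };

struct RuntimeFunction {
  RuntimeId id;
  int nargs;  // declared arity
};

const RuntimeFunction* FunctionForId(RuntimeId id) {
  static const RuntimeFunction kTable[] = {
      {RuntimeId::kNewScriptContext, 2},
      {RuntimeId::kNewFunctionContext, 1},
  };
  return &kTable[static_cast<int>(id)];
}

void CallRuntime(RuntimeId id) {
  // The arity comes from the table, not from the call site.
  int nargs = FunctionForId(id)->nargs;
  assert(nargs >= 0);
}

int main() { CallRuntime(RuntimeId::kNewScriptContext); }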
@@ -437,12 +419,12 @@ bool LCodeGen::GenerateSafepointTable() {
Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+ return Register::from_code(index);
}
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
+ return XMMRegister::from_code(index);
}
@@ -826,60 +808,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode, int argc) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1739,44 +1667,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(rax));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(object);
- __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- __ Check(equal, kOperandIsNotADate);
- }
-
- if (index->value() == 0) {
- __ movp(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- Operand stamp_operand = __ ExternalOperand(stamp);
- __ movp(kScratchRegister, stamp_operand);
- __ cmpp(kScratchRegister, FieldOperand(object,
- JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movp(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
- __ movp(arg_reg_1, object);
- __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1964,35 +1854,40 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bind(&return_left);
} else {
DCHECK(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
+ Label not_nan, distinct, return_left, return_right;
Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
XMMRegister left_reg = ToDoubleRegister(left);
XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
+ __ Ucomisd(left_reg, right_reg);
+ __ j(parity_odd, &not_nan, Label::kNear); // Neither operand is NaN.
- __ bind(&check_zero);
+ // One of the numbers is NaN. Find which one and return it.
+ __ Ucomisd(left_reg, left_reg);
+ __ j(parity_even, &return_left, Label::kNear); // left is NaN.
+ __ jmp(&return_right, Label::kNear); // right is NaN.
+
+ __ bind(&not_nan);
+ __ j(not_equal, &distinct, Label::kNear); // left != right.
+
+ // left == right
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
+
+ // At this point, both left and right are either +0 or -0.
if (operation == HMathMinMax::kMathMin) {
- __ orps(left_reg, right_reg);
+ __ Orpd(left_reg, right_reg);
} else {
- // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- __ addsd(left_reg, right_reg);
+ __ Andpd(left_reg, right_reg);
}
__ jmp(&return_left, Label::kNear);
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear);
+ __ bind(&distinct);
+ __ j(condition, &return_left, Label::kNear);
+
__ bind(&return_right);
- __ movaps(left_reg, right_reg);
+ __ Movapd(left_reg, right_reg);
__ bind(&return_left);
}
@@ -2041,16 +1936,16 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
}
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result
- __ movaps(result, result);
+ __ Movapd(result, result);
break;
case Token::MOD: {
XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm_scratch, left);
+ __ Movapd(xmm_scratch, left);
DCHECK(right.is(xmm1));
__ CallCFunction(
ExternalReference::mod_two_doubles_operation(isolate()), 2);
- __ movaps(result, xmm_scratch);
+ __ Movapd(result, xmm_scratch);
break;
}
default:
@@ -2129,8 +2024,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
DCHECK(r.IsTagged());
@@ -2150,8 +2045,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
DCHECK(!info()->IsStub());
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
@@ -2206,7 +2101,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
@@ -2239,8 +2134,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2320,7 +2215,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
int32_t value;
@@ -2388,11 +2283,11 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ ucomisd(input_reg, input_reg);
+ __ Ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_reg);
+ __ Movsd(MemOperand(rsp, 0), input_reg);
__ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
@@ -2408,10 +2303,10 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
if (rep.IsDouble()) {
XMMRegister value = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, value);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, value);
EmitFalseBranch(instr, not_equal);
- __ movmskpd(kScratchRegister, value);
+ __ Movmskpd(kScratchRegister, value);
__ testl(kScratchRegister, Immediate(1));
EmitBranch(instr, not_zero);
} else {
@@ -2565,32 +2460,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(equal, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ GetMapConstructor(temp, temp, kScratchRegister);
@@ -2669,6 +2545,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+
+ // Deoptimize if the object needs to be access checked.
+ __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
__ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmpp(object_prototype, prototype);
EmitTrueBranch(instr, equal);
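Annotation: the prototype walk gains two guards per step, since the fast inline loop cannot consult access-check interceptors or proxy traps; hitting either forces a deoptimization. This is the codegen side of the AssignEnvironment change in the PPC chunk builder above. A toy model of the guarded walk (simplified stand-in types):

struct Object {
  bool access_check_needed;
  bool is_proxy;
  const Object* prototype;  // null terminates the chain
};

enum class Result { kTrue, kFalse, kDeoptAccessCheck, kDeoptProxy };

Result HasInPrototypeChain(const Object* object, const Object* prototype) {
  for (const Object* current = object; current != nullptr;
       current = current->prototype) {
    if (current->access_check_needed) return Result::kDeoptAccessCheck;
    if (current->is_proxy) return Result::kDeoptProxy;
    if (current->prototype == prototype) return Result::kTrue;
  }
  return Result::kFalse;
}

int main() {
  Object target{false, false, nullptr};
  Object obj{false, false, &target};
  return HasInPrototypeChain(&obj, &target) == Result::kTrue ? 0 : 1;
}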
@@ -2707,16 +2593,14 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ Push(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movp(rsp, rbp);
__ popq(rbp);
- no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
__ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
@@ -2732,9 +2616,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ addp(rsp, reg);
__ jmp(return_addr_reg);
}
- if (no_frame_start != -1) {
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -2749,7 +2630,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}
@@ -2763,7 +2644,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}
@@ -2784,23 +2665,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->result()).is(rax));
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2875,7 +2739,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (instr->hydrogen()->representation().IsDouble()) {
DCHECK(access.IsInobject());
XMMRegister result = ToDoubleRegister(instr->result());
- __ movsd(result, FieldOperand(object, offset));
+ __ Movsd(result, FieldOperand(object, offset));
return;
}
@@ -3005,10 +2869,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
+ __ Cvtss2sd(result, operand);
} else if (elements_kind == FLOAT64_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
+ __ Movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@@ -3079,7 +2942,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
- __ movsd(result, double_load_operand);
+ __ Movsd(result, double_load_operand);
}
@@ -3296,16 +3159,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
- __ movp(receiver,
- Operand(receiver,
- Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
+ __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+ __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
__ bind(&receiver_ok);
}
@@ -3349,7 +3210,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3385,7 +3247,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3404,7 +3266,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize rax to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ Set(rax, arity);
// Invoke function.
@@ -3422,7 +3285,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
+ generator);
}
}
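Annotation: here and in DoCallJSFunction below, optimized call sites start seeding the new.target register (rdx on x64) with undefined alongside the argument count in rax, so callees can rely on it holding a valid value even for ordinary, non-construct calls. A toy model of the convention:

#include <cassert>

struct Value {};
Value undefined_value;  // stand-in for the undefined root

struct CallSite {
  Value* rdx = nullptr;  // new.target register on x64
  int rax = 0;           // actual argument count

  void PrepareOrdinaryCall(int arity) {
    rdx = &undefined_value;  // not a [[Construct]] invocation
    rax = arity;
  }
};

int main() {
  CallSite site;
  site.PrepareOrdinaryCall(3);
  assert(site.rdx == &undefined_value && site.rax == 3);
}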
@@ -3468,11 +3332,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Set(rax, instr->arity());
-
// Change context.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ Set(rax, instr->arity());
+
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3582,9 +3448,9 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
if (r.IsDouble()) {
XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ andps(input_reg, scratch);
+ __ Xorpd(scratch, scratch);
+ __ Subsd(scratch, input_reg);
+ __ Andpd(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else if (r.IsSmi()) {
@@ -3610,19 +3476,19 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE4_1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Deoptimize if minus zero.
- __ movq(output_reg, input_reg);
+ __ Movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
}
- __ roundsd(xmm_scratch, input_reg, kRoundDown);
- __ cvttsd2si(output_reg, xmm_scratch);
+ __ Roundsd(xmm_scratch, input_reg, kRoundDown);
+ __ Cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ Ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &negative_sign, Label::kNear);
@@ -3630,8 +3496,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Check for negative zero.
Label positive_sign;
__ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ testq(output_reg, Immediate(1));
+ __ Movmskpd(output_reg, input_reg);
+ __ testl(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ Set(output_reg, 0);
__ jmp(&done);
@@ -3639,7 +3505,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
// Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
+ __ Cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3648,9 +3514,9 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Non-zero negative reaches here.
__ bind(&negative_sign);
// Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, input_reg);
+ __ Cvttsd2si(output_reg, input_reg);
__ Cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
+ __ Ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3671,13 +3537,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
Label done, round_to_zero, below_one_half;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ movq(kScratchRegister, one_half);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
+ __ Movq(xmm_scratch, kScratchRegister);
+ __ Ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, xmm_scratch);
+ __ Addsd(xmm_scratch, input_reg);
+ __ Cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3685,21 +3551,21 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&below_one_half);
__ movq(kScratchRegister, minus_one_half);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
+ __ Movq(xmm_scratch, kScratchRegister);
+ __ Ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
// compare and compensate.
- __ movq(input_temp, input_reg); // Do not alter input_reg.
- __ subsd(input_temp, xmm_scratch);
- __ cvttsd2si(output_reg, input_temp);
+ __ Movapd(input_temp, input_reg); // Do not alter input_reg.
+ __ Subsd(input_temp, xmm_scratch);
+ __ Cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(xmm_scratch, input_temp);
+ __ Ucomisd(xmm_scratch, input_temp);
__ j(equal, &done, dist);
__ subl(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
@@ -3709,7 +3575,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
// we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movq(output_reg, input_reg);
+ __ Movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
}
@@ -3721,8 +3587,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
void LCodeGen::DoMathFround(LMathFround* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister output_reg = ToDoubleRegister(instr->result());
- __ cvtsd2ss(output_reg, input_reg);
- __ cvtss2sd(output_reg, output_reg);
+ __ Cvtsd2ss(output_reg, input_reg);
+ __ Cvtss2sd(output_reg, output_reg);
}
@@ -3730,10 +3596,10 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
XMMRegister output = ToDoubleRegister(instr->result());
if (instr->value()->IsDoubleRegister()) {
XMMRegister input = ToDoubleRegister(instr->value());
- __ sqrtsd(output, input);
+ __ Sqrtsd(output, input);
} else {
Operand input = ToOperand(instr->value());
- __ sqrtsd(output, input);
+ __ Sqrtsd(output, input);
}
}
@@ -3750,22 +3616,22 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
__ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
+ __ Movq(xmm_scratch, kScratchRegister);
+ __ Ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &sqrt, Label::kNear);
__ j(carry, &sqrt, Label::kNear);
// If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
+ __ Xorpd(input_reg, input_reg);
+ __ Subsd(input_reg, xmm_scratch);
__ jmp(&done, Label::kNear);
// Square root.
__ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Addsd(input_reg, xmm_scratch); // Convert -0 to +0.
+ __ Sqrtsd(input_reg, input_reg);
__ bind(&done);
}
@@ -3821,26 +3687,26 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(not_carry, &zero, Label::kNear);
- __ pcmpeqd(input_reg, input_reg);
+ __ Pcmpeqd(input_reg, input_reg);
__ jmp(&done, Label::kNear);
__ bind(&zero);
ExternalReference ninf =
ExternalReference::address_of_negative_infinity();
Operand ninf_operand = masm()->ExternalOperand(ninf);
- __ movsd(input_reg, ninf_operand);
+ __ Movsd(input_reg, ninf_operand);
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), input_reg);
+ __ Movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
- __ movsd(input_reg, Operand(rsp, 0));
+ __ Movsd(input_reg, Operand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
@@ -3864,7 +3730,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3879,7 +3745,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -3893,32 +3759,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Move(vector_register, vector);
__ Move(slot_register, Smi::FromInt(index));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Set(rax, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->constructor()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- __ Set(rax, instr->arity());
- // No cell in ebx for construct type feedback in optimized code
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->constructor()).is(rdi));
@@ -3942,7 +3792,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3957,17 +3807,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4031,7 +3881,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!hinstr->has_transition());
DCHECK(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(FieldOperand(object, offset), value);
+ __ Movsd(FieldOperand(object, offset), value);
return;
}
@@ -4079,7 +3929,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
XMMRegister value = ToDoubleRegister(instr->value());
- __ movsd(operand, value);
+ __ Movsd(operand, value);
} else if (instr->value()->IsRegister()) {
Register value = ToRegister(instr->value());
@@ -4141,29 +3991,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
@@ -4249,10 +4076,10 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(operand, value);
+ __ Cvtsd2ss(value, value);
+ __ Movss(operand, value);
} else if (elements_kind == FLOAT64_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
+ __ Movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
@@ -4299,8 +4126,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
XMMRegister xmm_scratch = double_scratch0();
// Turn potential sNaN value into qNaN.
- __ xorps(xmm_scratch, xmm_scratch);
- __ subsd(value, xmm_scratch);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Subsd(value, xmm_scratch);
}
Operand double_store_operand = BuildFastArrayOperand(
@@ -4310,7 +4137,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
- __ movsd(double_store_operand, value);
+ __ Movsd(double_store_operand, value);
}
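
Note: the canonicalization hunk above relies on an IEEE-754 property: any arithmetic operation on a signaling NaN raises the invalid flag and returns the quiet form, so subtracting zero (Xorpd to clear a scratch register, then Subsd) quiets sNaNs before they are stored into a FixedDoubleArray. A small sketch of that property (assumes an IEEE-754 platform; a compiler may constant-fold this at higher optimization levels):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    static uint64_t Bits(double d) {
      uint64_t b;
      std::memcpy(&b, &d, sizeof(b));
      return b;
    }

    int main() {
      double snan = std::numeric_limits<double>::signaling_NaN();
      double canonical = snan - 0.0;  // mirrors: Xorpd scratch; Subsd value, scratch
      // On x86, bit 51 is the quiet bit: clear for sNaN, set for qNaN.
      std::printf("quiet bit before: %d, after: %d\n",
                  static_cast<int>((Bits(snan) >> 51) & 1),
                  static_cast<int>((Bits(canonical) >> 51) & 1));
    }
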
@@ -4676,7 +4503,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
__ Push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4780,7 +4608,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// the value in there. If that fails, call the runtime system.
__ SmiToInteger32(reg, reg);
__ xorl(reg, Immediate(0x80000000));
- __ cvtlsi2sd(temp_xmm, reg);
+ __ Cvtlsi2sd(temp_xmm, reg);
} else {
DCHECK(signedness == UNSIGNED_INT32);
__ LoadUint32(temp_xmm, reg);
@@ -4817,7 +4645,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
+ __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}
@@ -4844,7 +4672,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
@@ -4920,7 +4748,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// On x64 it is safe to load at heap number offset before evaluating the map
// check, since all heap objects are at least two words long.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
@@ -4930,11 +4758,11 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, result_reg);
+ __ Xorpd(xmm_scratch, xmm_scratch);
+ __ Ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
- __ movmskpd(kScratchRegister, result_reg);
- __ testq(kScratchRegister, Immediate(1));
+ __ Movmskpd(kScratchRegister, result_reg);
+ __ testl(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
@@ -4946,7 +4774,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
- __ pcmpeqd(result_reg, result_reg);
+ __ Pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
@@ -4999,16 +4827,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
+ __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg);
- __ ucomisd(xmm0, scratch);
+ __ Ucomisd(xmm0, scratch);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
- __ movmskpd(input_reg, xmm0);
+ __ Movmskpd(input_reg, xmm0);
__ andl(input_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
@@ -5311,7 +5139,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
@@ -5328,10 +5156,10 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->value());
Register result_reg = ToRegister(instr->result());
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ movq(result_reg, value_reg);
+ __ Movq(result_reg, value_reg);
__ shrq(result_reg, Immediate(32));
} else {
- __ movd(result_reg, value_reg);
+ __ Movd(result_reg, value_reg);
}
}
@@ -5340,11 +5168,10 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
Register hi_reg = ToRegister(instr->hi());
Register lo_reg = ToRegister(instr->lo());
XMMRegister result_reg = ToDoubleRegister(instr->result());
- XMMRegister xmm_scratch = double_scratch0();
- __ movd(result_reg, hi_reg);
- __ psllq(result_reg, 32);
- __ movd(xmm_scratch, lo_reg);
- __ orps(result_reg, xmm_scratch);
+ __ movl(kScratchRegister, hi_reg);
+ __ shlq(kScratchRegister, Immediate(32));
+ __ orq(kScratchRegister, lo_reg);
+ __ Movq(result_reg, kScratchRegister);
}
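
Note: DoConstructDouble now assembles the IEEE-754 bit pattern in a general-purpose register (shlq/orq) and transfers it with a single Movq, instead of the old psllq/orps shuffle through two XMM registers. The equivalent operation in portable C++ (a sketch, assuming hi and lo are the upper and lower 32 bits of the representation):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;  // shlq + orq
      double result;
      std::memcpy(&result, &bits, sizeof(result));             // Movq xmm, r64
      return result;
    }

    int main() {
      std::printf("%f\n", ConstructDouble(0x3FF00000u, 0u));  // prints 1.0
    }
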
@@ -5378,11 +5205,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
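
Note: the DoAllocate hunk above turns a dynamic fallback into a hard invariant: requests above Page::kMaxRegularHeapObjectSize previously jumped to the deferred path, and must now be filtered out before reaching Crankshaft. In miniature (the constant is illustrative, not the value in this tree):

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t kMaxRegularHeapObjectSize = 512 * 1024;  // illustrative

    void AllocateInline(std::size_t size) {
      // Old: if (size > max) jump to the deferred runtime allocation.
      // New: an oversized request here is a bug, not a slow path.
      assert(size <= kMaxRegularHeapObjectSize);
      // ... inline bump-pointer fast path, deferred entry only for GC ...
    }

    int main() { AllocateInline(64); }
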
@@ -5450,57 +5274,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- Label materialized;
- // Registers will be used as follows:
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ Move(rcx, instr->hydrogen()->literals());
- __ movp(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ Push(rcx);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->pattern());
- __ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movp(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated, Label::kNear);
-
- __ bind(&runtime_allocate);
- __ Push(rbx);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ Pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movp(rdx, FieldOperand(rbx, i));
- __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movp(FieldOperand(rax, i), rdx);
- __ movp(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movp(FieldOperand(rax, size - kPointerSize), rdx);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->value()).is(rbx));
@@ -5598,8 +5371,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ JumpIfSmi(input, false_label, false_distance);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
@@ -5625,32 +5398,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp);
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5775,8 +5522,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Condition cc = masm()->CheckSmi(rax);
DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5790,7 +5537,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
@@ -5910,7 +5657,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ Push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/x64/lithium-codegen-x64.h b/chromium/v8/src/crankshaft/x64/lithium-codegen-x64.h
index e05b310dec2..6fb918bf84d 100644
--- a/chromium/v8/src/x64/lithium-codegen-x64.h
+++ b/chromium/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
-#define V8_X64_LITHIUM_CODEGEN_X64_H_
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
-#include "src/x64/lithium-x64.h"
+#include "src/ast/scopes.h"
#include "src/base/logging.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
+#include "src/crankshaft/x64/lithium-x64.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
-#include "src/x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
@@ -26,13 +26,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -180,6 +176,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
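
Note: the new CallRuntime(Runtime::FunctionId, LInstruction*) overload above fetches the Runtime::Function descriptor and forwards its declared nargs, which is what lets other hunks in this diff drop hard-coded argument counts (e.g. Runtime::kGetPropertyNamesFast and Runtime::kPushBlockContext). The pattern, reduced to toy types:

    #include <cstdio>

    struct RuntimeFunction {
      const char* name;
      int nargs;  // declared argument count
    };

    void CallRuntime(const RuntimeFunction* f, int num_arguments) {
      std::printf("call %s with %d args\n", f->name, num_arguments);
    }

    // Derives the count from the descriptor so call sites cannot go stale.
    void CallRuntime(const RuntimeFunction* f) { CallRuntime(f, f->nargs); }

    int main() {
      RuntimeFunction push_block_context{"PushBlockContext", 2};
      CallRuntime(&push_block_context);
    }
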
@@ -215,9 +216,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
@@ -273,10 +271,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
@@ -310,13 +304,9 @@ class LCodeGen: public LCodeGenBase {
void MakeSureStackPagesMapped(int offset);
#endif
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
@@ -387,6 +377,7 @@ class LDeferredCode: public ZoneObject {
int instruction_index_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X64_LITHIUM_CODEGEN_X64_H_
+#endif // V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc b/chromium/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index 800fb3f61c7..3808c377dc1 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/chromium/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_X64
-#include "src/x64/lithium-codegen-x64.h"
-#include "src/x64/lithium-gap-resolver-x64.h"
+#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
+
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -189,10 +190,10 @@ void LGapResolver::EmitMove(int index) {
uint64_t int_val = bit_cast<uint64_t, double>(v);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
if (int_val == 0) {
- __ xorps(dst, dst);
+ __ Xorpd(dst, dst);
} else {
__ Set(kScratchRegister, int_val);
- __ movq(dst, kScratchRegister);
+ __ Movq(dst, kScratchRegister);
}
} else {
DCHECK(destination->IsStackSlot());
@@ -211,19 +212,19 @@ void LGapResolver::EmitMove(int index) {
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
- __ movaps(cgen_->ToDoubleRegister(destination), src);
+ __ Movapd(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
- __ movsd(cgen_->ToOperand(destination), src);
+ __ Movsd(cgen_->ToOperand(destination), src);
}
} else if (source->IsDoubleStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
+ __ Movsd(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
- __ movsd(xmm0, src);
- __ movsd(cgen_->ToOperand(destination), xmm0);
+ __ Movsd(xmm0, src);
+ __ Movsd(cgen_->ToOperand(destination), xmm0);
}
} else {
UNREACHABLE();
@@ -243,7 +244,9 @@ void LGapResolver::EmitSwap(int index) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchgq(dst, src);
+ __ movp(kScratchRegister, src);
+ __ movp(src, dst);
+ __ movp(dst, kScratchRegister);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
@@ -261,18 +264,18 @@ void LGapResolver::EmitSwap(int index) {
// Swap two stack slots or two double stack slots.
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
+ __ Movsd(xmm0, src);
__ movp(kScratchRegister, dst);
- __ movsd(dst, xmm0);
+ __ Movsd(dst, xmm0);
__ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, source_reg);
- __ movaps(source_reg, destination_reg);
- __ movaps(destination_reg, xmm0);
+ __ Movapd(xmm0, source_reg);
+ __ Movapd(source_reg, destination_reg);
+ __ Movapd(destination_reg, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
@@ -284,9 +287,9 @@ void LGapResolver::EmitSwap(int index) {
LOperand* other = source->IsDoubleRegister() ? destination : source;
DCHECK(other->IsDoubleStackSlot());
Operand other_operand = cgen_->ToOperand(other);
- __ movsd(xmm0, other_operand);
- __ movsd(other_operand, reg);
- __ movaps(reg, xmm0);
+ __ Movapd(xmm0, reg);
+ __ Movsd(reg, other_operand);
+ __ Movsd(other_operand, xmm0);
} else {
// No other combinations are possible.
diff --git a/chromium/v8/src/x64/lithium-gap-resolver-x64.h b/chromium/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h
index 7882da56e0a..641f0ee69f7 100644
--- a/chromium/v8/src/x64/lithium-gap-resolver-x64.h
+++ b/chromium/v8/src/crankshaft/x64/lithium-gap-resolver-x64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -44,6 +44,7 @@ class LGapResolver final BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#endif // V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/chromium/v8/src/x64/lithium-x64.cc b/chromium/v8/src/crankshaft/x64/lithium-x64.cc
index 9df3a7dabf3..3c932a24abd 100644
--- a/chromium/v8/src/x64/lithium-x64.cc
+++ b/chromium/v8/src/crankshaft/x64/lithium-x64.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/x64/lithium-x64.h"
+#include "src/crankshaft/x64/lithium-x64.h"
#include <sstream>
#if V8_TARGET_ARCH_X64
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/x64/lithium-codegen-x64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
@@ -311,13 +311,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -367,11 +360,6 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -390,12 +378,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -470,14 +452,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1022,7 +1003,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1243,14 +1226,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
@@ -1804,13 +1779,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2100,15 +2068,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2224,7 +2183,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2232,7 +2191,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2304,7 +2265,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
}
DCHECK(
@@ -2328,7 +2289,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
: UseRegisterOrConstantAtStart(instr->key());
}
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
@@ -2468,19 +2430,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* left = UseFixed(instr->left(), rdx);
@@ -2521,13 +2470,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LRegExpLiteral* result = new(zone()) LRegExpLiteral(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2630,12 +2572,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/x64/lithium-x64.h b/chromium/v8/src/crankshaft/x64/lithium-x64.h
index 61295165150..ebe1ef9e5db 100644
--- a/chromium/v8/src/x64/lithium-x64.h
+++ b/chromium/v8/src/crankshaft/x64/lithium-x64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_LITHIUM_X64_H_
-#define V8_X64_LITHIUM_X64_H_
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_X64_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -100,7 +97,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -130,7 +126,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -141,7 +136,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1348,23 +1342,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1596,11 +1573,12 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
@@ -1611,6 +1589,7 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
void PrintDataTo(StringStream* stream) override;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
ElementsKind elements_kind() const {
@@ -1660,22 +1639,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1897,25 +1860,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2167,34 +2111,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* backing_store_owner) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2203,6 +2127,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
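
Note: LStoreKeyed growing from LTemplateInstruction<0, 3, 0> to <0, 4, 0> is a type-level change: the input-operand array is sized by the template argument, so backing_store_owner has to become a fourth real input (paired with UseAny in the builder hunks) rather than a side channel, presumably so the owner of an external backing store stays live across the store. A toy model of the scheme (names illustrative, not the real class hierarchy):

    struct LOperand {};

    template <int kInputs>
    struct InstrWithInputs {
      LOperand* inputs_[kInputs];  // arity fixed at compile time
    };

    struct LStoreKeyedToy : InstrWithInputs<4> {
      LStoreKeyedToy(LOperand* object, LOperand* key, LOperand* value,
                     LOperand* backing_store_owner) {
        inputs_[0] = object;
        inputs_[1] = key;
        inputs_[2] = value;
        inputs_[3] = backing_store_owner;
      }
      LOperand* backing_store_owner() { return inputs_[3]; }
    };

    int main() {
      LOperand a, b, c, d;
      LStoreKeyedToy store(&a, &b, &c, &d);
      (void)store.backing_store_owner();
    }
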
@@ -2521,19 +2446,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2578,20 +2490,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2864,6 +2762,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X64_LITHIUM_X64_H_
+#endif // V8_CRANKSHAFT_X64_LITHIUM_X64_H_
diff --git a/chromium/v8/src/crankshaft/x87/OWNERS b/chromium/v8/src/crankshaft/x87/OWNERS
new file mode 100644
index 00000000000..dd9998b2610
--- /dev/null
+++ b/chromium/v8/src/crankshaft/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/chromium/v8/src/x87/lithium-codegen-x87.cc b/chromium/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 921259e9648..fe2baa5bb8a 100644
--- a/chromium/v8/src/x87/lithium-codegen-x87.cc
+++ b/chromium/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -4,17 +4,18 @@
#if V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
+
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
-#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/x87/frames-x87.h"
-#include "src/x87/lithium-codegen-x87.h"
namespace v8 {
namespace internal {
@@ -106,26 +107,6 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
- }
-
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ Move(edx, Immediate(kNoAlignmentPadding));
@@ -159,9 +140,8 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
- info()->AddNoFrameRange(0, masm_->pc_offset());
}
if (info()->IsOptimizing() &&
@@ -236,7 +216,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(edi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -245,7 +225,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -518,13 +498,13 @@ bool LCodeGen::GenerateSafepointTable() {
}
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
+Register LCodeGen::ToRegister(int code) const {
+ return Register::from_code(code);
}
-X87Register LCodeGen::ToX87Register(int index) const {
- return X87Register::FromAllocationIndex(index);
+X87Register LCodeGen::ToX87Register(int code) const {
+ return X87Register::from_code(code);
}
@@ -700,7 +680,7 @@ void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
DCHECK(is_mutable_);
// Assert the reg is prepared to write, but not on the virtual stack yet
DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
- stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+ stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
stack_depth_++;
}
@@ -1191,60 +1171,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -2026,37 +1952,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(eax));
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2436,7 +2331,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
@@ -2798,29 +2693,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(equal, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2900,6 +2777,15 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, equal);
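
Note: the added guards make the prototype walk bail out to the deoptimizer whenever it meets a map that needs an access check or belongs to a proxy, instead of walking through them. A conceptual sketch of the guarded loop (toy object model, with deoptimization modelled as a third result):

    #include <cstdio>

    enum class Result { kTrue, kFalse, kDeopt };

    struct Map {
      bool access_check_needed;
      bool is_proxy;
      const struct Object* prototype;  // null terminates the chain
    };
    struct Object { const Map* map; };

    Result HasInPrototypeChain(const Object* object, const Object* prototype) {
      for (const Map* map = object->map;;) {
        if (map->access_check_needed) return Result::kDeopt;  // new guard
        if (map->is_proxy) return Result::kDeopt;             // new guard
        const Object* proto = map->prototype;
        if (proto == prototype) return Result::kTrue;
        if (proto == nullptr) return Result::kFalse;
        map = proto->map;
      }
    }

    int main() {
      Map proto_map{false, false, nullptr};
      Object proto{&proto_map};
      Map obj_map{false, false, &proto};
      Object obj{&obj_map};
      std::printf("%d\n", HasInPrototypeChain(&obj, &proto) == Result::kTrue);
    }
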
@@ -2975,18 +2861,16 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ mov(esp, ebp);
__ pop(ebp);
- no_frame_start = masm_->pc_offset();
}
if (dynamic_frame_alignment_) {
Label no_padding;
@@ -2998,9 +2882,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
EmitReturn(instr, false);
- if (no_frame_start != -1) {
- info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
- }
}
@@ -3015,7 +2896,7 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
// No need to allocate this register.
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -3029,7 +2910,7 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ mov(vector_register, vector);
- FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+ FeedbackVectorSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
}
@@ -3050,24 +2931,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
}
-void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub =
- CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3496,16 +3359,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
- const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(receiver, Operand(receiver, global_offset));
- const int proxy_offset = GlobalObject::kGlobalProxyOffset;
- __ mov(receiver, FieldOperand(receiver, proxy_offset));
+ __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
__ bind(&receiver_ok);
}
@@ -3546,7 +3407,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3587,7 +3449,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3605,7 +3467,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize eax to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
// Invoke function directly.
@@ -3668,11 +3531,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(eax, instr->arity());
-
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
+ __ mov(eax, instr->arity());
+
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
@@ -3906,65 +3771,11 @@ void LCodeGen::DoMathFround(LMathFround* instr) {
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- X87Register input = ToX87Register(instr->value());
- X87Register result_reg = ToX87Register(instr->result());
- Register temp_result = ToRegister(instr->temp1());
- Register temp = ToRegister(instr->temp2());
- Label slow, done, smi, finish;
- DCHECK(result_reg.is(input));
-
- // Store input into Heap number and call runtime function kMathExpRT.
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ Move(temp_result, Immediate(0));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(temp_result, eax);
- }
- __ bind(&done);
- X87LoadForUsage(input);
- __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
-
- {
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(temp_result);
- __ CallRuntimeSaveDoubles(Runtime::kMathSqrt);
- RecordSafepointWithRegisters(instr->pointer_map(), 1,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(temp_result, eax);
- }
- X87PrepareToWrite(result_reg);
- // return value of MathExpRT is Smi or Heap Number.
- __ JumpIfSmi(temp_result, &smi);
- // Heap number(double)
- __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
- __ jmp(&finish);
- // SMI
- __ bind(&smi);
- __ SmiUntag(temp_result);
- __ push(temp_result);
- __ fild_s(MemOperand(esp, 0));
- __ pop(temp_result);
- __ bind(&finish);
- X87CommitWrite(result_reg);
+ X87Register input_reg = ToX87Register(instr->value());
+ __ X87SetFPUCW(0x027F);
+ X87Fxch(input_reg);
+ __ fsqrt();
+ __ X87SetFPUCW(0x037F);
}
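
Note: the rewritten DoMathSqrt drops the heap-number round trip through the runtime entirely: it switches the x87 control word to 0x027F (precision control = 53-bit double, all exceptions masked), runs fsqrt, and restores the default 0x037F (64-bit extended), so the result rounds the way an SSE sqrtsd would. A hedged sketch of the same dance in C++ (x86 with glibc and GCC-style inline asm only; not portable):

    #include <cstdio>
    #include <fpu_control.h>

    double SqrtInDoublePrecision(double x) {
      fpu_control_t saved, double_precision = 0x027F;
      _FPU_GETCW(saved);
      _FPU_SETCW(double_precision);          // __ X87SetFPUCW(0x027F)
      double result;
      asm("fsqrt" : "=t"(result) : "0"(x));  // __ fsqrt()
      _FPU_SETCW(saved);                     // __ X87SetFPUCW(0x037F)
      return result;
    }

    int main() {
      std::printf("%.17g\n", SqrtInDoublePrecision(2.0));
    }
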
@@ -4189,7 +4000,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4204,7 +4015,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- CallFunctionFlags flags = instr->hydrogen()->function_flags();
+ ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
@@ -4218,32 +4029,16 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ mov(vector_register, vector);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
- CallICState::CallType call_type =
- (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
-
Handle<Code> ic =
- CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
- CallFunctionStub stub(isolate(), arity, flags);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ Set(eax, arity);
+ CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->constructor()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // No cell in ebx for construct type feedback in optimized code
- __ mov(ebx, isolate()->factory()->undefined_value());
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ Move(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->constructor()).is(edi));
@@ -4267,7 +4062,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4282,17 +4077,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4430,30 +4225,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->value())
- .is(StoreGlobalViaContextDescriptor::ValueRegister()));
-
- int const slot = instr->slot_index();
- int const depth = instr->depth();
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
- isolate(), depth, instr->language_mode())
- .code();
- CallCode(stub, RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
- __ CallRuntime(is_strict(instr->language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
if (instr->index()->IsConstantOperand()) {
@@ -4925,7 +4696,8 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+ instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5805,11 +5577,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (size <= Page::kMaxRegularHeapObjectSize) {
- __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- __ jmp(deferred->entry());
- }
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
@@ -5884,58 +5653,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- Label materialized;
- // Registers will be used as follows:
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- // esi = context.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(ecx, instr->hydrogen()->literals());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated, Label::kNear);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -6018,8 +5735,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ JumpIfSmi(input, false_label, false_distance);
__ cmp(input, factory()->null_value());
__ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -6044,32 +5761,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp);
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -6200,8 +5891,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ test(eax, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -6213,7 +5904,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -6334,7 +6025,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/chromium/v8/src/x87/lithium-codegen-x87.h b/chromium/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 2da1a31461b..63463448836 100644
--- a/chromium/v8/src/x87/lithium-codegen-x87.h
+++ b/chromium/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
-#define V8_X87_LITHIUM_CODEGEN_X87_H_
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
#include <map>
-#include "src/x87/lithium-x87.h"
+#include "src/ast/scopes.h"
#include "src/base/logging.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
+#include "src/crankshaft/x87/lithium-x87.h"
#include "src/deoptimizer.h"
-#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
-#include "src/x87/lithium-gap-resolver-x87.h"
namespace v8 {
namespace internal {
@@ -28,15 +28,11 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
- osr_pc_offset_(-1),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
@@ -209,6 +205,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, argc, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
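
The overload added above derives the argument count from the runtime function's own descriptor, which is why later hunks rewrite call sites such as CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr) to drop the explicit count. A self-contained sketch of that table-driven pattern, with hypothetical stand-ins for Runtime::FunctionForId and the function table:

#include <cassert>

// Hypothetical stand-ins, not V8's real descriptors; they only show the
// shape of the lookup: the arity is recorded once, next to the function.
struct RuntimeFunction {
  const char* name;
  int nargs;  // fixed argument count for this runtime function
};

enum FunctionId { kPushBlockContext, kStringCharFromCode, kNumFunctionIds };

static const RuntimeFunction kRuntimeFunctions[kNumFunctionIds] = {
    {"PushBlockContext", 2},
    {"StringCharFromCode", 1},
};

static const RuntimeFunction* FunctionForId(FunctionId id) {
  return &kRuntimeFunctions[id];
}

int main() {
  // Call sites name only the function; the arity travels with the table.
  assert(FunctionForId(kPushBlockContext)->nargs == 2);
  assert(FunctionForId(kStringCharFromCode)->nargs == 1);
  return 0;
}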
@@ -244,9 +245,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
X87Register ToX87Register(int index) const;
@@ -306,10 +304,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -353,15 +347,11 @@ class LCodeGen: public LCodeGenBase {
void MakeSureStackPagesMapped(int offset);
#endif
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
- int osr_pc_offset_;
bool frame_is_built_;
class X87Stack : public ZoneObject {
@@ -499,6 +489,7 @@ class LDeferredCode : public ZoneObject {
LCodeGen::X87Stack x87_stack_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X87_LITHIUM_CODEGEN_X87_H_
+#endif // V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
diff --git a/chromium/v8/src/x87/lithium-gap-resolver-x87.cc b/chromium/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
index edafcb2b166..aa9183541fc 100644
--- a/chromium/v8/src/x87/lithium-gap-resolver-x87.cc
+++ b/chromium/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_X87
-#include "src/x87/lithium-codegen-x87.h"
-#include "src/x87/lithium-gap-resolver-x87.h"
+#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
+#include "src/register-configuration.h"
+
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
namespace v8 {
namespace internal {
@@ -165,10 +167,14 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
+ int skip_index = reg.is(no_reg) ? -1 : reg.code();
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
+ code != skip_index) {
+ return Register::from_code(code);
}
}
return no_reg;
@@ -178,10 +184,12 @@ Register LGapResolver::GetFreeRegisterNot(Register reg) {
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] != 0) return false;
+ if (destination_uses_[code] != 0) return false;
}
return true;
}
@@ -204,7 +212,7 @@ void LGapResolver::Verify() {
void LGapResolver::Finish() {
if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
moves_.Rewind(0);
@@ -213,7 +221,7 @@ void LGapResolver::Finish() {
void LGapResolver::EnsureRestored(LOperand* operand) {
if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
+ __ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
}
@@ -222,7 +230,7 @@ void LGapResolver::EnsureRestored(LOperand* operand) {
Register LGapResolver::EnsureTempRegister() {
// 1. We may have already spilled to create a temp register.
if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
+ return Register::from_code(spilled_register_);
}
// 2. We may have a free register that we can use without spilling.
@@ -231,19 +239,22 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
+ Register scratch = Register::from_code(code);
__ push(scratch);
- spilled_register_ = i;
+ spilled_register_ = code;
return scratch;
}
}
// 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- Register scratch = Register::FromAllocationIndex(0);
+ spilled_register_ = config->GetAllocatableGeneralCode(0);
+ Register scratch = Register::from_code(spilled_register_);
__ push(scratch);
- spilled_register_ = 0;
return scratch;
}
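
The recurring rewrite in this file swaps dense allocation indices (0..N-1) for sparse architecture register codes: per-register arrays are now sized by Register::kNumRegisters and indexed by code, and iteration walks the allocatable set through RegisterConfiguration. A small self-contained sketch of the pattern, with a hypothetical config type and an illustrative, not ia32-accurate, code set:

#include <cstdio>

// Hypothetical mirror of RegisterConfiguration: the allocatable set is a
// sparse list of register codes rather than a dense index space.
struct Config {
  int num_allocatable;
  const int* codes;
  int num_allocatable_general_registers() const { return num_allocatable; }
  int GetAllocatableGeneralCode(int i) const { return codes[i]; }
};

int main() {
  static const int kCodes[] = {0, 1, 2, 3, 7};  // sparse: code 7 is reachable
  Config config{5, kCodes};
  int source_uses[8] = {0};  // sized by kNumRegisters, indexed by code
  for (int i = 0; i < config.num_allocatable_general_registers(); ++i) {
    int code = config.GetAllocatableGeneralCode(i);
    std::printf("register code %d has %d source uses\n", code,
                source_uses[code]);
  }
  return 0;
}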
diff --git a/chromium/v8/src/x87/lithium-gap-resolver-x87.h b/chromium/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h
index cdd26b87763..6b6e2e64b6b 100644
--- a/chromium/v8/src/x87/lithium-gap-resolver-x87.h
+++ b/chromium/v8/src/crankshaft/x87/lithium-gap-resolver-x87.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
-#define V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
-#include "src/lithium.h"
+#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
@@ -72,14 +72,15 @@ class LGapResolver final BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kMaxNumAllocatableRegisters];
- int destination_uses_[Register::kMaxNumAllocatableRegisters];
+ int source_uses_[Register::kNumRegisters];
+ int destination_uses_[DoubleRegister::kMaxNumRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#endif // V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
diff --git a/chromium/v8/src/x87/lithium-x87.cc b/chromium/v8/src/crankshaft/x87/lithium-x87.cc
index cb429b2f21a..b422e1235b4 100644
--- a/chromium/v8/src/x87/lithium-x87.cc
+++ b/chromium/v8/src/crankshaft/x87/lithium-x87.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/x87/lithium-x87.h"
+#include "src/crankshaft/x87/lithium-x87.h"
#include <sstream>
#if V8_TARGET_ARCH_X87
-#include "src/hydrogen-osr.h"
-#include "src/lithium-inl.h"
-#include "src/x87/lithium-codegen-x87.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
namespace v8 {
namespace internal {
@@ -330,15 +330,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
context()->PrintTo(stream);
@@ -383,11 +374,6 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
}
-void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d", depth(), slot_index());
-}
-
-
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -406,12 +392,6 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
- stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
- value()->PrintTo(stream);
-}
-
-
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -493,14 +473,13 @@ LPlatformChunk* LChunkBuilder::Build() {
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
+ return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
}
LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
- return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- X87Register::ToAllocationIndex(reg));
+ return new (zone())
+ LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
}
@@ -1059,7 +1038,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
LOperand* temp = TempRegister();
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ return AssignEnvironment(result);
}
@@ -1274,14 +1255,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
@@ -1818,14 +1791,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2135,15 +2100,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
- HLoadGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- DCHECK(instr->slot_index() > 0);
- LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2222,7 +2178,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
@@ -2230,7 +2186,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
(IsDoubleOrFloatElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ result = DefineAsRegister(
+ new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
}
bool needs_environment;
@@ -2301,7 +2259,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
? NULL
: UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(object, key, val);
+ return new (zone()) LStoreKeyed(object, key, val, nullptr);
} else {
DCHECK(instr->value()->representation().IsSmiOrTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2316,7 +2274,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseRegisterOrConstantAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
}
- return new(zone()) LStoreKeyed(obj, key, val);
+ return new (zone()) LStoreKeyed(obj, key, val, nullptr);
}
}
@@ -2335,7 +2293,8 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+ return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
}
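
The extra backing_store_owner operand threaded through the keyed loads and stores above (note UseAny: any location will do, no register is required) emits no code of its own. It records one more use of the object that owns the external backing store, so the register allocator keeps that object, and with it the raw pointer, alive across the access. A hypothetical reduction of the hazard it prevents (not V8 code):

// Once the raw external pointer is loaded, nothing else references the
// owner; without a later use, its live range could end before the store,
// and a GC in between could free the backing store.
struct ArrayBufferLike {
  char* backing_store;  // raw external memory, released with its owner
};

void StoreKeyed(ArrayBufferLike* owner, int key, char value) {
  char* store = owner->backing_store;  // raw pointer escapes the owner
  // ...an extra recorded use of `owner` keeps it alive past this point...
  store[key] = value;  // safe only while the owner is still alive
}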
@@ -2482,19 +2441,6 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
- HStoreGlobalViaContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(),
- StoreGlobalViaContextDescriptor::ValueRegister());
- DCHECK(instr->slot_index() > 0);
-
- LStoreGlobalViaContext* result =
- new (zone()) LStoreGlobalViaContext(context, value);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
@@ -2535,13 +2481,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2649,12 +2588,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/chromium/v8/src/x87/lithium-x87.h b/chromium/v8/src/crankshaft/x87/lithium-x87.h
index cc1a43fbaf5..e033902617c 100644
--- a/chromium/v8/src/x87/lithium-x87.h
+++ b/chromium/v8/src/crankshaft/x87/lithium-x87.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X87_LITHIUM_X87_H_
-#define V8_X87_LITHIUM_X87_H_
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_X87_H_
-#include "src/hydrogen.h"
-#include "src/lithium.h"
-#include "src/lithium-allocator.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
@@ -37,7 +37,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -66,7 +65,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -94,7 +92,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -104,7 +101,6 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
- V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -135,7 +131,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -146,7 +141,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1127,19 +1121,6 @@ class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1375,27 +1356,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index)
- : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- Smi* index() const { return index_; }
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1616,14 +1576,16 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
inputs_[0] = elements;
inputs_[1] = key;
+ inputs_[2] = backing_store_owner;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* backing_store_owner() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -1695,22 +1657,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
};
-class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- LOperand* context() { return inputs_[0]; }
-
- int depth() const { return hydrogen()->depth(); }
- int slot_index() const { return hydrogen()->slot_index(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1932,25 +1878,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2199,34 +2126,14 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
-class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
public:
- LStoreGlobalViaContext(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
- "store-global-via-context")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
-
- void PrintDataTo(StringStream* stream) override;
-
- int depth() { return hydrogen()->depth(); }
- int slot_index() { return hydrogen()->slot_index(); }
- LanguageMode language_mode() { return hydrogen()->language_mode(); }
-};
-
-
-class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
+ LOperand* backing_store_owner) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
+ inputs_[3] = backing_store_owner;
}
bool is_fixed_typed_array() const {
@@ -2235,6 +2142,7 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* backing_store_owner() { return inputs_[3]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
@@ -2570,19 +2478,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2897,6 +2792,7 @@ class LChunkBuilder final : public LChunkBuilderBase {
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_X87_LITHIUM_X87_H_
+#endif // V8_CRANKSHAFT_X87_LITHIUM_X87_H_
diff --git a/chromium/v8/src/d8.cc b/chromium/v8/src/d8.cc
index b73ab0bd6a6..c58c172640e 100644
--- a/chromium/v8/src/d8.cc
+++ b/chromium/v8/src/d8.cc
@@ -101,6 +101,70 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
};
+#ifndef V8_SHARED
+// Predictable v8::Platform implementation. All background and foreground
+// tasks are run immediately; delayed tasks are not executed at all.
+class PredictablePlatform : public Platform {
+ public:
+ PredictablePlatform() {}
+
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {
+ task->Run();
+ delete task;
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ task->Run();
+ delete task;
+ }
+
+ void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ delete task;
+ }
+
+ void CallIdleOnForegroundThread(v8::Isolate* isolate,
+ IdleTask* task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
+
+ double MonotonicallyIncreasingTime() override {
+ return synthetic_time_in_sec_ += 0.00001;
+ }
+
+ uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
+ const char* name, uint64_t id, uint64_t bind_id,
+ int numArgs, const char** argNames,
+ const uint8_t* argTypes, const uint64_t* argValues,
+ unsigned int flags) override {
+ return 0;
+ }
+
+ void UpdateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name, uint64_t handle) override {}
+
+ const uint8_t* GetCategoryGroupEnabled(const char* name) override {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ const char* GetCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) override {
+ static const char* dummy = "dummy";
+ return dummy;
+ }
+
+ private:
+ double synthetic_time_in_sec_ = 0.0;
+
+ DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
+};
+#endif // !V8_SHARED
+
+
v8::Platform* g_platform = NULL;
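
The class above makes --verify_predictable runs deterministic: background and foreground tasks execute inline on the posting thread, delayed tasks are dropped, and the clock is a counter that advances only when queried. A minimal sketch of the same idea with hypothetical types, not the v8::Platform interface:

#include <memory>

struct Task {
  virtual ~Task() = default;
  virtual void Run() = 0;
};

class PredictableScheduler {
 public:
  // No queue and no worker threads: run the task right here, right now.
  void Post(std::unique_ptr<Task> task) { task->Run(); }
  // Delayed tasks never fire; the task is destroyed on return.
  void PostDelayed(std::unique_ptr<Task> /*task*/, double /*delay_s*/) {}
  // Synthetic monotonic clock, as in MonotonicallyIncreasingTime() above:
  // repeated runs observe identical timestamps.
  double Now() { return time_in_sec_ += 0.00001; }

 private:
  double time_in_sec_ = 0.0;
};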
@@ -425,13 +489,11 @@ int PerIsolateData::RealmIndexOrThrow(
#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
-// When FLAG_verify_predictable mode is enabled it returns current value
-// of Heap::allocations_count().
+// When FLAG_verify_predictable mode is enabled it returns the result of
+// v8::Platform::MonotonicallyIncreasingTime().
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i::FLAG_verify_predictable) {
- Isolate* v8_isolate = args.GetIsolate();
- i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
- args.GetReturnValue().Set(heap->synthetic_time());
+ args.GetReturnValue().Set(g_platform->MonotonicallyIncreasingTime());
} else {
base::TimeDelta delta =
base::TimeTicks::HighResolutionNow() - kInitialTicks;
@@ -594,9 +656,13 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Explicitly catch potential exceptions in toString().
v8::TryCatch try_catch(args.GetIsolate());
+ Local<Value> arg = args[i];
Local<String> str_obj;
- if (!args[i]
- ->ToString(args.GetIsolate()->GetCurrentContext())
+
+ if (arg->IsSymbol()) {
+ arg = Local<Symbol>::Cast(arg)->Name();
+ }
+ if (!arg->ToString(args.GetIsolate()->GetCurrentContext())
.ToLocal(&str_obj)) {
try_catch.ReThrow();
return;
@@ -1046,7 +1112,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
i::JSFunction::cast(*compiled_script)->shared()->script()))
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Script::TYPE_NATIVE);
+ script_object->set_type(i::Script::TYPE_EXTENSION);
}
#endif // !V8_SHARED
@@ -2016,7 +2082,13 @@ void Shell::CollectGarbage(Isolate* isolate) {
void Shell::EmptyMessageQueues(Isolate* isolate) {
- while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+#ifndef V8_SHARED
+ if (!i::FLAG_verify_predictable) {
+#endif
+ while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+#ifndef V8_SHARED
+ }
+#endif
}
@@ -2358,7 +2430,14 @@ int Shell::Main(int argc, char* argv[]) {
#endif // defined(_WIN32) || defined(_WIN64)
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU(options.icu_data_file);
+#ifndef V8_SHARED
+ g_platform = i::FLAG_verify_predictable
+ ? new PredictablePlatform()
+ : v8::platform::CreateDefaultPlatform();
+#else
g_platform = v8::platform::CreateDefaultPlatform();
+#endif // !V8_SHARED
+
v8::V8::InitializePlatform(g_platform);
v8::V8::Initialize();
if (options.natives_blob || options.snapshot_blob) {
@@ -2426,7 +2505,7 @@ int Shell::Main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv, last_run);
}
printf("======== Full Deoptimization =======\n");
- Testing::DeoptimizeAll();
+ Testing::DeoptimizeAll(isolate);
#if !defined(V8_SHARED)
} else if (i::FLAG_stress_runs > 0) {
options.stress_runs = i::FLAG_stress_runs;
diff --git a/chromium/v8/src/d8.gyp b/chromium/v8/src/d8.gyp
index 104bc940a60..f249a78856a 100644
--- a/chromium/v8/src/d8.gyp
+++ b/chromium/v8/src/d8.gyp
@@ -50,10 +50,6 @@
'd8.h',
'd8.cc',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ '<(v8_toolset_for_d8)', ],
@@ -80,6 +76,13 @@
'd8_js2c',
],
}],
+ [ 'v8_postmortem_support=="true"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-Wl,-force_load,<(PRODUCT_DIR)/libv8_base.a'
+ ],
+ },
+ }],
],
}],
['v8_enable_vtunejit==1', {
@@ -98,9 +101,6 @@
'<(icu_gyp_path):icudata',
],
}],
- ['v8_wasm!=0', {
- 'include_dirs': ['../third_party/wasm'],
- }],
],
},
{
@@ -109,7 +109,7 @@
'variables': {
'js_files': [
'd8.js',
- 'macros.py',
+ 'js/macros.py',
],
},
'conditions': [
diff --git a/chromium/v8/src/d8.js b/chromium/v8/src/d8.js
index 8d55c788e2b..27a0bc39cd0 100644
--- a/chromium/v8/src/d8.js
+++ b/chromium/v8/src/d8.js
@@ -8,11 +8,28 @@
// Used by the d8 shell to output results.
var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects
+// Hacky solution to circumvent forcing --allow-natives-syntax for d8
+function isProxy(o) { return false };
+function JSProxyGetTarget(proxy) { };
+function JSProxyGetHandler(proxy) { };
+
+try {
+ isProxy = Function(['object'], 'return %_IsJSProxy(object)');
+ JSProxyGetTarget = Function(['proxy'],
+ 'return %JSProxyGetTarget(proxy)');
+ JSProxyGetHandler = Function(['proxy'],
+ 'return %JSProxyGetHandler(proxy)');
+} catch(e) {};
+
+
function Stringify(x, depth) {
if (depth === undefined)
depth = stringifyDepthLimit;
else if (depth === 0)
- return "*";
+ return "...";
+ if (isProxy(x)) {
+ return StringifyProxy(x, depth);
+ }
switch (typeof x) {
case "undefined":
return "undefined";
@@ -63,3 +80,12 @@ function Stringify(x, depth) {
return "[crazy non-standard value]";
}
}
+
+function StringifyProxy(proxy, depth) {
+ var proxy_type = typeof proxy;
+ var info_object = {
+ target: JSProxyGetTarget(proxy),
+ handler: JSProxyGetHandler(proxy)
+ }
+ return '[' + proxy_type + ' Proxy ' + Stringify(info_object, depth-1) + ']';
+}
diff --git a/chromium/v8/src/date.cc b/chromium/v8/src/date.cc
index 3106b1622b3..f98ad64f1f8 100644
--- a/chromium/v8/src/date.cc
+++ b/chromium/v8/src/date.cc
@@ -71,7 +71,7 @@ void DateCache::YearMonthDayFromDays(
*year = 400 * (days / kDaysIn400Years) - kYearsOffset;
days %= kDaysIn400Years;
- DCHECK(DaysFromYearMonth(*year, 0) + days == save_days);
+ DCHECK_EQ(save_days, DaysFromYearMonth(*year, 0) + days);
days--;
int yd1 = days / kDaysIn100Years;
@@ -175,6 +175,20 @@ int DateCache::DaysFromYearMonth(int year, int month) {
}
+void DateCache::BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
+ int* weekday, int* hour, int* min, int* sec,
+ int* ms) {
+ int const days = DaysFromTime(time_ms);
+ int const time_in_day_ms = TimeInDay(time_ms, days);
+ YearMonthDayFromDays(days, year, month, day);
+ *weekday = Weekday(days);
+ *hour = time_in_day_ms / (60 * 60 * 1000);
+ *min = (time_in_day_ms / (60 * 1000)) % 60;
+ *sec = (time_in_day_ms / 1000) % 60;
+ *ms = time_in_day_ms % 1000;
+}
+
+
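A worked instance of the time-of-day split performed by BreakDownTime above, as a standalone check (illustrative, not part of the diff): 45,296,789 ms into a day is 12 h, 34 min, 56 s, 789 ms.

#include <cassert>

int main() {
  int time_in_day_ms = 45296789;
  int hour = time_in_day_ms / (60 * 60 * 1000);   // 45296789 / 3600000 = 12
  int min = (time_in_day_ms / (60 * 1000)) % 60;  // 754 % 60 = 34
  int sec = (time_in_day_ms / 1000) % 60;         // 45296 % 60 = 56
  int ms = time_in_day_ms % 1000;                 // 789
  assert(hour == 12 && min == 34 && sec == 56 && ms == 789);
  return 0;
}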
void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
if (after_->offset_ms == offset_ms &&
after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
diff --git a/chromium/v8/src/date.h b/chromium/v8/src/date.h
index 813d3126ede..0a3e91beb26 100644
--- a/chromium/v8/src/date.h
+++ b/chromium/v8/src/date.h
@@ -18,6 +18,7 @@ class DateCache {
static const int kMsPerMin = 60 * 1000;
static const int kSecPerDay = 24 * 60 * 60;
static const int64_t kMsPerDay = kSecPerDay * 1000;
+ static const int64_t kMsPerMonth = kMsPerDay * 30;
// The largest time that can be passed to OS date-time library functions.
static const int kMaxEpochTimeInSec = kMaxInt;
@@ -30,8 +31,7 @@ class DateCache {
// Conservative upper bound on time that can be stored in JSDate
// before UTC conversion.
- static const int64_t kMaxTimeBeforeUTCInMs =
- kMaxTimeInMs + 10 * kMsPerDay;
+ static const int64_t kMaxTimeBeforeUTCInMs = kMaxTimeInMs + kMsPerMonth;
// Sentinel that denotes an invalid local offset.
static const int kInvalidLocalOffsetInMs = kMaxInt;
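
The widened slack above replaces 10 * kMsPerDay with a 30-day kMsPerMonth; note that the month constant no longer fits in 32 bits, which is why it is declared int64_t. A quick standalone check of the arithmetic (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const int kSecPerDay = 24 * 60 * 60;
  const int64_t kMsPerDay = kSecPerDay * 1000;  // 86,400,000
  const int64_t kMsPerMonth = kMsPerDay * 30;   // 2,592,000,000
  assert(kMsPerMonth > INT32_MAX);              // exceeds 2,147,483,647
  return 0;
}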
@@ -190,6 +190,10 @@ class DateCache {
// the first day of the given month in the given year.
int DaysFromYearMonth(int year, int month);
+ // Breaks down the time value.
+ void BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
+ int* weekday, int* hour, int* min, int* sec, int* ms);
+
// Cache stamp is used for invalidating caches in JSDate.
// We increment the stamp each time when the timezone information changes.
// JSDate objects perform stamp check and invalidate their caches if
@@ -276,6 +280,7 @@ class DateCache {
base::TimezoneCache* tz_cache_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/chromium/v8/src/date.js b/chromium/v8/src/date.js
deleted file mode 100644
index d2d59152ef9..00000000000
--- a/chromium/v8/src/date.js
+++ /dev/null
@@ -1,885 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-var $createDate;
-
-// -------------------------------------------------------------------
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalDate = global.Date;
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
-var IsFinite;
-var MathAbs;
-var MathFloor;
-var ToNumber;
-
-utils.Import(function(from) {
- IsFinite = from.IsFinite;
- MathAbs = from.MathAbs;
- MathFloor = from.MathFloor;
- ToNumber = from.ToNumber;
-});
-
-// -------------------------------------------------------------------
-
-// This file contains date support implemented in JavaScript.
-
-var timezone_cache_time = NAN;
-var timezone_cache_timezone;
-
-function LocalTimezone(t) {
- if (NUMBER_IS_NAN(t)) return "";
- CheckDateCacheCurrent();
- if (t == timezone_cache_time) {
- return timezone_cache_timezone;
- }
- var timezone = %DateLocalTimezone(t);
- timezone_cache_time = t;
- timezone_cache_timezone = timezone;
- return timezone;
-}
-
-
-function UTC(time) {
- if (NUMBER_IS_NAN(time)) return time;
- // local_time_offset is needed before the call to DaylightSavingsOffset,
- // so it may be uninitialized.
- return %DateToUTC(time);
-}
-
-
-// ECMA 262 - 15.9.1.11
-function MakeTime(hour, min, sec, ms) {
- if (!IsFinite(hour)) return NAN;
- if (!IsFinite(min)) return NAN;
- if (!IsFinite(sec)) return NAN;
- if (!IsFinite(ms)) return NAN;
- return TO_INTEGER(hour) * msPerHour
- + TO_INTEGER(min) * msPerMinute
- + TO_INTEGER(sec) * msPerSecond
- + TO_INTEGER(ms);
-}
-
-
-// ECMA 262 - 15.9.1.12
-function TimeInYear(year) {
- return DaysInYear(year) * msPerDay;
-}
-
-
-// Compute number of days given a year, month, date.
-// Note that month and date can lie outside the normal range.
-// For example:
-// MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
-// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
-// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
-function MakeDay(year, month, date) {
- if (!IsFinite(year) || !IsFinite(month) || !IsFinite(date)) return NAN;
-
- // Convert to integer and map -0 to 0.
- year = TO_INTEGER_MAP_MINUS_ZERO(year);
- month = TO_INTEGER_MAP_MINUS_ZERO(month);
- date = TO_INTEGER_MAP_MINUS_ZERO(date);
-
- if (year < kMinYear || year > kMaxYear ||
- month < kMinMonth || month > kMaxMonth) {
- return NAN;
- }
-
- // Now we rely on year and month being SMIs.
- return %DateMakeDay(year | 0, month | 0) + date - 1;
-}
-
-
-// ECMA 262 - 15.9.1.13
-function MakeDate(day, time) {
- var time = day * msPerDay + time;
- // Some of our runtime funtions for computing UTC(time) rely on
- // times not being significantly larger than MAX_TIME_MS. If there
- // is no way that the time can be within range even after UTC
- // conversion we return NaN immediately instead of relying on
- // TimeClip to do it.
- if (MathAbs(time) > MAX_TIME_BEFORE_UTC) return NAN;
- return time;
-}
-
-
-// ECMA 262 - 15.9.1.14
-function TimeClip(time) {
- if (!IsFinite(time)) return NAN;
- if (MathAbs(time) > MAX_TIME_MS) return NAN;
- return TO_INTEGER(time);
-}
-
-
-// The Date cache is used to limit the cost of parsing the same Date
-// strings over and over again.
-var Date_cache = {
- // Cached time value.
- time: 0,
- // String input for which the cached time is valid.
- string: null
-};
-
-
-function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
- if (!%_IsConstructCall()) {
- // ECMA 262 - 15.9.2
- return %_CallFunction(new GlobalDate(), DateToString);
- }
-
- // ECMA 262 - 15.9.3
- var argc = %_ArgumentsLength();
- var value;
- if (argc == 0) {
- value = %DateCurrentTime();
- SET_UTC_DATE_VALUE(this, value);
- } else if (argc == 1) {
- if (IS_NUMBER(year)) {
- value = year;
-
- } else if (IS_STRING(year)) {
- // Probe the Date cache. If we already have a time value for the
- // given time, we re-use that instead of parsing the string again.
- CheckDateCacheCurrent();
- var cache = Date_cache;
- if (cache.string === year) {
- value = cache.time;
- } else {
- value = DateParse(year);
- if (!NUMBER_IS_NAN(value)) {
- cache.time = value;
- cache.string = year;
- }
- }
-
- } else if (IS_DATE(year)) {
- value = UTC_DATE_VALUE(year);
-
- } else {
- var time = TO_PRIMITIVE(year);
- value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
- }
- SET_UTC_DATE_VALUE(this, value);
- } else {
- year = ToNumber(year);
- month = ToNumber(month);
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) &&
- 0 <= TO_INTEGER(year) &&
- TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- value = MakeDate(day, time);
- SET_LOCAL_DATE_VALUE(this, value);
- }
-}
-
-
-var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
-
-
-function TwoDigitString(value) {
- return value < 10 ? "0" + value : "" + value;
-}
-
-
-function DateString(date) {
- CHECK_DATE(date);
- return WeekDays[LOCAL_WEEKDAY(date)] + ' '
- + Months[LOCAL_MONTH(date)] + ' '
- + TwoDigitString(LOCAL_DAY(date)) + ' '
- + LOCAL_YEAR(date);
-}
-
-
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
- 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
- 'July', 'August', 'September', 'October', 'November', 'December'];
-
-
-function LongDateString(date) {
- CHECK_DATE(date);
- return LongWeekDays[LOCAL_WEEKDAY(date)] + ', '
- + LongMonths[LOCAL_MONTH(date)] + ' '
- + TwoDigitString(LOCAL_DAY(date)) + ', '
- + LOCAL_YEAR(date);
-}
-
-
-function TimeString(date) {
- CHECK_DATE(date);
- return TwoDigitString(LOCAL_HOUR(date)) + ':'
- + TwoDigitString(LOCAL_MIN(date)) + ':'
- + TwoDigitString(LOCAL_SEC(date));
-}
-
-
-function TimeStringUTC(date) {
- CHECK_DATE(date);
- return TwoDigitString(UTC_HOUR(date)) + ':'
- + TwoDigitString(UTC_MIN(date)) + ':'
- + TwoDigitString(UTC_SEC(date));
-}
-
-
-function LocalTimezoneString(date) {
- CHECK_DATE(date);
- var timezone = LocalTimezone(UTC_DATE_VALUE(date));
-
- var timezoneOffset = -TIMEZONE_OFFSET(date);
- var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = MathFloor((sign * timezoneOffset)/60);
- var min = MathFloor((sign * timezoneOffset)%60);
- var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
- TwoDigitString(hours) + TwoDigitString(min);
- return gmt + ' (' + timezone + ')';
-}
-
-
-function DatePrintString(date) {
- CHECK_DATE(date);
- return DateString(date) + ' ' + TimeString(date);
-}
-
-// -------------------------------------------------------------------
-
-// Reused output buffer. Used when parsing date strings.
-var parse_buffer = new InternalArray(8);
-
-// ECMA 262 - 15.9.4.2
-function DateParse(string) {
- var arr = %DateParseString(string, parse_buffer);
- if (IS_NULL(arr)) return NAN;
-
- var day = MakeDay(arr[0], arr[1], arr[2]);
- var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
- var date = MakeDate(day, time);
-
- if (IS_NULL(arr[7])) {
- return TimeClip(UTC(date));
- } else {
- return TimeClip(date - arr[7] * 1000);
- }
-}
-
-
-// ECMA 262 - 15.9.4.3
-function DateUTC(year, month, date, hours, minutes, seconds, ms) {
- year = ToNumber(year);
- month = ToNumber(month);
- var argc = %_ArgumentsLength();
- date = argc > 2 ? ToNumber(date) : 1;
- hours = argc > 3 ? ToNumber(hours) : 0;
- minutes = argc > 4 ? ToNumber(minutes) : 0;
- seconds = argc > 5 ? ToNumber(seconds) : 0;
- ms = argc > 6 ? ToNumber(ms) : 0;
- year = (!NUMBER_IS_NAN(year) &&
- 0 <= TO_INTEGER(year) &&
- TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- return TimeClip(MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.4.4
-function DateNow() {
- return %DateCurrentTime();
-}
-
-
-// ECMA 262 - 15.9.5.2
-function DateToString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this)
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(this)
- return DatePrintString(this) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.3
-function DateToDateString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return DateString(this);
-}
-
-
-// ECMA 262 - 15.9.5.4
-function DateToTimeString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(this);
- return TimeString(this) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.5
-function DateToLocaleString() {
- CHECK_DATE(this);
- return %_CallFunction(this, DateToString);
-}
-
-
-// ECMA 262 - 15.9.5.6
-function DateToLocaleDateString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return LongDateString(this);
-}
-
-
-// ECMA 262 - 15.9.5.7
-function DateToLocaleTimeString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return TimeString(this);
-}
-
-
-// ECMA 262 - 15.9.5.8
-function DateValueOf() {
- CHECK_DATE(this);
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.9
-function DateGetTime() {
- CHECK_DATE(this);
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.10
-function DateGetFullYear() {
- CHECK_DATE(this);
- return LOCAL_YEAR(this);
-}
-
-
-// ECMA 262 - 15.9.5.11
-function DateGetUTCFullYear() {
- CHECK_DATE(this);
- return UTC_YEAR(this);
-}
-
-
-// ECMA 262 - 15.9.5.12
-function DateGetMonth() {
- CHECK_DATE(this);
- return LOCAL_MONTH(this);
-}
-
-
-// ECMA 262 - 15.9.5.13
-function DateGetUTCMonth() {
- CHECK_DATE(this);
- return UTC_MONTH(this);
-}
-
-
-// ECMA 262 - 15.9.5.14
-function DateGetDate() {
- CHECK_DATE(this);
- return LOCAL_DAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.15
-function DateGetUTCDate() {
- CHECK_DATE(this);
- return UTC_DAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.16
-function DateGetDay() {
- CHECK_DATE(this);
- return LOCAL_WEEKDAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.17
-function DateGetUTCDay() {
- CHECK_DATE(this);
- return UTC_WEEKDAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.18
-function DateGetHours() {
- CHECK_DATE(this);
- return LOCAL_HOUR(this);
-}
-
-
-// ECMA 262 - 15.9.5.19
-function DateGetUTCHours() {
- CHECK_DATE(this);
- return UTC_HOUR(this);
-}
-
-
-// ECMA 262 - 15.9.5.20
-function DateGetMinutes() {
- CHECK_DATE(this);
- return LOCAL_MIN(this);
-}
-
-
-// ECMA 262 - 15.9.5.21
-function DateGetUTCMinutes() {
- CHECK_DATE(this);
- return UTC_MIN(this);
-}
-
-
-// ECMA 262 - 15.9.5.22
-function DateGetSeconds() {
- CHECK_DATE(this);
- return LOCAL_SEC(this);
-}
-
-
-// ECMA 262 - 15.9.5.23
-function DateGetUTCSeconds() {
- CHECK_DATE(this);
- return UTC_SEC(this)
-}
-
-
-// ECMA 262 - 15.9.5.24
-function DateGetMilliseconds() {
- CHECK_DATE(this);
- return LOCAL_MS(this);
-}
-
-
-// ECMA 262 - 15.9.5.25
-function DateGetUTCMilliseconds() {
- CHECK_DATE(this);
- return UTC_MS(this);
-}
-
-
-// ECMA 262 - 15.9.5.26
-function DateGetTimezoneOffset() {
- CHECK_DATE(this);
- return TIMEZONE_OFFSET(this);
-}
-
-
-// ECMA 262 - 15.9.5.27
-function DateSetTime(ms) {
- CHECK_DATE(this);
- SET_UTC_DATE_VALUE(this, ToNumber(ms));
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.28
-function DateSetMilliseconds(ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- ms = ToNumber(ms);
- var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.29
-function DateSetUTCMilliseconds(ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- ms = ToNumber(ms);
- var time = MakeTime(UTC_HOUR(this),
- UTC_MIN(this),
- UTC_SEC(this),
- ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.30
-function DateSetSeconds(sec, ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
- var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.31
-function DateSetUTCSeconds(sec, ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- sec = ToNumber(sec);
- ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
- var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.33
-function DateSetMinutes(min, sec, ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- min = ToNumber(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? LOCAL_SEC(this) : ToNumber(sec);
- ms = argc < 3 ? LOCAL_MS(this) : ToNumber(ms);
- var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCMinutes(min, sec, ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- min = ToNumber(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? UTC_SEC(this) : ToNumber(sec);
- ms = argc < 3 ? UTC_MS(this) : ToNumber(ms);
- var time = MakeTime(UTC_HOUR(this), min, sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.35
-function DateSetHours(hour, min, sec, ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- hour = ToNumber(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? LOCAL_MIN(this) : ToNumber(min);
- sec = argc < 3 ? LOCAL_SEC(this) : ToNumber(sec);
- ms = argc < 4 ? LOCAL_MS(this) : ToNumber(ms);
- var time = MakeTime(hour, min, sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCHours(hour, min, sec, ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- hour = ToNumber(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? UTC_MIN(this) : ToNumber(min);
- sec = argc < 3 ? UTC_SEC(this) : ToNumber(sec);
- ms = argc < 4 ? UTC_MS(this) : ToNumber(ms);
- var time = MakeTime(hour, min, sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.36
-function DateSetDate(date) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- date = ToNumber(date);
- var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.37
-function DateSetUTCDate(date) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- date = ToNumber(date);
- var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.38
-function DateSetMonth(month, date) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
- var day = MakeDay(LOCAL_YEAR(this), month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.39
-function DateSetUTCMonth(month, date) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- month = ToNumber(month);
- date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
- var day = MakeDay(UTC_YEAR(this), month, date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.40
-function DateSetFullYear(year, month, date) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- year = ToNumber(year);
- var argc = %_ArgumentsLength();
- var time;
- if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : ToNumber(month);
- date = argc < 3 ? 1 : ToNumber(date);
- time = 0;
- } else {
- month = argc < 2 ? LOCAL_MONTH(this) : ToNumber(month);
- date = argc < 3 ? LOCAL_DAY(this) : ToNumber(date);
- time = LOCAL_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.5.41
-function DateSetUTCFullYear(year, month, date) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- year = ToNumber(year);
- var argc = %_ArgumentsLength();
- var time;
- if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : ToNumber(month);
- date = argc < 3 ? 1 : ToNumber(date);
- time = 0;
- } else {
- month = argc < 2 ? UTC_MONTH(this) : ToNumber(month);
- date = argc < 3 ? UTC_DAY(this) : ToNumber(date);
- time = UTC_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, time));
-}
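When the date holds a NaN time value, the branch above defaults the month to 0, the day to 1 and the time to midnight, so setFullYear revives an invalid date. A small sketch of that path:

    var d = new Date(NaN);
    d.setFullYear(2012);
    d.getMonth();   // 0 (January)
    d.getDate();    // 1
    d.getHours();   // 0 (local midnight)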
-
-
-// ECMA 262 - 15.9.5.42
-function DateToUTCString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
- return WeekDays[UTC_WEEKDAY(this)] + ', '
- + TwoDigitString(UTC_DAY(this)) + ' '
- + Months[UTC_MONTH(this)] + ' '
- + UTC_YEAR(this) + ' '
- + TimeStringUTC(this) + ' GMT';
-}
-
-
-// ECMA 262 - B.2.4
-function DateGetYear() {
- CHECK_DATE(this);
- return LOCAL_YEAR(this) - 1900;
-}
-
-
-// ECMA 262 - B.2.5
-function DateSetYear(year) {
- CHECK_DATE(this);
- year = ToNumber(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN);
- year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
- var t = LOCAL_DATE_VALUE(this);
- var month, date, time;
- if (NUMBER_IS_NAN(t)) {
- month = 0;
- date = 1;
- time = 0;
- } else {
- month = LOCAL_MONTH(this);
- date = LOCAL_DAY(this);
- time = LOCAL_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
-}
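The Annex B two-digit-year mapping above is easy to demonstrate; a quick sketch:

    var d = new Date(2000, 5, 15);
    d.setYear(95);     // 0..99 maps to 1900 + year
    d.getFullYear();   // 1995
    d.setYear(2015);   // anything else is used as-is
    d.getFullYear();   // 2015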
-
-
-// ECMA 262 - B.2.6
-//
-// Notice that this does not follow ECMA 262 completely. ECMA 262
-// says that toGMTString should be the same Function object as
-// toUTCString. JSC does not do this, so for compatibility we do not
-// do that either. Instead, we create a new function whose name
-// property will return toGMTString.
-function DateToGMTString() {
- return %_CallFunction(this, DateToUTCString);
-}
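A sketch of the compatibility behavior described above: the two methods produce identical output, but in this implementation they are distinct function objects even though the spec says they should be the same one.

    var d = new Date(0);
    d.toGMTString() === d.toUTCString();                         // true
    Date.prototype.toGMTString === Date.prototype.toUTCString;  // false here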
-
-
-function PadInt(n, digits) {
- if (digits == 1) return n;
- return n < %_MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
-}
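PadInt pads recursively and only produces a string when padding was actually needed; for instance:

    PadInt(7, 3);   // "007"
    PadInt(42, 2);  // 42 (wide enough; returned as-is, not padded)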
-
-
-// ECMA 262 - 20.3.4.36
-function DateToISOString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) throw MakeRangeError(kInvalidTimeValue);
- var year = UTC_YEAR(this);
- var year_string;
- if (year >= 0 && year <= 9999) {
- year_string = PadInt(year, 4);
- } else {
- if (year < 0) {
- year_string = "-" + PadInt(-year, 6);
- } else {
- year_string = "+" + PadInt(year, 6);
- }
- }
- return year_string +
- '-' + PadInt(UTC_MONTH(this) + 1, 2) +
- '-' + PadInt(UTC_DAY(this), 2) +
- 'T' + PadInt(UTC_HOUR(this), 2) +
- ':' + PadInt(UTC_MIN(this), 2) +
- ':' + PadInt(UTC_SEC(this), 2) +
- '.' + PadInt(UTC_MS(this), 3) +
- 'Z';
-}
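Years outside 0..9999 take the signed six-digit expanded form, as the branches above show. A sketch, assuming UTC-based time values:

    new Date(Date.UTC(2015, 0, 1)).toISOString();   // "2015-01-01T00:00:00.000Z"
    new Date(Date.UTC(10000, 0, 1)).toISOString();  // "+010000-01-01T00:00:00.000Z"
    new Date(Date.UTC(-40, 0, 1)).toISOString();    // "-000040-01-01T00:00:00.000Z"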
-
-
-// 20.3.4.37 Date.prototype.toJSON ( key )
-function DateToJSON(key) {
- var o = TO_OBJECT(this);
- var tv = TO_PRIMITIVE_NUMBER(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
- }
- return o.toISOString();
-}
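Because non-finite time values short-circuit to null before toISOString is called, invalid dates serialize cleanly:

    JSON.stringify(new Date(0));    // "\"1970-01-01T00:00:00.000Z\""
    JSON.stringify(new Date(NaN));  // "null"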
-
-
-var date_cache_version_holder;
-var date_cache_version = NAN;
-
-
-function CheckDateCacheCurrent() {
- if (!date_cache_version_holder) {
- date_cache_version_holder = %DateCacheVersion();
- if (!date_cache_version_holder) return;
- }
- if (date_cache_version_holder[0] == date_cache_version) {
- return;
- }
- date_cache_version = date_cache_version_holder[0];
-
- // Reset the timezone cache:
- timezone_cache_time = NAN;
- timezone_cache_timezone = UNDEFINED;
-
- // Reset the date cache:
- Date_cache.time = NAN;
- Date_cache.string = null;
-}
-
-
-function CreateDate(time) {
- var date = new GlobalDate();
- date.setTime(time);
- return date;
-}
-
-// -------------------------------------------------------------------
-
-%SetCode(GlobalDate, DateConstructor);
-%FunctionSetPrototype(GlobalDate, new GlobalObject());
-
-// Set up non-enumerable properties of the Date object itself.
-utils.InstallFunctions(GlobalDate, DONT_ENUM, [
- "UTC", DateUTC,
- "parse", DateParse,
- "now", DateNow
-]);
-
-// Set up non-enumerable constructor property of the Date prototype object.
-%AddNamedProperty(GlobalDate.prototype, "constructor", GlobalDate, DONT_ENUM);
-
-// Set up non-enumerable functions of the Date prototype object and
-// set their names.
-utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
- "toString", DateToString,
- "toDateString", DateToDateString,
- "toTimeString", DateToTimeString,
- "toLocaleString", DateToLocaleString,
- "toLocaleDateString", DateToLocaleDateString,
- "toLocaleTimeString", DateToLocaleTimeString,
- "valueOf", DateValueOf,
- "getTime", DateGetTime,
- "getFullYear", DateGetFullYear,
- "getUTCFullYear", DateGetUTCFullYear,
- "getMonth", DateGetMonth,
- "getUTCMonth", DateGetUTCMonth,
- "getDate", DateGetDate,
- "getUTCDate", DateGetUTCDate,
- "getDay", DateGetDay,
- "getUTCDay", DateGetUTCDay,
- "getHours", DateGetHours,
- "getUTCHours", DateGetUTCHours,
- "getMinutes", DateGetMinutes,
- "getUTCMinutes", DateGetUTCMinutes,
- "getSeconds", DateGetSeconds,
- "getUTCSeconds", DateGetUTCSeconds,
- "getMilliseconds", DateGetMilliseconds,
- "getUTCMilliseconds", DateGetUTCMilliseconds,
- "getTimezoneOffset", DateGetTimezoneOffset,
- "setTime", DateSetTime,
- "setMilliseconds", DateSetMilliseconds,
- "setUTCMilliseconds", DateSetUTCMilliseconds,
- "setSeconds", DateSetSeconds,
- "setUTCSeconds", DateSetUTCSeconds,
- "setMinutes", DateSetMinutes,
- "setUTCMinutes", DateSetUTCMinutes,
- "setHours", DateSetHours,
- "setUTCHours", DateSetUTCHours,
- "setDate", DateSetDate,
- "setUTCDate", DateSetUTCDate,
- "setMonth", DateSetMonth,
- "setUTCMonth", DateSetUTCMonth,
- "setFullYear", DateSetFullYear,
- "setUTCFullYear", DateSetUTCFullYear,
- "toGMTString", DateToGMTString,
- "toUTCString", DateToUTCString,
- "getYear", DateGetYear,
- "setYear", DateSetYear,
- "toISOString", DateToISOString,
- "toJSON", DateToJSON
-]);
-
-%InstallToContext(["create_date_fun", CreateDate]);
-
-})
diff --git a/chromium/v8/src/dateparser-inl.h b/chromium/v8/src/dateparser-inl.h
index e70c34a831b..7e5c4e355e1 100644
--- a/chromium/v8/src/dateparser-inl.h
+++ b/chromium/v8/src/dateparser-inl.h
@@ -137,17 +137,29 @@ bool DateParser::Parse(Vector<Char> str,
tz.SetSign(token.ascii_sign());
// The following number may be empty.
int n = 0;
+ int length = 0;
if (scanner.Peek().IsNumber()) {
- n = scanner.Next().number();
+ DateToken token = scanner.Next();
+ length = token.length();
+ n = token.number();
}
has_read_number = true;
if (scanner.Peek().IsSymbol(':')) {
tz.SetAbsoluteHour(n);
+ // TODO(littledan): Use minutes as part of timezone?
tz.SetAbsoluteMinute(kNone);
- } else {
+ } else if (length == 2 || length == 1) {
+ // Handle time zones like GMT-8
+ tz.SetAbsoluteHour(n);
+ tz.SetAbsoluteMinute(0);
+ } else if (length == 4 || length == 3) {
+ // Looks like the hhmm format, e.g. GMT-0800
tz.SetAbsoluteHour(n / 100);
tz.SetAbsoluteMinute(n % 100);
+ } else {
+ // Reject offsets with more than four digits, like GMT-12345
+ return false;
}
} else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
has_read_number) {
@@ -329,6 +341,7 @@ DateParser::DateToken DateParser::ParseES5DateTime(
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DATEPARSER_INL_H_
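The new length-based dispatch above changes what Date.parse accepts after a GMT sign. A sketch of the expected behavior after this change, assuming the time defaults to 00:00:00:

    Date.parse('Jan 1 1970 GMT-8');      // 28800000: one or two digits are hours
    Date.parse('Jan 1 1970 GMT-0800');   // 28800000: three or four digits are hhmm
    Date.parse('Jan 1 1970 GMT-12345');  // NaN: longer offsets are rejected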
diff --git a/chromium/v8/src/dateparser.cc b/chromium/v8/src/dateparser.cc
index 09dbf1127db..d096a7ec9fc 100644
--- a/chromium/v8/src/dateparser.cc
+++ b/chromium/v8/src/dateparser.cc
@@ -100,8 +100,15 @@ bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (sign_ != kNone) {
if (hour_ == kNone) hour_ = 0;
if (minute_ == kNone) minute_ = 0;
- int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
- if (!Smi::IsValid(total_seconds)) return false;
+ // Avoid signed integer overflow (undefined behavior) by doing unsigned
+ // arithmetic.
+ unsigned total_seconds_unsigned = hour_ * 3600U + minute_ * 60U;
+ if (total_seconds_unsigned > Smi::kMaxValue) return false;
+ int total_seconds = static_cast<int>(total_seconds_unsigned);
+ if (sign_ < 0) {
+ total_seconds = -total_seconds;
+ }
+ DCHECK(Smi::IsValid(total_seconds));
output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
} else {
output->set_null(UTC_OFFSET);
diff --git a/chromium/v8/src/dateparser.h b/chromium/v8/src/dateparser.h
index 616318db294..533173984c4 100644
--- a/chromium/v8/src/dateparser.h
+++ b/chromium/v8/src/dateparser.h
@@ -7,7 +7,7 @@
#include "src/allocation.h"
#include "src/char-predicates.h"
-#include "src/scanner.h"
+#include "src/parsing/scanner.h"
namespace v8 {
namespace internal {
@@ -364,6 +364,7 @@ class DateParser : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DATEPARSER_H_
diff --git a/chromium/v8/src/debug/arm/debug-arm.cc b/chromium/v8/src/debug/arm/debug-arm.cc
index 7f1542e1830..2d4cbf13d76 100644
--- a/chromium/v8/src/debug/arm/debug-arm.cc
+++ b/chromium/v8/src/debug/arm/debug-arm.cc
@@ -24,25 +24,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
 // Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from
// mov r2, r2
// mov r2, r2
@@ -113,19 +113,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ mov(r1, Operand::Zero());
- __ str(r1, MemOperand(ip, 0));
-
// Load the function pointer off of our current stack frame.
__ ldr(r1, MemOperand(fp,
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
@@ -134,10 +122,16 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// FLAG_enable_embedded_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(r1, no_reg, dummy, dummy);
+
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
// Load context from the function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
diff --git a/chromium/v8/src/debug/arm64/debug-arm64.cc b/chromium/v8/src/debug/arm64/debug-arm64.cc
index 7272fe7bcfa..c2b60a9326b 100644
--- a/chromium/v8/src/debug/arm64/debug-arm64.cc
+++ b/chromium/v8/src/debug/arm64/debug-arm64.cc
@@ -25,26 +25,26 @@ void EmitDebugBreakSlot(Assembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
 // Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc),
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ PatchingAssembler patcher(isolate, reinterpret_cast<Instruction*>(pc),
Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(&patcher);
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc),
+ PatchingAssembler patcher(isolate, reinterpret_cast<Instruction*>(pc),
Assembler::kDebugBreakSlotInstructions);
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
// break slot code from
@@ -124,30 +124,25 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, restarter_frame_function_slot);
- __ Str(xzr, MemOperand(scratch));
-
// We do not know our frame height, but set sp based on fp.
__ Sub(masm->StackPointer(), fp, kPointerSize);
__ AssertStackConsistency();
__ Pop(x1, fp, lr); // Function, Frame, Return address.
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(x1, no_reg, dummy, dummy);
+
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+
// Load context from the function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
diff --git a/chromium/v8/src/debug/debug-evaluate.cc b/chromium/v8/src/debug/debug-evaluate.cc
index b2495613242..e19b93eebea 100644
--- a/chromium/v8/src/debug/debug-evaluate.cc
+++ b/chromium/v8/src/debug/debug-evaluate.cc
@@ -20,10 +20,9 @@ static inline bool IsDebugContext(Isolate* isolate, Context* context) {
}
-MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
- Handle<String> source,
- bool disable_break,
- Handle<Object> context_extension) {
+MaybeHandle<Object> DebugEvaluate::Global(
+ Isolate* isolate, Handle<String> source, bool disable_break,
+ Handle<HeapObject> context_extension) {
// Handle the processing of break.
DisableBreak disable_break_scope(isolate->debug(), disable_break);
@@ -50,7 +49,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
int inlined_jsframe_index,
Handle<String> source,
bool disable_break,
- Handle<Object> context_extension) {
+ Handle<HeapObject> context_extension) {
// Handle the processing of break.
DisableBreak disable_break_scope(isolate->debug(), disable_break);
@@ -65,15 +64,23 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
- // Materialize stack locals and the arguments object.
+ // This is not much different from DebugEvaluate::Global, except that
+ // variables accessible by the function we are evaluating from are
+ // materialized and included on top of the native context. Changes to
+ // the materialized object are written back afterwards.
+ // Note that the native context is taken from the original context chain,
+ // which may not be the current native context of the isolate.
ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- Handle<Object> receiver(frame->receiver(), isolate);
+ Handle<Context> context = context_builder.native_context();
+ Handle<JSObject> receiver(context->global_proxy());
MaybeHandle<Object> maybe_result = Evaluate(
isolate, context_builder.outer_info(),
context_builder.innermost_context(), context_extension, receiver, source);
- if (!maybe_result.is_null()) context_builder.UpdateValues();
+ if (!maybe_result.is_null() && !FLAG_debug_eval_readonly_locals) {
+ context_builder.UpdateValues();
+ }
return maybe_result;
}
@@ -81,7 +88,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
// Compile and evaluate source for the given context.
MaybeHandle<Object> DebugEvaluate::Evaluate(
Isolate* isolate, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, Handle<Object> context_extension,
+ Handle<Context> context, Handle<HeapObject> context_extension,
Handle<Object> receiver, Handle<String> source) {
if (context_extension->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
@@ -120,42 +127,68 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> function =
+ Handle<JSFunction> local_function =
handle(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<Context> outer_context = handle(function->context(), isolate);
- outer_info_ = handle(function->shared());
+ Handle<Context> outer_context(local_function->context());
+ native_context_ = Handle<Context>(outer_context->native_context());
+ Handle<JSFunction> global_function(native_context_->closure());
+ outer_info_ = handle(global_function->shared());
Handle<Context> inner_context;
bool stop = false;
- for (ScopeIterator it(isolate, &frame_inspector);
+
+ // Iterate the original context chain to create a context chain that reflects
+ // our needs. The original context chain may look like this:
+ // <native context> <outer contexts> <function context> <inner contexts>
+ // In the resulting context chain, we want to materialize the receiver,
+ // the parameters of the current function, and the stack locals. We only
+ // materialize context variables that the function already references,
+ // because only for those variables can we be sure that they will be resolved
+ // correctly. Variables that are not referenced by the function may be
+ // context-allocated and thus accessible, but may be shadowed by stack-
+ // allocated variables and the resolution would be incorrect.
+ // The result will look like this:
+ // <native context> <receiver context>
+ // <materialized stack and accessible context vars> <inner contexts>
+ // All contexts use the closure of the native context, since there is no
+ // function context in the chain. Variables that cannot be resolved are
+ // bound to toplevel (script contexts or global object).
+ // Once debug-evaluate has been executed, the changes to the materialized
+ // objects are written back to the original context chain. Any changes to
+ // the original context chain will therefore be overwritten.
+ const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
+ for (ScopeIterator it(isolate, &frame_inspector, option);
!it.Failed() && !it.Done() && !stop; it.Next()) {
ScopeIterator::ScopeType scope_type = it.Type();
-
if (scope_type == ScopeIterator::ScopeTypeLocal) {
- Handle<Context> parent_context =
+ DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
+ it.GetNonLocals(&non_locals_);
+ Handle<Context> local_context =
it.HasContext() ? it.CurrentContext() : outer_context;
// The "this" binding, if any, can't be bound via "with". If we need
// to, add another node onto the outer context to bind "this".
- parent_context = MaterializeReceiver(parent_context, function);
+ Handle<Context> receiver_context =
+ MaterializeReceiver(native_context_, local_context, local_function,
+ global_function, it.ThisIsNonLocal());
Handle<JSObject> materialized_function = NewJSObjectWithNullProto();
-
- frame_inspector.MaterializeStackLocals(materialized_function, function);
-
- MaterializeArgumentsObject(materialized_function, function);
+ frame_inspector.MaterializeStackLocals(materialized_function,
+ local_function);
+ MaterializeArgumentsObject(materialized_function, local_function);
+ MaterializeContextChain(materialized_function, local_context);
Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, parent_context, materialized_function);
+ global_function, receiver_context, materialized_function);
ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
+ context_chain_element.original_context = local_context;
context_chain_element.materialized_object = materialized_function;
context_chain_element.scope_info = it.CurrentScopeInfo();
context_chain_.Add(context_chain_element);
stop = true;
- RecordContextsInChain(&inner_context, with_context, with_context);
+ RecordContextsInChain(&inner_context, receiver_context, with_context);
} else if (scope_type == ScopeIterator::ScopeTypeCatch ||
scope_type == ScopeIterator::ScopeTypeWith) {
Handle<Context> cloned_context = Handle<Context>::cast(
@@ -175,7 +208,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
Handle<Context> cloned_context = Handle<Context>::cast(
isolate->factory()->CopyFixedArray(it.CurrentContext()));
Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, cloned_context, materialized_object);
+ global_function, cloned_context, materialized_object);
ContextChainElement context_chain_element;
context_chain_element.original_context = it.CurrentContext();
@@ -187,7 +220,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
RecordContextsInChain(&inner_context, cloned_context, with_context);
} else {
Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, outer_context, materialized_object);
+ global_function, outer_context, materialized_object);
ContextChainElement context_chain_element;
context_chain_element.materialized_object = materialized_object;
@@ -208,6 +241,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
void DebugEvaluate::ContextBuilder::UpdateValues() {
+ // TODO(yangguo): remove updating values.
for (int i = 0; i < context_chain_.length(); i++) {
ContextChainElement element = context_chain_[i];
if (!element.original_context.is_null() &&
@@ -224,6 +258,11 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
FrameInspector(frame_, inlined_jsframe_index_, isolate_)
.UpdateStackLocalsFromMaterializedObject(element.materialized_object,
element.scope_info);
+ if (element.scope_info->scope_type() == FUNCTION_SCOPE) {
+ DCHECK_EQ(context_chain_.length() - 1, i);
+ UpdateContextChainFromMaterializedObject(element.materialized_object,
+ element.original_context);
+ }
}
}
}
@@ -272,41 +311,96 @@ void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
}
-Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
- Handle<Context> target, Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- Handle<Object> receiver;
- switch (scope_info->scope_type()) {
- case FUNCTION_SCOPE: {
- VariableMode mode;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
-
- // Don't bother creating a fake context node if "this" is in the context
- // already.
- if (ScopeInfo::ContextSlotIndex(scope_info,
- isolate_->factory()->this_string(), &mode,
- &init_flag, &maybe_assigned_flag) >= 0) {
- return target;
- }
- receiver = handle(frame_->receiver(), isolate_);
- break;
+MaybeHandle<Object> DebugEvaluate::ContextBuilder::LoadFromContext(
+ Handle<Context> context, Handle<String> name, bool* global) {
+ static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
+ int index;
+ PropertyAttributes attributes;
+ BindingFlags binding;
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding);
+ if (holder.is_null()) return MaybeHandle<Object>();
+ Handle<Object> value;
+ if (index != Context::kNotFound) { // Found on context.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ // Do not shadow variables on the script context.
+ *global = context->IsScriptContext();
+ return Handle<Object>(context->get(index), isolate_);
+ } else { // Found on object.
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ // Do not shadow properties on the global object.
+ *global = object->IsJSGlobalObject();
+ return JSReceiver::GetDataProperty(object, name);
+ }
+}
+
+
+void DebugEvaluate::ContextBuilder::MaterializeContextChain(
+ Handle<JSObject> target, Handle<Context> context) {
+ for (const Handle<String>& name : non_locals_) {
+ HandleScope scope(isolate_);
+ Handle<Object> value;
+ bool global;
+ if (!LoadFromContext(context, name, &global).ToHandle(&value) || global) {
+ // If resolving the variable fails, skip it. If it resolves to a global
+ // variable, skip it as well since it's not read-only and can be resolved
+ // within debug-evaluate.
+ continue;
}
- case MODULE_SCOPE:
- receiver = isolate_->factory()->undefined_value();
- break;
- case SCRIPT_SCOPE:
- receiver = handle(function->global_proxy(), isolate_);
- break;
- default:
- // For eval code, arrow functions, and the like, there's no "this" binding
- // to materialize.
- return target;
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
+}
+
+
+void DebugEvaluate::ContextBuilder::StoreToContext(Handle<Context> context,
+ Handle<String> name,
+ Handle<Object> value) {
+ static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
+ int index;
+ PropertyAttributes attributes;
+ BindingFlags binding;
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding);
+ if (holder.is_null()) return;
+ if (attributes & READ_ONLY) return;
+ if (index != Context::kNotFound) { // Found on context.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ context->set(index, *value);
+ } else { // Found on object.
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ LookupIterator lookup(object, name);
+ if (lookup.state() != LookupIterator::DATA) return;
+ CHECK(JSReceiver::SetDataProperty(&lookup, value).FromJust());
+ }
+}
+
- return isolate_->factory()->NewCatchContext(
- function, target, isolate_->factory()->this_string(), receiver);
+void DebugEvaluate::ContextBuilder::UpdateContextChainFromMaterializedObject(
+ Handle<JSObject> source, Handle<Context> context) {
+ // TODO(yangguo): check whether overwriting context fields is actually safe
+ // wrt fields we consider constant.
+ for (const Handle<String>& name : non_locals_) {
+ HandleScope scope(isolate_);
+ Handle<Object> value = JSReceiver::GetDataProperty(source, name);
+ StoreToContext(context, name, value);
+ }
+}
+
+
+Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
+ Handle<Context> parent_context, Handle<Context> lookup_context,
+ Handle<JSFunction> local_function, Handle<JSFunction> global_function,
+ bool this_is_non_local) {
+ Handle<Object> receiver = isolate_->factory()->undefined_value();
+ Handle<String> this_string = isolate_->factory()->this_string();
+ if (this_is_non_local) {
+ bool global;
+ LoadFromContext(lookup_context, this_string, &global).ToHandle(&receiver);
+ } else if (local_function->shared()->scope_info()->HasReceiver()) {
+ receiver = handle(frame_->receiver(), isolate_);
+ }
+ return isolate_->factory()->NewCatchContext(global_function, parent_context,
+ this_string, receiver);
}
} // namespace internal
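The effect of the context materialization above is easiest to see from JavaScript; a hypothetical debug session:

    function outer() {
      var seen = 1;    // referenced by inner(), so materialized for debug-evaluate
      var unseen = 2;  // not referenced; resolving it from the break point is unreliable
      function inner() {
        debugger;      // evaluating "seen" here resolves to 1; "unseen" may not resolve
        return seen;
      }
      return inner();
    }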
diff --git a/chromium/v8/src/debug/debug-evaluate.h b/chromium/v8/src/debug/debug-evaluate.h
index 49a7fce3ee7..c0b1f027d16 100644
--- a/chromium/v8/src/debug/debug-evaluate.h
+++ b/chromium/v8/src/debug/debug-evaluate.h
@@ -15,7 +15,7 @@ class DebugEvaluate : public AllStatic {
public:
static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
bool disable_break,
- Handle<Object> context_extension);
+ Handle<HeapObject> context_extension);
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
@@ -25,7 +25,7 @@ class DebugEvaluate : public AllStatic {
static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
int inlined_jsframe_index,
Handle<String> source, bool disable_break,
- Handle<Object> context_extension);
+ Handle<HeapObject> context_extension);
private:
// This class builds a context chain for evaluation of expressions
@@ -54,6 +54,7 @@ class DebugEvaluate : public AllStatic {
void UpdateValues();
Handle<Context> innermost_context() const { return innermost_context_; }
+ Handle<Context> native_context() const { return native_context_; }
Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
private:
@@ -74,12 +75,29 @@ class DebugEvaluate : public AllStatic {
void MaterializeArgumentsObject(Handle<JSObject> target,
Handle<JSFunction> function);
- Handle<Context> MaterializeReceiver(Handle<Context> target,
- Handle<JSFunction> function);
+ void MaterializeContextChain(Handle<JSObject> target,
+ Handle<Context> context);
+
+ void UpdateContextChainFromMaterializedObject(Handle<JSObject> source,
+ Handle<Context> context);
+
+ Handle<Context> MaterializeReceiver(Handle<Context> parent_context,
+ Handle<Context> lookup_context,
+ Handle<JSFunction> local_function,
+ Handle<JSFunction> global_function,
+ bool this_is_non_local);
+
+ MaybeHandle<Object> LoadFromContext(Handle<Context> context,
+ Handle<String> name, bool* global);
+
+ void StoreToContext(Handle<Context> context, Handle<String> name,
+ Handle<Object> value);
Handle<SharedFunctionInfo> outer_info_;
Handle<Context> innermost_context_;
+ Handle<Context> native_context_;
List<ContextChainElement> context_chain_;
+ List<Handle<String> > non_locals_;
Isolate* isolate_;
JavaScriptFrame* frame_;
int inlined_jsframe_index_;
@@ -88,7 +106,7 @@ class DebugEvaluate : public AllStatic {
static MaybeHandle<Object> Evaluate(Isolate* isolate,
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<Object> context_extension,
+ Handle<HeapObject> context_extension,
Handle<Object> receiver,
Handle<String> source);
};
diff --git a/chromium/v8/src/debug/debug-frames.cc b/chromium/v8/src/debug/debug-frames.cc
index c4c288148c5..012d2916223 100644
--- a/chromium/v8/src/debug/debug-frames.cc
+++ b/chromium/v8/src/debug/debug-frames.cc
@@ -45,11 +45,6 @@ int FrameInspector::GetParametersCount() {
}
-int FrameInspector::expression_count() {
- return deoptimized_frame_->expression_count();
-}
-
-
Object* FrameInspector::GetFunction() {
return is_optimized_ ? deoptimized_frame_->GetFunction() : frame_->function();
}
@@ -109,6 +104,8 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
// Do not materialize the parameter if it is shadowed by a context local.
+ // TODO(yangguo): check whether this is necessary, now that we materialize
+ // context locals as well.
Handle<String> name(scope_info->ParameterName(i));
if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
@@ -206,7 +203,7 @@ int DebugFrameHelper::FindIndexedNonNativeFrame(JavaScriptFrameIterator* it,
it->frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
// Omit functions from native and extension scripts.
- if (!frames[i].function()->IsSubjectToDebugging()) continue;
+ if (!frames[i].function()->shared()->IsSubjectToDebugging()) continue;
if (++count == index) return i;
}
}
diff --git a/chromium/v8/src/debug/debug-frames.h b/chromium/v8/src/debug/debug-frames.h
index 86e817d47ff..c0d20bbd1d1 100644
--- a/chromium/v8/src/debug/debug-frames.h
+++ b/chromium/v8/src/debug/debug-frames.h
@@ -21,7 +21,6 @@ class FrameInspector {
~FrameInspector();
int GetParametersCount();
- int expression_count();
Object* GetFunction();
Object* GetParameter(int index);
Object* GetExpression(int index);
diff --git a/chromium/v8/src/debug/debug-scopes.cc b/chromium/v8/src/debug/debug-scopes.cc
index e8ef2403934..15a05940093 100644
--- a/chromium/v8/src/debug/debug-scopes.cc
+++ b/chromium/v8/src/debug/debug-scopes.cc
@@ -4,21 +4,22 @@
#include "src/debug/debug-scopes.h"
+#include "src/ast/scopes.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
- bool ignore_nested_scopes)
+ ScopeIterator::Option option)
: isolate_(isolate),
frame_inspector_(frame_inspector),
nested_scope_chain_(4),
+ non_locals_(nullptr),
seen_script_scope_(false),
failed_(false) {
if (!frame_inspector->GetContext()->IsContext() ||
@@ -46,7 +47,8 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// addEventListener call), even if we drop some nested scopes.
// Later we may optimize getting the nested scopes (cache the result?)
// and include nested scopes into the "fast" iteration case as well.
-
+ bool ignore_nested_scopes = (option == IGNORE_NESTED_SCOPES);
+ bool collect_non_locals = (option == COLLECT_NON_LOCALS);
if (!ignore_nested_scopes && shared_info->HasDebugInfo()) {
// The source position at return is always the end of the function,
// which is not consistent with the current scope chain. Therefore all
@@ -61,8 +63,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
Address call_pc = GetFrame()->pc() - 1;
// Find the break point where execution has stopped.
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
+ BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
ignore_nested_scopes = location.IsReturn();
}
@@ -75,40 +76,40 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
context_ = Handle<Context>(context_->previous(), isolate_);
}
}
- if (scope_info->scope_type() == FUNCTION_SCOPE ||
- scope_info->scope_type() == ARROW_SCOPE) {
+ if (scope_info->scope_type() == FUNCTION_SCOPE) {
nested_scope_chain_.Add(scope_info);
}
- } else {
- // Reparse the code and analyze the scopes.
+ if (!collect_non_locals) return;
+ }
+
+ // Reparse the code and analyze the scopes.
+ Scope* scope = NULL;
+ // Check whether we are in global, eval or function code.
+ Zone zone;
+ if (scope_info->scope_type() != FUNCTION_SCOPE) {
+ // Global or eval code.
Handle<Script> script(Script::cast(shared_info->script()));
- Scope* scope = NULL;
-
- // Check whether we are in global, eval or function code.
- Zone zone;
- if (scope_info->scope_type() != FUNCTION_SCOPE &&
- scope_info->scope_type() != ARROW_SCOPE) {
- // Global or eval code.
- ParseInfo info(&zone, script);
- if (scope_info->scope_type() == SCRIPT_SCOPE) {
- info.set_global();
- } else {
- DCHECK(scope_info->scope_type() == EVAL_SCOPE);
- info.set_eval();
- info.set_context(Handle<Context>(function->context()));
- }
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.literal()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
+ ParseInfo info(&zone, script);
+ if (scope_info->scope_type() == SCRIPT_SCOPE) {
+ info.set_global();
} else {
- // Function code
- ParseInfo info(&zone, function);
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.literal()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
+ DCHECK(scope_info->scope_type() == EVAL_SCOPE);
+ info.set_eval();
+ info.set_context(Handle<Context>(function->context()));
+ }
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
+ scope = info.literal()->scope();
+ }
+ if (!ignore_nested_scopes) RetrieveScopeChain(scope);
+ if (collect_non_locals) CollectNonLocals(scope);
+ } else {
+ // Function code
+ ParseInfo info(&zone, function);
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
+ scope = info.literal()->scope();
}
+ if (!ignore_nested_scopes) RetrieveScopeChain(scope);
+ if (collect_non_locals) CollectNonLocals(scope);
}
}
@@ -117,9 +118,10 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate),
frame_inspector_(NULL),
context_(function->context()),
+ non_locals_(nullptr),
seen_script_scope_(false),
failed_(false) {
- if (!function->IsSubjectToDebugging()) context_ = Handle<Context>();
+ if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
}
@@ -132,6 +134,12 @@ MUST_USE_RESULT MaybeHandle<JSObject> ScopeIterator::MaterializeScopeDetails() {
Handle<JSObject> scope_object;
ASSIGN_RETURN_ON_EXCEPTION(isolate_, scope_object, ScopeObject(), JSObject);
details->set(kScopeDetailsObjectIndex, *scope_object);
+ if (HasContext() && CurrentContext()->closure() != NULL) {
+ Handle<String> closure_name = JSFunction::GetDebugName(
+ Handle<JSFunction>(CurrentContext()->closure()));
+ if (!closure_name.is_null() && (closure_name->length() != 0))
+ details->set(kScopeDetailsNameIndex, *closure_name);
+ }
return isolate_->factory()->NewJSArrayWithElements(details);
}
@@ -177,7 +185,6 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
switch (scope_info->scope_type()) {
case FUNCTION_SCOPE:
- case ARROW_SCOPE:
DCHECK(context_->IsFunctionContext() || !scope_info->HasContext());
return ScopeTypeLocal;
case MODULE_SCOPE:
@@ -200,7 +207,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
}
}
if (context_->IsNativeContext()) {
- DCHECK(context_->global_object()->IsGlobalObject());
+ DCHECK(context_->global_object()->IsJSGlobalObject());
// If we are at the native context and have not yet seen script scope,
// fake it.
return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
@@ -317,6 +324,27 @@ Handle<Context> ScopeIterator::CurrentContext() {
}
}
+
+void ScopeIterator::GetNonLocals(List<Handle<String> >* list_out) {
+ Handle<String> this_string = isolate_->factory()->this_string();
+ for (HashMap::Entry* entry = non_locals_->Start(); entry != nullptr;
+ entry = non_locals_->Next(entry)) {
+ Handle<String> name(reinterpret_cast<String**>(entry->key));
+ // We need to treat "this" differently.
+ if (name.is_identical_to(this_string)) continue;
+ list_out->Add(Handle<String>(reinterpret_cast<String**>(entry->key)));
+ }
+}
+
+
+bool ScopeIterator::ThisIsNonLocal() {
+ Handle<String> this_string = isolate_->factory()->this_string();
+ void* key = reinterpret_cast<void*>(this_string.location());
+ HashMap::Entry* entry = non_locals_->Lookup(key, this_string->Hash());
+ return entry != nullptr;
+}
+
+
#ifdef DEBUG
// Debug print of the content of the current scope.
void ScopeIterator::DebugPrint() {
@@ -334,7 +362,7 @@ void ScopeIterator::DebugPrint() {
if (!CurrentContext().is_null()) {
CurrentContext()->Print(os);
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
+ Handle<HeapObject> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print(os);
}
@@ -358,7 +386,7 @@ void ScopeIterator::DebugPrint() {
os << "Closure:\n";
CurrentContext()->Print(os);
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
+ Handle<HeapObject> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print(os);
}
@@ -382,8 +410,7 @@ void ScopeIterator::DebugPrint() {
#endif
-void ScopeIterator::RetrieveScopeChain(Scope* scope,
- Handle<SharedFunctionInfo> shared_info) {
+void ScopeIterator::RetrieveScopeChain(Scope* scope) {
if (scope != NULL) {
int source_position = frame_inspector_->GetSourcePosition();
scope->GetNestedScopeChain(isolate_, &nested_scope_chain_, source_position);
@@ -400,8 +427,17 @@ void ScopeIterator::RetrieveScopeChain(Scope* scope,
}
+void ScopeIterator::CollectNonLocals(Scope* scope) {
+ if (scope != NULL) {
+ DCHECK_NULL(non_locals_);
+ non_locals_ = new HashMap(InternalizedStringMatch);
+ scope->CollectNonLocals(non_locals_);
+ }
+}
+
+
MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
- Handle<GlobalObject> global(CurrentContext()->global_object());
+ Handle<JSGlobalObject> global(CurrentContext()->global_object());
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -757,7 +793,7 @@ void ScopeIterator::CopyContextLocalsToScopeObject(
// TODO(verwaest): Use AddDataProperty instead.
JSObject::SetOwnPropertyIgnoreAttributes(
scope_object, handle(String::cast(scope_info->get(i + start))), value,
- ::NONE)
+ NONE)
.Check();
}
}
@@ -768,7 +804,8 @@ bool ScopeIterator::CopyContextExtensionToScopeObject(
JSReceiver::KeyCollectionType type) {
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, keys, JSReceiver::GetKeys(extension, type), false);
+ isolate_, keys, JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS),
+ false);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
diff --git a/chromium/v8/src/debug/debug-scopes.h b/chromium/v8/src/debug/debug-scopes.h
index 20cd0336dc9..d4e335a2a54 100644
--- a/chromium/v8/src/debug/debug-scopes.h
+++ b/chromium/v8/src/debug/debug-scopes.h
@@ -30,13 +30,18 @@ class ScopeIterator {
static const int kScopeDetailsTypeIndex = 0;
static const int kScopeDetailsObjectIndex = 1;
- static const int kScopeDetailsSize = 2;
+ static const int kScopeDetailsNameIndex = 2;
+ static const int kScopeDetailsSize = 3;
+
+ enum Option { DEFAULT, IGNORE_NESTED_SCOPES, COLLECT_NON_LOCALS };
ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
- bool ignore_nested_scopes = false);
+ Option options = DEFAULT);
ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
+ ~ScopeIterator() { delete non_locals_; }
+
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
// More scopes?
@@ -67,6 +72,11 @@ class ScopeIterator {
// be an actual context.
Handle<Context> CurrentContext();
+ // Populate the list with collected non-local variable names.
+ void GetNonLocals(List<Handle<String> >* list_out);
+
+ bool ThisIsNonLocal();
+
#ifdef DEBUG
// Debug print of the content of the current scope.
void DebugPrint();
@@ -77,6 +87,7 @@ class ScopeIterator {
FrameInspector* const frame_inspector_;
Handle<Context> context_;
List<Handle<ScopeInfo> > nested_scope_chain_;
+ HashMap* non_locals_;
bool seen_script_scope_;
bool failed_;
@@ -89,7 +100,17 @@ class ScopeIterator {
JSFunction::cast(frame_inspector_->GetFunction()));
}
- void RetrieveScopeChain(Scope* scope, Handle<SharedFunctionInfo> shared_info);
+ static bool InternalizedStringMatch(void* key1, void* key2) {
+ Handle<String> s1(reinterpret_cast<String**>(key1));
+ Handle<String> s2(reinterpret_cast<String**>(key2));
+ DCHECK(s1->IsInternalizedString());
+ DCHECK(s2->IsInternalizedString());
+ return s1.is_identical_to(s2);
+ }
+
+ void RetrieveScopeChain(Scope* scope);
+
+ void CollectNonLocals(Scope* scope);
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
diff --git a/chromium/v8/src/debug/debug.cc b/chromium/v8/src/debug/debug.cc
index 4f23555d28f..bd45b71551f 100644
--- a/chromium/v8/src/debug/debug.cc
+++ b/chromium/v8/src/debug/debug.cc
@@ -38,10 +38,12 @@ Debug::Debug(Isolate* isolate)
is_suppressed_(false),
live_edit_enabled_(true), // TODO(yangguo): set to false by default.
break_disabled_(false),
+ break_points_active_(true),
in_debug_event_listener_(false),
break_on_exception_(false),
break_on_uncaught_exception_(false),
debug_info_list_(NULL),
+ feature_tracker_(isolate),
isolate_(isolate) {
ThreadInit();
}
@@ -84,7 +86,6 @@ int BreakLocation::Iterator::GetModeMask(BreakLocatorType type) {
mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
- mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL);
if (type == ALL_BREAK_LOCATIONS) {
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
@@ -144,9 +145,9 @@ void BreakLocation::Iterator::Next() {
// Find the break point at the supplied address, or the closest one before
// the address.
BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc) {
- Iterator it(debug_info, type);
- it.SkipTo(BreakIndexFromAddress(debug_info, type, pc));
+ Address pc) {
+ Iterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipTo(BreakIndexFromAddress(debug_info, pc));
return it.GetBreakLocation();
}
@@ -154,10 +155,10 @@ BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
// Find the break point at the supplied address, or the closest one before
// the address.
void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc,
+ Address pc,
List<BreakLocation>* result_out) {
- int break_index = BreakIndexFromAddress(debug_info, type, pc);
- Iterator it(debug_info, type);
+ int break_index = BreakIndexFromAddress(debug_info, pc);
+ Iterator it(debug_info, ALL_BREAK_LOCATIONS);
it.SkipTo(break_index);
int statement_position = it.statement_position();
while (!it.Done() && it.statement_position() == statement_position) {
@@ -168,11 +169,11 @@ void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc) {
+ Address pc) {
// Run through all break points to locate the one closest to the address.
int closest_break = 0;
int distance = kMaxInt;
- for (Iterator it(debug_info, type); !it.Done(); it.Next()) {
+ for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
 // Check if this break point is closer than what was previously found.
if (it.pc() <= pc && pc - it.pc() < distance) {
closest_break = it.break_index();
@@ -186,14 +187,14 @@ int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
- BreakLocatorType type, int position,
+ int position,
BreakPositionAlignment alignment) {
// Run through all break points to locate the one closest to the source
// position.
int closest_break = 0;
int distance = kMaxInt;
- for (Iterator it(debug_info, type); !it.Done(); it.Next()) {
+ for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
int next_position;
if (alignment == STATEMENT_ALIGNED) {
next_position = it.statement_position();
@@ -209,7 +210,7 @@ BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
}
}
- Iterator it(debug_info, type);
+ Iterator it(debug_info, ALL_BREAK_LOCATIONS);
it.SkipTo(closest_break);
return it.GetBreakLocation();
}
@@ -279,10 +280,11 @@ void BreakLocation::SetDebugBreak() {
if (IsDebugBreak()) return;
DCHECK(IsDebugBreakSlot());
- Builtins* builtins = debug_info_->GetIsolate()->builtins();
+ Isolate* isolate = debug_info_->GetIsolate();
+ Builtins* builtins = isolate->builtins();
Handle<Code> target =
IsReturn() ? builtins->Return_DebugBreak() : builtins->Slot_DebugBreak();
- DebugCodegen::PatchDebugBreakSlot(pc(), target);
+ DebugCodegen::PatchDebugBreakSlot(isolate, pc(), target);
DCHECK(IsDebugBreak());
}
@@ -292,21 +294,15 @@ void BreakLocation::ClearDebugBreak() {
if (IsDebuggerStatement()) return;
DCHECK(IsDebugBreakSlot());
- DebugCodegen::ClearDebugBreakSlot(pc());
+ DebugCodegen::ClearDebugBreakSlot(debug_info_->GetIsolate(), pc());
DCHECK(!IsDebugBreak());
}
-bool BreakLocation::IsStepInLocation() const {
- return IsConstructCall() || IsCall();
-}
-
-
bool BreakLocation::IsDebugBreak() const {
- if (IsDebugBreakSlot()) {
- return rinfo().IsPatchedDebugBreakSlotSequence();
- }
- return false;
+ if (IsDebuggerStatement()) return false;
+ DCHECK(IsDebugBreakSlot());
+ return rinfo().IsPatchedDebugBreakSlotSequence();
}
@@ -315,6 +311,15 @@ Handle<Object> BreakLocation::BreakPointObjects() const {
}
+void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
+ uint32_t mask = 1 << feature;
+ // Only count one sample per feature and isolate.
+ if (bitfield_ & mask) return;
+ isolate_->counters()->debug_feature_usage()->AddSample(feature);
+ bitfield_ |= mask;
+}
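Track samples each feature at most once per isolate by keeping a bitfield. The same idea as a standalone sketch, where recordSample stands in for a hypothetical metrics hook:

    var bitfield = 0;
    function track(feature) {        // feature: small integer id, one bit each
      var mask = 1 << feature;
      if (bitfield & mask) return;   // already sampled for this isolate
      recordSample(feature);         // hypothetical histogram call
      bitfield |= mask;
    }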
+
+
// Threading support.
void Debug::ThreadInit() {
thread_local_.break_count_ = 0;
@@ -322,15 +327,12 @@ void Debug::ThreadInit() {
thread_local_.break_frame_id_ = StackFrame::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.step_count_ = 0;
thread_local_.last_fp_ = 0;
- thread_local_.queued_step_count_ = 0;
- thread_local_.step_into_fp_ = 0;
- thread_local_.step_out_fp_ = 0;
+ thread_local_.target_fp_ = 0;
+ thread_local_.step_in_enabled_ = false;
// TODO(isolates): frames_are_dropped_?
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
- thread_local_.restarter_frame_function_pointer_ = NULL;
}
@@ -395,6 +397,9 @@ bool Debug::Load() {
debug_context_ = Handle<Context>::cast(
isolate_->global_handles()->Create(*context));
+
+ feature_tracker()->Track(DebugFeatureTracker::kActive);
+
return true;
}
@@ -413,7 +418,6 @@ void Debug::Unload() {
void Debug::Break(Arguments args, JavaScriptFrame* frame) {
- Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
DCHECK(args.length() == 0);
@@ -439,91 +443,62 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- // Find the break point where execution has stopped.
+ // Find the break location where execution has stopped.
// PC points to the instruction after the current one, possibly a break
 // location as well, hence the "- 1" to exclude it from the search.
Address call_pc = frame->pc() - 1;
- BreakLocation break_location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
-
- // Check whether step next reached a new statement.
- if (!StepNextContinue(&break_location, frame)) {
- // Decrease steps left if performing multiple steps.
- if (thread_local_.step_count_ > 0) {
- thread_local_.step_count_--;
- }
- }
-
- // If there is one or more real break points check whether any of these are
- // triggered.
- Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
- if (break_location.HasBreakPoint()) {
- Handle<Object> break_point_objects = break_location.BreakPointObjects();
- break_points_hit = CheckBreakPoints(break_point_objects);
- }
-
- // If step out is active skip everything until the frame where we need to step
- // out to is reached, unless real breakpoint is hit.
- if (StepOutActive() &&
- frame->fp() != thread_local_.step_out_fp_ &&
- break_points_hit->IsUndefined() ) {
- // Step count should always be 0 for StepOut.
- DCHECK(thread_local_.step_count_ == 0);
- } else if (!break_points_hit->IsUndefined() ||
- (thread_local_.last_step_action_ != StepNone &&
- thread_local_.step_count_ == 0)) {
- // Notify debugger if a real break point is triggered or if performing
- // single stepping with no more steps to perform. Otherwise do another step.
-
- // Clear all current stepping setup.
- ClearStepping();
-
- if (thread_local_.queued_step_count_ > 0) {
- // Perform queued steps
- int step_count = thread_local_.queued_step_count_;
-
- // Clear queue
- thread_local_.queued_step_count_ = 0;
-
- PrepareStep(StepNext, step_count, StackFrame::NO_ID);
- } else {
+ BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+
+ // Find actual break points, if any, and trigger debug break event.
+ if (break_points_active_ && location.HasBreakPoint()) {
+ Handle<Object> break_point_objects = location.BreakPointObjects();
+ Handle<Object> break_points_hit = CheckBreakPoints(break_point_objects);
+ if (!break_points_hit->IsUndefined()) {
+ // Clear all current stepping setup.
+ ClearStepping();
// Notify the debug event listeners.
OnDebugBreak(break_points_hit, false);
+ return;
}
- } else if (thread_local_.last_step_action_ != StepNone) {
- // Hold on to last step action as it is cleared by the call to
- // ClearStepping.
- StepAction step_action = thread_local_.last_step_action_;
- int step_count = thread_local_.step_count_;
-
- // If StepNext goes deeper in code, StepOut until original frame
- // and keep step count queued up in the meantime.
- if (step_action == StepNext && frame->fp() < thread_local_.last_fp_) {
- // Count frames until target frame
- int count = 0;
- JavaScriptFrameIterator it(isolate_);
- while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
- count++;
- it.Advance();
- }
+ }
- // Check that we indeed found the frame we are looking for.
- CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
- if (step_count > 1) {
- // Save old count and action to continue stepping after StepOut.
- thread_local_.queued_step_count_ = step_count - 1;
- }
+ // No break point. Check for stepping.
+ StepAction step_action = last_step_action();
+ Address current_fp = frame->UnpaddedFP();
+ Address target_fp = thread_local_.target_fp_;
+ Address last_fp = thread_local_.last_fp_;
- // Set up for StepOut to reach target frame.
- step_action = StepOut;
- step_count = count;
- }
+ bool step_break = true;
+ switch (step_action) {
+ case StepNone:
+ return;
+ case StepOut:
+ // Step out has not reached the target frame yet.
+ if (current_fp < target_fp) return;
+ break;
+ case StepNext:
+ // Step next should not break in a deeper frame.
+ if (current_fp < target_fp) return;
+ // Fall through.
+ case StepIn:
+ step_break = location.IsReturn() || (current_fp != last_fp) ||
+ (thread_local_.last_statement_position_ !=
+ location.code()->SourceStatementPosition(frame->pc()));
+ break;
+ case StepFrame:
+ step_break = current_fp != last_fp;
+ break;
+ }
- // Clear all current stepping setup.
- ClearStepping();
+ // Clear all current stepping setup.
+ ClearStepping();
- // Set up for the remaining steps.
- PrepareStep(step_action, step_count, StackFrame::NO_ID);
+ if (step_break) {
+ // Notify the debug event listeners.
+ OnDebugBreak(isolate_->factory()->undefined_value(), false);
+ } else {
+ // Re-prepare to continue.
+ PrepareStep(step_action);
}
}
@@ -574,7 +549,7 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
Handle<JSFunction> fun = Handle<JSFunction>::cast(
Object::GetProperty(isolate_, holder, name, STRICT).ToHandleChecked());
Handle<Object> undefined = isolate_->factory()->undefined_value();
- return Execution::TryCall(fun, undefined, argc, args);
+ return Execution::TryCall(isolate_, fun, undefined, argc, args);
}
@@ -620,10 +595,12 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Find the break point and change it.
BreakLocation location = BreakLocation::FromPosition(
- debug_info, ALL_BREAK_LOCATIONS, *source_position, STATEMENT_ALIGNED);
+ debug_info, *source_position, STATEMENT_ALIGNED);
*source_position = location.statement_position();
location.SetBreakPoint(break_point_object);
+ feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+
// At least one active break point now.
return debug_info->GetBreakPointCount() > 0;
}
@@ -661,10 +638,12 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
DCHECK(position >= 0);
// Find the break point and change it.
- BreakLocation location = BreakLocation::FromPosition(
- debug_info, ALL_BREAK_LOCATIONS, position, alignment);
+ BreakLocation location =
+ BreakLocation::FromPosition(debug_info, position, alignment);
location.SetBreakPoint(break_point_object);
+ feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+
position = (alignment == STATEMENT_ALIGNED) ? location.statement_position()
: location.position();
@@ -693,8 +672,7 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
Address pc =
debug_info->code()->entry() + break_point_info->code_position();
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, pc);
+ BreakLocation location = BreakLocation::FromAddress(debug_info, pc);
location.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@@ -730,6 +708,16 @@ void Debug::ClearAllBreakPoints() {
void Debug::FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type) {
+ // Debug utility functions are not subject to debugging.
+ if (function->native_context() == *debug_context()) return;
+
+ if (!function->shared()->IsSubjectToDebugging()) {
+ // Builtin functions are not subject to stepping, but need to be
+ // deoptimized, because optimized code does not check for debug
+    // step-in at call sites.
+ Deoptimizer::DeoptimizeFunction(*function);
+ return;
+ }
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
@@ -745,78 +733,6 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
}
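The two early returns added above encode the flooding policy: functions from the debug context are never instrumented, and non-debuggable builtins are deoptimized instead of flooded, because their optimized code does not check for debug step-in at call sites. A minimal sketch of that guard shape, with hypothetical flags standing in for the native-context and IsSubjectToDebugging() queries:

    // Hypothetical stand-in for a JSFunction; the two flags model
    // function->native_context() == *debug_context() and
    // function->shared()->IsSubjectToDebugging().
    struct Fn {
      bool in_debug_context;
      bool subject_to_debugging;
    };

    void Deoptimize(Fn&) { /* throw away optimized code */ }
    void Flood(Fn&) { /* set one-shot break points in debug code */ }

    void FloodWithOneShotSketch(Fn& fn) {
      if (fn.in_debug_context) return;  // Debug utilities are not debugged.
      if (!fn.subject_to_debugging) {
        Deoptimize(fn);  // Builtins: deoptimize only, never flood.
        return;
      }
      Flood(fn);  // Normal case: ensure debug code, then flood.
    }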
-void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
- Handle<FixedArray> new_bindings(function->function_bindings());
- Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
- isolate_);
-
- if (!bindee.is_null() && bindee->IsJSFunction()) {
- Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
- FloodWithOneShotGeneric(bindee_function);
- }
-}
-
-
-void Debug::FloodDefaultConstructorWithOneShot(Handle<JSFunction> function) {
- DCHECK(function->shared()->is_default_constructor());
- // Instead of stepping into the function we directly step into the super class
- // constructor.
- Isolate* isolate = function->GetIsolate();
- PrototypeIterator iter(isolate, function);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSFunction()) return; // Object.prototype
- Handle<JSFunction> function_proto = Handle<JSFunction>::cast(proto);
- FloodWithOneShotGeneric(function_proto);
-}
-
-
-void Debug::FloodWithOneShotGeneric(Handle<JSFunction> function,
- Handle<Object> holder) {
- if (function->shared()->bound()) {
- FloodBoundFunctionWithOneShot(function);
- } else if (function->shared()->is_default_constructor()) {
- FloodDefaultConstructorWithOneShot(function);
- } else {
- Isolate* isolate = function->GetIsolate();
- // Don't allow step into functions in the native context.
- if (function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kFunctionApply) ||
- function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kFunctionCall)) {
- // Handle function.apply and function.call separately to flood the
- // function to be called and not the code for Builtins::FunctionApply or
- // Builtins::FunctionCall. The receiver of call/apply is the target
- // function.
- if (!holder.is_null() && holder->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- FloodWithOneShotGeneric(js_function);
- }
- } else {
- FloodWithOneShot(function);
- }
- }
-}
-
-
-void Debug::FloodHandlerWithOneShot() {
- // Iterate through the JavaScript stack looking for handlers.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- int stack_slots = 0; // The computed stack slot count is not used.
- if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) {
- // Flood the function with the catch/finally block with break points.
- FloodWithOneShot(Handle<JSFunction>(frame->function()));
- return;
- }
- }
-}
-
-
void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
if (type == BreakUncaughtException) {
break_on_uncaught_exception_ = enable;
@@ -842,41 +758,66 @@ FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
}
-void Debug::PrepareStep(StepAction step_action,
- int step_count,
- StackFrame::Id frame_id) {
- HandleScope scope(isolate_);
+void Debug::PrepareStepIn(Handle<JSFunction> function) {
+ if (!is_active()) return;
+ if (last_step_action() < StepIn) return;
+ if (in_debug_scope()) return;
+ if (thread_local_.step_in_enabled_) {
+ FloodWithOneShot(function);
+ }
+}
- DCHECK(in_debug_scope());
- // Remember this step action and count.
- thread_local_.last_step_action_ = step_action;
- if (step_action == StepOut) {
- // For step out target frame will be found on the stack so there is no need
- // to set step counter for it. It's expected to always be 0 for StepOut.
- thread_local_.step_count_ = 0;
- } else {
- thread_local_.step_count_ = step_count;
+void Debug::PrepareStepOnThrow() {
+ if (!is_active()) return;
+ if (last_step_action() == StepNone) return;
+ if (in_debug_scope()) return;
+
+ ClearOneShot();
+
+ // Iterate through the JavaScript stack looking for handlers.
+ JavaScriptFrameIterator it(isolate_);
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ int stack_slots = 0; // The computed stack slot count is not used.
+ if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) break;
+ it.Advance();
+ }
+
+  // Find the closest JavaScript frame we can flood with one-shots.
+ while (!it.done() &&
+ !it.frame()->function()->shared()->IsSubjectToDebugging()) {
+ it.Advance();
}
+  if (it.done()) return;  // No suitable JavaScript catch handler.
+
+ FloodWithOneShot(Handle<JSFunction>(it.frame()->function()));
+}
+
+
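PrepareStepOnThrow above performs a two-phase walk: first down the JavaScript stack to the frame whose handler table will catch the throw, then further past frames that are not subject to debugging. A self-contained sketch of that scan over a mock frame list (the Frame fields are stand-ins for the V8 frame queries):

    #include <vector>

    struct Frame {
      bool has_exception_handler;  // LookupExceptionHandlerInTable(...) > 0
      bool subject_to_debugging;   // shared()->IsSubjectToDebugging()
    };

    // Returns the index of the frame to flood with one-shot break points,
    // or -1 if no suitable JavaScript catch handler exists.
    int FindFrameToFlood(const std::vector<Frame>& stack) {
      size_t i = 0;
      // Phase 1: find the closest frame that can catch the exception.
      while (i < stack.size() && !stack[i].has_exception_handler) ++i;
      // Phase 2: from there, skip frames the debugger may not instrument.
      while (i < stack.size() && !stack[i].subject_to_debugging) ++i;
      return i < stack.size() ? static_cast<int>(i) : -1;
    }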
+void Debug::PrepareStep(StepAction step_action) {
+ HandleScope scope(isolate_);
+
+ DCHECK(in_debug_scope());
+
// Get the frame where the execution has stopped and skip the debug frame if
// any. The debug frame will only be present if execution was stopped due to
// hitting a break point. In other situations (e.g. unhandled exception) the
// debug frame is not present.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- if (frame_id != StackFrame::NO_ID) {
- id = frame_id;
- }
- JavaScriptFrameIterator frames_it(isolate_, id);
+ StackFrame::Id frame_id = break_frame_id();
+ // If there is no JavaScript stack don't do anything.
+ if (frame_id == StackFrame::NO_ID) return;
+
+ JavaScriptFrameIterator frames_it(isolate_, frame_id);
JavaScriptFrame* frame = frames_it.frame();
- // First of all ensure there is one-shot break points in the top handler
- // if any.
- FloodHandlerWithOneShot();
+ feature_tracker()->Track(DebugFeatureTracker::kStepping);
+
+ // Remember this step action and count.
+ thread_local_.last_step_action_ = step_action;
+ STATIC_ASSERT(StepFrame > StepIn);
+ thread_local_.step_in_enabled_ = (step_action >= StepIn);
// If the function on the top frame is unresolved perform step out. This will
// be the case when calling unknown function and having the debugger stopped
@@ -907,142 +848,57 @@ void Debug::PrepareStep(StepAction step_action,
// PC points to the instruction after the current one, possibly a break
// location as well. So the "- 1" to exclude it from the search.
Address call_pc = summary.pc() - 1;
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
-
- // If this is the last break code target step out is the only possibility.
- if (location.IsReturn() || step_action == StepOut) {
- if (step_action == StepOut) {
- // Skip step_count frames starting with the current one.
- while (step_count-- > 0 && !frames_it.done()) {
- frames_it.Advance();
- }
- } else {
- DCHECK(location.IsReturn());
- frames_it.Advance();
- }
- // Skip native and extension functions on the stack.
- while (!frames_it.done() &&
- !frames_it.frame()->function()->IsSubjectToDebugging()) {
- frames_it.Advance();
- }
- // Step out: If there is a JavaScript caller frame, we need to
- // flood it with breakpoints.
- if (!frames_it.done()) {
- // Fill the function to return to with one-shot break points.
- JSFunction* function = frames_it.frame()->function();
- FloodWithOneShot(Handle<JSFunction>(function));
- // Set target frame pointer.
- ActivateStepOut(frames_it.frame());
- }
- return;
- }
-
- if (step_action != StepNext && step_action != StepMin) {
- // If there's restarter frame on top of the stack, just get the pointer
- // to function which is going to be restarted.
- if (thread_local_.restarter_frame_function_pointer_ != NULL) {
- Handle<JSFunction> restarted_function(
- JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
- FloodWithOneShot(restarted_function);
- } else if (location.IsCall()) {
- // Find target function on the expression stack.
- // Expression stack looks like this (top to bottom):
- // argN
- // ...
- // arg0
- // Receiver
- // Function to call
- int num_expressions_without_args =
- frame->ComputeExpressionsCount() - location.CallArgumentsCount();
- DCHECK(num_expressions_without_args >= 2);
- Object* fun = frame->GetExpression(num_expressions_without_args - 2);
-
- // Flood the actual target of call/apply.
- if (fun->IsJSFunction()) {
- Isolate* isolate = JSFunction::cast(fun)->GetIsolate();
- Code* apply = isolate->builtins()->builtin(Builtins::kFunctionApply);
- Code* call = isolate->builtins()->builtin(Builtins::kFunctionCall);
- // Find target function on the expression stack for expression like
- // Function.call.call...apply(...)
- int i = 1;
- while (fun->IsJSFunction()) {
- Code* code = JSFunction::cast(fun)->shared()->code();
- if (code != apply && code != call) break;
- DCHECK(num_expressions_without_args >= i);
- fun = frame->GetExpression(num_expressions_without_args - i);
- i--;
- }
- }
-
- if (fun->IsJSFunction()) {
- Handle<JSFunction> js_function(JSFunction::cast(fun));
- FloodWithOneShotGeneric(js_function);
- }
- }
-
- ActivateStepIn(frame);
- }
+ BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
- // Fill the current function with one-shot break points even for step in on
- // a call target as the function called might be a native function for
- // which step in will not stop. It also prepares for stepping in
- // getters/setters.
- // If we are stepping into another frame, only fill calls and returns.
- FloodWithOneShot(function, step_action == StepFrame ? CALLS_AND_RETURNS
- : ALL_BREAK_LOCATIONS);
+ // At a return statement we will step out either way.
+ if (location.IsReturn()) step_action = StepOut;
- // Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(summary.pc());
thread_local_.last_fp_ = frame->UnpaddedFP();
-}
-
-
-// Check whether the current debug break should be reported to the debugger. It
-// is used to have step next and step in only report break back to the debugger
-// if on a different frame or in a different statement. In some situations
-// there will be several break points in the same statement when the code is
-// flooded with one-shot break points. This function helps to perform several
-// steps before reporting break back to the debugger.
-bool Debug::StepNextContinue(BreakLocation* break_location,
- JavaScriptFrame* frame) {
- // StepNext and StepOut shouldn't bring us deeper in code, so last frame
- // shouldn't be a parent of current frame.
- StepAction step_action = thread_local_.last_step_action_;
- if (step_action == StepNext || step_action == StepOut) {
- if (frame->fp() < thread_local_.last_fp_) return true;
- }
-
- // We stepped into a new frame if the frame pointer changed.
- if (step_action == StepFrame) {
- return frame->UnpaddedFP() == thread_local_.last_fp_;
- }
-
- // If the step last action was step next or step in make sure that a new
- // statement is hit.
- if (step_action == StepNext || step_action == StepIn) {
- // Never continue if returning from function.
- if (break_location->IsReturn()) return false;
-
- // Continue if we are still on the same frame and in the same statement.
- int current_statement_position =
- break_location->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->UnpaddedFP() &&
- thread_local_.last_statement_position_ == current_statement_position;
+ switch (step_action) {
+ case StepNone:
+ UNREACHABLE();
+ break;
+ case StepOut:
+ // Advance to caller frame.
+ frames_it.Advance();
+ // Skip native and extension functions on the stack.
+ while (!frames_it.done() &&
+ !frames_it.frame()->function()->shared()->IsSubjectToDebugging()) {
+ // Builtin functions are not subject to stepping, but need to be
+ // deoptimized to include checks for step-in at call sites.
+ Deoptimizer::DeoptimizeFunction(frames_it.frame()->function());
+ frames_it.Advance();
+ }
+ if (frames_it.done()) {
+ // Stepping out to the embedder. Disable step-in to avoid stepping into
+ // the next (unrelated) call that the embedder makes.
+ thread_local_.step_in_enabled_ = false;
+ } else {
+ // Fill the caller function to return to with one-shot break points.
+ Handle<JSFunction> caller_function(frames_it.frame()->function());
+ FloodWithOneShot(caller_function);
+ thread_local_.target_fp_ = frames_it.frame()->UnpaddedFP();
+ }
+ // Clear last position info. For stepping out it does not matter.
+ thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_fp_ = 0;
+ break;
+ case StepNext:
+ thread_local_.target_fp_ = frame->UnpaddedFP();
+ FloodWithOneShot(function);
+ break;
+ case StepIn:
+ FloodWithOneShot(function);
+ break;
+ case StepFrame:
+ // No point in setting one-shot breaks at places where we are not about
+ // to leave the current frame.
+ FloodWithOneShot(function, CALLS_AND_RETURNS);
+ break;
}
-
- // No step next action - don't continue.
- return false;
-}
-
-
-// Check whether the code object at the specified address is a debug break code
-// object.
-bool Debug::IsDebugBreak(Address addr) {
- Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->is_debug_stub();
}
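With step counts gone, PrepareStep reduces to picking a flood target and, for StepOut and StepNext, a target frame pointer. A condensed sketch of just that selection; the enum, FloodMode, and Prepared struct are illustrative stand-ins (FloodMode mirrors BreakLocatorType):

    #include <cstdint>

    enum StepAction : int8_t { StepOut, StepNext, StepIn, StepFrame };
    enum class FloodMode { kAllBreakLocations, kCallsAndReturns };

    struct Prepared {
      uintptr_t target_fp;  // 0 when no target frame applies.
      FloodMode mode;       // What to flood in the chosen function.
      bool flood_caller;    // StepOut floods the caller, not the current frame.
    };

    Prepared SelectFloodTarget(StepAction action, uintptr_t current_fp,
                               uintptr_t caller_fp) {
      switch (action) {
        case StepOut:
          // Flood the first debuggable caller; break once back at its fp.
          return {caller_fp, FloodMode::kAllBreakLocations, true};
        case StepNext:
          // Flood the current function; ignore breaks in deeper frames.
          return {current_fp, FloodMode::kAllBreakLocations, false};
        case StepIn:
          return {0, FloodMode::kAllBreakLocations, false};
        case StepFrame:
          // Only calls and returns can leave the current frame.
          return {0, FloodMode::kCallsAndReturns, false};
      }
      return {0, FloodMode::kAllBreakLocations, false};
    }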
@@ -1084,41 +940,15 @@ Handle<Object> Debug::GetSourceBreakLocations(
}
-// Handle stepping into a function.
-void Debug::HandleStepIn(Handle<Object> function_obj, bool is_constructor) {
- // Flood getter/setter if we either step in or step to another frame.
- bool step_frame = thread_local_.last_step_action_ == StepFrame;
- if (!StepInActive() && !step_frame) return;
- if (!function_obj->IsJSFunction()) return;
- Handle<JSFunction> function = Handle<JSFunction>::cast(function_obj);
- Isolate* isolate = function->GetIsolate();
-
- StackFrameIterator it(isolate);
- it.Advance();
- // For constructor functions skip another frame.
- if (is_constructor) {
- DCHECK(it.frame()->is_construct());
- it.Advance();
- }
- Address fp = it.frame()->fp();
-
- // Flood the function with one-shot break points if it is called from where
- // step into was requested, or when stepping into a new frame.
- if (fp == thread_local_.step_into_fp_ || step_frame) {
- FloodWithOneShotGeneric(function, Handle<Object>());
- }
-}
-
-
void Debug::ClearStepping() {
// Clear the various stepping setup.
ClearOneShot();
- ClearStepIn();
- ClearStepOut();
- ClearStepNext();
- // Clear multiple step counter.
- thread_local_.step_count_ = 0;
+ thread_local_.last_step_action_ = StepNone;
+ thread_local_.step_in_enabled_ = false;
+ thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_fp_ = 0;
+ thread_local_.target_fp_ = 0;
}
@@ -1139,32 +969,9 @@ void Debug::ClearOneShot() {
}
-void Debug::ActivateStepIn(StackFrame* frame) {
- DCHECK(!StepOutActive());
- thread_local_.step_into_fp_ = frame->UnpaddedFP();
-}
-
-
-void Debug::ClearStepIn() {
- thread_local_.step_into_fp_ = 0;
-}
-
-
-void Debug::ActivateStepOut(StackFrame* frame) {
- DCHECK(!StepInActive());
- thread_local_.step_out_fp_ = frame->UnpaddedFP();
-}
-
-
-void Debug::ClearStepOut() {
- thread_local_.step_out_fp_ = 0;
-}
-
-
-void Debug::ClearStepNext() {
- thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.last_fp_ = 0;
+void Debug::EnableStepIn() {
+ STATIC_ASSERT(StepFrame > StepIn);
+ thread_local_.step_in_enabled_ = (last_step_action() >= StepIn);
}
@@ -1305,8 +1112,16 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
List<Handle<JSFunction> > functions;
List<Handle<JSGeneratorObject> > suspended_generators;
- if (!shared->optimized_code_map()->IsSmi()) {
- shared->ClearOptimizedCodeMap();
+  // Flush all optimized code maps. Note that the heap iteration below does
+  // not cover this, because the given function might have been inlined into
+  // code for which no JSFunction exists.
+ {
+ SharedFunctionInfo::Iterator iterator(isolate_);
+ while (SharedFunctionInfo* shared = iterator.Next()) {
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ shared->ClearOptimizedCodeMap();
+ }
+ }
}
// Make sure we abort incremental marking.
@@ -1373,6 +1188,7 @@ class SharedFunctionInfoFinder {
target_position_(target_position) {}
void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = NULL) {
+ if (!shared->IsSubjectToDebugging()) return;
int start_position = shared->function_token_position();
if (start_position == RelocInfo::kNoPosition) {
start_position = shared->start_position();
@@ -1422,7 +1238,7 @@ class SharedFunctionInfoFinder {
// cannot be compiled without context (need to find outer compilable SFI etc.)
Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int position) {
- while (true) {
+ for (int iteration = 0;; iteration++) {
// Go through all shared function infos associated with this script to
// find the inner most function containing this position.
// If there is no shared function info for this script at all, there is
@@ -1440,7 +1256,18 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
shared = finder.Result();
if (shared == NULL) break;
// We found it if it's already compiled and has debug code.
- if (shared->HasDebugCode()) return handle(shared);
+ if (shared->HasDebugCode()) {
+ Handle<SharedFunctionInfo> shared_handle(shared);
+ // If the iteration count is larger than 1, we had to compile the outer
+ // function in order to create this shared function info. So there can
+ // be no JSFunction referencing it. We can anticipate creating a debug
+      // info while bypassing PrepareFunctionForBreakPoints.
+ if (iteration > 1) {
+ AllowHeapAllocation allow_before_return;
+ CreateDebugInfo(shared_handle);
+ }
+ return shared_handle;
+ }
}
// If not, compile to reveal inner functions, if possible.
if (shared->allows_lazy_compilation_without_context()) {
@@ -1473,6 +1300,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
closure = finder.ResultClosure();
shared = finder.Result();
}
+ if (shared == NULL) break;
HandleScope scope(isolate_);
if (closure == NULL) {
if (!Compiler::CompileDebugCode(handle(shared))) break;
@@ -1500,11 +1328,13 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
if (!PrepareFunctionForBreakPoints(shared)) return false;
- // Make sure IC state is clean. This is so that we correctly flood
- // accessor pairs when stepping in.
- shared->code()->ClearInlineCaches();
- shared->feedback_vector()->ClearICSlots(*shared);
+ CreateDebugInfo(shared);
+ return true;
+}
+
+
+void Debug::CreateDebugInfo(Handle<SharedFunctionInfo> shared) {
// Create the debug info object.
DCHECK(shared->HasDebugCode());
Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
@@ -1513,8 +1343,6 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
node->set_next(debug_info_list_);
debug_info_list_ = node;
-
- return true;
}
@@ -1585,18 +1413,15 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEdit::FrameDropMode mode,
- Object** restarter_frame_function_pointer) {
+ LiveEdit::FrameDropMode mode) {
if (mode != LiveEdit::CURRENTLY_SET_MODE) {
thread_local_.frame_drop_mode_ = mode;
}
thread_local_.break_frame_id_ = new_break_frame_id;
- thread_local_.restarter_frame_function_pointer_ =
- restarter_frame_function_pointer;
}
-bool Debug::IsDebugGlobal(GlobalObject* global) {
+bool Debug::IsDebugGlobal(JSGlobalObject* global) {
return is_loaded() && global == debug_context()->global_object();
}
@@ -1647,8 +1472,7 @@ void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
// has stopped.
Address call_pc = summary.pc() - 1;
List<BreakLocation> locations;
- BreakLocation::FromAddressSameStatement(debug_info, ALL_BREAK_LOCATIONS,
- call_pc, &locations);
+ BreakLocation::FromAddressSameStatement(debug_info, call_pc, &locations);
for (BreakLocation location : locations) {
if (location.pc() <= summary.pc()) {
@@ -1663,7 +1487,7 @@ void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
if (frame_it.frame()->id() != frame_id) continue;
}
}
- if (location.IsStepInLocation()) results_out->Add(location.position());
+ if (location.IsCall()) results_out->Add(location.position());
}
}
@@ -1736,6 +1560,7 @@ MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
void Debug::OnThrow(Handle<Object> exception) {
if (in_debug_scope() || ignore_events()) return;
+ PrepareStepOnThrow();
// Temporarily clear any scheduled_exception to allow evaluating
// JavaScript from the debug event handler.
HandleScope scope(isolate_);
@@ -1797,9 +1622,6 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
- // Clear all current stepping setup.
- ClearStepping();
-
// Create the event data object.
Handle<Object> event_data;
// Bail out and don't call debugger if exception.
@@ -1943,7 +1765,7 @@ void Debug::CallEventCallback(v8::DebugEvent event,
event_data,
event_listener_data_ };
Handle<JSReceiver> global(isolate_->global_proxy());
- Execution::TryCall(Handle<JSFunction>::cast(event_listener_),
+ Execution::TryCall(isolate_, Handle<JSFunction>::cast(event_listener_),
global, arraysize(argv), argv);
}
in_debug_event_listener_ = previous;
@@ -2089,7 +1911,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<String> answer;
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result =
- Execution::TryCall(process_debug_request, cmd_processor, 1,
+ Execution::TryCall(isolate_, process_debug_request, cmd_processor, 1,
request_args, &maybe_exception);
if (maybe_result.ToHandle(&answer_value)) {
@@ -2208,7 +2030,7 @@ void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
}
-MaybeHandle<Object> Debug::Call(Handle<JSFunction> fun, Handle<Object> data) {
+MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return isolate_->factory()->undefined_value();
@@ -2244,8 +2066,9 @@ void Debug::HandleDebugBreak() {
Object* fun = it.frame()->function();
if (fun && fun->IsJSFunction()) {
// Don't stop in builtin functions.
- if (!JSFunction::cast(fun)->IsSubjectToDebugging()) return;
- GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
+ if (!JSFunction::cast(fun)->shared()->IsSubjectToDebugging()) return;
+ JSGlobalObject* global =
+ JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (IsDebugGlobal(global)) return;
}
@@ -2257,6 +2080,9 @@ void Debug::HandleDebugBreak() {
isolate_->stack_guard()->ClearDebugBreak();
+ // Clear stepping to avoid duplicate breaks.
+ ClearStepping();
+
ProcessDebugMessages(debug_command_only);
}
@@ -2307,7 +2133,6 @@ DebugScope::DebugScope(Debug* debug)
}
-
DebugScope::~DebugScope() {
if (!failed_ && prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
@@ -2419,7 +2244,7 @@ v8::Local<v8::String> MessageImpl::GetJSON() const {
}
MaybeHandle<Object> maybe_json =
- Execution::TryCall(Handle<JSFunction>::cast(fun), event_data_, 0, NULL);
+ Execution::TryCall(isolate, fun, event_data_, 0, NULL);
Handle<Object> json;
if (!maybe_json.ToHandle(&json) || !json->IsString()) {
return v8::Local<v8::String>();
diff --git a/chromium/v8/src/debug/debug.h b/chromium/v8/src/debug/debug.h
index 640355a7e66..4b098db49a3 100644
--- a/chromium/v8/src/debug/debug.h
+++ b/chromium/v8/src/debug/debug.h
@@ -31,16 +31,13 @@ class DebugScope;
// Step actions. NOTE: These values are in macros.py as well.
-enum StepAction {
+enum StepAction : int8_t {
StepNone = -1, // Stepping not prepared.
StepOut = 0, // Step out of the current function.
StepNext = 1, // Step to the next statement in the current function.
StepIn = 2, // Step into new functions invoked or the next statement
// in the current function.
- StepMin = 3, // Perform a minimum step in the current function.
- StepInMin = 4, // Step into new functions invoked or perform a minimum step
- // in the current function.
- StepFrame = 5 // Step into a new frame or return to previous frame.
+ StepFrame = 3 // Step into a new frame or return to previous frame.
};
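The renumbering is not cosmetic: with StepMin and StepInMin gone, the remaining values are ordered so the runtime can derive the step-in flag from a single comparison (see STATIC_ASSERT(StepFrame > StepIn) in Debug::PrepareStep and Debug::EnableStepIn). A minimal illustration of the invariant:

    #include <cstdint>

    enum StepAction : int8_t {
      StepNone = -1, StepOut = 0, StepNext = 1, StepIn = 2, StepFrame = 3
    };

    constexpr bool StepInEnabled(StepAction a) { return a >= StepIn; }

    // Only the two "enter a new function" actions flood callees on entry.
    static_assert(StepFrame > StepIn, "flag is derived by comparison");
    static_assert(!StepInEnabled(StepNext), "StepNext must not flood callees");
    static_assert(StepInEnabled(StepFrame), "StepFrame floods calls/returns");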
@@ -67,15 +64,12 @@ class BreakLocation {
public:
// Find the break point at the supplied address, or the closest one before
// the address.
- static BreakLocation FromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc);
+ static BreakLocation FromAddress(Handle<DebugInfo> debug_info, Address pc);
- static void FromAddressSameStatement(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc,
+ static void FromAddressSameStatement(Handle<DebugInfo> debug_info, Address pc,
List<BreakLocation>* result_out);
- static BreakLocation FromPosition(Handle<DebugInfo> debug_info,
- BreakLocatorType type, int position,
+ static BreakLocation FromPosition(Handle<DebugInfo> debug_info, int position,
BreakPositionAlignment alignment);
bool IsDebugBreak() const;
@@ -86,15 +80,6 @@ class BreakLocation {
inline bool IsCall() const {
return RelocInfo::IsDebugBreakSlotAtCall(rmode_);
}
- inline bool IsConstructCall() const {
- return RelocInfo::IsDebugBreakSlotAtConstructCall(rmode_);
- }
- inline int CallArgumentsCount() const {
- DCHECK(IsCall());
- return RelocInfo::DebugBreakCallArgumentsCount(data_);
- }
-
- bool IsStepInLocation() const;
inline bool HasBreakPoint() const {
return debug_info_->HasBreakPoint(pc_offset_);
}
@@ -109,7 +94,7 @@ class BreakLocation {
inline RelocInfo rinfo() const {
- return RelocInfo(pc(), rmode(), data_, code());
+ return RelocInfo(debug_info_->GetIsolate(), pc(), rmode(), data_, code());
}
inline int position() const { return position_; }
@@ -164,8 +149,7 @@ class BreakLocation {
friend class Debug;
- static int BreakIndexFromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc);
+ static int BreakIndexFromAddress(Handle<DebugInfo> debug_info, Address pc);
void SetDebugBreak();
void ClearDebugBreak();
@@ -343,6 +327,28 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
};
+class DebugFeatureTracker {
+ public:
+ enum Feature {
+ kActive = 1,
+ kBreakPoint = 2,
+ kStepping = 3,
+ kHeapSnapshot = 4,
+ kAllocationTracking = 5,
+ kProfiler = 6,
+ kLiveEdit = 7,
+ };
+
+ explicit DebugFeatureTracker(Isolate* isolate)
+ : isolate_(isolate), bitfield_(0) {}
+ void Track(Feature feature);
+
+ private:
+ Isolate* isolate_;
+ uint32_t bitfield_;
+};
+
+
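Track() itself lives in debug.cc and is not part of this hunk; per the field comment later in this diff, it collects histogram data on debugger feature usage. A plausible once-per-feature sketch, assuming a bitfield latch and a hypothetical ReportToHistogram sink (both are assumptions, not the actual implementation):

    #include <cstdint>

    // Hypothetical sink; the real tracker reports through isolate counters.
    inline void ReportToHistogram(int sample) { /* record sample */ }

    class DebugFeatureTrackerSketch {
     public:
      enum Feature { kActive = 1, kBreakPoint = 2, kStepping = 3 };

      // Report each feature at most once for the lifetime of the tracker.
      void Track(Feature feature) {
        uint32_t mask = 1u << feature;
        if (bitfield_ & mask) return;  // Already reported once.
        bitfield_ |= mask;
        ReportToHistogram(feature);
      }

     private:
      uint32_t bitfield_ = 0;
    };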
// This class contains the debugger support. The main purpose is to handle
// setting break points in the code.
//
@@ -368,7 +374,7 @@ class Debug {
void SetMessageHandler(v8::Debug::MessageHandler handler);
void EnqueueCommandMessage(Vector<const uint16_t> command,
v8::Debug::ClientData* client_data = NULL);
- MUST_USE_RESULT MaybeHandle<Object> Call(Handle<JSFunction> fun,
+ MUST_USE_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
Handle<Object> data);
Handle<Context> GetDebugContext();
void HandleDebugBreak();
@@ -394,25 +400,16 @@ class Debug {
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type = ALL_BREAK_LOCATIONS);
- void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
- void FloodDefaultConstructorWithOneShot(Handle<JSFunction> function);
- void FloodWithOneShotGeneric(Handle<JSFunction> function,
- Handle<Object> holder = Handle<Object>());
- void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
// Stepping handling.
- void PrepareStep(StepAction step_action,
- int step_count,
- StackFrame::Id frame_id);
+ void PrepareStep(StepAction step_action);
+ void PrepareStepIn(Handle<JSFunction> function);
+ void PrepareStepOnThrow();
void ClearStepping();
void ClearStepOut();
- bool IsStepping() { return thread_local_.step_count_ > 0; }
- bool StepNextContinue(BreakLocation* location, JavaScriptFrame* frame);
- bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- void HandleStepIn(Handle<Object> function_obj, bool is_constructor);
- bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
+ void EnableStepIn();
void GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
List<int>* results_out);
@@ -424,6 +421,7 @@ class Debug {
// function needs to be compiled already.
bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function);
+ void CreateDebugInfo(Handle<SharedFunctionInfo> shared);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
template <typename C>
@@ -433,23 +431,19 @@ class Debug {
Handle<Object> FindSharedFunctionInfoInScript(Handle<Script> script,
int position);
- // Returns true if the current stub call is patched to call the debugger.
- static bool IsDebugBreak(Address addr);
-
static Handle<Object> GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
BreakPositionAlignment position_aligment);
// Check whether a global object is the debug global object.
- bool IsDebugGlobal(GlobalObject* global);
+ bool IsDebugGlobal(JSGlobalObject* global);
// Check whether this frame is just about to return.
bool IsBreakAtReturn(JavaScriptFrame* frame);
// Support for LiveEdit
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEdit::FrameDropMode mode,
- Object** restarter_frame_function_pointer);
+ LiveEdit::FrameDropMode mode);
// Threading support.
char* ArchiveDebug(char* to);
@@ -482,7 +476,7 @@ class Debug {
inline bool in_debug_scope() const {
return !!base::NoBarrier_Load(&thread_local_.current_debug_scope_);
}
- void set_disable_break(bool v) { break_disabled_ = v; }
+ void set_break_points_active(bool v) { break_points_active_ = v; }
StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
int break_id() { return thread_local_.break_id_; }
@@ -496,17 +490,14 @@ class Debug {
return reinterpret_cast<Address>(&after_break_target_);
}
- Address restarter_frame_function_pointer_address() {
- Object*** address = &thread_local_.restarter_frame_function_pointer_;
- return reinterpret_cast<Address>(address);
- }
-
- Address step_in_fp_addr() {
- return reinterpret_cast<Address>(&thread_local_.step_into_fp_);
+ Address step_in_enabled_address() {
+ return reinterpret_cast<Address>(&thread_local_.step_in_enabled_);
}
StepAction last_step_action() { return thread_local_.last_step_action_; }
+ DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
+
private:
explicit Debug(Isolate* isolate);
@@ -561,10 +552,7 @@ class Debug {
void InvokeMessageHandler(MessageImpl message);
void ClearOneShot();
- void ActivateStepIn(StackFrame* frame);
- void ClearStepIn();
void ActivateStepOut(StackFrame* frame);
- void ClearStepNext();
void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
Handle<Object> CheckBreakPoints(Handle<Object> break_point);
bool CheckBreakPoint(Handle<Object> break_point_object);
@@ -592,8 +580,8 @@ class Debug {
bool is_active_;
bool is_suppressed_;
bool live_edit_enabled_;
- bool has_break_points_;
bool break_disabled_;
+ bool break_points_active_;
bool in_debug_event_listener_;
bool break_on_exception_;
bool break_on_uncaught_exception_;
@@ -605,6 +593,9 @@ class Debug {
// before returning to the DebugBreakCallHelper.
Address after_break_target_;
+ // Used to collect histogram data on debugger feature usage.
+ DebugFeatureTracker feature_tracker_;
+
// Per-thread data.
class ThreadLocal {
public:
@@ -626,30 +617,20 @@ class Debug {
// Source statement position from last step next action.
int last_statement_position_;
- // Number of steps left to perform before debug event.
- int step_count_;
-
// Frame pointer from last step next or step frame action.
Address last_fp_;
- // Number of queued steps left to perform before debug event.
- int queued_step_count_;
-
- // Frame pointer for frame from which step in was performed.
- Address step_into_fp_;
+ // Frame pointer of the target frame we want to arrive at.
+ Address target_fp_;
- // Frame pointer for the frame where debugger should be called when current
- // step out action is completed.
- Address step_out_fp_;
+ // Whether functions are flooded on entry for step-in and step-frame.
+    // If we stepped out to the embedder, disable flooding to avoid spilling
+    // stepping into the next call that the embedder makes.
+ bool step_in_enabled_;
// Stores the way how LiveEdit has patched the stack. It is used when
// debugger returns control back to user script.
LiveEdit::FrameDropMode frame_drop_mode_;
-
- // When restarter frame is on stack, stores the address
- // of the pointer to function being restarted. Otherwise (most of the time)
- // stores NULL. This pointer is used with 'step in' implementation.
- Object** restarter_frame_function_pointer_;
};
// Storage location for registers when handling debug break calls
@@ -746,8 +727,6 @@ class DebugCodegen : public AllStatic {
static void GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode);
- static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
-
// FrameDropper is a code replacement for a JavaScript frame with possibly
// several frames above.
// There is no calling conventions here, because it never actually gets
@@ -755,14 +734,15 @@ class DebugCodegen : public AllStatic {
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
- static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc = -1);
+ static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode);
- static void PatchDebugBreakSlot(Address pc, Handle<Code> code);
- static void ClearDebugBreakSlot(Address pc);
+ static void PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code);
+ static void ClearDebugBreakSlot(Isolate* isolate, Address pc);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DEBUG_DEBUG_H_
diff --git a/chromium/v8/src/debug/debug.js b/chromium/v8/src/debug/debug.js
index 2e51d430889..bc2c69602b9 100644
--- a/chromium/v8/src/debug/debug.js
+++ b/chromium/v8/src/debug/debug.js
@@ -15,22 +15,20 @@ var IsNaN = global.isNaN;
var JSONParse = global.JSON.parse;
var JSONStringify = global.JSON.stringify;
var LookupMirror = global.LookupMirror;
+var MakeError;
+var MakeTypeError;
var MakeMirror = global.MakeMirror;
var MakeMirrorSerializer = global.MakeMirrorSerializer;
var MathMin = global.Math.min;
var Mirror = global.Mirror;
var MirrorType;
var ParseInt = global.parseInt;
-var ToBoolean;
-var ToNumber;
-var ToString;
var ValueMirror = global.ValueMirror;
utils.Import(function(from) {
+ MakeError = from.MakeError;
+ MakeTypeError = from.MakeTypeError;
MirrorType = from.MirrorType;
- ToBoolean = from.ToBoolean;
- ToNumber = from.ToNumber;
- ToString = from.ToString;
});
//----------------------------------------------------------------------------
@@ -64,9 +62,7 @@ Debug.ExceptionBreak = { Caught : 0,
Debug.StepAction = { StepOut: 0,
StepNext: 1,
StepIn: 2,
- StepMin: 3,
- StepInMin: 4,
- StepFrame: 5 };
+ StepFrame: 3 };
// The different types of scripts matching enum ScriptType in objects.h.
Debug.ScriptType = { Native: 0,
@@ -106,7 +102,7 @@ var debugger_flags = {
getValue: function() { return this.value; },
setValue: function(value) {
this.value = !!value;
- %SetDisableBreak(!this.value);
+ %SetBreakPointsActive(this.value);
}
},
breakOnCaughtException: {
@@ -234,7 +230,7 @@ BreakPoint.prototype.isTriggered = function(exec_state) {
try {
var mirror = exec_state.frame(0).evaluate(this.condition());
// If no sensible mirror or non true value break point not triggered.
- if (!(mirror instanceof ValueMirror) || !ToBoolean(mirror.value_)) {
+ if (!(mirror instanceof ValueMirror) || !mirror.value_) {
return false;
}
} catch (e) {
@@ -947,23 +943,20 @@ function ExecutionState(break_id) {
this.selected_frame = 0;
}
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
- opt_callframe) {
- var action = Debug.StepAction.StepIn;
- if (!IS_UNDEFINED(opt_action)) action = ToNumber(opt_action);
- var count = opt_count ? ToNumber(opt_count) : 1;
- var callFrameId = 0;
- if (!IS_UNDEFINED(opt_callframe)) {
- callFrameId = opt_callframe.details_.frameId();
+ExecutionState.prototype.prepareStep = function(action) {
+ if (action === Debug.StepAction.StepIn ||
+ action === Debug.StepAction.StepOut ||
+ action === Debug.StepAction.StepNext ||
+ action === Debug.StepAction.StepFrame) {
+ return %PrepareStep(this.break_id, action);
}
-
- return %PrepareStep(this.break_id, action, count, callFrameId);
+ throw MakeTypeError(kDebuggerType);
};
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
opt_additional_context) {
return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
- ToBoolean(disable_break),
+ TO_BOOLEAN(disable_break),
opt_additional_context));
};
@@ -985,7 +978,7 @@ ExecutionState.prototype.frame = function(opt_index) {
};
ExecutionState.prototype.setSelectedFrame = function(index) {
- var i = ToNumber(index);
+ var i = TO_NUMBER(index);
if (i < 0 || i >= this.frameCount()) {
throw MakeTypeError(kDebuggerFrame);
}
@@ -1421,7 +1414,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
var key = request.command.toLowerCase();
var handler = DebugCommandProcessor.prototype.dispatch_[key];
if (IS_FUNCTION(handler)) {
- %_CallFunction(this, request, response, handler);
+ %_Call(handler, this, request, response);
} else {
throw MakeError(kDebugger,
'Unknown command "' + request.command + '" in request');
@@ -1432,7 +1425,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
response = this.createResponse();
}
response.success = false;
- response.message = ToString(e);
+ response.message = TO_STRING(e);
}
// Return the response as a JSON encoded string.
@@ -1449,7 +1442,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
'"request_seq":' + request.seq + ',' +
'"type":"response",' +
'"success":false,' +
- '"message":"Internal error: ' + ToString(e) + '"}';
+ '"message":"Internal error: ' + TO_STRING(e) + '"}';
}
} catch (e) {
// Failed in one of the catch blocks above - most generic error.
@@ -1461,28 +1454,15 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
// Check for arguments for continue.
if (request.arguments) {
- var count = 1;
var action = Debug.StepAction.StepIn;
// Pull out arguments.
var stepaction = request.arguments.stepaction;
- var stepcount = request.arguments.stepcount;
-
- // Get the stepcount argument if any.
- if (stepcount) {
- count = ToNumber(stepcount);
- if (count < 0) {
- throw MakeError(kDebugger,
- 'Invalid stepcount argument "' + stepcount + '".');
- }
- }
// Get the stepaction argument.
if (stepaction) {
if (stepaction == 'in') {
action = Debug.StepAction.StepIn;
- } else if (stepaction == 'min') {
- action = Debug.StepAction.StepMin;
} else if (stepaction == 'next') {
action = Debug.StepAction.StepNext;
} else if (stepaction == 'out') {
@@ -1494,7 +1474,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
}
// Set up the VM for stepping.
- this.exec_state_.prepareStep(action, count);
+ this.exec_state_.prepareStep(action);
}
// VM should be running after executing this request.
@@ -1545,7 +1525,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Find the function through a global evaluate.
f = this.exec_state_.evaluateGlobal(target).value();
} catch (e) {
- response.failed('Error: "' + ToString(e) +
+ response.failed('Error: "' + TO_STRING(e) +
'" evaluating "' + target + '"');
return;
}
@@ -1634,7 +1614,7 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
}
// Pull out arguments.
- var break_point = ToNumber(request.arguments.breakpoint);
+ var break_point = TO_NUMBER(request.arguments.breakpoint);
var enabled = request.arguments.enabled;
var condition = request.arguments.condition;
var ignoreCount = request.arguments.ignoreCount;
@@ -1710,7 +1690,7 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
}
// Pull out arguments.
- var break_point = ToNumber(request.arguments.breakpoint);
+ var break_point = TO_NUMBER(request.arguments.breakpoint);
// Check for legal arguments.
if (!break_point) {
@@ -1968,7 +1948,7 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// With no scope argument just return top scope.
var scope_index = 0;
if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
- scope_index = ToNumber(request.arguments.number);
+ scope_index = TO_NUMBER(request.arguments.number);
if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
return response.failed('Invalid scope number');
}
@@ -1992,11 +1972,11 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
return value_mirror.value();
} else if ("stringDescription" in value_description) {
if (value_description.type == MirrorType.BOOLEAN_TYPE) {
- return ToBoolean(value_description.stringDescription);
+ return TO_BOOLEAN(value_description.stringDescription);
} else if (value_description.type == MirrorType.NUMBER_TYPE) {
- return ToNumber(value_description.stringDescription);
+ return TO_NUMBER(value_description.stringDescription);
} if (value_description.type == MirrorType.STRING_TYPE) {
- return ToString(value_description.stringDescription);
+ return TO_STRING(value_description.stringDescription);
} else {
throw MakeError(kDebugger, "Unknown type");
}
@@ -2032,7 +2012,7 @@ DebugCommandProcessor.prototype.setVariableValueRequest_ =
if (IS_UNDEFINED(scope_description.number)) {
response.failed('Missing scope number');
}
- var scope_index = ToNumber(scope_description.number);
+ var scope_index = TO_NUMBER(scope_description.number);
var scope = scope_holder.scope(scope_index);
@@ -2064,7 +2044,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// The expression argument could be an integer so we convert it to a
// string.
try {
- expression = ToString(expression);
+ expression = TO_STRING(expression);
} catch(e) {
return response.failed('Failed to convert expression argument to string');
}
@@ -2094,7 +2074,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (global) {
// Evaluate in the native context.
response.body = this.exec_state_.evaluateGlobal(
- expression, ToBoolean(disable_break), additional_context_object);
+ expression, TO_BOOLEAN(disable_break), additional_context_object);
return;
}
@@ -2110,18 +2090,18 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
- var frame_number = ToNumber(frame);
+ var frame_number = TO_NUMBER(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
// Evaluate in the specified frame.
response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, ToBoolean(disable_break), additional_context_object);
+ expression, TO_BOOLEAN(disable_break), additional_context_object);
return;
} else {
// Evaluate in the selected frame.
response.body = this.exec_state_.frame().evaluate(
- expression, ToBoolean(disable_break), additional_context_object);
+ expression, TO_BOOLEAN(disable_break), additional_context_object);
return;
}
};
@@ -2142,7 +2122,7 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
// Set 'includeSource' option for script lookup.
if (!IS_UNDEFINED(request.arguments.includeSource)) {
- var includeSource = ToBoolean(request.arguments.includeSource);
+ var includeSource = TO_BOOLEAN(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
@@ -2210,7 +2190,7 @@ DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
to_line = request.arguments.toLine;
if (!IS_UNDEFINED(request.arguments.frame)) {
- var frame_number = ToNumber(request.arguments.frame);
+ var frame_number = TO_NUMBER(request.arguments.frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
@@ -2246,7 +2226,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (request.arguments) {
// Pull out arguments.
if (!IS_UNDEFINED(request.arguments.types)) {
- types = ToNumber(request.arguments.types);
+ types = TO_NUMBER(request.arguments.types);
if (IsNaN(types) || types < 0) {
return response.failed('Invalid types "' +
request.arguments.types + '"');
@@ -2254,7 +2234,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
}
if (!IS_UNDEFINED(request.arguments.includeSource)) {
- includeSource = ToBoolean(request.arguments.includeSource);
+ includeSource = TO_BOOLEAN(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
@@ -2269,7 +2249,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
var filterStr = null;
var filterNum = null;
if (!IS_UNDEFINED(request.arguments.filter)) {
- var num = ToNumber(request.arguments.filter);
+ var num = TO_NUMBER(request.arguments.filter);
if (!IsNaN(num)) {
filterNum = num;
}
@@ -2405,7 +2385,7 @@ DebugCommandProcessor.prototype.restartFrameRequest_ = function(
var frame_mirror;
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
- var frame_number = ToNumber(frame);
+ var frame_number = TO_NUMBER(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
@@ -2607,6 +2587,9 @@ function ValueToProtocolValue_(value, mirror_serializer) {
utils.InstallConstants(global, [
"Debug", Debug,
"DebugCommandProcessor", DebugCommandProcessor,
+ "BreakEvent", BreakEvent,
+ "CompileEvent", CompileEvent,
+ "BreakPoint", BreakPoint,
]);
// Functions needed by the debugger runtime.
diff --git a/chromium/v8/src/debug/ia32/debug-ia32.cc b/chromium/v8/src/debug/ia32/debug-ia32.cc
index fb8d495af83..d489a014413 100644
--- a/chromium/v8/src/debug/ia32/debug-ia32.cc
+++ b/chromium/v8/src/debug/ia32/debug-ia32.cc
@@ -23,24 +23,24 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(pc, kSize);
+ CodePatcher patcher(isolate, pc, kSize);
// Add a label for checking the size of the code used for returning.
Label check_codesize;
@@ -105,33 +105,29 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, -1 * kPointerSize));
__ pop(edi); // Function.
__ pop(ebp);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Clear new.target register as a safety measure.
+ __ mov(edx, masm->isolate()->factory()->undefined_value());
+
// Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
+ __ jmp(ebx);
}
diff --git a/chromium/v8/src/debug/liveedit.cc b/chromium/v8/src/debug/liveedit.cc
index 8a936ac177b..f1f3f2391a6 100644
--- a/chromium/v8/src/debug/liveedit.cc
+++ b/chromium/v8/src/debug/liveedit.cc
@@ -4,6 +4,8 @@
#include "src/debug/liveedit.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
@@ -13,9 +15,7 @@
#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/v8.h"
#include "src/v8memory.h"
@@ -811,10 +811,6 @@ bool LiveEdit::SetAfterBreakTarget(Debug* debug) {
switch (debug->thread_local_.frame_drop_mode_) {
case FRAMES_UNTOUCHED:
return false;
- case FRAME_DROPPED_IN_IC_CALL:
- // We must have been calling IC stub. Do not go there anymore.
- code = isolate->builtins()->builtin(Builtins::kPlainReturn_LiveEdit);
- break;
case FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
// Debug break slot stub does not return normally, instead it manually
// cleans the stack and jumps. We should patch the jump address.
@@ -910,7 +906,7 @@ class ReplacingVisitor : public ObjectVisitor {
: original_(original), substitution_(substitution) {
}
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if (*p == original_) {
*p = substitution_;
@@ -918,14 +914,14 @@ class ReplacingVisitor : public ObjectVisitor {
}
}
- virtual void VisitCodeEntry(Address entry) {
+ void VisitCodeEntry(Address entry) override {
if (Code::GetObjectFromEntryAddress(entry) == original_) {
Address substitution_entry = substitution_->instruction_start();
Memory::Address_at(entry) = substitution_entry;
}
}
- virtual void VisitCodeTarget(RelocInfo* rinfo) {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
Address substitution_entry = substitution_->instruction_start();
@@ -933,9 +929,7 @@ class ReplacingVisitor : public ObjectVisitor {
}
}
- virtual void VisitDebugTarget(RelocInfo* rinfo) {
- VisitCodeTarget(rinfo);
- }
+ void VisitDebugTarget(RelocInfo* rinfo) override { VisitCodeTarget(rinfo); }
private:
Code* original_;
@@ -1148,9 +1142,6 @@ void LiveEdit::ReplaceFunctionCode(
LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
- shared_info->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kJSConstructStubGeneric));
-
DeoptimizeDependentFunctions(*shared_info);
isolate->compilation_cache()->Remove(shared_info);
}
@@ -1312,7 +1303,8 @@ static Handle<Code> PatchPositionsInCode(
int new_position = TranslatePosition(position,
position_change_array);
if (position != new_position) {
- RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
+ RelocInfo info_copy(rinfo->isolate(), rinfo->pc(), rinfo->rmode(),
+ new_position, NULL);
buffer_writer.Write(&info_copy);
continue;
}
@@ -1495,17 +1487,13 @@ static bool FixTryCatchHandler(StackFrame* top_frame,
// a. successful work of frame dropper code which eventually gets control,
// b. being compatible with regular stack structure for various stack
// iterators.
-// Returns address of stack allocated pointer to restarted function,
-// the value that is called 'restarter_frame_function_pointer'. The value
-// at this address (possibly updated by GC) may be used later when preparing
-// 'step in' operation.
// Frame structure (conforms InternalFrame structure):
// -- code
// -- SMI maker
// -- function (slot is called "context")
// -- frame base
-static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
+static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+ Handle<Code> code) {
DCHECK(bottom_js_frame->is_java_script());
Address fp = bottom_js_frame->fp();
@@ -1517,9 +1505,6 @@ static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
Smi::FromInt(StackFrame::INTERNAL);
-
- return reinterpret_cast<Object**>(&Memory::Object_at(
- fp + StandardFrameConstants::kContextOffset));
}
@@ -1527,11 +1512,9 @@ static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
// frames in range. Anyway the bottom frame is restarted rather than dropped,
// and therefore has to be a JavaScript frame.
// Returns error message or NULL.
-static const char* DropFrames(Vector<StackFrame*> frames,
- int top_frame_index,
+static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
int bottom_js_frame_index,
- LiveEdit::FrameDropMode* mode,
- Object*** restarter_frame_function_pointer) {
+ LiveEdit::FrameDropMode* mode) {
if (!LiveEdit::kFrameDropperSupported) {
return "Stack manipulations are not supported in this architecture.";
}
@@ -1546,12 +1529,8 @@ static const char* DropFrames(Vector<StackFrame*> frames,
Isolate* isolate = bottom_js_frame->isolate();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding = true;
- if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->is_debug_stub()) {
- // OK, we can drop inline cache calls.
- *mode = LiveEdit::FRAME_DROPPED_IN_IC_CALL;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
+ if (pre_top_frame_code ==
+ isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
// OK, we can drop debug break slot.
*mode = LiveEdit::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame_code ==
@@ -1645,10 +1624,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
*top_frame_pc_address = code->entry();
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
- *restarter_frame_function_pointer =
- SetUpFrameDropperFrame(bottom_js_frame, code);
-
- DCHECK((**restarter_frame_function_pointer)->IsJSFunction());
+ SetUpFrameDropperFrame(bottom_js_frame, code);
for (Address a = unused_stack_top;
a < unused_stack_bottom;
@@ -1664,20 +1640,60 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Finding no such frames does not mean error.
class MultipleFunctionTarget {
public:
- MultipleFunctionTarget(Handle<JSArray> shared_info_array,
- Handle<JSArray> result)
- : m_shared_info_array(shared_info_array),
- m_result(result) {}
+ MultipleFunctionTarget(Handle<JSArray> old_shared_array,
+ Handle<JSArray> new_shared_array,
+ Handle<JSArray> result)
+ : old_shared_array_(old_shared_array),
+ new_shared_array_(new_shared_array),
+ result_(result) {}
bool MatchActivation(StackFrame* frame,
LiveEdit::FunctionPatchabilityStatus status) {
- return CheckActivation(m_shared_info_array, m_result, frame, status);
+ return CheckActivation(old_shared_array_, result_, frame, status);
}
const char* GetNotFoundMessage() const {
return NULL;
}
+ bool FrameUsesNewTarget(StackFrame* frame) {
+ if (!frame->is_java_script()) return false;
+ JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
+ Handle<SharedFunctionInfo> old_shared(jsframe->function()->shared());
+ Isolate* isolate = old_shared->GetIsolate();
+ int len = GetArrayLength(old_shared_array_);
+ // Find corresponding new shared function info and return whether it
+ // references new.target.
+ for (int i = 0; i < len; i++) {
+ HandleScope scope(isolate);
+ Handle<Object> old_element =
+ Object::GetElement(isolate, old_shared_array_, i).ToHandleChecked();
+ if (!old_shared.is_identical_to(UnwrapSharedFunctionInfoFromJSValue(
+ Handle<JSValue>::cast(old_element)))) {
+ continue;
+ }
+
+ Handle<Object> new_element =
+ Object::GetElement(isolate, new_shared_array_, i).ToHandleChecked();
+ if (new_element->IsUndefined()) return false;
+ Handle<SharedFunctionInfo> new_shared =
+ UnwrapSharedFunctionInfoFromJSValue(
+ Handle<JSValue>::cast(new_element));
+ if (new_shared->scope_info()->HasNewTarget()) {
+ SetElementSloppy(
+ result_, i,
+ Handle<Smi>(
+ Smi::FromInt(
+ LiveEdit::FUNCTION_BLOCKED_NO_NEW_TARGET_ON_RESTART),
+ isolate));
+ return true;
+ }
+ return false;
+ }
+ return false;
+ }
+
private:
- Handle<JSArray> m_shared_info_array;
- Handle<JSArray> m_result;
+ Handle<JSArray> old_shared_array_;
+ Handle<JSArray> new_shared_array_;
+ Handle<JSArray> result_;
};
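
FrameUsesNewTarget relies on the old and new shared-info arrays being parallel: the index of the frame's function in the old array selects its replacement in the new one. A simplified sketch of the same lookup, with stand-in types instead of the V8 handles and JSArray wrappers:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct SharedInfo { bool has_new_target; };

    // Returns true (restart blocked) when the replacement for frame_fn
    // references new.target; mirrors MultipleFunctionTarget::FrameUsesNewTarget.
    bool FrameUsesNewTarget(const SharedInfo* frame_fn,
                            const std::vector<const SharedInfo*>& old_infos,
                            const std::vector<const SharedInfo*>& new_infos) {
      for (std::size_t i = 0; i < old_infos.size(); i++) {
        if (old_infos[i] != frame_fn) continue;     // not the frame's function
        if (new_infos[i] == nullptr) return false;  // no replacement recorded
        return new_infos[i]->has_new_target;        // blocked if it uses new.target
      }
      return false;  // the function is not being edited at all
    }

    int main() {
      SharedInfo old_fn{false}, new_fn{true};
      std::vector<const SharedInfo*> olds{&old_fn}, news{&new_fn};
      std::cout << FrameUsesNewTarget(&old_fn, olds, news) << "\n";  // prints 1
    }
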
@@ -1724,11 +1740,14 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE;
break;
}
- if (frame->is_java_script() &&
- JavaScriptFrame::cast(frame)->function()->shared()->is_generator()) {
- non_droppable_frame_found = true;
- non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
- break;
+ if (frame->is_java_script()) {
+ SharedFunctionInfo* shared =
+ JavaScriptFrame::cast(frame)->function()->shared();
+ if (shared->is_generator()) {
+ non_droppable_frame_found = true;
+ non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
+ break;
+ }
}
if (target.MatchActivation(
frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
@@ -1752,6 +1771,9 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
}
}
+ // We cannot restart a frame that uses new.target.
+ if (target.FrameUsesNewTarget(frames[bottom_js_frame_index])) return NULL;
+
if (!do_drop) {
// We are in check-only mode.
return NULL;
@@ -1763,10 +1785,8 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
}
LiveEdit::FrameDropMode drop_mode = LiveEdit::FRAMES_UNTOUCHED;
- Object** restarter_frame_function_pointer = NULL;
- const char* error_message = DropFrames(frames, top_frame_index,
- bottom_js_frame_index, &drop_mode,
- &restarter_frame_function_pointer);
+ const char* error_message =
+ DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode);
if (error_message != NULL) {
return error_message;
@@ -1780,8 +1800,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
break;
}
}
- debug->FramesHaveBeenDropped(
- new_id, drop_mode, restarter_frame_function_pointer);
+ debug->FramesHaveBeenDropped(new_id, drop_mode);
return NULL;
}
@@ -1789,9 +1808,10 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
// Fills result array with statuses of functions. Modifies the stack
// removing all listed function if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
- MultipleFunctionTarget target(shared_info_array, result);
- Isolate* isolate = shared_info_array->GetIsolate();
+ Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
+ Handle<JSArray> result, bool do_drop) {
+ MultipleFunctionTarget target(old_shared_array, new_shared_array, result);
+ Isolate* isolate = old_shared_array->GetIsolate();
const char* message =
DropActivationsInActiveThreadImpl(isolate, target, do_drop);
@@ -1799,7 +1819,7 @@ static const char* DropActivationsInActiveThread(
return message;
}
- int array_len = GetArrayLength(shared_info_array);
+ int array_len = GetArrayLength(old_shared_array);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
@@ -1855,16 +1875,16 @@ bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
class InactiveThreadActivationsChecker : public ThreadVisitor {
public:
- InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
+ InactiveThreadActivationsChecker(Handle<JSArray> old_shared_array,
Handle<JSArray> result)
- : shared_info_array_(shared_info_array), result_(result),
- has_blocked_functions_(false) {
- }
+ : old_shared_array_(old_shared_array),
+ result_(result),
+ has_blocked_functions_(false) {}
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- has_blocked_functions_ |= CheckActivation(
- shared_info_array_, result_, it.frame(),
- LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
+ has_blocked_functions_ |=
+ CheckActivation(old_shared_array_, result_, it.frame(),
+ LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
}
}
bool HasBlockedFunctions() {
@@ -1872,20 +1892,21 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
}
private:
- Handle<JSArray> shared_info_array_;
+ Handle<JSArray> old_shared_array_;
Handle<JSArray> result_;
bool has_blocked_functions_;
};
Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop) {
- Isolate* isolate = shared_info_array->GetIsolate();
- int len = GetArrayLength(shared_info_array);
+ Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
+ bool do_drop) {
+ Isolate* isolate = old_shared_array->GetIsolate();
+ int len = GetArrayLength(old_shared_array);
- DCHECK(shared_info_array->HasFastElements());
- Handle<FixedArray> shared_info_array_elements(
- FixedArray::cast(shared_info_array->elements()));
+ DCHECK(old_shared_array->HasFastElements());
+ Handle<FixedArray> old_shared_array_elements(
+ FixedArray::cast(old_shared_array->elements()));
Handle<JSArray> result = isolate->factory()->NewJSArray(len);
Handle<FixedArray> result_elements =
@@ -1901,12 +1922,12 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// running (as we wouldn't want to restart them, because we don't know where
// to restart them from) or suspended. Fail if any one corresponds to the set
// of functions being edited.
- if (FindActiveGenerators(shared_info_array_elements, result_elements, len)) {
+ if (FindActiveGenerators(old_shared_array_elements, result_elements, len)) {
return result;
}
// Check inactive threads. Fail if some functions are blocked there.
- InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
+ InactiveThreadActivationsChecker inactive_threads_checker(old_shared_array,
result);
isolate->thread_manager()->IterateArchivedThreads(
&inactive_threads_checker);
@@ -1915,8 +1936,8 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
}
// Try to drop activations from the current stack.
- const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop);
+ const char* error_message = DropActivationsInActiveThread(
+ old_shared_array, new_shared_array, result, do_drop);
if (error_message != NULL) {
// Add error message as an array extra element.
Handle<String> str =
@@ -1949,6 +1970,17 @@ class SingleFrameTarget {
LiveEdit::FunctionPatchabilityStatus saved_status() {
return m_saved_status;
}
+ void set_status(LiveEdit::FunctionPatchabilityStatus status) {
+ m_saved_status = status;
+ }
+
+ bool FrameUsesNewTarget(StackFrame* frame) {
+ if (!frame->is_java_script()) return false;
+ JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
+ Handle<SharedFunctionInfo> shared(jsframe->function()->shared());
+ return shared->scope_info()->HasNewTarget();
+ }
+
private:
JavaScriptFrame* m_frame;
LiveEdit::FunctionPatchabilityStatus m_saved_status;
diff --git a/chromium/v8/src/debug/liveedit.h b/chromium/v8/src/debug/liveedit.h
index 251368f0cba..f3d6c54c0ed 100644
--- a/chromium/v8/src/debug/liveedit.h
+++ b/chromium/v8/src/debug/liveedit.h
@@ -61,8 +61,6 @@ class LiveEdit : AllStatic {
enum FrameDropMode {
// No frame has been dropped.
FRAMES_UNTOUCHED,
- // The top JS frame had been calling IC stub. IC stub mustn't be called now.
- FRAME_DROPPED_IN_IC_CALL,
// The top JS frame had been calling debug break slot stub. Patch the
// address this stub jumps to in the end.
FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
@@ -117,7 +115,8 @@ class LiveEdit : AllStatic {
// restarts the lowest found frames and drops all other frames above
// if possible and if do_drop is true.
static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop);
+ Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
+ bool do_drop);
// Restarts the call frame and completely drops all frames above it.
// Return error message or NULL.
@@ -131,7 +130,8 @@ class LiveEdit : AllStatic {
FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
FUNCTION_REPLACED_ON_ACTIVE_STACK = 5,
FUNCTION_BLOCKED_UNDER_GENERATOR = 6,
- FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7
+ FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7,
+ FUNCTION_BLOCKED_NO_NEW_TARGET_ON_RESTART = 8
};
// Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -172,7 +172,8 @@ class LiveEdit : AllStatic {
*/
// A size of frame base including fp. Padding words starts right above
// the base.
- static const int kFrameDropperFrameSize = 4;
+ static const int kFrameDropperFrameSize =
+ 4 + StandardFrameConstants::kCPSlotCount;
// A number of words that should be reserved on stack for the LiveEdit use.
// Stored on stack in form of Smi.
static const int kFramePaddingInitialSize = 1;
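
The frame base grows by one slot on targets with an embedded constant pool, which the new definition folds in through kCPSlotCount. A hedged sketch of the arithmetic, with the flag value chosen arbitrarily for illustration:

    #include <iostream>

    constexpr bool kEmbeddedConstantPool = false;  // platform-dependent assumption
    constexpr int kCPSlotCount = kEmbeddedConstantPool ? 1 : 0;
    constexpr int kFrameDropperFrameSize = 4 + kCPSlotCount;

    int main() { std::cout << kFrameDropperFrameSize << "\n"; }
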
@@ -364,6 +365,7 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
friend class JSArrayBasedStruct<SharedInfoWrapper>;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif /* V8_DEBUG_LIVEEDIT_H_ */
diff --git a/chromium/v8/src/debug/liveedit.js b/chromium/v8/src/debug/liveedit.js
index 27425c154d0..85e55c4c18c 100644
--- a/chromium/v8/src/debug/liveedit.js
+++ b/chromium/v8/src/debug/liveedit.js
@@ -142,14 +142,17 @@
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code needs to be patched.
- var replaced_function_infos = new GlobalArray();
+ var replaced_function_old_infos = new GlobalArray();
+ var replaced_function_new_infos = new GlobalArray();
for (var i = 0; i < replace_code_list.length; i++) {
- var live_shared_function_infos =
- replace_code_list[i].live_shared_function_infos;
-
- if (live_shared_function_infos) {
- for (var j = 0; j < live_shared_function_infos.length; j++) {
- replaced_function_infos.push(live_shared_function_infos[j]);
+ var old_infos = replace_code_list[i].live_shared_function_infos;
+ var new_info =
+ replace_code_list[i].corresponding_node.info.shared_function_info;
+
+ if (old_infos) {
+ for (var j = 0; j < old_infos.length; j++) {
+ replaced_function_old_infos.push(old_infos[j]);
+ replaced_function_new_infos.push(new_info);
}
}
}
@@ -159,7 +162,9 @@
// Check that functions being patched are not currently on the stack, or drop them.
var dropped_functions_number =
- CheckStackActivations(replaced_function_infos, change_log);
+ CheckStackActivations(replaced_function_old_infos,
+ replaced_function_new_infos,
+ change_log);
// Our current implementation requires the client to manually issue a
// "step in" command for a correct stack state if the stack was modified.
@@ -910,21 +915,24 @@
// For an array of wrapped shared function infos, checks that none of them
// have activations on the stack (of any thread). Throws a Failure exception
// if this proves to be false.
- function CheckStackActivations(shared_wrapper_list, change_log) {
- var shared_list = new GlobalArray();
- for (var i = 0; i < shared_wrapper_list.length; i++) {
- shared_list[i] = shared_wrapper_list[i].info;
+ function CheckStackActivations(old_shared_wrapper_list,
+ new_shared_list,
+ change_log) {
+ var old_shared_list = new GlobalArray();
+ for (var i = 0; i < old_shared_wrapper_list.length; i++) {
+ old_shared_list[i] = old_shared_wrapper_list[i].info;
}
- var result = %LiveEditCheckAndDropActivations(shared_list, true);
- if (result[shared_list.length]) {
+ var result = %LiveEditCheckAndDropActivations(
+ old_shared_list, new_shared_list, true);
+ if (result[old_shared_wrapper_list.length]) {
// Extra array element may contain error message.
- throw new Failure(result[shared_list.length]);
+ throw new Failure(result[old_shared_wrapper_list.length]);
}
var problems = new GlobalArray();
var dropped = new GlobalArray();
- for (var i = 0; i < shared_list.length; i++) {
- var shared = shared_wrapper_list[i];
+ for (var i = 0; i < old_shared_list.length; i++) {
+ var shared = old_shared_wrapper_list[i];
if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
dropped.push({ name: shared.function_name } );
} else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
@@ -957,7 +965,8 @@
BLOCKED_UNDER_NATIVE_CODE: 4,
REPLACED_ON_ACTIVE_STACK: 5,
BLOCKED_UNDER_GENERATOR: 6,
- BLOCKED_ACTIVE_GENERATOR: 7
+ BLOCKED_ACTIVE_GENERATOR: 7,
+ BLOCKED_NO_NEW_TARGET_ON_RESTART: 8
};
FunctionPatchabilityStatus.SymbolName = function(code) {
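
Callers read the runtime result as one status per function plus an optional trailing error element, exactly as CheckStackActivations does above. A rough C++ rendering of that consumption; the Result and Status types are made-up stand-ins and only a few status values are shown:

    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    enum Status {
      AVAILABLE_FOR_PATCH = 1,
      REPLACED_ON_ACTIVE_STACK = 5,
      BLOCKED_NO_NEW_TARGET_ON_RESTART = 8
    };

    struct Result {
      std::vector<Status> statuses;      // result[0 .. len-1]
      std::optional<std::string> error;  // result[len], when present
    };

    void Report(const Result& r) {
      if (r.error) { std::cout << "failure: " << *r.error << "\n"; return; }
      for (std::size_t i = 0; i < r.statuses.size(); i++) {
        if (r.statuses[i] == REPLACED_ON_ACTIVE_STACK)
          std::cout << "function " << i << " dropped\n";
        else if (r.statuses[i] != AVAILABLE_FOR_PATCH)
          std::cout << "function " << i << " blocked: " << r.statuses[i] << "\n";
      }
    }

    int main() {
      Report({{AVAILABLE_FOR_PATCH, BLOCKED_NO_NEW_TARGET_ON_RESTART}, {}});
    }
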
diff --git a/chromium/v8/src/debug/mips/OWNERS b/chromium/v8/src/debug/mips/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/debug/mips/OWNERS
+++ b/chromium/v8/src/debug/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/debug/mips/debug-mips.cc b/chromium/v8/src/debug/mips/debug-mips.cc
index 30bdcac1b6f..c5c58d044b0 100644
--- a/chromium/v8/src/debug/mips/debug-mips.cc
+++ b/chromium/v8/src/debug/mips/debug-mips.cc
@@ -24,25 +24,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
// nop(DEBUG_BREAK_NOP)
@@ -108,26 +108,21 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ li(at, Operand(restarter_frame_function_slot));
- __ sw(zero_reg, MemOperand(at, 0));
-
// We do not know our frame height, but set sp based on fp.
__ Subu(sp, fp, Operand(kPointerSize));
__ Pop(ra, fp, a1); // Return address, Frame, Function.
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+
// Load context from the function.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
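
ClearDebugBreakSlot and PatchDebugBreakSlot now thread the isolate through to CodePatcher, but the underlying idea is unchanged: a debug break slot is a fixed-size run of nops that can be rewritten to a call and back. A conceptual model with illustrative sizes and opcodes, not the real per-architecture encodings:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    constexpr int kSlotSize = 5;  // e.g. one rel32 call; a stand-in, not
                                  // Assembler::kDebugBreakSlotLength
    constexpr std::uint8_t kNop = 0x90;
    constexpr std::uint8_t kCall = 0xE8;

    using Slot = std::array<std::uint8_t, kSlotSize>;

    void ClearDebugBreakSlot(Slot& slot) { slot.fill(kNop); }  // back to nops

    void PatchDebugBreakSlot(Slot& slot, std::int32_t rel) {   // nops -> call
      slot[0] = kCall;
      for (int i = 0; i < 4; i++) slot[1 + i] = (rel >> (8 * i)) & 0xFF;
    }

    int main() {
      Slot slot;
      ClearDebugBreakSlot(slot);
      PatchDebugBreakSlot(slot, 0x1234);
      for (auto b : slot) std::printf("%02x ", b);
      std::printf("\n");
    }
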
diff --git a/chromium/v8/src/debug/mips64/OWNERS b/chromium/v8/src/debug/mips64/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/debug/mips64/OWNERS
+++ b/chromium/v8/src/debug/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/debug/mips64/debug-mips64.cc b/chromium/v8/src/debug/mips64/debug-mips64.cc
index 9b4d355d796..1d65fd9efde 100644
--- a/chromium/v8/src/debug/mips64/debug-mips64.cc
+++ b/chromium/v8/src/debug/mips64/debug-mips64.cc
@@ -23,25 +23,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
// nop(DEBUG_BREAK_NOP)
@@ -110,26 +110,21 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ li(at, Operand(restarter_frame_function_slot));
- __ sw(zero_reg, MemOperand(at, 0));
-
// We do not know our frame height, but set sp based on fp.
__ Dsubu(sp, fp, Operand(kPointerSize));
__ Pop(ra, fp, a1); // Return address, Frame, Function.
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+
// Load context from the function.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
diff --git a/chromium/v8/src/debug/mirrors.js b/chromium/v8/src/debug/mirrors.js
index 11f9e485c13..1fd5fa9ecd7 100644
--- a/chromium/v8/src/debug/mirrors.js
+++ b/chromium/v8/src/debug/mirrors.js
@@ -8,22 +8,28 @@
// ----------------------------------------------------------------------------
// Imports
-var FunctionSourceString;
+var ErrorToString;
var GlobalArray = global.Array;
var IsNaN = global.isNaN;
var JSONStringify = global.JSON.stringify;
+var MakeError;
+var MapEntries;
+var MapIteratorNext;
var MathMin = global.Math.min;
var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var SetIteratorNext;
+var SetValues;
var SymbolToString;
-var ToBoolean;
-var ToString;
utils.Import(function(from) {
- FunctionSourceString = from.FunctionSourceString;
+ ErrorToString = from.ErrorToString;
+ MakeError = from.MakeError;
+ MapEntries = from.MapEntries;
+ MapIteratorNext = from.MapIteratorNext;
+ SetIteratorNext = from.SetIteratorNext;
+ SetValues = from.SetValues;
SymbolToString = from.SymbolToString;
- ToBoolean = from.ToBoolean;
- ToString = from.ToString;
});
// ----------------------------------------------------------------------------
@@ -108,7 +114,7 @@ function ClearMirrorCache(value) {
function ObjectIsPromise(value) {
- return IS_SPEC_OBJECT(value) &&
+ return IS_RECEIVER(value) &&
!IS_UNDEFINED(%DebugGetProperty(value, promiseStatusSymbol));
}
@@ -231,11 +237,6 @@ function inherits(ctor, superCtor) {
// Maximum length when sending strings through the JSON protocol.
var kMaxProtocolStringLength = 80;
-// Different kind of properties.
-var PropertyKind = {};
-PropertyKind.Named = 1;
-PropertyKind.Indexed = 2;
-
// A copy of the PropertyType enum from property-details.h
var PropertyType = {};
@@ -536,7 +537,7 @@ Mirror.prototype.toText = function() {
* @extends Mirror
*/
function ValueMirror(type, value, transient) {
- %_CallFunction(this, type, Mirror);
+ %_Call(Mirror, this, type);
this.value_ = value;
if (!transient) {
this.allocateHandle_();
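
The rewrites throughout mirrors.js are all the same mechanical change: %_CallFunction(receiver, args..., fn) becomes %_Call(fn, receiver, args...), moving the callee to the front. A C++ analogue of the callee-first shape using std::invoke, with a made-up Mirror type standing in for the mirror hierarchy:

    #include <functional>
    #include <iostream>
    #include <string>

    struct Mirror { std::string type; };

    // Plays the role of the Mirror "constructor" being delegated to.
    void Init(Mirror& self, const std::string& type) { self.type = type; }

    int main() {
      Mirror m;
      // Callee first, then the receiver, then the arguments -- the %_Call
      // shape, where %_CallFunction used to put the callee last.
      std::invoke(Init, m, std::string("object"));
      std::cout << m.type << "\n";  // prints "object"
    }
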
@@ -582,7 +583,7 @@ ValueMirror.prototype.value = function() {
* @extends ValueMirror
*/
function UndefinedMirror() {
- %_CallFunction(this, MirrorType.UNDEFINED_TYPE, UNDEFINED, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.UNDEFINED_TYPE, UNDEFINED);
}
inherits(UndefinedMirror, ValueMirror);
@@ -598,7 +599,7 @@ UndefinedMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function NullMirror() {
- %_CallFunction(this, MirrorType.NULL_TYPE, null, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.NULL_TYPE, null);
}
inherits(NullMirror, ValueMirror);
@@ -615,7 +616,7 @@ NullMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function BooleanMirror(value) {
- %_CallFunction(this, MirrorType.BOOLEAN_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.BOOLEAN_TYPE, value);
}
inherits(BooleanMirror, ValueMirror);
@@ -632,7 +633,7 @@ BooleanMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function NumberMirror(value) {
- %_CallFunction(this, MirrorType.NUMBER_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.NUMBER_TYPE, value);
}
inherits(NumberMirror, ValueMirror);
@@ -649,7 +650,7 @@ NumberMirror.prototype.toText = function() {
* @extends ValueMirror
*/
function StringMirror(value) {
- %_CallFunction(this, MirrorType.STRING_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.STRING_TYPE, value);
}
inherits(StringMirror, ValueMirror);
@@ -678,7 +679,7 @@ StringMirror.prototype.toText = function() {
* @extends Mirror
*/
function SymbolMirror(value) {
- %_CallFunction(this, MirrorType.SYMBOL_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.SYMBOL_TYPE, value);
}
inherits(SymbolMirror, ValueMirror);
@@ -689,7 +690,7 @@ SymbolMirror.prototype.description = function() {
SymbolMirror.prototype.toText = function() {
- return %_CallFunction(this.value_, SymbolToString);
+ return %_Call(SymbolToString, this.value_);
}
@@ -703,7 +704,7 @@ SymbolMirror.prototype.toText = function() {
*/
function ObjectMirror(value, type, transient) {
type = type || MirrorType.OBJECT_TYPE;
- %_CallFunction(this, type, value, transient, ValueMirror);
+ %_Call(ValueMirror, this, type, value, transient);
}
inherits(ObjectMirror, ValueMirror);
@@ -742,19 +743,6 @@ ObjectMirror.prototype.hasIndexedInterceptor = function() {
};
-// Get all own property names except for private symbols.
-function TryGetPropertyNames(object) {
- try {
- // TODO(yangguo): Should there be a special debugger implementation of
- // %GetOwnPropertyNames that doesn't perform access checks?
- return %GetOwnPropertyNames(object, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
- } catch (e) {
- // Might have hit a failed access check.
- return [];
- }
-}
-
-
/**
* Return the property names for this object.
* @param {number} kind Indicate whether named, indexed or both kinds of
@@ -763,66 +751,8 @@ function TryGetPropertyNames(object) {
value
* @return {Array} Property names for this object
*/
-ObjectMirror.prototype.propertyNames = function(kind, limit) {
- // Find kind and limit and allocate array for the result
- kind = kind || PropertyKind.Named | PropertyKind.Indexed;
-
- var propertyNames;
- var elementNames;
- var total = 0;
-
- // Find all the named properties.
- if (kind & PropertyKind.Named) {
- propertyNames = TryGetPropertyNames(this.value_);
- total += propertyNames.length;
-
- // Get names for named interceptor properties if any.
- if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(this.value_);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
- total += namedInterceptorNames.length;
- }
- }
- }
-
- // Find all the indexed properties.
- if (kind & PropertyKind.Indexed) {
- // Get own element names.
- elementNames = %GetOwnElementNames(this.value_);
- total += elementNames.length;
-
- // Get names for indexed interceptor properties.
- if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(this.value_);
- if (indexedInterceptorNames) {
- elementNames = elementNames.concat(indexedInterceptorNames);
- total += indexedInterceptorNames.length;
- }
- }
- }
- limit = MathMin(limit || total, total);
-
- var names = new GlobalArray(limit);
- var index = 0;
-
- // Copy names for named properties.
- if (kind & PropertyKind.Named) {
- for (var i = 0; index < limit && i < propertyNames.length; i++) {
- names[index++] = propertyNames[i];
- }
- }
-
- // Copy names for indexed properties.
- if (kind & PropertyKind.Indexed) {
- for (var i = 0; index < limit && i < elementNames.length; i++) {
- names[index++] = elementNames[i];
- }
- }
-
- return names;
+ObjectMirror.prototype.propertyNames = function() {
+ return %GetOwnPropertyKeys(this.value_, PROPERTY_FILTER_NONE);
};
@@ -834,8 +764,8 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
specified value
* @return {Array} Property mirrors for this object
*/
-ObjectMirror.prototype.properties = function(kind, limit) {
- var names = this.propertyNames(kind, limit);
+ObjectMirror.prototype.properties = function() {
+ var names = this.propertyNames();
var properties = new GlobalArray(names.length);
for (var i = 0; i < names.length; i++) {
properties[i] = this.property(names[i]);
@@ -879,7 +809,7 @@ ObjectMirror.prototype.lookupProperty = function(value) {
// Look for property value in properties.
for (var i = 0; i < properties.length; i++) {
- // Skip properties which are defined through assessors.
+ // Skip properties which are defined through accessors.
var property = properties[i];
if (property.propertyType() != PropertyType.AccessorConstant) {
if (%_ObjectEquals(property.value_, value.value_)) {
@@ -953,7 +883,7 @@ ObjectMirror.GetInternalProperties = function(value) {
* @extends ObjectMirror
*/
function FunctionMirror(value) {
- %_CallFunction(this, value, MirrorType.FUNCTION_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.FUNCTION_TYPE);
this.resolved_ = true;
}
inherits(FunctionMirror, ObjectMirror);
@@ -979,6 +909,16 @@ FunctionMirror.prototype.name = function() {
/**
+ * Returns the displayName if it is set, otherwise name, otherwise inferred
+ * name.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.debugName = function() {
+ return %FunctionGetDebugName(this.value_);
+}
+
+
+/**
* Returns the inferred name of the function.
* @return {string} Name of the function
*/
@@ -996,7 +936,7 @@ FunctionMirror.prototype.source = function() {
// Return source if function is resolved. Otherwise just fall through to
// return undefined.
if (this.resolved()) {
- return FunctionSourceString(this.value_);
+ return %FunctionToString(this.value_);
}
};
@@ -1107,7 +1047,7 @@ FunctionMirror.prototype.toText = function() {
function UnresolvedFunctionMirror(value) {
// Construct this using the ValueMirror as an unresolved function is not a
// real object but just a string.
- %_CallFunction(this, MirrorType.FUNCTION_TYPE, value, ValueMirror);
+ %_Call(ValueMirror, this, MirrorType.FUNCTION_TYPE, value);
this.propertyCount_ = 0;
this.elementCount_ = 0;
this.resolved_ = false;
@@ -1157,7 +1097,7 @@ UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
* @extends ObjectMirror
*/
function ArrayMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
+ %_Call(ObjectMirror, this, value);
}
inherits(ArrayMirror, ObjectMirror);
@@ -1174,7 +1114,7 @@ ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
if (from_index > to_index) return new GlobalArray();
var values = new GlobalArray(to_index - from_index + 1);
for (var i = from_index; i <= to_index; i++) {
- var details = %DebugGetPropertyDetails(this.value_, ToString(i));
+ var details = %DebugGetPropertyDetails(this.value_, TO_STRING(i));
var value;
if (details) {
value = new PropertyMirror(this, i, details);
@@ -1194,7 +1134,7 @@ ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
* @extends ObjectMirror
*/
function DateMirror(value) {
- %_CallFunction(this, value, ObjectMirror);
+ %_Call(ObjectMirror, this, value);
}
inherits(DateMirror, ObjectMirror);
@@ -1212,7 +1152,7 @@ DateMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function RegExpMirror(value) {
- %_CallFunction(this, value, MirrorType.REGEXP_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.REGEXP_TYPE);
}
inherits(RegExpMirror, ObjectMirror);
@@ -1284,7 +1224,7 @@ RegExpMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function ErrorMirror(value) {
- %_CallFunction(this, value, MirrorType.ERROR_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.ERROR_TYPE);
}
inherits(ErrorMirror, ObjectMirror);
@@ -1302,7 +1242,7 @@ ErrorMirror.prototype.toText = function() {
// Use the same text representation as in messages.js.
var text;
try {
- text = %_CallFunction(this.value_, builtins.$errorToString);
+ text = %_Call(ErrorToString, this.value_);
} catch (e) {
text = '#<Error>';
}
@@ -1317,7 +1257,7 @@ ErrorMirror.prototype.toText = function() {
* @extends ObjectMirror
*/
function PromiseMirror(value) {
- %_CallFunction(this, value, MirrorType.PROMISE_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.PROMISE_TYPE);
}
inherits(PromiseMirror, ObjectMirror);
@@ -1346,7 +1286,7 @@ PromiseMirror.prototype.promiseValue = function() {
function MapMirror(value) {
- %_CallFunction(this, value, MirrorType.MAP_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.MAP_TYPE);
}
inherits(MapMirror, ObjectMirror);
@@ -1372,7 +1312,7 @@ MapMirror.prototype.entries = function(opt_limit) {
return result;
}
- var iter = %_CallFunction(this.value_, builtins.$mapEntries);
+ var iter = %_Call(MapEntries, this.value_);
var next;
while ((!opt_limit || result.length < opt_limit) &&
!(next = iter.next()).done) {
@@ -1386,7 +1326,7 @@ MapMirror.prototype.entries = function(opt_limit) {
function SetMirror(value) {
- %_CallFunction(this, value, MirrorType.SET_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.SET_TYPE);
}
inherits(SetMirror, ObjectMirror);
@@ -1395,7 +1335,7 @@ function IteratorGetValues_(iter, next_function, opt_limit) {
var result = [];
var next;
while ((!opt_limit || result.length < opt_limit) &&
- !(next = %_CallFunction(iter, next_function)).done) {
+ !(next = %_Call(next_function, iter)).done) {
result.push(next.value);
}
return result;
@@ -1414,13 +1354,13 @@ SetMirror.prototype.values = function(opt_limit) {
return %GetWeakSetValues(this.value_, opt_limit || 0);
}
- var iter = %_CallFunction(this.value_, builtins.$setValues);
- return IteratorGetValues_(iter, builtins.$setIteratorNext, opt_limit);
+ var iter = %_Call(SetValues, this.value_);
+ return IteratorGetValues_(iter, SetIteratorNext, opt_limit);
};
function IteratorMirror(value) {
- %_CallFunction(this, value, MirrorType.ITERATOR_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.ITERATOR_TYPE);
}
inherits(IteratorMirror, ObjectMirror);
@@ -1435,11 +1375,11 @@ inherits(IteratorMirror, ObjectMirror);
IteratorMirror.prototype.preview = function(opt_limit) {
if (IS_MAP_ITERATOR(this.value_)) {
return IteratorGetValues_(%MapIteratorClone(this.value_),
- builtins.$mapIteratorNext,
+ MapIteratorNext,
opt_limit);
} else if (IS_SET_ITERATOR(this.value_)) {
return IteratorGetValues_(%SetIteratorClone(this.value_),
- builtins.$setIteratorNext,
+ SetIteratorNext,
opt_limit);
}
};
@@ -1452,7 +1392,7 @@ IteratorMirror.prototype.preview = function(opt_limit) {
* @extends Mirror
*/
function GeneratorMirror(value) {
- %_CallFunction(this, value, MirrorType.GENERATOR_TYPE, ObjectMirror);
+ %_Call(ObjectMirror, this, value, MirrorType.GENERATOR_TYPE);
}
inherits(GeneratorMirror, ObjectMirror);
@@ -1519,7 +1459,7 @@ GeneratorMirror.prototype.receiver = function() {
* @extends Mirror
*/
function PropertyMirror(mirror, name, details) {
- %_CallFunction(this, MirrorType.PROPERTY_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.PROPERTY_TYPE);
this.mirror_ = mirror;
this.name_ = name;
this.value_ = details[0];
@@ -1662,7 +1602,7 @@ PropertyMirror.prototype.isNative = function() {
* @extends Mirror
*/
function InternalPropertyMirror(name, value) {
- %_CallFunction(this, MirrorType.INTERNAL_PROPERTY_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.INTERNAL_PROPERTY_TYPE);
this.name_ = name;
this.value_ = value;
}
@@ -1875,7 +1815,7 @@ FrameDetails.prototype.stepInPositionsImpl = function() {
* @extends Mirror
*/
function FrameMirror(break_id, index) {
- %_CallFunction(this, MirrorType.FRAME_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.FRAME_TYPE);
this.break_id_ = break_id;
this.index_ = index;
this.details_ = new FrameDetails(break_id, index);
@@ -2074,7 +2014,7 @@ FrameMirror.prototype.evaluate = function(source, disable_break,
this.details_.frameId(),
this.details_.inlinedFrameIndex(),
source,
- ToBoolean(disable_break),
+ TO_BOOLEAN(disable_break),
opt_context_object));
};
@@ -2233,8 +2173,10 @@ FrameMirror.prototype.toText = function(opt_locals) {
};
+// These indexes correspond to definitions in debug-scopes.h.
var kScopeDetailsTypeIndex = 0;
var kScopeDetailsObjectIndex = 1;
+var kScopeDetailsNameIndex = 2;
function ScopeDetails(frame, fun, index, opt_details) {
if (frame) {
@@ -2271,6 +2213,14 @@ ScopeDetails.prototype.object = function() {
};
+ScopeDetails.prototype.name = function() {
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ }
+ return this.details_[kScopeDetailsNameIndex];
+};
+
+
ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
var raw_res;
if (!IS_UNDEFINED(this.break_id_)) {
@@ -2296,7 +2246,7 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* @extends Mirror
*/
function ScopeMirror(frame, fun, index, opt_details) {
- %_CallFunction(this, MirrorType.SCOPE_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.SCOPE_TYPE);
if (frame) {
this.frame_index_ = frame.index_;
} else {
@@ -2351,7 +2301,7 @@ ScopeMirror.prototype.setVariableValue = function(name, new_value) {
* @extends Mirror
*/
function ScriptMirror(script) {
- %_CallFunction(this, MirrorType.SCRIPT_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.SCRIPT_TYPE);
this.script_ = script;
this.context_ = new ContextMirror(script.context_data);
this.allocateHandle_();
@@ -2472,7 +2422,7 @@ ScriptMirror.prototype.toText = function() {
* @extends Mirror
*/
function ContextMirror(data) {
- %_CallFunction(this, MirrorType.CONTEXT_TYPE, Mirror);
+ %_Call(Mirror, this, MirrorType.CONTEXT_TYPE);
this.data_ = data;
this.allocateHandle_();
}
@@ -2855,24 +2805,15 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
// Add actual properties - named properties followed by indexed properties.
- var propertyNames = mirror.propertyNames(PropertyKind.Named);
- var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
- var p = new GlobalArray(propertyNames.length + propertyIndexes.length);
- for (var i = 0; i < propertyNames.length; i++) {
- var propertyMirror = mirror.property(propertyNames[i]);
- p[i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- for (var i = 0; i < propertyIndexes.length; i++) {
- var propertyMirror = mirror.property(propertyIndexes[i]);
- p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
+ var properties = mirror.propertyNames();
+ for (var i = 0; i < properties.length; i++) {
+ var propertyMirror = mirror.property(properties[i]);
+ properties[i] = this.serializeProperty_(propertyMirror);
if (details) {
this.add_(propertyMirror.value());
}
}
- content.properties = p;
+ content.properties = properties;
var internalProperties = mirror.internalProperties();
if (internalProperties.length > 0) {
@@ -3070,7 +3011,6 @@ utils.InstallFunctions(global, DONT_ENUM, [
utils.InstallConstants(global, [
"ScopeType", ScopeType,
- "PropertyKind", PropertyKind,
"PropertyType", PropertyType,
"PropertyAttribute", PropertyAttribute,
"Mirror", Mirror,
diff --git a/chromium/v8/src/debug/ppc/debug-ppc.cc b/chromium/v8/src/debug/ppc/debug-ppc.cc
index ed4a632475c..c5ddab8bc0b 100644
--- a/chromium/v8/src/debug/ppc/debug-ppc.cc
+++ b/chromium/v8/src/debug/ppc/debug-ppc.cc
@@ -24,25 +24,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from
//
// ori r3, r3, 0
@@ -115,19 +115,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ li(r4, Operand::Zero());
- __ StoreP(r4, MemOperand(ip, 0));
-
// Load the function pointer off of our current stack frame.
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
kPointerSize));
@@ -135,9 +123,15 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Pop return address and frame
__ LeaveFrame(StackFrame::INTERNAL);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(r4, no_reg, dummy, dummy);
+
// Load context from the function.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
diff --git a/chromium/v8/src/debug/x64/debug-x64.cc b/chromium/v8/src/debug/x64/debug-x64.cc
index 3b656787091..0d56ea7521c 100644
--- a/chromium/v8/src/debug/x64/debug-x64.cc
+++ b/chromium/v8/src/debug/x64/debug-x64.cc
@@ -24,24 +24,24 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nops to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(pc, kSize);
+ CodePatcher patcher(isolate, pc, kSize);
Label check_codesize;
patcher.masm()->bind(&check_codesize);
patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(code->entry()),
@@ -106,34 +106,29 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ Move(rax, restarter_frame_function_slot);
- __ movp(Operand(rax, 0), Immediate(0));
-
// We do not know our frame height, but set rsp based on rbp.
__ leap(rsp, Operand(rbp, -1 * kPointerSize));
__ Pop(rdi); // Function.
__ popq(rbp);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(rdi, no_reg, dummy, dummy);
+
// Load context from the function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+
// Get function code.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rdx);
+ __ jmp(rbx);
}
const bool LiveEdit::kFrameDropperSupported = true;
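
Across all ports the frame dropper now skips the restarter-frame slot entirely: it pops the synthetic frame, floods the function with debug breaks if stepping, reloads the context, clears new.target, and jumps back into the function's code. A rough step-per-statement model; the register and function types are inventions that mirror the emitted assembly:

    #include <cstdint>
    #include <iostream>

    struct Fn { std::uintptr_t context, code_entry; bool stepping; };
    struct Registers { Fn* function; std::uintptr_t context, new_target; };

    constexpr std::uintptr_t kUndefined = 0;  // stand-in for the undefined root

    // One statement per emitted instruction group in the new stub.
    std::uintptr_t FrameDropperLiveEdit(Registers& regs, Fn* restarted) {
      regs.function = restarted;          // popped from the synthetic frame
      if (restarted->stepping) {
        // FloodFunctionIfStepping: set debug break slots in the function
      }
      regs.context = restarted->context;  // load context from the function
      regs.new_target = kUndefined;       // clear new.target as a safety measure
      return restarted->code_entry;       // jump target: re-enter from the start
    }

    int main() {
      Fn fn{0x10, 0x20, false};
      Registers regs{};
      std::cout << std::hex << FrameDropperLiveEdit(regs, &fn) << "\n";  // 20
    }
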
diff --git a/chromium/v8/src/debug/x87/debug-x87.cc b/chromium/v8/src/debug/x87/debug-x87.cc
index 5ec608a99a5..8c04e02b89c 100644
--- a/chromium/v8/src/debug/x87/debug-x87.cc
+++ b/chromium/v8/src/debug/x87/debug-x87.cc
@@ -23,24 +23,24 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nops to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(pc, kSize);
+ CodePatcher patcher(isolate, pc, kSize);
// Add a label for checking the size of the code used for returning.
Label check_codesize;
@@ -105,33 +105,29 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, -1 * kPointerSize));
__ pop(edi); // Function.
__ pop(ebp);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Clear new.target register as a safety measure.
+ __ mov(edx, masm->isolate()->factory()->undefined_value());
+
// Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
+ __ jmp(ebx);
}
diff --git a/chromium/v8/src/deoptimizer.cc b/chromium/v8/src/deoptimizer.cc
index fdca98e90a2..4bdafbf1b41 100644
--- a/chromium/v8/src/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer.cc
@@ -5,13 +5,13 @@
#include "src/deoptimizer.h"
#include "src/accessors.h"
+#include "src/ast/prettyprinter.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/macro-assembler.h"
-#include "src/prettyprinter.h"
#include "src/profiler/cpu-profiler.h"
#include "src/v8.h"
@@ -736,7 +736,7 @@ void Deoptimizer::DoComputeOutputFrames() {
TranslationIterator state_iterator(translations, translation_index);
translated_state_.Init(
- input_->GetFramePointerAddress(), function_, &state_iterator,
+ input_->GetFramePointerAddress(), &state_iterator,
input_data->LiteralArray(), input_->GetRegisterValues(),
trace_scope_ == nullptr ? nullptr : trace_scope_->file());
@@ -760,23 +760,27 @@ void Deoptimizer::DoComputeOutputFrames() {
int frame_index = static_cast<int>(i);
switch (translated_state_.frames()[i].kind()) {
case TranslatedFrame::kFunction:
- DoComputeJSFrame(nullptr, frame_index);
+ DoComputeJSFrame(frame_index);
+ jsframe_count_++;
+ break;
+ case TranslatedFrame::kInterpretedFunction:
+ DoComputeInterpretedFrame(frame_index);
jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
- DoComputeArgumentsAdaptorFrame(nullptr, frame_index);
+ DoComputeArgumentsAdaptorFrame(frame_index);
break;
case TranslatedFrame::kConstructStub:
- DoComputeConstructStubFrame(nullptr, frame_index);
+ DoComputeConstructStubFrame(frame_index);
break;
case TranslatedFrame::kGetter:
- DoComputeAccessorStubFrame(nullptr, frame_index, false);
+ DoComputeAccessorStubFrame(frame_index, false);
break;
case TranslatedFrame::kSetter:
- DoComputeAccessorStubFrame(nullptr, frame_index, true);
+ DoComputeAccessorStubFrame(frame_index, true);
break;
case TranslatedFrame::kCompiledStub:
- DoComputeCompiledStubFrame(nullptr, frame_index);
+ DoComputeCompiledStubFrame(frame_index);
break;
case TranslatedFrame::kInvalid:
FATAL("invalid frame");
@@ -806,8 +810,7 @@ void Deoptimizer::DoComputeOutputFrames() {
}
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeJSFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -829,7 +832,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned fixed_frame_size = ComputeJavascriptFixedSize(function);
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -944,8 +947,6 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
input_offset -= kPointerSize;
// Read the context from the translations.
Object* context = value_iterator->GetRawValue();
- // The context should not be a placeholder for a materialized object.
- CHECK(context != isolate_->heap()->arguments_marker());
if (context == isolate_->heap()->undefined_value()) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
@@ -960,6 +961,12 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
WriteValueToOutput(context, input_index, frame_index, output_offset,
"context ");
+ if (context == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ values_to_materialize_.push_back({output_address, value_iterator});
+ }
value_iterator++;
input_index++;
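
When the translated context is still the arguments marker, the hunk above queues the slot's address and translated value for later materialization instead of failing the old CHECK. A sketch of that bookkeeping with opaque stand-in types:

    #include <cstdint>
    #include <vector>

    struct TranslatedValue {};  // opaque stand-in for the iterator's value

    struct ValueToMaterialize {
      std::uintptr_t output_address;  // slot in the output frame
      const TranslatedValue* value;   // what to write there later
    };

    // Mirrors values_to_materialize_.push_back({output_address, value_iterator}).
    void Defer(std::vector<ValueToMaterialize>& pending, std::uintptr_t addr,
               const TranslatedValue* v) {
      pending.push_back({addr, v});
    }

    int main() {
      std::vector<ValueToMaterialize> pending;
      TranslatedValue ctx;
      Defer(pending, 0x7fff0000, &ctx);
      return static_cast<int>(pending.size()) - 1;  // 0 on success
    }
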
@@ -1023,8 +1030,223 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
+ BailoutId bytecode_offset = translated_frame->node_id();
+ unsigned height = translated_frame->height();
+ unsigned height_in_bytes = height * kPointerSize;
+ JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ value_iterator++;
+ input_index++;
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " translating interpreted frame ");
+ function->PrintName(trace_scope_->file());
+ PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d\n",
+ bytecode_offset.ToInt(), height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by InterpreterFrameConstants.
+ unsigned fixed_frame_size = ComputeInterpretedFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new (output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::INTERPRETED);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ CHECK(frame_index >= 0 && frame_index < output_count_);
+ CHECK_NULL(output_[frame_index]);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ Register fp_reg = InterpretedFrame::fp_register();
+ intptr_t top_address;
+ if (is_bottommost) {
+ // Subtract the interpreter fixed frame size for the context and function
+ // slots, new.target and bytecode offset.
+ top_address = input_->GetRegister(fp_reg.code()) -
+ InterpreterFrameConstants::kFixedFrameSizeFromFp -
+ height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count =
+ function->shared()->internal_formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, the function, new.target and the bytecode offset. Synthesize
+ // their values and set them up explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc is computed from the interpreter entry
+ // trampoline and the bytecode offset of the bailout.
+ output_offset -= kPCOnStackSize;
+ input_offset -= kPCOnStackSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetCallerPc(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kFPOnStackSize;
+ input_offset -= kFPOnStackSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetCallerFp(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ DCHECK(!is_bottommost ||
+ (input_->GetRegister(fp_reg.code()) +
+ has_alignment_padding_ * kPointerSize) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
+ DCHECK(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
+
+ if (FLAG_enable_embedded_constant_pool) {
+ // For the bottommost output frame the constant pool pointer can be gotten
+ // from the input frame. For subsequent output frames, it can be read from
+ // the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetConstantPool();
+ }
+ output_frame->SetCallerConstantPool(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "caller's constant_pool\n");
+ }
+
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
+ Register context_reg = InterpretedFrame::context_register();
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ // Read the context from the translations.
+ Object* context = value_iterator->GetRawValue();
+ // The context should not be a placeholder for a materialized object.
+ CHECK(context != isolate_->heap()->arguments_marker());
+ value = reinterpret_cast<intptr_t>(context);
+ output_frame->SetContext(value);
+ if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
+ WriteValueToOutput(context, input_index, frame_index, output_offset,
+ "context ");
+ value_iterator++;
+ input_index++;
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+
+ // TODO(rmcilroy): Deal with new.target correctly - currently just set it to
+ // undefined.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ Object* new_target = isolate_->heap()->undefined_value();
+ WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target ");
+
+ // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ int raw_bytecode_offset =
+ BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset.ToInt();
+ Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
+ WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
+ "bytecode offset ");
+
+ // Translate the rest of the interpreter registers in the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
+ }
+ CHECK_EQ(0u, output_offset);
+
+ // Set the accumulator register.
+ output_frame->SetRegister(
+ kInterpreterAccumulatorRegister.code(),
+ reinterpret_cast<intptr_t>(value_iterator->GetRawValue()));
+ value_iterator++;
+
+ Builtins* builtins = isolate_->builtins();
+ Code* trampoline = builtins->builtin(Builtins::kInterpreterEntryTrampoline);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(trampoline->entry()));
+ output_frame->SetState(0);
+
+ // Update constant pool.
+ if (FLAG_enable_embedded_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ InterpretedFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
+ }
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost && bailout_type_ != DEBUGGER) {
+ Code* continuation =
+ builtins->builtin(Builtins::kInterpreterNotifyDeoptimized);
+ if (bailout_type_ == LAZY) {
+ continuation =
+ builtins->builtin(Builtins::kInterpreterNotifyLazyDeoptimized);
+ } else if (bailout_type_ == SOFT) {
+ continuation =
+ builtins->builtin(Builtins::kInterpreterNotifySoftDeoptimized);
+ } else {
+ CHECK_EQ(bailout_type_, EAGER);
+ }
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+}
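// Editorial sketch (not part of this patch): the output_offset bookkeeping
// above lays the interpreted frame out as follows, assuming a 64-bit target
// where kPointerSize, kPCOnStackSize and kFPOnStackSize are all 8 and the
// embedded constant pool is disabled. The walk must end at offset 0, which
// mirrors the CHECK_EQ(0u, output_offset) above.
#include <cstdio>

int main() {
  const int kPointerSize = 8;
  const int parameter_count = 3;  // e.g. two formals plus the receiver
  const int height = 4;           // interpreter register count (example)
  int offset = (parameter_count + 6 + height) * kPointerSize;  // frame size
  offset -= parameter_count * kPointerSize;                // incoming args
  offset -= kPointerSize; std::printf("caller pc        %d\n", offset);
  offset -= kPointerSize; std::printf("caller fp        %d\n", offset);
  offset -= kPointerSize; std::printf("context          %d\n", offset);
  offset -= kPointerSize; std::printf("function         %d\n", offset);
  offset -= kPointerSize; std::printf("new.target       %d\n", offset);
  offset -= kPointerSize; std::printf("bytecode offset  %d\n", offset);
  offset -= height * kPointerSize;                         // registers
  return offset;  // ends at 0
}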
+
+
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1130,8 +1352,7 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
TranslatedFrame::iterator value_iterator = translated_frame->begin();
@@ -1238,12 +1459,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
- // The original constructor.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value());
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "new.target\n");
-
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
@@ -1266,8 +1481,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
+void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
bool is_setter_stub_frame) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
@@ -1392,8 +1606,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
//
// FROM TO
// | .... | | .... |
@@ -1767,7 +1980,7 @@ void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
unsigned Deoptimizer::ComputeInputFrameSize() const {
- unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned fixed_size = ComputeJavascriptFixedSize(function_);
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
unsigned result = fixed_size + fp_to_sp_delta_ -
@@ -1782,7 +1995,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
}
-unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
+unsigned Deoptimizer::ComputeJavascriptFixedSize(JSFunction* function) const {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
return ComputeIncomingArgumentSize(function) +
@@ -1790,6 +2003,15 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
}
+unsigned Deoptimizer::ComputeInterpretedFixedSize(JSFunction* function) const {
+ // The fixed part of the frame consists of the return address, frame
+ // pointer, function, context, new.target, bytecode offset and all the
+ // incoming arguments.
+ return ComputeIncomingArgumentSize(function) +
+ InterpreterFrameConstants::kFixedFrameSize;
+}
+
+
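// A hedged sanity check on the arithmetic (constant values assumed for a
// 64-bit target): with 2 formal parameters, ComputeIncomingArgumentSize is
// (2 + 1) * 8 = 24 bytes, and if kFixedFrameSize covers the six slots listed
// above (return address, frame pointer, context, function, new.target,
// bytecode offset) it contributes 6 * 8 = 48 bytes, for a fixed size of
// 72 bytes in total.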
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments is the values for formal parameters and
// the receiver. Every slot contains a pointer.
@@ -1836,7 +2058,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
while (max_entry_id >= entry_count) entry_count *= 2;
CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);
- MacroAssembler masm(isolate, NULL, 16 * KB);
+ MacroAssembler masm(isolate, NULL, 16 * KB, CodeObjectRequired::kYes);
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
@@ -1883,8 +2105,13 @@ FrameDescription::FrameDescription(uint32_t frame_size,
int FrameDescription::ComputeFixedSize() {
- return StandardFrameConstants::kFixedFrameSize +
- (ComputeParametersCount() + 1) * kPointerSize;
+ if (type_ == StackFrame::INTERPRETED) {
+ return InterpreterFrameConstants::kFixedFrameSize +
+ (ComputeParametersCount() + 1) * kPointerSize;
+ } else {
+ return StandardFrameConstants::kFixedFrameSize +
+ (ComputeParametersCount() + 1) * kPointerSize;
+ }
}
@@ -2022,6 +2249,15 @@ void Translation::BeginJSFrame(BailoutId node_id,
}
+void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
+ int literal_id, unsigned height) {
+ buffer_->Add(INTERPRETED_FRAME, zone());
+ buffer_->Add(bytecode_offset.ToInt(), zone());
+ buffer_->Add(literal_id, zone());
+ buffer_->Add(height, zone());
+}
+
+
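// A minimal standalone mirror of the encoding above (the buffer type and
// opcode value are hypothetical): BeginInterpretedFrame appends one opcode
// plus exactly three operands, which is why NumberOfOperandsFor() below
// reports 3 for INTERPRETED_FRAME.
#include <vector>

enum HypotheticalOpcode { kInterpretedFrameOp = 2 };

void AppendInterpretedFrame(std::vector<int>* buffer, int bytecode_offset,
                            int literal_id, unsigned height) {
  buffer->push_back(kInterpretedFrameOp);
  buffer->push_back(bytecode_offset);
  buffer->push_back(literal_id);
  buffer->push_back(static_cast<int>(height));
}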
void Translation::BeginCompiledStubFrame(int height) {
buffer_->Add(COMPILED_STUB_FRAME, zone());
buffer_->Add(height, zone());
@@ -2072,7 +2308,7 @@ void Translation::StoreBoolRegister(Register reg) {
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER, zone());
- buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
+ buffer_->Add(reg.code(), zone());
}
@@ -2154,6 +2390,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case CONSTRUCT_STUB_FRAME:
return 2;
case JS_FRAME:
+ case INTERPRETED_FRAME:
return 3;
}
FATAL("Unexpected translation type");
@@ -2491,16 +2728,6 @@ Object* TranslatedValue::GetRawValue() const {
break;
}
- case kDouble: {
- int int_value = FastD2IChecked(double_value());
- bool is_smi = !IsMinusZero(double_value()) &&
- double_value() == int_value && Smi::IsValid(int_value);
- if (is_smi) {
- return Smi::FromInt(static_cast<int32_t>(int_value));
- }
- break;
- }
-
case kBoolBit: {
if (uint32_value() == 0) {
return isolate()->heap()->false_value();
@@ -2637,6 +2864,15 @@ TranslatedFrame TranslatedFrame::JSFrame(BailoutId node_id,
}
+TranslatedFrame TranslatedFrame::InterpretedFrame(
+ BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
+ TranslatedFrame frame(kInterpretedFunction, shared_info->GetIsolate(),
+ shared_info, height);
+ frame.node_id_ = bytecode_offset;
+ return frame;
+}
+
+
TranslatedFrame TranslatedFrame::AccessorFrame(
Kind kind, SharedFunctionInfo* shared_info) {
DCHECK(kind == kSetter || kind == kGetter);
@@ -2663,9 +2899,17 @@ int TranslatedFrame::GetValueCount() {
case kFunction: {
int parameter_count =
raw_shared_info_->internal_formal_parameter_count() + 1;
+ // + 1 for function.
return height_ + parameter_count + 1;
}
+ case kInterpretedFunction: {
+ int parameter_count =
+ raw_shared_info_->internal_formal_parameter_count() + 1;
+ // + 3 for function, context and accumulator.
+ return height_ + parameter_count + 3;
+ }
+
case kGetter:
return 2; // Function and receiver.
@@ -2701,7 +2945,7 @@ void TranslatedFrame::Handlify() {
TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
TranslationIterator* iterator, FixedArray* literal_array, Address fp,
- JSFunction* frame_function, FILE* trace_file) {
+ FILE* trace_file) {
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
@@ -2716,11 +2960,29 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
PrintF(trace_file, " reading input frame %s", name.get());
int arg_count = shared_info->internal_formal_parameter_count() + 1;
PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
- arg_count, node_id.ToInt(), height);
+ node_id.ToInt(), arg_count, height);
}
return TranslatedFrame::JSFrame(node_id, shared_info, height);
}
+ case Translation::INTERPRETED_FRAME: {
+ BailoutId bytecode_offset = BailoutId(iterator->Next());
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading input frame %s", name.get());
+ int arg_count = shared_info->internal_formal_parameter_count() + 1;
+ PrintF(trace_file,
+ " => bytecode_offset=%d, args=%d, height=%d; inputs:\n",
+ bytecode_offset.ToInt(), arg_count, height);
+ }
+ return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
+ height);
+ }
+
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
@@ -2833,6 +3095,7 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
switch (opcode) {
case Translation::BEGIN:
case Translation::JS_FRAME:
+ case Translation::INTERPRETED_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
@@ -2925,7 +3188,7 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
double value = registers->GetDoubleRegister(input_reg);
if (trace_file != nullptr) {
PrintF(trace_file, "%e ; %s (bool)", value,
- DoubleRegister::AllocationIndexToString(input_reg));
+ DoubleRegister::from_code(input_reg).ToString());
}
return TranslatedValue::NewDouble(this, value);
}
@@ -3024,8 +3287,8 @@ TranslatedState::TranslatedState(JavaScriptFrame* frame)
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
- Init(frame->fp(), frame->function(), &it, data->LiteralArray(),
- nullptr /* registers */, nullptr /* trace file */);
+ Init(frame->fp(), &it, data->LiteralArray(), nullptr /* registers */,
+ nullptr /* trace file */);
}
@@ -3036,7 +3299,6 @@ TranslatedState::TranslatedState()
void TranslatedState::Init(Address input_frame_pointer,
- JSFunction* input_frame_function,
TranslationIterator* iterator,
FixedArray* literal_array, RegisterValues* registers,
FILE* trace_file) {
@@ -3058,9 +3320,8 @@ void TranslatedState::Init(Address input_frame_pointer,
// Read the frames
for (int i = 0; i < count; i++) {
// Read the frame descriptor.
- frames_.push_back(
- CreateNextTranslatedFrame(iterator, literal_array, input_frame_pointer,
- input_frame_function, trace_file));
+ frames_.push_back(CreateNextTranslatedFrame(
+ iterator, literal_array, input_frame_pointer, trace_file));
TranslatedFrame& frame = frames_.back();
// Read the values.
@@ -3229,6 +3490,42 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
object->set_length(*length);
return object;
}
+ case FIXED_ARRAY_TYPE: {
+ Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
+ int32_t length = 0;
+ CHECK(lengthObject->ToInt32(&length));
+ Handle<FixedArray> object =
+ isolate_->factory()->NewFixedArray(length);
+ // We need to set the map, because the fixed array we are
+ // materializing could be a context or an arguments object,
+ // in which case we must retain that information.
+ object->set_map(*map);
+ slot->value_ = object;
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeAt(frame_index, value_index);
+ object->set(i, *value);
+ }
+ return object;
+ }
+ case FIXED_DOUBLE_ARRAY_TYPE: {
+ DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
+ Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
+ int32_t length = 0;
+ CHECK(lengthObject->ToInt32(&length));
+ Handle<FixedArrayBase> object =
+ isolate_->factory()->NewFixedDoubleArray(length);
+ slot->value_ = object;
+ if (length > 0) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(object);
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeAt(frame_index, value_index);
+ CHECK(value->IsNumber());
+ double_array->set(i, value->Number());
+ }
+ }
+ return object;
+ }
default:
PrintF(stderr, "[couldn't handle instance type %d]\n",
map->instance_type());
diff --git a/chromium/v8/src/deoptimizer.h b/chromium/v8/src/deoptimizer.h
index b116ccd54da..10685b61931 100644
--- a/chromium/v8/src/deoptimizer.h
+++ b/chromium/v8/src/deoptimizer.h
@@ -112,6 +112,7 @@ class TranslatedFrame {
public:
enum Kind {
kFunction,
+ kInterpretedFunction,
kGetter,
kSetter,
kArgumentsAdaptor,
@@ -172,6 +173,9 @@ class TranslatedFrame {
// Constructor static methods.
static TranslatedFrame JSFrame(BailoutId node_id,
SharedFunctionInfo* shared_info, int height);
+ static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
+ SharedFunctionInfo* shared_info,
+ int height);
static TranslatedFrame AccessorFrame(Kind kind,
SharedFunctionInfo* shared_info);
static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
@@ -252,9 +256,9 @@ class TranslatedState {
Isolate* isolate() { return isolate_; }
- void Init(Address input_frame_pointer, JSFunction* input_frame_function,
- TranslationIterator* iterator, FixedArray* literal_array,
- RegisterValues* registers, FILE* trace_file);
+ void Init(Address input_frame_pointer, TranslationIterator* iterator,
+ FixedArray* literal_array, RegisterValues* registers,
+ FILE* trace_file);
private:
friend TranslatedValue;
@@ -262,7 +266,6 @@ class TranslatedState {
TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator,
FixedArray* literal_array,
Address fp,
- JSFunction* frame_function,
FILE* trace_file);
TranslatedValue CreateNextTranslatedValue(int frame_index, int value_index,
TranslationIterator* iterator,
@@ -308,6 +311,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
#define DEOPT_MESSAGES_LIST(V) \
+ V(kAccessCheck, "Access check needed") \
V(kNoReason, "no reason") \
V(kConstantGlobalVariableAssignment, "Constant global variable assignment") \
V(kConversionOverflow, "conversion overflow") \
@@ -336,6 +340,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
"Insufficient type feedback for RHS of binary operation") \
V(kKeyIsNegative, "key is negative") \
+ V(kLiteralsWereDisposed, "literals have been disposed") \
V(kLostPrecision, "lost precision") \
V(kLostPrecisionOrNaN, "lost precision or NaN") \
V(kMementoFound, "memento found") \
@@ -356,6 +361,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kOutOfBounds, "out of bounds") \
V(kOutsideOfRange, "Outside of range") \
V(kOverflow, "overflow") \
+ V(kProxy, "proxy") \
V(kReceiverWasAGlobalObject, "receiver was a global object") \
V(kSmi, "Smi") \
V(kTooManyArguments, "too many arguments") \
@@ -586,16 +592,12 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
- void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index);
- void DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index);
- void DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame);
- void DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index);
+ void DoComputeJSFrame(int frame_index);
+ void DoComputeInterpretedFrame(int frame_index);
+ void DoComputeArgumentsAdaptorFrame(int frame_index);
+ void DoComputeConstructStubFrame(int frame_index);
+ void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
+ void DoComputeCompiledStubFrame(int frame_index);
void WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
@@ -609,7 +611,8 @@ class Deoptimizer : public Malloced {
const char* debug_hint_string);
unsigned ComputeInputFrameSize() const;
- unsigned ComputeFixedSize(JSFunction* function) const;
+ unsigned ComputeJavascriptFixedSize(JSFunction* function) const;
+ unsigned ComputeInterpretedFixedSize(JSFunction* function) const;
unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
@@ -742,12 +745,9 @@ class FrameDescription {
return malloc(size + frame_size - kPointerSize);
}
-// Bug in VS2015 RC, reported fixed in RTM. Microsoft bug: 1153909.
-#if !defined(_MSC_FULL_VER) || _MSC_FULL_VER != 190022816
void operator delete(void* pointer, uint32_t frame_size) {
free(pointer);
}
-#endif // _MSC_FULL_VER
void operator delete(void* description) {
free(description);
@@ -957,6 +957,7 @@ class TranslationIterator BASE_EMBEDDED {
#define TRANSLATION_OPCODE_LIST(V) \
V(BEGIN) \
V(JS_FRAME) \
+ V(INTERPRETED_FRAME) \
V(CONSTRUCT_STUB_FRAME) \
V(GETTER_STUB_FRAME) \
V(SETTER_STUB_FRAME) \
@@ -1002,6 +1003,8 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
+ unsigned height);
void BeginCompiledStubFrame(int height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
diff --git a/chromium/v8/src/disassembler.cc b/chromium/v8/src/disassembler.cc
index b16f090e9d4..59a57e552ef 100644
--- a/chromium/v8/src/disassembler.cc
+++ b/chromium/v8/src/disassembler.cc
@@ -32,7 +32,9 @@ class V8NameConverter: public disasm::NameConverter {
const char* V8NameConverter::NameOfAddress(byte* pc) const {
- const char* name = code_->GetIsolate()->builtins()->Lookup(pc);
+ const char* name =
+ code_ == NULL ? NULL : code_->GetIsolate()->builtins()->Lookup(pc);
+
if (name != NULL) {
SNPrintF(v8_buffer_, "%s (%p)", name, pc);
return v8_buffer_.start();
@@ -153,7 +155,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
+ RelocInfo relocinfo(isolate, pcs[i], rmodes[i], datas[i],
+ converter.code());
// Indent the printing of the reloc info.
if (i == 0) {
@@ -189,9 +192,6 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
out.AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTarget(rmode)) {
out.AddFormatted(" ;; code:");
- if (rmode == RelocInfo::CONSTRUCT_CALL) {
- out.AddFormatted(" constructor,");
- }
Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
diff --git a/chromium/v8/src/disassembler.h b/chromium/v8/src/disassembler.h
index 32e48c4e92b..ac53f775b13 100644
--- a/chromium/v8/src/disassembler.h
+++ b/chromium/v8/src/disassembler.h
@@ -20,6 +20,7 @@ class Disassembler : public AllStatic {
Code* code = NULL);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DISASSEMBLER_H_
diff --git a/chromium/v8/src/diy-fp.h b/chromium/v8/src/diy-fp.h
index e0daf27a1e0..1325c945197 100644
--- a/chromium/v8/src/diy-fp.h
+++ b/chromium/v8/src/diy-fp.h
@@ -93,6 +93,7 @@ class DiyFp {
int e_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DIY_FP_H_
diff --git a/chromium/v8/src/double.h b/chromium/v8/src/double.h
index cb126286751..f21bd748f9a 100644
--- a/chromium/v8/src/double.h
+++ b/chromium/v8/src/double.h
@@ -204,6 +204,7 @@ class Double {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DOUBLE_H_
diff --git a/chromium/v8/src/dtoa.h b/chromium/v8/src/dtoa.h
index ca6277ee56f..9f190ab4727 100644
--- a/chromium/v8/src/dtoa.h
+++ b/chromium/v8/src/dtoa.h
@@ -59,6 +59,7 @@ const int kBase10MaximalLength = 17;
void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
Vector<char> buffer, int* sign, int* length, int* point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_DTOA_H_
diff --git a/chromium/v8/src/effects.h b/chromium/v8/src/effects.h
index 8d539f64bd8..020471830cb 100644
--- a/chromium/v8/src/effects.h
+++ b/chromium/v8/src/effects.h
@@ -329,6 +329,7 @@ class NestedEffects: public
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EFFECTS_H_
diff --git a/chromium/v8/src/elements-kind.h b/chromium/v8/src/elements-kind.h
index 1397bd7c19c..5f6cd62c46e 100644
--- a/chromium/v8/src/elements-kind.h
+++ b/chromium/v8/src/elements-kind.h
@@ -226,6 +226,7 @@ inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ELEMENTS_KIND_H_
diff --git a/chromium/v8/src/elements.cc b/chromium/v8/src/elements.cc
index 4a8308128bd..d4d80dbdecf 100644
--- a/chromium/v8/src/elements.cc
+++ b/chromium/v8/src/elements.cc
@@ -136,20 +136,14 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
FixedArray* to = FixedArray::cast(to_base);
DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
- Address to_address = to->address() + FixedArray::kHeaderSize;
- Address from_address = from->address() + FixedArray::kHeaderSize;
- CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
- reinterpret_cast<Object**>(from_address) + from_start,
- static_cast<size_t>(copy_size));
- if (IsFastObjectElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Heap* heap = from->GetHeap();
- if (!heap->InNewSpace(to)) {
- heap->RecordWrites(to->address(),
- to->OffsetOfElementAt(to_start),
- copy_size);
- }
- heap->incremental_marking()->RecordWrites(to);
+
+ WriteBarrierMode write_barrier_mode =
+ (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
+ ? UPDATE_WRITE_BARRIER
+ : SKIP_WRITE_BARRIER;
+ for (int i = 0; i < copy_size; i++) {
+ Object* value = from->get(from_start + i);
+ to->set(to_start + i, value, write_barrier_mode);
}
}
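// The rewritten loop replaces manual RecordWrites bookkeeping with a
// per-store WriteBarrierMode. A standalone sketch of the rule it encodes
// (an approximation, not the V8 API itself): only copies between
// object-kind backing stores can introduce heap pointers the GC must track;
// Smi-only stores never need a barrier.
enum SketchWriteBarrierMode { kSkipWriteBarrier, kUpdateWriteBarrier };

SketchWriteBarrierMode ModeForCopy(bool from_is_object_kind,
                                   bool to_is_object_kind) {
  return (from_is_object_kind && to_is_object_kind) ? kUpdateWriteBarrier
                                                    : kSkipWriteBarrier;
}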
@@ -160,7 +154,6 @@ static void CopyDictionaryToObjectElements(
DisallowHeapAllocation no_allocation;
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
- Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
@@ -183,24 +176,19 @@ static void CopyDictionaryToObjectElements(
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
+ WriteBarrierMode write_barrier_mode = IsFastObjectElementsKind(to_kind)
+ ? UPDATE_WRITE_BARRIER
+ : SKIP_WRITE_BARRIER;
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
DCHECK(!value->IsTheHole());
- to->set(i + to_start, value, SKIP_WRITE_BARRIER);
+ to->set(i + to_start, value, write_barrier_mode);
} else {
to->set_the_hole(i + to_start);
}
}
- if (IsFastObjectElementsKind(to_kind)) {
- if (!heap->InNewSpace(to)) {
- heap->RecordWrites(to->address(),
- to->OffsetOfElementAt(to_start),
- copy_size);
- }
- heap->incremental_marking()->RecordWrites(to);
- }
}
@@ -440,8 +428,8 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Code* apply_builtin = isolate->builtins()->builtin(
- Builtins::kFunctionApply);
+ Code* apply_builtin =
+ isolate->builtins()->builtin(Builtins::kFunctionPrototypeApply);
if (raw_frame->unchecked_code() == apply_builtin) {
PrintF("apply from ");
it.Advance();
@@ -506,9 +494,8 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateImpl(holder);
}
- virtual bool IsPacked(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store, uint32_t start,
- uint32_t end) final {
+ bool IsPacked(Handle<JSObject> holder, Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t end) final {
return ElementsAccessorSubclass::IsPackedImpl(holder, backing_store, start,
end);
}
@@ -518,7 +505,8 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t end) {
if (IsFastPackedElementsKind(kind())) return true;
for (uint32_t i = start; i < end; i++) {
- if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store)) {
+ if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store,
+ ALL_PROPERTIES)) {
return false;
}
}
@@ -543,20 +531,22 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) final {
+ bool HasElement(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store,
+ PropertyFilter filter) final {
return ElementsAccessorSubclass::HasElementImpl(holder, index,
- backing_store);
+ backing_store, filter);
}
static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) {
+ Handle<FixedArrayBase> backing_store,
+ PropertyFilter filter) {
return ElementsAccessorSubclass::GetEntryForIndexImpl(
- *holder, *backing_store, index) != kMaxUInt32;
+ *holder, *backing_store, index, filter) != kMaxUInt32;
}
- virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
- uint32_t entry) final {
+ Handle<Object> Get(Handle<FixedArrayBase> backing_store,
+ uint32_t entry) final {
return ElementsAccessorSubclass::GetImpl(backing_store, entry);
}
@@ -566,8 +556,7 @@ class ElementsAccessorBase : public ElementsAccessor {
return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
}
- virtual void Set(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) final {
+ void Set(FixedArrayBase* backing_store, uint32_t entry, Object* value) final {
ElementsAccessorSubclass::SetImpl(backing_store, entry, value);
}
@@ -582,10 +571,9 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void Reconfigure(Handle<JSObject> object,
- Handle<FixedArrayBase> store, uint32_t entry,
- Handle<Object> value,
- PropertyAttributes attributes) final {
+ void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
+ uint32_t entry, Handle<Object> value,
+ PropertyAttributes attributes) final {
ElementsAccessorSubclass::ReconfigureImpl(object, store, entry, value,
attributes);
}
@@ -597,9 +585,8 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void Add(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) final {
+ void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, uint32_t new_capacity) final {
ElementsAccessorSubclass::AddImpl(object, index, value, attributes,
new_capacity);
}
@@ -610,9 +597,8 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual uint32_t Push(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, Arguments* args,
- uint32_t push_size) final {
+ uint32_t Push(Handle<JSArray> receiver, Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t push_size) final {
return ElementsAccessorSubclass::PushImpl(receiver, backing_store, args,
push_size);
}
@@ -624,9 +610,9 @@ class ElementsAccessorBase : public ElementsAccessor {
return 0;
}
- virtual uint32_t Unshift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- Arguments* args, uint32_t unshift_size) final {
+ uint32_t Unshift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, Arguments* args,
+ uint32_t unshift_size) final {
return ElementsAccessorSubclass::UnshiftImpl(receiver, backing_store, args,
unshift_size);
}
@@ -638,9 +624,9 @@ class ElementsAccessorBase : public ElementsAccessor {
return 0;
}
- virtual Handle<JSArray> Slice(Handle<JSObject> receiver,
- Handle<FixedArrayBase> backing_store,
- uint32_t start, uint32_t end) final {
+ Handle<JSArray> Slice(Handle<JSObject> receiver,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t end) final {
return ElementsAccessorSubclass::SliceImpl(receiver, backing_store, start,
end);
}
@@ -652,10 +638,10 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<JSArray>();
}
- virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- uint32_t start, uint32_t delete_count,
- Arguments* args, uint32_t add_count) final {
+ Handle<JSArray> Splice(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t delete_count, Arguments* args,
+ uint32_t add_count) final {
return ElementsAccessorSubclass::SpliceImpl(receiver, backing_store, start,
delete_count, args, add_count);
}
@@ -668,8 +654,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<JSArray>();
}
- virtual Handle<Object> Pop(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) final {
+ Handle<Object> Pop(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) final {
return ElementsAccessorSubclass::PopImpl(receiver, backing_store);
}
@@ -679,8 +665,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<Object>();
}
- virtual Handle<Object> Shift(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store) final {
+ Handle<Object> Shift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) final {
return ElementsAccessorSubclass::ShiftImpl(receiver, backing_store);
}
@@ -690,12 +676,13 @@ class ElementsAccessorBase : public ElementsAccessor {
return Handle<Object>();
}
- virtual void SetLength(Handle<JSArray> array, uint32_t length) final {
- ElementsAccessorSubclass::SetLengthImpl(array, length,
+ void SetLength(Handle<JSArray> array, uint32_t length) final {
+ ElementsAccessorSubclass::SetLengthImpl(array->GetIsolate(), array, length,
handle(array->elements()));
}
- static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
Handle<FixedArrayBase> backing_store) {
DCHECK(!array->SetLengthWouldNormalize(length));
DCHECK(IsFastElementsKind(array->GetElementsKind()));
@@ -712,6 +699,7 @@ class ElementsAccessorBase : public ElementsAccessor {
// Check whether the backing store should be shrunk.
uint32_t capacity = backing_store->length();
+ old_length = Min(old_length, capacity);
if (length == 0) {
array->initialize_elements();
} else if (length <= capacity) {
@@ -720,7 +708,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
if (2 * length <= capacity) {
// If more than half the elements won't be used, trim the array.
- array->GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+ isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
*backing_store, capacity - length);
} else {
// Otherwise, fill the unused tail with holes.
@@ -810,12 +798,12 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- virtual void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) final {
+ void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) final {
ElementsAccessorSubclass::GrowCapacityAndConvertImpl(object, capacity);
}
- virtual void Delete(Handle<JSObject> obj, uint32_t entry) final {
+ void Delete(Handle<JSObject> obj, uint32_t entry) final {
ElementsAccessorSubclass::DeleteImpl(obj, entry);
}
@@ -826,9 +814,9 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- virtual void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
- ElementsKind from_kind, Handle<FixedArrayBase> to,
- uint32_t to_start, int copy_size) final {
+ void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
+ ElementsKind from_kind, Handle<FixedArrayBase> to,
+ uint32_t to_start, int copy_size) final {
DCHECK(!from.is_null());
// NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
// violate the handlified function signature convention:
@@ -841,9 +829,9 @@ class ElementsAccessorBase : public ElementsAccessor {
kPackedSizeNotKnown, copy_size);
}
- virtual void CopyElements(JSObject* from_holder, uint32_t from_start,
- ElementsKind from_kind, Handle<FixedArrayBase> to,
- uint32_t to_start, int copy_size) final {
+ void CopyElements(JSObject* from_holder, uint32_t from_start,
+ ElementsKind from_kind, Handle<FixedArrayBase> to,
+ uint32_t to_start, int copy_size) final {
int packed_size = kPackedSizeNotKnown;
bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
@@ -868,25 +856,54 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, *to, from_kind, to_start, packed_size, copy_size);
}
- virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
- KeyAccumulator* accumulator,
- FixedArray::KeyFilter filter) final {
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyFilter filter,
+ uint32_t offset) {
+ if (filter & ONLY_ALL_CAN_READ) {
+ // Non-dictionary elements can't have all-can-read accessors.
+ return;
+ }
+ uint32_t length = 0;
+ if (object->IsJSArray()) {
+ length = Smi::cast(JSArray::cast(*object)->length())->value();
+ } else {
+ length =
+ ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
+ }
+ if (range < length) length = range;
+ for (uint32_t i = offset; i < length; i++) {
+ if (!ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
+ filter))
+ continue;
+ keys->AddKey(i);
+ }
+ }
+
+ void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyFilter filter, uint32_t offset) final {
+ ElementsAccessorSubclass::CollectElementIndicesImpl(
+ object, backing_store, keys, range, filter, offset);
+ }
+
+ void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) final {
Handle<FixedArrayBase> from(receiver->elements());
uint32_t add_length =
ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
if (add_length == 0) return;
- accumulator->PrepareForComparisons(add_length);
- int prev_key_count = accumulator->GetLength();
+
for (uint32_t i = 0; i < add_length; i++) {
if (!ElementsAccessorSubclass::HasEntryImpl(*from, i)) continue;
Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, i);
DCHECK(!value->IsTheHole());
DCHECK(!value->IsAccessorPair());
DCHECK(!value->IsExecutableAccessorInfo());
- if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
- continue;
- }
- accumulator->AddKey(value, prev_key_count);
+ accumulator->AddKey(value, convert);
}
}
@@ -910,7 +927,7 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
- uint32_t index) {
+ uint32_t index, PropertyFilter filter) {
if (IsHoleyElementsKind(kind())) {
return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
backing_store) &&
@@ -918,17 +935,20 @@ class ElementsAccessorBase : public ElementsAccessor {
? index
: kMaxUInt32;
} else {
- Smi* smi_length = Smi::cast(JSArray::cast(holder)->length());
- uint32_t length = static_cast<uint32_t>(smi_length->value());
+ uint32_t length =
+ holder->IsJSArray()
+ ? static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(holder)->length())->value())
+ : ElementsAccessorSubclass::GetCapacityImpl(holder,
+ backing_store);
return index < length ? index : kMaxUInt32;
}
}
- virtual uint32_t GetEntryForIndex(JSObject* holder,
- FixedArrayBase* backing_store,
- uint32_t index) final {
- return ElementsAccessorSubclass::GetEntryForIndexImpl(holder, backing_store,
- index);
+ uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
+ uint32_t index) final {
+ return ElementsAccessorSubclass::GetEntryForIndexImpl(
+ holder, backing_store, index, ALL_PROPERTIES);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -936,8 +956,8 @@ class ElementsAccessorBase : public ElementsAccessor {
return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
}
- virtual PropertyDetails GetDetails(FixedArrayBase* backing_store,
- uint32_t entry) final {
+ PropertyDetails GetDetails(FixedArrayBase* backing_store,
+ uint32_t entry) final {
return ElementsAccessorSubclass::GetDetailsImpl(backing_store, entry);
}
@@ -954,11 +974,11 @@ class DictionaryElementsAccessor
: ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
- static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
Handle<FixedArrayBase> backing_store) {
Handle<SeededNumberDictionary> dict =
Handle<SeededNumberDictionary>::cast(backing_store);
- Isolate* isolate = array->GetIsolate();
int capacity = dict->Capacity();
uint32_t old_length = 0;
CHECK(array->length()->ToArrayLength(&old_length));
@@ -1090,19 +1110,55 @@ class DictionaryElementsAccessor
}
static uint32_t GetEntryForIndexImpl(JSObject* holder, FixedArrayBase* store,
- uint32_t index) {
+ uint32_t index, PropertyFilter filter) {
DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
- int entry = dict->FindEntry(index);
- return entry == SeededNumberDictionary::kNotFound
- ? kMaxUInt32
- : static_cast<uint32_t>(entry);
+ SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
+ int entry = dictionary->FindEntry(index);
+ if (entry == SeededNumberDictionary::kNotFound) return kMaxUInt32;
+ if (filter != ALL_PROPERTIES) {
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) return kMaxUInt32;
+ }
+ return static_cast<uint32_t>(entry);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
}
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyFilter filter,
+ uint32_t offset) {
+ Handle<SeededNumberDictionary> dictionary =
+ Handle<SeededNumberDictionary>::cast(backing_store);
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (!dictionary->IsKey(k)) continue;
+ if (k->FilterKey(filter)) continue;
+ if (dictionary->IsDeleted(i)) continue;
+ DCHECK(k->IsNumber());
+ DCHECK_LE(k->Number(), kMaxUInt32);
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < offset) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = dictionary->ValueAt(i);
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ }
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) continue;
+ keys->AddKey(index);
+ }
+
+ keys->SortCurrentElementsList();
+ }
};
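// Standalone sketch of the attribute test used above (bit values assumed to
// mirror v8::internal::PropertyAttributes): an entry is dropped as soon as
// any of its attribute bits is also set in the filter, e.g. a filter with
// the DONT_ENUM bit set drops non-enumerable entries.
enum SketchPropertyAttributes {
  kNone = 0,
  kReadOnly = 1 << 0,
  kDontEnum = 1 << 1,
  kDontDelete = 1 << 2,
};

bool FilteredOut(int attr, int filter) { return (attr & filter) != 0; }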
@@ -1181,13 +1237,18 @@ class FastElementsAccessor
}
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
- if (!backing_store->is_the_hole(i)) ++num_used;
- // Bail out early if more than 1/4 is used.
- if (4 * num_used > backing_store->length()) break;
- }
- if (4 * num_used <= backing_store->length()) {
- JSObject::NormalizeElements(obj);
+ if (!backing_store->is_the_hole(i)) {
+ ++num_used;
+ // Bail out if a number dictionary wouldn't be able to save at least
+ // 75% space.
+ if (4 * SeededNumberDictionary::ComputeCapacity(num_used) *
+ SeededNumberDictionary::kEntrySize >
+ backing_store->length()) {
+ return;
+ }
+ }
}
+ JSObject::NormalizeElements(obj);
}
}
@@ -1293,9 +1354,10 @@ class FastElementsAccessor
receiver, backing_store, args, unshift_size, AT_START);
}
- static void MoveElements(Heap* heap, Handle<FixedArrayBase> backing_store,
- int dst_index, int src_index, int len,
- int hole_start, int hole_end) {
+ static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, int dst_index,
+ int src_index, int len, int hole_start,
+ int hole_end) {
UNREACHABLE();
}
@@ -1344,13 +1406,13 @@ class FastElementsAccessor
// Delete and move elements to make space for add_count new elements.
if (add_count < delete_count) {
- FastElementsAccessorSubclass::SpliceShrinkStep(backing_store, heap, start,
- delete_count, add_count,
- length, new_length);
+ FastElementsAccessorSubclass::SpliceShrinkStep(
+ isolate, receiver, backing_store, start, delete_count, add_count,
+ length, new_length);
} else if (add_count > delete_count) {
backing_store = FastElementsAccessorSubclass::SpliceGrowStep(
- receiver, backing_store, isolate, heap, start, delete_count,
- add_count, length, new_length);
+ isolate, receiver, backing_store, start, delete_count, add_count,
+ length, new_length);
}
// Copy over the arguments.
@@ -1364,29 +1426,33 @@ class FastElementsAccessor
}
private:
- static void SpliceShrinkStep(Handle<FixedArrayBase> backing_store, Heap* heap,
+ // SpliceShrinkStep might modify the backing_store.
+ static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
uint32_t start, uint32_t delete_count,
uint32_t add_count, uint32_t len,
uint32_t new_length) {
const int move_left_count = len - delete_count - start;
const int move_left_dst_index = start + add_count;
FastElementsAccessorSubclass::MoveElements(
- heap, backing_store, move_left_dst_index, start + delete_count,
- move_left_count, new_length, len);
+ isolate, receiver, backing_store, move_left_dst_index,
+ start + delete_count, move_left_count, new_length, len);
}
-
+ // SpliceGrowStep might modify the backing_store.
static Handle<FixedArrayBase> SpliceGrowStep(
- Handle<JSArray> receiver, Handle<FixedArrayBase> backing_store,
- Isolate* isolate, Heap* heap, uint32_t start, uint32_t delete_count,
- uint32_t add_count, uint32_t length, uint32_t new_length) {
+ Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t delete_count, uint32_t add_count, uint32_t length,
+ uint32_t new_length) {
// Check we do not overflow the new_length.
DCHECK((add_count - delete_count) <= (Smi::kMaxValue - length));
// Check if backing_store is big enough.
if (new_length <= static_cast<uint32_t>(backing_store->length())) {
FastElementsAccessorSubclass::MoveElements(
- heap, backing_store, start + add_count, start + delete_count,
- (length - delete_count - start), 0, 0);
+ isolate, receiver, backing_store, start + add_count,
+ start + delete_count, (length - delete_count - start), 0, 0);
+ // MoveElements updates the backing_store in-place.
return backing_store;
}
// New backing storage is needed.
@@ -1407,20 +1473,19 @@ class FastElementsAccessor
static Handle<Object> RemoveElement(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
Where remove_position) {
+ Isolate* isolate = receiver->GetIsolate();
uint32_t length =
static_cast<uint32_t>(Smi::cast(receiver->length())->value());
- Isolate* isolate = receiver->GetIsolate();
DCHECK(length > 0);
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
Handle<Object> result =
FastElementsAccessorSubclass::GetImpl(backing_store, remove_index);
if (remove_position == AT_START) {
- Heap* heap = isolate->heap();
- FastElementsAccessorSubclass::MoveElements(heap, backing_store, 0, 1,
- new_length, 0, 0);
+ FastElementsAccessorSubclass::MoveElements(
+ isolate, receiver, backing_store, 0, 1, new_length, 0, 0);
}
- FastElementsAccessorSubclass::SetLengthImpl(receiver, new_length,
+ FastElementsAccessorSubclass::SetLengthImpl(isolate, receiver, new_length,
backing_store);
if (IsHoleyElementsKind(KindTraits::Kind) && result->IsTheHole()) {
@@ -1454,8 +1519,8 @@ class FastElementsAccessor
// If the backing store has enough capacity and we add elements to the
// start we have to shift the existing objects.
Isolate* isolate = receiver->GetIsolate();
- FastElementsAccessorSubclass::MoveElements(isolate->heap(), backing_store,
- add_size, 0, length, 0, 0);
+ FastElementsAccessorSubclass::MoveElements(
+ isolate, receiver, backing_store, add_size, 0, length, 0, 0);
}
int insertion_index = remove_position == AT_START ? 0 : length;
@@ -1508,11 +1573,22 @@ class FastSmiOrObjectElementsAccessor
return backing_store->get(index);
}
- static void MoveElements(Heap* heap, Handle<FixedArrayBase> backing_store,
- int dst_index, int src_index, int len,
- int hole_start, int hole_end) {
+ static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, int dst_index,
+ int src_index, int len, int hole_start,
+ int hole_end) {
+ Heap* heap = isolate->heap();
Handle<FixedArray> dst_elms = Handle<FixedArray>::cast(backing_store);
- if (len != 0) {
+ if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
+ // Update all the copies of this backing_store handle.
+ *dst_elms.location() =
+ FixedArray::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
+ receiver->set_elements(*dst_elms);
+ // Adjust the hole offset as the array has been shrunk.
+ hole_end -= src_index;
+ DCHECK_LE(hole_start, backing_store->length());
+ DCHECK_LE(hole_end, backing_store->length());
+ } else if (len != 0) {
DisallowHeapAllocation no_gc;
heap->MoveElements(*dst_elms, dst_index, src_index, len);
}
@@ -1631,12 +1707,23 @@ class FastDoubleElementsAccessor
FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
}
- static void MoveElements(Heap* heap, Handle<FixedArrayBase> backing_store,
- int dst_index, int src_index, int len,
- int hole_start, int hole_end) {
+ static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store, int dst_index,
+ int src_index, int len, int hole_start,
+ int hole_end) {
+ Heap* heap = isolate->heap();
Handle<FixedDoubleArray> dst_elms =
Handle<FixedDoubleArray>::cast(backing_store);
- if (len != 0) {
+ if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
+ // Update all the copies of this backing_store handle.
+ *dst_elms.location() = FixedDoubleArray::cast(
+ heap->LeftTrimFixedArray(*dst_elms, src_index));
+ receiver->set_elements(*dst_elms);
+ // Adjust the hole offset as the array has been shrunk.
+ hole_end -= src_index;
+ DCHECK_LE(hole_start, backing_store->length());
+ DCHECK_LE(hole_end, backing_store->length());
+ } else if (len != 0) {
MemMove(dst_elms->data_start() + dst_index,
dst_elms->data_start() + src_index, len * kDoubleSize);
}
@@ -1742,7 +1829,8 @@ class TypedElementsAccessor
return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
}
- static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
@@ -1759,7 +1847,7 @@ class TypedElementsAccessor
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
- uint32_t index) {
+ uint32_t index, PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store)
? index
: kMaxUInt32;
@@ -1856,7 +1944,8 @@ class SloppyArgumentsElementsAccessor
}
}
- static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
+ static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
Handle<FixedArrayBase> parameter_map) {
// Sloppy arguments objects are not arrays.
UNREACHABLE();
@@ -1893,14 +1982,14 @@ class SloppyArgumentsElementsAccessor
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* parameters,
- uint32_t index) {
+ uint32_t index, PropertyFilter filter) {
FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(parameter_map, index);
if (!probe->IsTheHole()) return index;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- uint32_t entry =
- ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments, index);
+ uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
+ index, filter);
if (entry == kMaxUInt32) return entry;
return (parameter_map->length() - 2) + entry;
}
@@ -2170,7 +2259,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
// Optimize the case where there is one argument and the argument is a small
// smi.
- if (length > 0 && length < JSObject::kInitialMaxFastElementArray) {
+ if (length > 0 && length < JSArray::kInitialMaxFastElementArray) {
ElementsKind elements_kind = array->GetElementsKind();
JSArray::Initialize(array, length, length);
diff --git a/chromium/v8/src/elements.h b/chromium/v8/src/elements.h
index fcc90024ba7..71e70a1c009 100644
--- a/chromium/v8/src/elements.h
+++ b/chromium/v8/src/elements.h
@@ -8,6 +8,7 @@
#include "src/elements-kind.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
+#include "src/key-accumulator.h"
#include "src/objects.h"
namespace v8 {
@@ -22,6 +23,14 @@ class ElementsAccessor {
const char* name() const { return name_; }
+ // Returns a shared ElementsAccessor for the specified ElementsKind.
+ static ElementsAccessor* ForKind(ElementsKind elements_kind) {
+ DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
+ return elements_accessors_[elements_kind];
+ }
+
+ static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
+
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(Handle<JSObject> obj) = 0;
@@ -30,12 +39,19 @@ class ElementsAccessor {
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
// the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
- // holder->elements() is used as the backing store.
+ // holder->elements() is used as the backing store. If a |filter| is
+ // specified, the PropertyAttributes of the element at the given index
+ // are compared to the given |filter|; if they overlap, the index is
+ // ignored. Note that only Dictionary elements have custom
+ // PropertyAttributes associated, hence the |filter| argument is ignored
+ // for all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store) = 0;
+ Handle<FixedArrayBase> backing_store,
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
- inline bool HasElement(Handle<JSObject> holder, uint32_t index) {
- return HasElement(holder, index, handle(holder->elements()));
+ inline bool HasElement(Handle<JSObject> holder, uint32_t index,
+ PropertyFilter filter = ALL_PROPERTIES) {
+ return HasElement(holder, index, handle(holder->elements()), filter);
}
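// Hypothetical call site for the new |filter| parameter (assumes the
// v8::internal context, and that ONLY_ENUMERABLE is the PropertyFilter that
// masks out DONT_ENUM entries): check for an enumerable own element without
// touching the prototype chain.
bool HasEnumerableElement(ElementsAccessor* accessor, Handle<JSObject> holder,
                          uint32_t index) {
  return accessor->HasElement(holder, index, ONLY_ENUMERABLE);
}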
// Returns true if the backing store is compact in the given range
@@ -97,20 +113,31 @@ class ElementsAccessor {
*from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole);
}
- virtual void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) = 0;
+ // Copy all indices that have elements from |object| into the given
+ // KeyAccumulator. For Dictionary-based element kinds we filter out elements
+ // whose PropertyAttributes match |filter|.
+ virtual void CollectElementIndices(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys,
+ uint32_t range = kMaxUInt32,
+ PropertyFilter filter = ALL_PROPERTIES,
+ uint32_t offset = 0) = 0;
+
+ inline void CollectElementIndices(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ uint32_t range = kMaxUInt32,
+ PropertyFilter filter = ALL_PROPERTIES,
+ uint32_t offset = 0) {
+ CollectElementIndices(object, handle(object->elements()), keys, range,
+ filter, offset);
+ }
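// Hypothetical use of the convenience overload above (again assuming the
// v8::internal context; JSObject::GetElementsAccessor() is assumed to return
// the accessor for the object's current ElementsKind): collect every own
// element index below |limit| into |keys|.
void OwnIndicesBelow(Handle<JSObject> object, KeyAccumulator* keys,
                     uint32_t limit) {
  object->GetElementsAccessor()->CollectElementIndices(
      object, keys, limit /* range */, ALL_PROPERTIES, 0 /* offset */);
}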
virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
- FixedArray::KeyFilter filter) = 0;
-
- // Returns a shared ElementsAccessor for the specified ElementsKind.
- static ElementsAccessor* ForKind(ElementsKind elements_kind) {
- DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
- return elements_accessors_[elements_kind];
- }
+ AddKeyConversion convert) = 0;
- static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
+ virtual void GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) = 0;
static void InitializeOncePerProcess();
static void TearDown();
@@ -158,8 +185,6 @@ class ElementsAccessor {
static ElementsAccessor* ForArray(FixedArrayBase* array);
- virtual uint32_t GetCapacity(JSObject* holder,
- FixedArrayBase* backing_store) = 0;
// Element handlers distinguish between entries and indices when they
// manipulate elements. Entries refer to elements in terms of their location
@@ -176,6 +201,8 @@ class ElementsAccessor {
uint32_t entry) = 0;
private:
+ virtual uint32_t GetCapacity(JSObject* holder,
+ FixedArrayBase* backing_store) = 0;
static ElementsAccessor** elements_accessors_;
const char* name_;
@@ -189,6 +216,7 @@ MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Handle<JSArray> array,
Arguments* args);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ELEMENTS_H_
diff --git a/chromium/v8/src/execution.cc b/chromium/v8/src/execution.cc
index 526390bd187..d4efb7653d8 100644
--- a/chromium/v8/src/execution.cc
+++ b/chromium/v8/src/execution.cc
@@ -6,7 +6,6 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/deoptimizer.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/vm-state-inl.h"
@@ -58,7 +57,7 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
Handle<Object> receiver, int argc,
Handle<Object> args[],
Handle<Object> new_target) {
- DCHECK(!receiver->IsGlobalObject());
+ DCHECK(!receiver->IsJSGlobalObject());
// Entering JavaScript.
VMState<JS> state(isolate);
@@ -95,7 +94,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
if (FLAG_profile_deserialization && target->IsJSFunction()) {
PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
}
- value = CALL_GENERATED_CODE(stub_entry, orig_func, func, recv, argc, argv);
+ value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
+ argc, argv);
}
#ifdef VERIFY_HEAP
@@ -109,10 +109,6 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
isolate->ReportPendingMessages();
- // Reset stepping state when script exits with uncaught exception.
- if (isolate->debug()->is_active()) {
- isolate->debug()->ClearStepping();
- }
return MaybeHandle<Object>();
} else {
isolate->clear_pending_message();
@@ -131,9 +127,9 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
// directly to a global object.
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalObject()) {
receiver =
- handle(Handle<GlobalObject>::cast(receiver)->global_proxy(), isolate);
+ handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
}
// api callbacks can be called directly.
@@ -152,7 +148,7 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
isolate, receiver, Execution::ToObject(isolate, receiver), Object);
}
}
- DCHECK(function->context()->global_object()->IsGlobalObject());
+ DCHECK(function->context()->global_object()->IsJSGlobalObject());
auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
bool has_exception = value.is_null();
DCHECK(has_exception == isolate->has_pending_exception());
@@ -185,12 +181,12 @@ MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
}
-MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
+MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
+ Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> args[],
MaybeHandle<Object>* exception_out) {
bool is_termination = false;
- Isolate* isolate = func->GetIsolate();
MaybeHandle<Object> maybe_result;
if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
// Enter a try-block while executing the JavaScript code. To avoid
@@ -202,7 +198,7 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result = Call(isolate, func, receiver, argc, args);
+ maybe_result = Call(isolate, callable, receiver, argc, args);
if (maybe_result.is_null()) {
DCHECK(catcher.HasCaught());
@@ -424,50 +420,16 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, args) \
- do { \
- Handle<Object> argv[] = args; \
- return Call(isolate, isolate->name##_fun(), \
- isolate->factory()->undefined_value(), arraysize(argv), argv); \
- } while (false)
-
-
-MaybeHandle<Object> Execution::ToDetailString(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_detail_string, { obj });
-}
-
-
-MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
- Handle<Object> time_obj = isolate->factory()->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, { time_obj });
-}
-
-#undef RETURN_NATIVE_CALL
-
-
-MaybeHandle<Object> Execution::ToObject(Isolate* isolate, Handle<Object> obj) {
+MaybeHandle<JSReceiver> Execution::ToObject(Isolate* isolate,
+ Handle<Object> obj) {
Handle<JSReceiver> receiver;
if (JSReceiver::ToObject(isolate, obj).ToHandle(&receiver)) {
return receiver;
}
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject), Object);
-}
-
-
-MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
- Handle<String> flags) {
- Isolate* isolate = pattern->GetIsolate();
- Handle<JSFunction> function = Handle<JSFunction>(
- isolate->native_context()->regexp_function());
- Handle<Object> re_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, re_obj,
- RegExpImpl::CreateRegExpLiteral(function, pattern, flags),
- JSRegExp);
- return Handle<JSRegExp>::cast(re_obj);
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+ JSReceiver);
}
@@ -478,7 +440,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Isolate* isolate = fun->GetIsolate();
Handle<Object> args[] = { recv, fun, pos, is_global };
MaybeHandle<Object> maybe_result =
- TryCall(isolate->get_stack_trace_line_fun(),
+ TryCall(isolate, isolate->get_stack_trace_line_fun(),
isolate->factory()->undefined_value(), arraysize(args), args);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
@@ -497,6 +459,11 @@ void StackGuard::HandleGCInterrupt() {
Object* StackGuard::HandleInterrupts() {
+ if (FLAG_verify_predictable) {
+ // Advance synthetic time by making a time request.
+ isolate_->heap()->MonotonicallyIncreasingTimeInMs();
+ }
+
if (CheckAndClearInterrupt(GC_REQUEST)) {
isolate_->heap()->HandleGCRequest();
}
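
The verify_predictable hook added to StackGuard::HandleInterrupts above leans on V8's synthetic clock: under --verify-predictable, "time" only advances when it is queried, so issuing a time request from the interrupt path keeps time-based heuristics deterministic across runs. A toy model of such a clock (invented names, not V8's heap code):

    #include <cstdint>

    // Deterministic clock: advances by a fixed step per query, never by wall time.
    class SyntheticClock {
     public:
      double MonotonicallyIncreasingTimeInMs() {
        ticks_ += 1;            // each request is one synthetic tick
        return ticks_ * 0.001;  // reported in milliseconds
      }

     private:
      uint64_t ticks_ = 0;
    };

    int main() {
      SyntheticClock clock;
      double t1 = clock.MonotonicallyIncreasingTimeInMs();
      double t2 = clock.MonotonicallyIncreasingTimeInMs();
      return t2 > t1 ? 0 : 1;  // strictly increasing, independent of real time
    }
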
diff --git a/chromium/v8/src/execution.h b/chromium/v8/src/execution.h
index 84f106a4968..81b71b631ef 100644
--- a/chromium/v8/src/execution.h
+++ b/chromium/v8/src/execution.h
@@ -13,9 +13,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class JSRegExp;
-
class Execution final : public AllStatic {
public:
// Call a function, the caller supplies a receiver and an array
@@ -47,26 +44,14 @@ class Execution final : public AllStatic {
// that occurred (if caught exception is true).
// In the exception case, exception_out holds the caught exceptions, unless
// it is a termination exception.
- static MaybeHandle<Object> TryCall(Handle<JSFunction> func,
+ static MaybeHandle<Object> TryCall(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[],
MaybeHandle<Object>* exception_out = NULL);
- // ECMA-262 9.8
- MUST_USE_RESULT static MaybeHandle<Object> ToDetailString(
- Isolate* isolate, Handle<Object> obj);
-
// ECMA-262 9.9
- MUST_USE_RESULT static MaybeHandle<Object> ToObject(
- Isolate* isolate, Handle<Object> obj);
-
- // Create a new date object from 'time'.
- MUST_USE_RESULT static MaybeHandle<Object> NewDate(
- Isolate* isolate, double time);
-
- // Create a new regular expression object from 'pattern' and 'flags'.
- MUST_USE_RESULT static MaybeHandle<JSRegExp> NewJSRegExp(
- Handle<String> pattern, Handle<String> flags);
+ MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
+ Handle<Object> obj);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
@@ -247,6 +232,7 @@ class StackGuard final {
DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXECUTION_H_
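
Two things changed in TryCall's contract above: the callee widened from Handle<JSFunction> to a generic callable (bound functions and proxies are callable without being JSFunctions), which in turn forces callers to pass the Isolate explicitly, since it can no longer be recovered via func->GetIsolate(). A rough standalone analogy of the call-and-capture contract, with std::optional standing in for MaybeHandle (not V8's implementation):

    #include <functional>
    #include <optional>
    #include <stdexcept>
    #include <string>

    using Value = std::string;  // stand-in for Handle<Object>

    std::optional<Value> TryCall(const std::function<Value()>& callable,
                                 std::optional<Value>* exception_out = nullptr) {
      if (exception_out) exception_out->reset();
      try {
        return callable();  // the Call(isolate, callable, ...) step
      } catch (const std::runtime_error& e) {
        // An empty result signals failure; the caught exception travels through
        // |exception_out|, mirroring TryCall's out-parameter.
        if (exception_out) *exception_out = Value(e.what());
        return std::nullopt;
      }
    }

    int main() {
      std::optional<Value> exception;
      auto result = TryCall([]() -> Value { throw std::runtime_error("boom"); },
                            &exception);
      return (!result && exception) ? 0 : 1;
    }
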
diff --git a/chromium/v8/src/extensions/externalize-string-extension.h b/chromium/v8/src/extensions/externalize-string-extension.h
index 009e8184978..c8907b42ed2 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.h
+++ b/chromium/v8/src/extensions/externalize-string-extension.h
@@ -22,6 +22,7 @@ class ExternalizeStringExtension : public v8::Extension {
static const char* const kSource;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
diff --git a/chromium/v8/src/extensions/free-buffer-extension.h b/chromium/v8/src/extensions/free-buffer-extension.h
index bb1418c4a3a..6bc5e57cbc1 100644
--- a/chromium/v8/src/extensions/free-buffer-extension.h
+++ b/chromium/v8/src/extensions/free-buffer-extension.h
@@ -19,6 +19,7 @@ class FreeBufferExtension : public v8::Extension {
static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
diff --git a/chromium/v8/src/extensions/gc-extension.h b/chromium/v8/src/extensions/gc-extension.h
index 91433911c29..9be0d4b7011 100644
--- a/chromium/v8/src/extensions/gc-extension.h
+++ b/chromium/v8/src/extensions/gc-extension.h
@@ -30,6 +30,7 @@ class GCExtension : public v8::Extension {
char buffer_[50];
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_GC_EXTENSION_H_
diff --git a/chromium/v8/src/extensions/statistics-extension.h b/chromium/v8/src/extensions/statistics-extension.h
index 5dac4097b6e..714f86aeba0 100644
--- a/chromium/v8/src/extensions/statistics-extension.h
+++ b/chromium/v8/src/extensions/statistics-extension.h
@@ -21,6 +21,7 @@ class StatisticsExtension : public v8::Extension {
static const char* const kSource;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_
diff --git a/chromium/v8/src/extensions/trigger-failure-extension.h b/chromium/v8/src/extensions/trigger-failure-extension.h
index 4b10bdc8866..7c7ecf882cf 100644
--- a/chromium/v8/src/extensions/trigger-failure-extension.h
+++ b/chromium/v8/src/extensions/trigger-failure-extension.h
@@ -26,6 +26,7 @@ class TriggerFailureExtension : public v8::Extension {
static const char* const kSource;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
diff --git a/chromium/v8/src/factory.cc b/chromium/v8/src/factory.cc
index 8923d071b10..4701c2154d0 100644
--- a/chromium/v8/src/factory.cc
+++ b/chromium/v8/src/factory.cc
@@ -96,7 +96,6 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
result->set_prototype_users(WeakFixedArray::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_validity_cell(Smi::FromInt(0));
- result->set_constructor_name(Smi::FromInt(0));
return result;
}
@@ -244,6 +243,12 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
}
+Handle<Name> Factory::InternalizeName(Handle<Name> name) {
+ if (name->IsUniqueName()) return name;
+ return InternalizeString(Handle<String>::cast(name));
+}
+
+
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
@@ -665,7 +670,13 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
- Handle<Map> map = external_one_byte_string_map();
+ Handle<Map> map;
+ if (resource->IsCompressible()) {
+ // TODO(hajimehoshi): Rename this to 'uncached_external_one_byte_string_map'
+ map = short_external_one_byte_string_map();
+ } else {
+ map = external_one_byte_string_map();
+ }
Handle<ExternalOneByteString> external_string =
New<ExternalOneByteString>(map, NEW_SPACE);
external_string->set_length(static_cast<int>(length));
@@ -688,8 +699,15 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
static const size_t kOneByteCheckLengthLimit = 32;
bool is_one_byte = length <= kOneByteCheckLengthLimit &&
String::IsOneByte(resource->data(), static_cast<int>(length));
- Handle<Map> map = is_one_byte ?
- external_string_with_one_byte_data_map() : external_string_map();
+ Handle<Map> map;
+ if (resource->IsCompressible()) {
+ // TODO(hajimehoshi): Rename these to 'uncached_external_string_...'.
+ map = is_one_byte ? short_external_string_with_one_byte_data_map()
+ : short_external_string_map();
+ } else {
+ map = is_one_byte ? external_string_with_one_byte_data_map()
+ : external_string_map();
+ }
Handle<ExternalTwoByteString> external_string =
New<ExternalTwoByteString>(map, NEW_SPACE);
external_string->set_length(static_cast<int>(length));
@@ -708,14 +726,9 @@ Handle<Symbol> Factory::NewSymbol() {
}
-Handle<Symbol> Factory::NewPrivateSymbol(Handle<Object> name) {
+Handle<Symbol> Factory::NewPrivateSymbol() {
Handle<Symbol> symbol = NewSymbol();
symbol->set_is_private(true);
- if (name->IsString()) {
- symbol->set_name(*name);
- } else {
- DCHECK(name->IsUndefined());
- }
return symbol;
}
@@ -725,7 +738,10 @@ Handle<Context> Factory::NewNativeContext() {
NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
array->set_map_no_write_barrier(*native_context_map());
Handle<Context> context = Handle<Context>::cast(array);
- context->set_js_array_maps(*undefined_value());
+ context->set_native_context(*context);
+ context->set_errors_thrown(Smi::FromInt(0));
+ Handle<WeakCell> weak_cell = NewWeakCell(context);
+ context->set_self_weak_cell(*weak_cell);
DCHECK(context->IsNativeContext());
return context;
}
@@ -740,7 +756,7 @@ Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(function->context());
context->set_extension(*scope_info);
- context->set_global_object(function->context()->global_object());
+ context->set_native_context(function->native_context());
DCHECK(context->IsScriptContext());
return context;
}
@@ -762,7 +778,7 @@ Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
array->set_map_no_write_barrier(*module_context_map());
// Instance link will be set later.
Handle<Context> context = Handle<Context>::cast(array);
- context->set_extension(Smi::FromInt(0));
+ context->set_extension(*the_hole_value());
return context;
}
@@ -775,8 +791,8 @@ Handle<Context> Factory::NewFunctionContext(int length,
Handle<Context> context = Handle<Context>::cast(array);
context->set_closure(*function);
context->set_previous(function->context());
- context->set_extension(Smi::FromInt(0));
- context->set_global_object(function->context()->global_object());
+ context->set_extension(*the_hole_value());
+ context->set_native_context(function->native_context());
return context;
}
@@ -792,7 +808,7 @@ Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*name);
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
return context;
}
@@ -807,7 +823,7 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*extension);
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
return context;
}
@@ -822,7 +838,7 @@ Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*scope_info);
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
return context;
}
@@ -964,6 +980,13 @@ Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value) {
}
+Handle<TransitionArray> Factory::NewTransitionArray(int capacity) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateTransitionArray(capacity),
+ TransitionArray);
+}
+
+
Handle<AllocationSite> Factory::NewAllocationSite() {
Handle<Map> map = allocation_site_map();
Handle<AllocationSite> site = New<AllocationSite>(map, OLD_SPACE);
@@ -1122,8 +1145,8 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// running the factory method, use the exception as the result.
Handle<Object> result;
MaybeHandle<Object> exception;
- if (!Execution::TryCall(fun, undefined_value(), arraysize(argv), argv,
- &exception)
+ if (!Execution::TryCall(isolate(), fun, undefined_value(), arraysize(argv),
+ argv, &exception)
.ToHandle(&result)) {
Handle<Object> exception_obj;
if (exception.ToHandle(&exception_obj)) {
@@ -1144,8 +1167,8 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// running the factory method, use the exception as the result.
Handle<Object> result;
MaybeHandle<Object> exception;
- if (!Execution::TryCall(constructor, undefined_value(), arraysize(argv), argv,
- &exception)
+ if (!Execution::TryCall(isolate(), constructor, undefined_value(),
+ arraysize(argv), argv, &exception)
.ToHandle(&result)) {
Handle<Object> exception_obj;
if (exception.ToHandle(&exception_obj)) return exception_obj;
@@ -1171,28 +1194,23 @@ DEFINE_ERROR(TypeError, type_error)
#undef DEFINE_ERROR
-void Factory::InitializeFunction(Handle<JSFunction> function,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context) {
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ PretenureFlag pretenure) {
+ AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
+ Handle<JSFunction> function = New<JSFunction>(map, space);
+
function->initialize_properties();
function->initialize_elements();
function->set_shared(*info);
function->set_code(info->code());
function->set_context(*context);
function->set_prototype_or_initial_map(*the_hole_value());
- function->set_literals_or_bindings(*empty_fixed_array());
+ function->set_literals(LiteralsArray::cast(*empty_fixed_array()));
function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
- PretenureFlag pretenure) {
- AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
- Handle<JSFunction> result = New<JSFunction>(map, space);
- InitializeFunction(result, info, context);
- return result;
+ isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
+ return function;
}
@@ -1200,14 +1218,19 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<String> name,
MaybeHandle<Code> code) {
Handle<Context> context(isolate()->native_context());
- Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(name, code);
- DCHECK(is_sloppy(info->language_mode()) &&
- (map.is_identical_to(isolate()->sloppy_function_map()) ||
- map.is_identical_to(
- isolate()->sloppy_function_without_prototype_map()) ||
- map.is_identical_to(
- isolate()->sloppy_function_with_readonly_prototype_map()) ||
- map.is_identical_to(isolate()->strict_function_map())));
+ Handle<SharedFunctionInfo> info =
+ NewSharedFunctionInfo(name, code, map->is_constructor());
+ DCHECK(is_sloppy(info->language_mode()));
+ DCHECK(!map->IsUndefined());
+ DCHECK(
+ map.is_identical_to(isolate()->sloppy_function_map()) ||
+ map.is_identical_to(isolate()->sloppy_function_without_prototype_map()) ||
+ map.is_identical_to(
+ isolate()->sloppy_function_with_readonly_prototype_map()) ||
+ map.is_identical_to(isolate()->strict_function_map()) ||
+ // TODO(titzer): wasm_function_map() could be undefined here. ugly.
+ (*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
+ map.is_identical_to(isolate()->proxy_function_map()));
return NewFunction(map, info, context);
}
@@ -1317,8 +1340,19 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
PretenureFlag pretenure) {
int map_index =
Context::FunctionMapIndex(info->language_mode(), info->kind());
- Handle<Map> map(Map::cast(context->native_context()->get(map_index)));
- Handle<JSFunction> result = NewFunction(map, info, context, pretenure);
+ Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
+
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
+ pretenure);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Context> context, PretenureFlag pretenure) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ Handle<JSFunction> result =
+ NewFunction(initial_map, info, context, pretenure);
if (info->ic_age() != isolate()->heap()->global_ic_age()) {
info->ResetForNewContext(isolate()->heap()->global_ic_age());
@@ -1339,19 +1373,17 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
if (cached.literals != nullptr) {
result->set_literals(cached.literals);
-
- } else if (!info->bound()) {
+ } else {
int number_of_literals = info->num_literals();
Handle<LiteralsArray> literals =
LiteralsArray::New(isolate(), handle(info->feedback_vector()),
number_of_literals, pretenure);
result->set_literals(*literals);
+
// Cache context-specific literals.
- if (FLAG_cache_optimized_code) {
- Handle<Context> native_context(context->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(
- info, native_context, undefined_value(), literals, BailoutId::None());
- }
+ Handle<Context> native_context(context->native_context());
+ SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(info, native_context,
+ literals);
}
return result;
@@ -1415,9 +1447,8 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_next_code_link(*undefined_value());
code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_prologue_offset(prologue_offset);
- if (FLAG_enable_embedded_constant_pool) {
- code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
- }
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
@@ -1492,7 +1523,8 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
}
-Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
+Handle<JSGlobalObject> Factory::NewJSGlobalObject(
+ Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
Handle<Map> map(constructor->initial_map());
DCHECK(map->is_dictionary_map());
@@ -1510,7 +1542,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
// Initial size of the backing store to avoid resize of the storage during
// bootstrapping. The size differs between the JS global object and the
// builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
+ int initial_size = 64;
// Allocate a dictionary object for backing storage.
int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size;
@@ -1534,7 +1566,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
}
// Allocate the global object and initialize it with the backing store.
- Handle<GlobalObject> global = New<GlobalObject>(map, OLD_SPACE);
+ Handle<JSGlobalObject> global = New<JSGlobalObject>(map, OLD_SPACE);
isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
// Create a new map for the global object.
@@ -1546,7 +1578,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
global->set_properties(*dictionary);
// Make sure result is a global object with properties in dictionary.
- DCHECK(global->IsGlobalObject() && !global->HasFastProperties());
+ DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
return global;
}
@@ -1645,7 +1677,7 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
DCHECK(function->shared()->is_generator());
JSFunction::EnsureHasInitialMap(function);
Handle<Map> map(function->initial_map());
- DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+ DCHECK_EQ(JS_GENERATOR_OBJECT_TYPE, map->instance_type());
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObjectFromMap(*map),
@@ -1923,103 +1955,81 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
}
-Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
- Handle<Object> prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Handle<Map> map = NewMap(JS_PROXY_TYPE, JSProxy::kSize);
- Map::SetPrototype(map, prototype);
-
- // Allocate the proxy object.
- Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(*handler);
- result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
- Handle<JSReceiver> call_trap,
- Handle<Object> construct_trap,
- Handle<Object> prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Handle<Map> map = NewMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
- Map::SetPrototype(map, prototype);
- map->set_is_callable();
- map->set_is_constructor(construct_trap->IsCallable());
-
- // Allocate the proxy object.
- Handle<JSFunctionProxy> result = New<JSFunctionProxy>(map, NEW_SPACE);
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(*handler);
- result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
- result->set_call_trap(*call_trap);
- result->set_construct_trap(*construct_trap);
- return result;
-}
-
-
-void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
- int size) {
- DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
-
- Handle<Map> proxy_map(proxy->map());
- Handle<Map> map = Map::FixProxy(proxy_map, type, size);
-
- // Check that the receiver has at least the size of the fresh object.
- int size_difference = proxy_map->instance_size() - map->instance_size();
- DCHECK(size_difference >= 0);
-
- // Allocate the backing storage for the properties.
- Handle<FixedArray> properties = empty_fixed_array();
-
- Heap* heap = isolate()->heap();
- MaybeHandle<SharedFunctionInfo> shared;
- if (type == JS_FUNCTION_TYPE) {
- OneByteStringKey key(STATIC_CHAR_VECTOR("<freezing call trap>"),
- heap->HashSeed());
- Handle<String> name = InternalizeStringWithKey(&key);
- shared = NewSharedFunctionInfo(name, MaybeHandle<Code>());
+MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
+ Handle<JSReceiver> target_function, Handle<Object> bound_this,
+ Vector<Handle<Object>> bound_args) {
+ DCHECK(target_function->IsCallable());
+ STATIC_ASSERT(Code::kMaxArguments <= FixedArray::kMaxLength);
+ if (bound_args.length() >= Code::kMaxArguments) {
+ THROW_NEW_ERROR(isolate(),
+ NewRangeError(MessageTemplate::kTooManyArguments),
+ JSBoundFunction);
}
- // In order to keep heap in consistent state there must be no allocations
- // before object re-initialization is finished and filler object is installed.
- DisallowHeapAllocation no_allocation;
+ // Determine the prototype of the {target_function}.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), prototype,
+ Object::GetPrototype(isolate(), target_function),
+ JSBoundFunction);
- // Put in filler if the new object is smaller than the old.
- if (size_difference > 0) {
- Address address = proxy->address();
- heap->CreateFillerObjectAt(address + map->instance_size(), size_difference);
- heap->AdjustLiveBytes(*proxy, -size_difference,
- Heap::CONCURRENT_TO_SWEEPER);
+ // Create the [[BoundArguments]] for the result.
+ Handle<FixedArray> bound_arguments;
+ if (bound_args.length() == 0) {
+ bound_arguments = empty_fixed_array();
+ } else {
+ bound_arguments = NewFixedArray(bound_args.length());
+ for (int i = 0; i < bound_args.length(); ++i) {
+ bound_arguments->set(i, *bound_args[i]);
+ }
}
- // Reset the map for the object.
- proxy->synchronized_set_map(*map);
- Handle<JSObject> jsobj = Handle<JSObject>::cast(proxy);
-
- // Reinitialize the object from the constructor map.
- heap->InitializeJSObjectFromMap(*jsobj, *properties, *map);
+ // Set up the map for the JSBoundFunction instance.
+ Handle<Map> map = handle(
+ target_function->IsConstructor()
+ ? isolate()->native_context()->bound_function_with_constructor_map()
+ : isolate()
+ ->native_context()
+ ->bound_function_without_constructor_map(),
+ isolate());
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+ }
+ DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
+
+ // Set up the JSBoundFunction instance.
+ Handle<JSBoundFunction> result =
+ Handle<JSBoundFunction>::cast(NewJSObjectFromMap(map));
+ result->set_bound_target_function(*target_function);
+ result->set_bound_this(*bound_this);
+ result->set_bound_arguments(*bound_arguments);
+ result->set_creation_context(*isolate()->native_context());
+ result->set_length(Smi::FromInt(0));
+ result->set_name(*undefined_value(), SKIP_WRITE_BARRIER);
+ return result;
+}
- // The current native context is used to set up certain bits.
- // TODO(adamk): Using the current context seems wrong, it should be whatever
- // context the JSProxy originated in. But that context isn't stored anywhere.
- Handle<Context> context(isolate()->native_context());
- // Functions require some minimal initialization.
- if (type == JS_FUNCTION_TYPE) {
- map->set_is_constructor(true);
- map->set_is_callable();
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(proxy);
- InitializeFunction(js_function, shared.ToHandleChecked(), context);
+// ES6 section 9.5.15 ProxyCreate (target, handler)
+Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
+ Handle<JSReceiver> handler) {
+ // Allocate the proxy object.
+ Handle<Map> map;
+ if (target->IsCallable()) {
+ if (target->IsConstructor()) {
+ map = Handle<Map>(isolate()->proxy_constructor_map());
+ } else {
+ map = Handle<Map>(isolate()->proxy_callable_map());
+ }
} else {
- // Provide JSObjects with a constructor.
- map->SetConstructor(context->object_function());
+ map = Handle<Map>(isolate()->proxy_map());
}
+ DCHECK(map->prototype()->IsNull());
+ Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
+ result->initialize_properties();
+ result->set_target(*target);
+ result->set_handler(*handler);
+ result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
+ return result;
}
@@ -2039,14 +2049,22 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
Handle<Map> map(constructor->initial_map(), isolate());
+ Handle<Map> old_map(object->map(), isolate());
// The proxy's hash should be retained across reinitialization.
Handle<Object> hash(object->hash(), isolate());
+ JSObject::InvalidatePrototypeChains(*old_map);
+ if (old_map->is_prototype_map()) {
+ map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
+ map->set_is_prototype_map(true);
+ }
+ JSObject::UpdatePrototypeUserRegistration(old_map, map, isolate());
+
// Check that the already allocated object has the same size and type as
// objects allocated using the constructor.
- DCHECK(map->instance_size() == object->map()->instance_size());
- DCHECK(map->instance_type() == object->map()->instance_type());
+ DCHECK(map->instance_size() == old_map->instance_size());
+ DCHECK(map->instance_type() == old_map->instance_type());
// Allocate the backing storage for the properties.
Handle<FixedArray> properties = empty_fixed_array();
@@ -2067,33 +2085,13 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
}
-void Factory::BecomeJSObject(Handle<JSProxy> proxy) {
- ReinitializeJSProxy(proxy, JS_OBJECT_TYPE, JSObject::kHeaderSize);
-}
-
-
-void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
- ReinitializeJSProxy(proxy, JS_FUNCTION_TYPE, JSFunction::kSize);
-}
-
-
-template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
- const FeedbackVectorSpec* spec);
-template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
- const StaticFeedbackVectorSpec* spec);
-
-template <typename Spec>
-Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(const Spec* spec) {
- return TypeFeedbackVector::Allocate<Spec>(isolate(), spec);
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name, int number_of_literals, FunctionKind kind,
Handle<Code> code, Handle<ScopeInfo> scope_info,
Handle<TypeFeedbackVector> feedback_vector) {
DCHECK(IsValidFunctionKind(kind));
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name, code);
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ name, code, IsConstructable(kind, scope_info->language_mode()));
shared->set_scope_info(*scope_info);
shared->set_feedback_vector(*feedback_vector);
shared->set_kind(kind);
@@ -2126,8 +2124,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name,
- MaybeHandle<Code> maybe_code) {
+ Handle<String> name, MaybeHandle<Code> maybe_code, bool is_constructor) {
Handle<Map> map = shared_function_info_map();
Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, OLD_SPACE);
@@ -2135,22 +2132,25 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_name(*name);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
- code = handle(isolate()->builtins()->builtin(Builtins::kIllegal));
+ code = isolate()->builtins()->Illegal();
}
share->set_code(*code);
- share->set_optimized_code_map(Smi::FromInt(0));
+ share->set_optimized_code_map(*cleared_optimized_code_map());
share->set_scope_info(ScopeInfo::Empty(isolate()));
- Code* construct_stub =
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- share->set_construct_stub(construct_stub);
+ Handle<Code> construct_stub =
+ is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
+ : isolate()->builtins()->ConstructedNonConstructable();
+ share->set_construct_stub(*construct_stub);
share->set_instance_class_name(*Object_string());
share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
StaticFeedbackVectorSpec empty_spec;
+ Handle<TypeFeedbackMetadata> feedback_metadata =
+ TypeFeedbackMetadata::New(isolate(), &empty_spec);
Handle<TypeFeedbackVector> feedback_vector =
- NewTypeFeedbackVector(&empty_spec);
+ TypeFeedbackVector::New(isolate(), feedback_metadata);
share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
#if TRACE_MAPS
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
@@ -2171,6 +2171,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_compiler_hints(0);
share->set_opt_count_and_bailout_reason(0);
+ // Link into the list.
+ Handle<Object> new_noscript_list =
+ WeakFixedArray::Add(noscript_shared_function_infos(), share);
+ isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+
return share;
}
@@ -2270,7 +2275,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
- !callee->has_simple_parameters();
+ !callee->shared()->has_simple_parameters();
Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
: isolate()->sloppy_arguments_map();
AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(),
@@ -2359,7 +2364,7 @@ void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+ store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
store->set(JSRegExp::kAtomPatternIndex, *data);
regexp->set_data(*store);
}
@@ -2374,7 +2379,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+ store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
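
Among the factory.cc changes, NewJSBoundFunction is the structural one: it materializes the spec's [[BoundTargetFunction]], [[BoundThis]] and [[BoundArguments]] internal slots, and it picks a constructor-capable map only when the target itself is a constructor. A plain sketch of just the data capture and the prepend-on-call semantics (illustrative layout, not V8's object model):

    #include <functional>
    #include <string>
    #include <vector>

    using Value = std::string;
    using Callable =
        std::function<Value(Value /*receiver*/, std::vector<Value> /*args*/)>;

    struct BoundFunction {
      Callable target;                // [[BoundTargetFunction]]
      Value bound_this;               // [[BoundThis]]
      std::vector<Value> bound_args;  // [[BoundArguments]]

      Value Call(const std::vector<Value>& call_args) const {
        // Bound arguments are prepended to the arguments of every call.
        std::vector<Value> args = bound_args;
        args.insert(args.end(), call_args.begin(), call_args.end());
        return target(bound_this, args);
      }
    };

    int main() {
      BoundFunction join{
          [](Value receiver, std::vector<Value> args) {
            Value out = receiver;
            for (const Value& a : args) out += ":" + a;
            return out;
          },
          "this", {"a", "b"}};
      return join.Call({"c"}) == "this:a:b:c" ? 0 : 1;
    }
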
diff --git a/chromium/v8/src/factory.h b/chromium/v8/src/factory.h
index b7602e023b0..01a2f7eecf2 100644
--- a/chromium/v8/src/factory.h
+++ b/chromium/v8/src/factory.h
@@ -7,12 +7,11 @@
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/type-feedback-vector.h"
namespace v8 {
namespace internal {
-class StaticFeedbackVectorSpec;
-
// Interface for handle based allocation.
class Factory final {
public:
@@ -81,6 +80,8 @@ class Factory final {
template<class StringTableKey>
Handle<String> InternalizeStringWithKey(StringTableKey* key);
+ Handle<Name> InternalizeName(Handle<Name> name);
+
// String creation functions. Most of the string creation functions take
// a Heap::PretenureFlag argument to optionally request that they be
@@ -218,7 +219,7 @@ class Factory final {
// Create a symbol.
Handle<Symbol> NewSymbol();
- Handle<Symbol> NewPrivateSymbol(Handle<Object> name);
+ Handle<Symbol> NewPrivateSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -294,6 +295,8 @@ class Factory final {
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
+ Handle<TransitionArray> NewTransitionArray(int capacity);
+
// Allocate a tenured AllocationSite. It's payload is null.
Handle<AllocationSite> NewAllocationSite();
@@ -340,7 +343,9 @@ class Factory final {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromSize(size_t value,
PretenureFlag pretenure = NOT_TENURED) {
- if (Smi::IsValid(static_cast<intptr_t>(value))) {
+ // We can't use Smi::IsValid() here because that operates on a signed
+ // intptr_t, and casting from size_t could create a bogus sign bit.
+ if (value <= static_cast<size_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
isolate());
}
@@ -375,7 +380,7 @@ class Factory final {
Handle<AllocationSite> site);
// Global objects are pretenured and initialized based on a constructor.
- Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
+ Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
@@ -474,14 +479,14 @@ class Factory final {
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value,
Handle<Object> done);
- // Allocates a Harmony proxy.
- Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+ // Allocates a bound function.
+ MaybeHandle<JSBoundFunction> NewJSBoundFunction(
+ Handle<JSReceiver> target_function, Handle<Object> bound_this,
+ Vector<Handle<Object>> bound_args);
- // Allocates a Harmony function proxy.
- Handle<JSProxy> NewJSFunctionProxy(Handle<Object> handler,
- Handle<JSReceiver> call_trap,
- Handle<Object> construct_trap,
- Handle<Object> prototype);
+ // Allocates a Harmony proxy.
+ Handle<JSProxy> NewJSProxy(Handle<JSReceiver> target,
+ Handle<JSReceiver> handler);
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
@@ -492,10 +497,6 @@ class Factory final {
Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy();
- // Change the type of the argument into a JS object/function and reinitialize.
- void BecomeJSObject(Handle<JSProxy> object);
- void BecomeJSFunction(Handle<JSProxy> object);
-
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
bool read_only_prototype = false,
@@ -506,8 +507,11 @@ class Factory final {
bool is_strict = false);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> function_info,
- Handle<Context> context,
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
+ Handle<Context> context, PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> function_info, Handle<Context> context,
PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
@@ -520,6 +524,8 @@ class Factory final {
Handle<Code> code,
InstanceType type,
int instance_size);
+ Handle<JSFunction> NewFunction(Handle<Map> map, Handle<String> name,
+ MaybeHandle<Code> maybe_code);
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
@@ -616,6 +622,7 @@ class Factory final {
&isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
// Allocates a new SharedFunctionInfo object.
@@ -624,11 +631,8 @@ class Factory final {
Handle<Code> code, Handle<ScopeInfo> scope_info,
Handle<TypeFeedbackVector> feedback_vector);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
- MaybeHandle<Code> code);
-
- // Allocate a new type feedback vector
- template <typename Spec>
- Handle<TypeFeedbackVector> NewTypeFeedbackVector(const Spec* spec);
+ MaybeHandle<Code> code,
+ bool is_constructor);
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
@@ -695,31 +699,14 @@ class Factory final {
// Update the cache with a new number-string pair.
void SetNumberStringCache(Handle<Object> number, Handle<String> string);
- // Initializes a function with a shared part and prototype.
- // Note: this code was factored out of NewFunction such that other parts of
- // the VM could use it. Specifically, a function that creates instances of
- // type JS_FUNCTION_TYPE benefit from the use of this function.
- inline void InitializeFunction(Handle<JSFunction> function,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context);
-
// Creates a function initialized with a shared part.
Handle<JSFunction> NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
-
- Handle<JSFunction> NewFunction(Handle<Map> map,
- Handle<String> name,
- MaybeHandle<Code> maybe_code);
-
- // Reinitialize a JSProxy into an (empty) JS object of respective type and
- // size, but keeping the original prototype. The receiver must have at least
- // the size of the new object. The object is reinitialized and behaves as an
- // object that has been freshly allocated.
- void ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type, int size);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FACTORY_H_
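
The NewNumberFromSize rewrite above deserves a second look: casting a size_t to a signed intptr_t before range-checking yields an implementation-defined (typically negative) value for inputs above INTPTR_MAX, so a signed IsValid-style check can wrongly accept a huge size. Comparing in unsigned space avoids the cast. A self-contained illustration, with kMaxValue standing in for Smi::kMaxValue:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr intptr_t kMaxValue = (1 << 30) - 1;  // stand-in: 31-bit Smi range

    bool IsValidSigned(intptr_t value) {  // models Smi::IsValid
      return value >= -(kMaxValue + 1) && value <= kMaxValue;
    }

    bool FitsUnsigned(size_t value) {  // the corrected check
      return value <= static_cast<size_t>(kMaxValue);
    }

    int main() {
      size_t huge = static_cast<size_t>(-1);  // SIZE_MAX
      assert(!FitsUnsigned(huge));  // unsigned comparison rejects it, correctly
      // On typical platforms the cast wraps SIZE_MAX to -1, which the signed
      // check happily accepts: the "bogus sign bit" the new comment warns about.
      assert(IsValidSigned(static_cast<intptr_t>(huge)));
      return 0;
    }
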
diff --git a/chromium/v8/src/fast-dtoa.h b/chromium/v8/src/fast-dtoa.h
index 38e8a824995..d9e3ba89322 100644
--- a/chromium/v8/src/fast-dtoa.h
+++ b/chromium/v8/src/fast-dtoa.h
@@ -57,6 +57,7 @@ bool FastDtoa(double d,
int* length,
int* decimal_point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FAST_DTOA_H_
diff --git a/chromium/v8/src/field-index-inl.h b/chromium/v8/src/field-index-inl.h
index 042e4fbdd23..2e6693ce388 100644
--- a/chromium/v8/src/field-index-inl.h
+++ b/chromium/v8/src/field-index-inl.h
@@ -89,8 +89,7 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
- int field_index =
- map->instance_descriptors()->GetFieldIndex(descriptor_index);
+ int field_index = details.field_index();
return ForPropertyIndex(map, field_index,
details.representation().IsDouble());
}
@@ -119,6 +118,7 @@ inline int FieldIndex::GetKeyedLookupCacheIndex() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/chromium/v8/src/field-index.h b/chromium/v8/src/field-index.h
index 67515be0474..2862d36bdb7 100644
--- a/chromium/v8/src/field-index.h
+++ b/chromium/v8/src/field-index.h
@@ -19,6 +19,8 @@ class Map;
// index it was originally generated from.
class FieldIndex final {
public:
+ FieldIndex() : bit_field_(0) {}
+
static FieldIndex ForPropertyIndex(Map* map,
int index,
bool is_double = false);
@@ -111,6 +113,7 @@ class FieldIndex final {
int bit_field_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
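
FieldIndex packs its entire state into a single integer bit field, so the default constructor added above gives it a cheap all-zero "empty" value that arrays and by-value containers can hold before a real index is assigned. A toy version of the encoding (field widths invented for illustration):

    #include <cstdint>

    class FieldIndex {
     public:
      FieldIndex() : bit_field_(0) {}  // zero doubles as "no field yet"

      static FieldIndex ForPropertyIndex(int index, bool is_double) {
        FieldIndex result;
        result.bit_field_ = (static_cast<uint32_t>(index) << 1) |
                            static_cast<uint32_t>(is_double);
        return result;
      }

      int property_index() const { return static_cast<int>(bit_field_ >> 1); }
      bool is_double() const { return (bit_field_ & 1) != 0; }

     private:
      uint32_t bit_field_;
    };

    int main() {
      FieldIndex idx = FieldIndex::ForPropertyIndex(7, /*is_double=*/true);
      return (idx.property_index() == 7 && idx.is_double()) ? 0 : 1;
    }
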
diff --git a/chromium/v8/src/fixed-dtoa.h b/chromium/v8/src/fixed-dtoa.h
index 0a6cb50818d..f8a29e06398 100644
--- a/chromium/v8/src/fixed-dtoa.h
+++ b/chromium/v8/src/fixed-dtoa.h
@@ -29,6 +29,7 @@ namespace internal {
bool FastFixedDtoa(double v, int fractional_count,
Vector<char> buffer, int* length, int* decimal_point);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FIXED_DTOA_H_
diff --git a/chromium/v8/src/flag-definitions.h b/chromium/v8/src/flag-definitions.h
index e9e1363939f..9966a70382c 100644
--- a/chromium/v8/src/flag-definitions.h
+++ b/chromium/v8/src/flag-definitions.h
@@ -179,44 +179,56 @@ DEFINE_BOOL(strong_mode, false, "experimental strong language mode")
DEFINE_IMPLICATION(use_strong, strong_mode)
DEFINE_BOOL(strong_this, true, "don't allow 'this' to escape from constructors")
-DEFINE_BOOL(es_staging, false, "enable all completed harmony features")
+DEFINE_BOOL(es_staging, false,
+ "enable test-worthy harmony features (for internal use only)")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
-DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony fetaures")
-DEFINE_IMPLICATION(harmony, es_staging)
+DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-DEFINE_BOOL(legacy_const, true, "legacy semantics for const in sloppy mode")
+DEFINE_BOOL(legacy_const, false, "legacy semantics for const in sloppy mode")
+// ES2015 const semantics are shipped
+DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, legacy_const, true)
+
+DEFINE_BOOL(promise_extra, true, "additional V8 Promise functions")
+// Removing extra Promise functions is staged
+DEFINE_NEG_IMPLICATION(harmony, promise_extra)
+
+// Activate on ClusterFuzz.
+DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
+DEFINE_IMPLICATION(es_staging, move_object_start)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_modules, "harmony modules") \
- V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_proxies, "harmony proxies") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_tolength, "harmony ToLength") \
- V(harmony_reflect, "harmony Reflect API") \
- V(harmony_sloppy_function, "harmony sloppy function block scoping") \
- V(harmony_destructuring, "harmony destructuring") \
- V(harmony_default_parameters, "harmony default parameters") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_simd, "harmony simd")
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_modules, "harmony modules") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_function_name, "harmony Function name inference") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_simd, "harmony simd") \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_regexp_subclass, "harmony regexp subclassing") \
+ V(harmony_species, "harmony Symbol.species")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_tostring, "harmony toString") \
- V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_sloppy_let, "harmony let in sloppy mode")
+#define HARMONY_STAGED(V) \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_arrow_functions, "harmony arrow functions") \
- V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_new_target, "harmony new.target") \
- V(harmony_object_observe, "harmony Object.observe") \
- V(harmony_rest_parameters, "harmony rest parameters") \
- V(harmony_spread_calls, "harmony spread-calls") \
- V(harmony_spread_arrays, "harmony spread in array literals")
+#define HARMONY_SHIPPING(V) \
+ V(harmony_default_parameters, "harmony default parameters") \
+ V(harmony_destructuring_assignment, "harmony destructuring assignment") \
+ V(harmony_destructuring_bind, "harmony destructuring bind") \
+ V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
+ V(harmony_object_observe, "harmony Object.observe") \
+ V(harmony_tolength, "harmony ToLength") \
+ V(harmony_tostring, "harmony toString") \
+ V(harmony_completion, "harmony completion value semantics") \
+ V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_sloppy_let, "harmony let in sloppy mode") \
+ V(harmony_sloppy_function, "harmony sloppy function block scoping") \
+ V(harmony_proxies, "harmony proxies") \
+ V(harmony_reflect, "harmony Reflect API")
+
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -231,7 +243,7 @@ HARMONY_INPROGRESS(FLAG_INPROGRESS_FEATURES)
#define FLAG_STAGED_FEATURES(id, description) \
DEFINE_BOOL(id, false, "enable " #description) \
- DEFINE_IMPLICATION(es_staging, id)
+ DEFINE_IMPLICATION(harmony, id)
HARMONY_STAGED(FLAG_STAGED_FEATURES)
#undef FLAG_STAGED_FEATURES
@@ -248,7 +260,7 @@ DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
// Destructuring shares too much parsing architecture with default parameters
// to be enabled on its own.
-DEFINE_IMPLICATION(harmony_destructuring, harmony_default_parameters)
+DEFINE_IMPLICATION(harmony_destructuring_bind, harmony_default_parameters)
// Flags for experimental implementation features.
DEFINE_BOOL(compiled_keyed_generic_loads, false,
@@ -284,8 +296,12 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
-DEFINE_IMPLICATION(ignition, vector_stores)
-DEFINE_STRING(ignition_filter, "~~", "filter for ignition interpreter")
+DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
+DEFINE_BOOL(ignition_fake_try_catch, false,
+ "enable fake try-catch-finally blocks in ignition for testing")
+DEFINE_BOOL(ignition_fallback_on_eval_and_catch, false,
+ "fallback to full-codegen for functions which contain eval, catch"
+ "and es6 blocks")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
DEFINE_BOOL(trace_ignition_codegen, false,
@@ -371,8 +387,7 @@ DEFINE_INT(stress_runs, 0, "number of stress runs")
DEFINE_BOOL(lookup_sample_by_shared, true,
"when picking a function to optimize, watch for shared function "
"info, not JSFunction itself")
-DEFINE_BOOL(cache_optimized_code, true, "cache optimized code for closures")
-DEFINE_BOOL(flush_optimized_code_cache, true,
+DEFINE_BOOL(flush_optimized_code_cache, false,
"flushes the cache of optimized code for closures on every GC")
DEFINE_BOOL(inline_construct, true, "inline constructor calls")
DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
@@ -392,7 +407,7 @@ DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_BOOL(block_concurrent_recompilation, false,
"block queued jobs until released")
-DEFINE_BOOL(concurrent_osr, true, "concurrent on-stack replacement")
+DEFINE_BOOL(concurrent_osr, false, "concurrent on-stack replacement")
DEFINE_IMPLICATION(concurrent_osr, concurrent_recompilation)
DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
@@ -401,13 +416,15 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
// Flags for TurboFan.
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
+DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
+DEFINE_IMPLICATION(turbo, turbo_inlining)
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
+DEFINE_BOOL(turbo_sp_frame_access, false,
+ "use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
-
-DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
@@ -426,13 +443,13 @@ DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
-DEFINE_BOOL(turbo_type_feedback, false, "use type feedback in TurboFan")
-DEFINE_BOOL(turbo_allocate, false, "enable inline allocations in TurboFan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
+DEFINE_BOOL(native_context_specialization, true,
+ "enable native context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
@@ -449,14 +466,18 @@ DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
+DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
+DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
+DEFINE_BOOL(turbo_instruction_scheduling, false,
+ "enable instruction scheduling in TurboFan")
-#if defined(V8_WASM)
// Flags for native WebAssembly.
+DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
+DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
-#endif
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -515,6 +536,12 @@ DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS/PPC only)")
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
+DEFINE_IMPLICATION(enable_armv8, enable_vfp3)
+DEFINE_IMPLICATION(enable_armv8, enable_neon)
+DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
+DEFINE_IMPLICATION(enable_armv8, enable_sudiv)
+DEFINE_IMPLICATION(enable_armv8, enable_mls)
+
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
@@ -554,7 +581,6 @@ DEFINE_BOOL(trace_stub_failures, false,
"trace deoptimization of generated code stubs")
DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
-DEFINE_BOOL(serialize_inner, true, "enable caching of inner functions")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
// compiler.cc
@@ -572,8 +598,7 @@ DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
"CPU profiler sampling interval in microseconds")
-// debug.cc
-DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
+// Array abuse tracing
DEFINE_BOOL(trace_js_array_abuse, false,
"trace out-of-bounds accesses to JS arrays")
DEFINE_BOOL(trace_external_array_abuse, false,
@@ -582,6 +607,11 @@ DEFINE_BOOL(trace_array_abuse, false,
"trace out-of-bounds accesses to all arrays")
DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse)
DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
+
+// debugger
+DEFINE_BOOL(debug_eval_readonly_locals, true,
+ "do not update locals after debug-evaluate")
+DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_BOOL(hard_abort, true, "abort by crashing")
@@ -652,15 +682,13 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(overapproximate_weak_closure, true,
- "overapproximate weak closer to reduce atomic pause time")
-DEFINE_INT(min_progress_during_object_groups_marking, 128,
- "keep overapproximating the weak closure as long as we discover at "
+DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
+ "keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
-DEFINE_INT(max_object_groups_marking_rounds, 3,
- "at most try this many times to over approximate the weak closure")
+DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
+ "at most try this many times to finalize incremental marking")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
-DEFINE_BOOL(parallel_compaction, false, "use parallel compaction")
+DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
@@ -676,7 +704,12 @@ DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
#ifdef VERIFY_HEAP
DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
-DEFINE_BOOL(move_object_start, false, "enable moving of object starts")
+DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
+DEFINE_BOOL(memory_reducer, true, "use memory reducer")
+DEFINE_BOOL(scavenge_reclaim_unmodified_objects, false,
+ "remove unmodified and unreferenced objects")
+DEFINE_INT(heap_growing_percent, 0,
+ "specifies heap growing factor as (1 + heap_growing_percent/100)")
// counters.cc
DEFINE_INT(histogram_interval, 600000,
@@ -694,8 +727,6 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL(vector_stores, false, "use vectors for store ics")
-DEFINE_BOOL(global_var_shortcuts, true, "use ic-less global loads and stores")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -813,6 +844,7 @@ DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
+DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
// mark-compact.cc
DEFINE_BOOL(force_marking_deque_overflows, false,
@@ -930,6 +962,7 @@ DEFINE_BOOL(regexp_possessive_quantifier, false,
DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
DEFINE_BOOL(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
+DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
//
// Logging and profiling flags
@@ -1060,7 +1093,7 @@ DEFINE_IMPLICATION(print_all_code, trace_codegen)
DEFINE_BOOL(verify_predictable, false,
"this mode is used for checking that V8 behaves predictably")
-DEFINE_INT(dump_allocations_digest_at_alloc, 0,
+DEFINE_INT(dump_allocations_digest_at_alloc, -1,
"dump allocations digest each n-th allocation")
@@ -1078,6 +1111,8 @@ DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
+DEFINE_BOOL(global_var_shortcuts, false, "use ic-less global loads and stores")
+
// Cleanup...
#undef FLAG_FULL
diff --git a/chromium/v8/src/flags.h b/chromium/v8/src/flags.h
index 545c172d367..7bf515100c6 100644
--- a/chromium/v8/src/flags.h
+++ b/chromium/v8/src/flags.h
@@ -63,6 +63,7 @@ class FlagList {
static uint32_t Hash();
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FLAGS_H_
diff --git a/chromium/v8/src/frames-inl.h b/chromium/v8/src/frames-inl.h
index cccd4d191a9..4013601dace 100644
--- a/chromium/v8/src/frames-inl.h
+++ b/chromium/v8/src/frames-inl.h
@@ -234,6 +234,10 @@ inline OptimizedFrame::OptimizedFrame(StackFrameIteratorBase* iterator)
}
+inline InterpretedFrame::InterpretedFrame(StackFrameIteratorBase* iterator)
+ : JavaScriptFrame(iterator) {}
+
+
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
}
@@ -286,6 +290,7 @@ inline StackFrame* SafeStackFrameIterator::frame() const {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FRAMES_INL_H_
diff --git a/chromium/v8/src/frames.cc b/chromium/v8/src/frames.cc
index 7e55833b458..d60ab29c4e6 100644
--- a/chromium/v8/src/frames.cc
+++ b/chromium/v8/src/frames.cc
@@ -6,13 +6,14 @@
#include <sstream>
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
-#include "src/scopeinfo.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
@@ -333,11 +334,8 @@ void SafeStackFrameIterator::Advance() {
// ExternalCallbackScope, just skip them as we cannot collect any useful
// information about them.
if (external_callback_scope_->scope_address() < frame_->fp()) {
- Address* callback_address =
- external_callback_scope_->callback_address();
- if (*callback_address != NULL) {
- frame_->state_.pc_address = callback_address;
- }
+ frame_->state_.pc_address =
+ external_callback_scope_->callback_entrypoint_address();
external_callback_scope_ = external_callback_scope_->previous();
DCHECK(external_callback_scope_ == NULL ||
external_callback_scope_->scope_address() > frame_->fp());
@@ -438,6 +436,21 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return JAVA_SCRIPT;
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
+ case Code::WASM_FUNCTION:
+ return STUB;
+ case Code::BUILTIN:
+ if (!marker->IsSmi()) {
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ // An adapter frame has a special SMI constant for the context and
+ // is not distinguished through the marker.
+ return ARGUMENTS_ADAPTOR;
+ } else {
+ // The interpreter entry trampoline has a non-SMI marker.
+ DCHECK(code_obj->is_interpreter_entry_trampoline());
+ return INTERPRETED;
+ }
+ }
+ break; // Marker encodes the frame type.
case Code::HANDLER:
if (!marker->IsSmi()) {
// Only hydrogen code stub handlers can have a non-SMI marker.
@@ -450,12 +463,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
}
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- // An adapter frame has a special SMI constant for the context and
- // is not distinguished through the marker.
- return ARGUMENTS_ADAPTOR;
- }
-
// Didn't find a code object, or the code kind wasn't specific enough.
// The marker should encode the frame type.
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
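The hunk above distinguishes frames with a two-level scheme: a Smi marker encodes the frame type directly, while a non-Smi marker on BUILTIN code means either an arguments adaptor (detected via the frame pointer) or the interpreter entry trampoline. A standalone sketch of the same dispatch, with simplified stand-ins for Smi tagging and the V8 types:

// Simplified stand-in for V8's marker-based frame typing. The Smi
// tagging model, frame types and is_adaptor_frame are illustrative.
#include <cstdint>
#include <cstdio>

enum FrameType { JAVA_SCRIPT, OPTIMIZED, INTERPRETED, ARGUMENTS_ADAPTOR, STUB };

// Model 32-bit Smis as values with a clear low bit.
bool IsSmi(uintptr_t marker) { return (marker & 1) == 0; }
int SmiValue(uintptr_t marker) { return static_cast<int>(marker >> 1); }

FrameType ComputeType(uintptr_t marker, bool is_builtin_code,
                      bool is_adaptor_frame) {
  if (is_builtin_code && !IsSmi(marker)) {
    // Adapter frames carry a special context constant instead of a marker;
    // otherwise a non-Smi marker means the interpreter entry trampoline.
    return is_adaptor_frame ? ARGUMENTS_ADAPTOR : INTERPRETED;
  }
  // Fall back: the Smi marker encodes the frame type directly.
  return static_cast<FrameType>(SmiValue(marker));
}

int main() {
  uintptr_t smi_marker = OPTIMIZED << 1;  // tagged Smi
  std::printf("%d\n", ComputeType(smi_marker, false, false));  // 1 = OPTIMIZED
  std::printf("%d\n", ComputeType(0x1003, true, false));       // 2 = INTERPRETED
}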
@@ -658,7 +665,9 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
if (safepoint_entry.has_doubles()) {
// Number of doubles not known at snapshot time.
DCHECK(!isolate()->serializer_enabled());
- parameters_base += DoubleRegister::NumAllocatableRegisters() *
+ parameters_base +=
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+ ->num_allocatable_double_registers() *
kDoubleSize / kPointerSize;
}
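The replacement sizes the saved-doubles area in pointer-sized stack slots: num_allocatable_double_registers * kDoubleSize / kPointerSize. A worked example under assumed 32-bit constants (kDoubleSize = 8, kPointerSize = 4); the register count is likewise assumed, not read out of V8:

// Worked example of the saved-doubles slot count; all constants here
// are assumptions for a 32-bit target, purely illustrative.
#include <cstdio>

int main() {
  const int kDoubleSize = 8;   // bytes per double register
  const int kPointerSize = 4;  // bytes per stack slot (32-bit)
  int num_allocatable_double_registers = 16;
  int slots = num_allocatable_double_registers * kDoubleSize / kPointerSize;
  std::printf("%d slots\n", slots);  // 32
}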
@@ -742,26 +751,13 @@ bool JavaScriptFrame::IsConstructor() const {
}
-bool JavaScriptFrame::HasInlinedFrames() {
+bool JavaScriptFrame::HasInlinedFrames() const {
List<JSFunction*> functions(1);
GetFunctions(&functions);
return functions.length() > 1;
}
-Object* JavaScriptFrame::GetOriginalConstructor() const {
- Address fp = caller_fp();
- if (has_adapted_arguments()) {
- // Skip the arguments adaptor frame and look at the real caller.
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
- DCHECK(IsConstructFrame(fp));
- STATIC_ASSERT(ConstructFrameConstants::kOriginalConstructorOffset ==
- StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize);
- return GetExpression(fp, 3);
-}
-
-
int JavaScriptFrame::GetArgumentsLength() const {
// If there is an arguments adaptor frame get the arguments length from it.
if (has_adapted_arguments()) {
@@ -792,7 +788,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
}
-void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
+void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) const {
DCHECK(functions->length() == 0);
functions->Add(function());
}
@@ -941,8 +937,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
+ Translation::Opcode frame_opcode =
+ static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, frame_opcode);
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
@@ -950,8 +947,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// in the deoptimization translation are ordered bottom-to-top.
bool is_constructor = IsConstructor();
while (jsframe_count != 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::JS_FRAME) {
+ frame_opcode = static_cast<Translation::Opcode>(it.Next());
+ if (frame_opcode == Translation::JS_FRAME ||
+ frame_opcode == Translation::INTERPRETED_FRAME) {
jsframe_count--;
BailoutId const ast_id = BailoutId(it.Next());
SharedFunctionInfo* const shared_info =
@@ -960,7 +958,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// The translation commands are ordered and the function is always
// at the first position, and the receiver is next.
- opcode = static_cast<Translation::Opcode>(it.Next());
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
// Get the correct function in the optimized frame.
JSFunction* function;
@@ -997,25 +995,33 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
}
Code* const code = shared_info->code();
- DeoptimizationOutputData* const output_data =
- DeoptimizationOutputData::cast(code->deoptimization_data());
- unsigned const entry =
- Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
- unsigned const pc_offset =
- FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
- DCHECK_NE(0U, pc_offset);
+ unsigned pc_offset;
+ if (frame_opcode == Translation::JS_FRAME) {
+ DeoptimizationOutputData* const output_data =
+ DeoptimizationOutputData::cast(code->deoptimization_data());
+ unsigned const entry =
+ Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
+ pc_offset =
+ FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
+ DCHECK_NE(0U, pc_offset);
+ } else {
+ // TODO(rmcilroy): Modify FrameSummary to enable us to summarize
+ // based on the BytecodeArray and bytecode offset.
+ DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
+ pc_offset = 0;
+ }
FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
frames->Add(summary);
is_constructor = false;
- } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
+ } else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
// The next encountered JS_FRAME will be marked as a constructor call.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ it.Skip(Translation::NumberOfOperandsFor(frame_opcode));
DCHECK(!is_constructor);
is_constructor = true;
} else {
// Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ it.Skip(Translation::NumberOfOperandsFor(frame_opcode));
}
}
DCHECK(!is_constructor);
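Summarize() walks the deoptimization translation stream from the top: it reads BEGIN and the frame count, then per-frame opcodes, decrementing jsframe_count for JS_FRAME and (with this change) INTERPRETED_FRAME entries while skipping the operands of everything else. A toy stream walker with the same shape; the opcode set and operand counts are invented for the sketch:

// Toy walk over a flat "translation" stream: [opcode, operands...]*.
// Opcodes and operand counts are invented for illustration.
#include <cstdio>
#include <vector>

enum Opcode { JS_FRAME, INTERPRETED_FRAME, CONSTRUCT_STUB_FRAME, REGISTER };

int OperandCount(Opcode op) {
  switch (op) {
    case JS_FRAME: return 2;           // e.g. ast id, literal id
    case INTERPRETED_FRAME: return 2;  // e.g. bytecode offset, literal id
    case CONSTRUCT_STUB_FRAME: return 1;
    case REGISTER: return 1;
  }
  return 0;
}

int CountJsFrames(const std::vector<int>& stream) {
  int js_frames = 0;
  for (size_t i = 0; i < stream.size();) {
    Opcode op = static_cast<Opcode>(stream[i++]);
    if (op == JS_FRAME || op == INTERPRETED_FRAME) js_frames++;
    i += OperandCount(op);  // skip operands to reach the next opcode
  }
  return js_frames;
}

int main() {
  std::vector<int> stream = {JS_FRAME, 1, 2, REGISTER, 5,
                             INTERPRETED_FRAME, 7, 8};
  std::printf("%d\n", CountJsFrames(stream));  // 2
}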
@@ -1034,7 +1040,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
- int* deopt_index) {
+ int* deopt_index) const {
DCHECK(is_optimized());
JSFunction* opt_function = function();
@@ -1058,7 +1064,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
}
-void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
+void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
DCHECK(functions->length() == 0);
DCHECK(is_optimized());
@@ -1087,7 +1093,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
opcode = static_cast<Translation::Opcode>(it.Next());
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
- if (opcode == Translation::JS_FRAME) {
+ if (opcode == Translation::JS_FRAME ||
+ opcode == Translation::INTERPRETED_FRAME) {
jsframe_count--;
// The translation commands are ordered and the function is always at the
@@ -1502,9 +1509,8 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
DCHECK(base::bits::IsPowerOfTwo32(kInnerPointerToCodeCacheSize));
- uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
- v8::internal::kZeroHashSeed);
+ uint32_t hash = ComputeIntegerHash(ObjectAddressForHashing(inner_pointer),
+ v8::internal::kZeroHashSeed);
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
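GetCacheEntry hashes the inner pointer and masks it with the cache size, which only works because the size is a power of two (hence the IsPowerOfTwo32 check): hash & (size - 1) is then equivalent to hash % size but cheaper. A standalone sketch of that indexing scheme; the hash function below is a placeholder, not V8's ComputeIntegerHash:

// Power-of-two direct-mapped cache indexing: index = hash & (size - 1).
// HashAddress is a placeholder, not V8's ComputeIntegerHash.
#include <cstdint>
#include <cstdio>

uint32_t HashAddress(uintptr_t address) {
  uint32_t h = static_cast<uint32_t>(address);
  h ^= h >> 16;  // cheap mixing; illustrative only
  return h * 0x45d9f3bu;
}

int main() {
  const uint32_t kCacheSize = 1024;  // must be a power of two
  static_assert((kCacheSize & (kCacheSize - 1)) == 0,
                "mask trick requires a power-of-two size");
  uintptr_t inner_pointer = 0xdeadbeef;
  uint32_t index = HashAddress(inner_pointer) & (kCacheSize - 1);
  std::printf("index=%u\n", index);
}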
diff --git a/chromium/v8/src/frames.h b/chromium/v8/src/frames.h
index d6bfd7aab8d..674d7daeca2 100644
--- a/chromium/v8/src/frames.h
+++ b/chromium/v8/src/frames.h
@@ -99,16 +99,17 @@ class StackHandler BASE_EMBEDDED {
#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(STUB, StubFrame) \
+ V(ENTRY, EntryFrame) \
+ V(ENTRY_CONSTRUCT, EntryConstructFrame) \
+ V(EXIT, ExitFrame) \
+ V(JAVA_SCRIPT, JavaScriptFrame) \
+ V(OPTIMIZED, OptimizedFrame) \
+ V(INTERPRETED, InterpretedFrame) \
+ V(STUB, StubFrame) \
V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+ V(INTERNAL, InternalFrame) \
+ V(CONSTRUCT, ConstructFrame) \
+ V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
class StandardFrameConstants : public AllStatic {
@@ -160,8 +161,6 @@ class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset =
- StandardFrameConstants::kExpressionsOffset - 4 * kPointerSize;
- static const int kOriginalConstructorOffset =
StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize;
static const int kLengthOffset =
StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
@@ -171,16 +170,30 @@ class ConstructFrameConstants : public AllStatic {
StandardFrameConstants::kExpressionsOffset - 0 * kPointerSize;
static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 5 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InterpreterFrameConstants : public AllStatic {
public:
+ // Fixed frame includes new.target and bytecode offset.
+ static const int kFixedFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
+ static const int kFixedFrameSizeFromFp =
+ StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
+
+ // FP-relative.
+ static const int kRegisterFilePointerFromFp =
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
+
// Register file pointer relative.
static const int kLastParamFromRegisterPointer =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
- static const int kFunctionFromRegisterPointer = kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+
+ static const int kBytecodeOffsetFromRegisterPointer = 1 * kPointerSize;
+ static const int kNewTargetFromRegisterPointer = 2 * kPointerSize;
+ static const int kFunctionFromRegisterPointer = 3 * kPointerSize;
+ static const int kContextFromRegisterPointer = 4 * kPointerSize;
};
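The new InterpreterFrameConstants pin down the interpreter frame layout: two extra fixed slots (new.target and the bytecode offset) beyond the standard frame, plus register-file-relative offsets for the bookkeeping values. A sketch that evaluates those expressions under assumed base values (kPointerSize = 4 and a four-slot standard fixed frame); the assumed constants are not read out of V8:

// Evaluate the interpreter frame constants under assumed base values;
// kPointerSize and the standard-frame sizes are assumptions.
#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kStandardFixedFrameSize = 4 * kPointerSize;        // assumed
  const int kStandardFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed

  int fixed_frame_size = kStandardFixedFrameSize + 2 * kPointerSize;
  int fixed_from_fp = kStandardFixedFrameSizeFromFp + 2 * kPointerSize;
  int register_file_from_fp =
      -kStandardFixedFrameSizeFromFp - 3 * kPointerSize;

  std::printf("fixed=%d fromFp=%d regfile=%d\n", fixed_frame_size,
              fixed_from_fp, register_file_from_fp);  // 24 16 -20
}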
@@ -246,7 +259,8 @@ class StackFrame BASE_EMBEDDED {
bool is_java_script() const {
Type type = this->type();
- return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
+ return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
+ (type == INTERPRETED);
}
// Accessors.
@@ -373,18 +387,18 @@ class StackFrame BASE_EMBEDDED {
// Entry frames are used to enter JavaScript execution from C.
class EntryFrame: public StackFrame {
public:
- virtual Type type() const { return ENTRY; }
+ Type type() const override { return ENTRY; }
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
static EntryFrame* cast(StackFrame* frame) {
DCHECK(frame->is_entry());
return static_cast<EntryFrame*>(frame);
}
- virtual void SetCallerFp(Address caller_fp);
+ void SetCallerFp(Address caller_fp) override;
protected:
inline explicit EntryFrame(StackFrameIteratorBase* iterator);
@@ -392,11 +406,11 @@ class EntryFrame: public StackFrame {
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
// link to the top exit frame.
- virtual Address GetCallerStackPointer() const { return 0; }
+ Address GetCallerStackPointer() const override { return 0; }
private:
- virtual void ComputeCallerState(State* state) const;
- virtual Type GetCallerState(State* state) const;
+ void ComputeCallerState(State* state) const override;
+ Type GetCallerState(State* state) const override;
friend class StackFrameIteratorBase;
};
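Most of the churn in this header replaces "virtual ...;" with "... override;" on every overriding member. Beyond style, override turns signature drift into a compile error instead of a silently added new virtual. A minimal example of the bug class it catches; the class names are illustrative, not V8's:

// Why the virtual -> override churn pays off: a stale signature fails
// to compile instead of silently hiding the base method.
struct Frame {
  virtual int type() const { return 0; }
  virtual ~Frame() = default;
};

struct EntryFrame : Frame {
  int type() const override { return 1; }  // OK: matches the base signature
  // int type() override { return 1; }     // would NOT compile: missing const
};

int main() {
  EntryFrame f;
  return f.type() == 1 ? 0 : 1;
}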
@@ -404,9 +418,9 @@ class EntryFrame: public StackFrame {
class EntryConstructFrame: public EntryFrame {
public:
- virtual Type type() const { return ENTRY_CONSTRUCT; }
+ Type type() const override { return ENTRY_CONSTRUCT; }
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
static EntryConstructFrame* cast(StackFrame* frame) {
DCHECK(frame->is_entry_construct());
@@ -424,16 +438,16 @@ class EntryConstructFrame: public EntryFrame {
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
- virtual Type type() const { return EXIT; }
+ Type type() const override { return EXIT; }
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
Object*& code_slot() const;
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
- virtual void SetCallerFp(Address caller_fp);
+ void SetCallerFp(Address caller_fp) override;
static ExitFrame* cast(StackFrame* frame) {
DCHECK(frame->is_exit());
@@ -450,10 +464,10 @@ class ExitFrame: public StackFrame {
protected:
inline explicit ExitFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
- virtual void ComputeCallerState(State* state) const;
+ void ComputeCallerState(State* state) const override;
friend class StackFrameIteratorBase;
};
@@ -462,7 +476,7 @@ class ExitFrame: public StackFrame {
class StandardFrame: public StackFrame {
public:
// Testers.
- virtual bool is_standard() const { return true; }
+ bool is_standard() const override { return true; }
// Accessors.
inline Object* context() const;
@@ -473,7 +487,7 @@ class StandardFrame: public StackFrame {
int ComputeExpressionsCount() const;
static Object* GetExpression(Address fp, int index);
- virtual void SetCallerFp(Address caller_fp);
+ void SetCallerFp(Address caller_fp) override;
static StandardFrame* cast(StackFrame* frame) {
DCHECK(frame->is_standard());
@@ -483,7 +497,7 @@ class StandardFrame: public StackFrame {
protected:
inline explicit StandardFrame(StackFrameIteratorBase* iterator);
- virtual void ComputeCallerState(State* state) const;
+ void ComputeCallerState(State* state) const override;
// Accessors.
inline Address caller_fp() const;
@@ -547,7 +561,7 @@ class FrameSummary BASE_EMBEDDED {
class JavaScriptFrame: public StandardFrame {
public:
- virtual Type type() const { return JAVA_SCRIPT; }
+ Type type() const override { return JAVA_SCRIPT; }
// Accessors.
inline JSFunction* function() const;
@@ -578,11 +592,7 @@ class JavaScriptFrame: public StandardFrame {
// Determines whether this frame includes inlined activations. To get details
// about the inlined frames use {GetFunctions} and {Summarize}.
- bool HasInlinedFrames();
-
- // Returns the original constructor function that was used in the constructor
- // call to this frame. Note that this is only valid on constructor frames.
- Object* GetOriginalConstructor() const;
+ bool HasInlinedFrames() const;
// Check if this frame has "adapted" arguments in the sense that the
// actual passed arguments are available in an arguments adaptor
@@ -591,18 +601,17 @@ class JavaScriptFrame: public StandardFrame {
int GetArgumentsLength() const;
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
// Return a list with JSFunctions of this frame.
- virtual void GetFunctions(List<JSFunction*>* functions);
+ virtual void GetFunctions(List<JSFunction*>* functions) const;
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);
@@ -632,7 +641,7 @@ class JavaScriptFrame: public StandardFrame {
protected:
inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
virtual int GetNumberOfIncomingArguments() const;
@@ -649,18 +658,18 @@ class JavaScriptFrame: public StandardFrame {
class StubFrame : public StandardFrame {
public:
- virtual Type type() const { return STUB; }
+ Type type() const override { return STUB; }
// GC support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
protected:
inline explicit StubFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
virtual int GetNumberOfIncomingArguments() const;
@@ -670,24 +679,24 @@ class StubFrame : public StandardFrame {
class OptimizedFrame : public JavaScriptFrame {
public:
- virtual Type type() const { return OPTIMIZED; }
+ Type type() const override { return OPTIMIZED; }
// GC support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
- virtual void GetFunctions(List<JSFunction*>* functions);
+ void GetFunctions(List<JSFunction*>* functions) const override;
- virtual void Summarize(List<FrameSummary>* frames);
+ void Summarize(List<FrameSummary>* frames) override;
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns the expected number of stack slots at the handler site.
- virtual int LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction);
+ int LookupExceptionHandlerInTable(
+ int* stack_slots, HandlerTable::CatchPrediction* prediction) override;
- DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
+ DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
static int StackSlotOffsetRelativeToFp(int slot_index);
@@ -701,15 +710,26 @@ class OptimizedFrame : public JavaScriptFrame {
};
+class InterpretedFrame : public JavaScriptFrame {
+ Type type() const override { return INTERPRETED; }
+
+ protected:
+ inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+
// Arguments adaptor frames are automatically inserted below
// JavaScript frames when the actual number of parameters does not
// match the formal number of parameters.
class ArgumentsAdaptorFrame: public JavaScriptFrame {
public:
- virtual Type type() const { return ARGUMENTS_ADAPTOR; }
+ Type type() const override { return ARGUMENTS_ADAPTOR; }
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
DCHECK(frame->is_arguments_adaptor());
@@ -717,16 +737,15 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
}
// Printing support.
- virtual void Print(StringStream* accumulator,
- PrintMode mode,
- int index) const;
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
- virtual int GetNumberOfIncomingArguments() const;
+ int GetNumberOfIncomingArguments() const override;
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
friend class StackFrameIteratorBase;
@@ -735,13 +754,13 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
class InternalFrame: public StandardFrame {
public:
- virtual Type type() const { return INTERNAL; }
+ Type type() const override { return INTERNAL; }
// Garbage collection support.
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Determine the code for the frame.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
static InternalFrame* cast(StackFrame* frame) {
DCHECK(frame->is_internal());
@@ -751,7 +770,7 @@ class InternalFrame: public StandardFrame {
protected:
inline explicit InternalFrame(StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
friend class StackFrameIteratorBase;
@@ -768,13 +787,13 @@ class StubFailureTrampolineFrame: public StandardFrame {
static const int kCallerStackParameterCountFrameOffset =
StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
- virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; }
+ Type type() const override { return STUB_FAILURE_TRAMPOLINE; }
// Get the code associated with this frame.
// This method could be called during marking phase of GC.
- virtual Code* unchecked_code() const;
+ Code* unchecked_code() const override;
- virtual void Iterate(ObjectVisitor* v) const;
+ void Iterate(ObjectVisitor* v) const override;
// Architecture-specific register description.
static Register fp_register();
@@ -785,7 +804,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
inline explicit StubFailureTrampolineFrame(
StackFrameIteratorBase* iterator);
- virtual Address GetCallerStackPointer() const;
+ Address GetCallerStackPointer() const override;
private:
friend class StackFrameIteratorBase;
@@ -796,7 +815,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
// function invocations through 'new'.
class ConstructFrame: public InternalFrame {
public:
- virtual Type type() const { return CONSTRUCT; }
+ Type type() const override { return CONSTRUCT; }
static ConstructFrame* cast(StackFrame* frame) {
DCHECK(frame->is_construct());
@@ -949,6 +968,7 @@ class StackFrameLocator BASE_EMBEDDED {
// zone memory.
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FRAMES_H_
diff --git a/chromium/v8/src/full-codegen/arm/full-codegen-arm.cc b/chromium/v8/src/full-codegen/arm/full-codegen-arm.cc
index d3d53334d42..25be8a66368 100644
--- a/chromium/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/chromium/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -4,15 +4,14 @@
#if V8_TARGET_ARCH_ARM
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/arm/code-stubs-arm.h"
#include "src/arm/macro-assembler-arm.h"
@@ -93,6 +92,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o r1: the JS function object being called (i.e., ourselves)
+// o r3: the new target value
// o cp: our context
// o pp: our caller's constant pool pointer (if enabled)
// o fp: our caller's frame pointer
@@ -117,22 +117,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(ne, &ok);
-
- __ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-
- __ str(r2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
+ __ AssertNotSmi(r2);
+ __ CompareObjectType(r2, r2, no_reg, FIRST_JS_RECEIVER_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
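The old prologue patched an undefined receiver to the global proxy at run time; the new one only asserts, in debug code, that the receiver already is a JSReceiver (non-Smi, with instance type at or above FIRST_JS_RECEIVER_TYPE), since callers now guarantee that. A host-side sketch of the same debug-only invariant; the type constants and the Smi flag are assumptions of the model:

// Debug-only receiver invariant, modeled on the AssertNotSmi +
// CompareObjectType(ge, FIRST_JS_RECEIVER_TYPE) pair. Constants assumed.
#include <cassert>

enum InstanceType { HEAP_NUMBER = 10, FIRST_JS_RECEIVER_TYPE = 100,
                    JS_OBJECT = 120 };

void AssertReceiverIsJSReceiver(bool is_smi, InstanceType type) {
#ifndef NDEBUG
  assert(!is_smi);                         // AssertNotSmi(r2)
  assert(type >= FIRST_JS_RECEIVER_TYPE);  // Assert(ge, ...)
#else
  (void)is_smi; (void)type;
#endif
}

int main() {
  AssertReceiverIsJSReceiver(false, JS_OBJECT);  // passes
}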
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -141,8 +131,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -155,7 +144,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
@@ -192,15 +181,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(r3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(r3); // Restore new target.
+ }
}
function_in_register_r1 = false;
// Context is returned in r0. It replaces the context passed to us.
@@ -218,13 +218,13 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), r0, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, r0, &done);
@@ -235,10 +235,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_r1| is correct.
+ // Registers holding this function and new target are both trashed in case we
+ // bail out here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register_r1| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -252,29 +252,34 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, r1, r0, r2);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, r3, r0, r2);
+ }
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- Label non_construct_frame, done;
- function_in_register_r1 = false;
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- __ b(ne, &non_construct_frame);
- __ ldr(r0,
- MemOperand(r2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ b(&done);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- __ bind(&non_construct_frame);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ __ mov(RestParamAccessDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ add(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Operand(Smi::FromInt(rest_index)));
+ function_in_register_r1 = false;
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, r0, r2, r3);
+ SetVar(rest_param, r0, r1, r2);
}
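The rest-parameter stub is handed three values: the formal parameter count as a Smi, a pointer just past the last formal on the caller's side of the frame (fp + kCallerSPOffset + num_parameters * kPointerSize), and the rest parameter's index. A sketch of that pointer arithmetic with assumed constants:

// Pointer arithmetic behind the stub's parameter_pointer argument:
// fp + kCallerSPOffset + num_parameters * kPointerSize. Constants assumed.
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kCallerSPOffset = 2 * kPointerSize;  // assumed frame offset
  uintptr_t fp = 0x1000;                         // fake frame pointer
  int num_parameters = 3;

  uintptr_t parameter_pointer =
      fp + kCallerSPOffset + num_parameters * kPointerSize;
  std::printf("0x%lx\n", static_cast<unsigned long>(parameter_pointer));
  // 0x1000 + 8 + 12 = 0x1014
}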
Variable* arguments = scope()->arguments();
@@ -308,7 +313,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -445,7 +450,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
__ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -474,11 +479,10 @@ void FullCodeGenerator::EmitReturnSequence() {
SetReturnPosition(literal());
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -684,8 +688,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ tst(result_register(), result_register());
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
@@ -723,7 +727,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -829,7 +833,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ str(r0, ContextOperand(cp, variable->index()));
+ __ str(r0, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -850,10 +854,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
}
__ Push(r2, r0);
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -888,7 +890,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), ContextOperand(cp, variable->index()));
+ __ str(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -909,7 +911,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(r2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -921,7 +924,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -929,7 +932,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1055,7 +1058,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &done_convert);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1066,9 +1069,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- __ b(le, &call_runtime);
+ __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
+ __ b(eq, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1085,7 +1087,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1121,28 +1123,19 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register r0. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(r1);
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
int vector_index = SmiFromSlot(slot)->value();
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
-
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
- __ b(gt, &non_proxy);
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ mov(r1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1168,17 +1161,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ cmp(r2, Operand(Smi::FromInt(0)));
- __ b(eq, &update_each);
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(r3, Operand(r0));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -1195,6 +1183,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1237,35 +1227,36 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(r0);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r0);
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1281,13 +1272,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1308,12 +1298,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
@@ -1334,25 +1323,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that last extension is "the hole".
+ __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return a cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
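ContextSlotOperandCheckExtensions walks the static context chain from the current scope to the variable's scope, bailing to the slow path whenever a scope that calls sloppy eval has a non-hole extension object, since an eval could have introduced a shadowing binding. A sketch of the same walk over a toy context list; the fields and the nullptr-as-hole convention are stand-ins:

// Toy context-chain walk mirroring the extension checks above: fail to
// the slow path if an eval-calling scope grew a real extension.
// nullptr models "the hole" (no extension); fields are stand-ins.
#include <cstdio>

struct Context {
  Context* previous = nullptr;  // PREVIOUS_INDEX
  void* extension = nullptr;    // EXTENSION_INDEX; nullptr = "the hole"
  bool calls_sloppy_eval = false;
};

// Returns the context holding the slot, or nullptr for the slow path.
Context* WalkToSlot(Context* current, int depth) {
  for (int i = 0; i < depth; ++i) {
    if (current->calls_sloppy_eval && current->extension != nullptr)
      return nullptr;  // an eval may have shadowed the binding
    current = current->previous;
  }
  // The final context must not have an extension either.
  if (current->extension != nullptr) return nullptr;
  return current;
}

int main() {
  Context outer, inner;
  inner.previous = &outer;
  inner.calls_sloppy_eval = true;
  std::printf("%s\n", WalkToSlot(&inner, 1) ? "fast" : "slow");  // fast
  inner.extension = &inner;  // pretend eval created an extension
  std::printf("%s\n", WalkToSlot(&inner, 1) ? "fast" : "slow");  // slow
}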
@@ -1380,7 +1367,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1393,26 +1380,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
- } else {
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1451,7 +1423,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1478,7 +1450,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r0);
}
@@ -1488,49 +1460,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, d0, size / kPointerSize);
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r1, Operand(expr->pattern()));
+ __ mov(r0, Operand(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(r0);
}
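The replaced code materialized the RegExp literal through the runtime on first use, cached it in the literals array, then cloned it by allocating JSRegExp::kSize bytes and copying fields; the new code folds all of that into FastCloneRegExpStub. A sketch of the old materialize-once-then-clone pattern; all types and the Clone step are simplified stand-ins:

// The pre-stub pattern: materialize a literal once, cache it, and clone
// the cached boilerplate on every evaluation. All types are stand-ins.
#include <cstdio>
#include <string>
#include <vector>

struct RegExpLit { std::string pattern, flags; };

RegExpLit* MaterializeViaRuntime(const std::string& pattern,
                                 const std::string& flags) {
  return new RegExpLit{pattern, flags};  // stands in for the runtime call
}

RegExpLit Clone(const RegExpLit& boilerplate) {
  return boilerplate;  // stands in for the field-by-field copy
}

int main() {
  std::vector<RegExpLit*> literals(1, nullptr);  // per-function literals
  int literal_index = 0;
  if (literals[literal_index] == nullptr)  // "undefined" slot: first run
    literals[literal_index] = MaterializeViaRuntime("a+b", "g");
  RegExpLit result = Clone(*literals[literal_index]);
  std::printf("/%s/%s\n", result.pattern.c_str(), result.flags.c_str());
  delete literals[literal_index];
}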
@@ -1557,14 +1492,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ mov(r0, Operand(Smi::FromInt(flags)));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1603,12 +1537,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r0));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1630,7 +1560,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
__ push(r0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1641,7 +1571,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
@@ -1669,7 +1601,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1697,9 +1629,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1712,7 +1646,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1725,13 +1659,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1741,7 +1675,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ ldr(r0, MemOperand(sp));
__ push(r0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1755,8 +1689,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
-
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1771,13 +1703,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ mov(r0, Operand(Smi::FromInt(expr->ComputeFlags())));
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1801,25 +1732,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(r0);
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
- __ str(result_register(), FieldMemOperand(r1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(r1, offset, result_register(), r2,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ mov(r3, Operand(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ mov(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
+ __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1830,7 +1752,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ pop(); // literal index
__ Pop(r0);
result_saved = false;
}
@@ -1844,14 +1765,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(r0);
@@ -2133,9 +2053,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ mov(r0, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2151,8 +2073,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ cmp(r0, Operand(0));
- __ b(eq, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ b(ne, &l_try);
// result.value
__ pop(load_receiver); // result
@@ -2255,7 +2177,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
DCHECK(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2272,12 +2194,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
__ pop(r2);
__ LoadRoot(r3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
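
EmitCreateIteratorResult above follows the usual fast/slow split: attempt an inline new-space allocation, fall back to Runtime::kAllocateInNewSpace only when that fails, then store the value and the true/false done flag into the fresh JSIteratorResult. A standalone sketch of that shape, with a bump pointer for the fast path and malloc standing in for the runtime call (all types here are stand-ins, not V8's):

#include <cstdlib>
#include <new>

struct JSIteratorResult {
  const void* value;
  bool done;
};

// Fast path: bump-pointer allocation from a preallocated arena; slow path:
// malloc, playing the role of the Runtime::kAllocateInNewSpace call above.
JSIteratorResult* CreateIterResult(char*& top, const char* limit,
                                   const void* value, bool done) {
  void* raw;
  if (top + sizeof(JSIteratorResult) <= limit) {
    raw = top;                                    // inline allocation
    top += sizeof(JSIteratorResult);
  } else {
    raw = std::malloc(sizeof(JSIteratorResult));  // runtime fallback
  }
  return new (raw) JSIteratorResult{value, done};
}
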
@@ -2311,7 +2231,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2328,7 +2248,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2455,7 +2375,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(r0);
}
@@ -2470,19 +2390,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2492,7 +2412,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2508,7 +2428,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2528,7 +2448,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2576,7 +2496,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r0);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2601,34 +2521,15 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r0));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(r0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2639,12 +2540,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ b(ne, &assign);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2655,11 +2556,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ b(ne, &const_error);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2669,24 +2570,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ b(eq, &uninitialized_this);
__ mov(r0, Operand(var->name()));
__ Push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -2695,15 +2597,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2716,9 +2617,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2734,12 +2635,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2757,8 +2654,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(r0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2769,10 +2665,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(r0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2783,12 +2678,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2846,11 +2737,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2859,6 +2748,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ push(ip);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2870,9 +2760,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ ldr(ip, MemOperand(sp, 0));
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
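
The change above swaps the old FUNCTION/METHOD CallICState for a record of what the call site already knows about the receiver it pushed. A minimal sketch of that classification, assuming the three-way mode implied by the values used in this file (kAny is assumed as the "nothing known" default):

// Receiver knowledge recorded at the call site so the callee can skip
// redundant receiver conversion.
enum class ConvertReceiverMode {
  kNullOrUndefined,     // global-style call: undefined receiver was pushed
  kNotNullOrUndefined,  // property call: the receiver is a real object
  kAny                  // nothing is known statically
};

// Mirrors EmitCallWithLoadIC above: a bare variable callee gets the
// undefined receiver; a property load keeps its base object.
ConvertReceiverMode ModeFor(bool callee_is_variable_proxy) {
  return callee_is_variable_proxy ? ConvertReceiverMode::kNullOrUndefined
                                  : ConvertReceiverMode::kNotNullOrUndefined;
}
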
@@ -2904,7 +2795,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2912,7 +2803,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2936,7 +2827,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2966,7 +2857,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2974,11 +2865,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2986,8 +2877,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -3020,7 +2912,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -3040,7 +2932,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(r2));
__ mov(r2, Operand(callee->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(r0, r1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3068,91 +2960,40 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call
- // Runtime_ResolvePossiblyDirectEval to resolve the function we need
- // to call. Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call
+ // Runtime_ResolvePossiblyDirectEval to resolve the function we need
+ // to call. Then we call the resolved function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(r1);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Touch up the stack with the resolved function.
+ __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ // Record source position for debugger.
+ SetCallPosition(expr);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ mov(r0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r0);
}
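
The block deleted above held the per-call-type dispatch inside the ARM backend; after this patch only the per-kind emitters remain here, so the switch presumably moved to the architecture-independent full-codegen. A standalone outline reconstructed from the deleted branches (illustrative only, not the shared code itself):

enum CallType {
  POSSIBLY_EVAL_CALL, GLOBAL_CALL, LOOKUP_SLOT_CALL,
  PROPERTY_CALL, SUPER_CALL, OTHER_CALL
};

// One emitter per branch of the switch removed above.
const char* EmitterFor(CallType type) {
  switch (type) {
    case POSSIBLY_EVAL_CALL: return "EmitPossiblyEvalCall";
    case GLOBAL_CALL:        return "EmitCallWithLoadIC";
    case LOOKUP_SLOT_CALL:   return "PushCalleeAndWithBaseObject + EmitCall";
    case PROPERTY_CALL:      return "Emit[Keyed][Super]CallWithLoadIC";
    case SUPER_CALL:         return "EmitSuperConstructorCall";
    case OTHER_CALL:         return "EmitCall with undefined receiver";
  }
  return "";
}
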
@@ -3187,8 +3028,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(r2);
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3201,8 +3042,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ ldr(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ ldr(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3215,20 +3063,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into r4.
+ // Load new target into r3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(r4, result_register());
+ __ mov(r3, result_register());
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(r2);
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3259,7 +3102,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3273,7 +3116,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -3317,9 +3160,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r0, r1, r2, FIRST_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(hs, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3431,40 +3274,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- Register map = r1;
- Register type_reg = r2;
- __ ldr(map, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3537,27 +3347,16 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(r0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r0, r1, FIRST_JS_RECEIVER_TYPE);
// Map is now in r0.
__ b(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ cmp(r1, Operand(JS_FUNCTION_TYPE));
__ b(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
Register instance_type = r2;
@@ -3631,45 +3430,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = r0;
- Register result = r0;
- Register scratch0 = r9;
- Register scratch1 = r1;
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ ldr(scratch1, MemOperand(scratch1));
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3776,37 +3536,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(r0, &done_convert);
__ Push(r0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
- // Load the argument into r0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3821,25 +3556,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ b(ls, &done_convert);
__ bind(&convert);
__ Push(r0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(r0);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3950,19 +3672,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(r1);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3970,6 +3679,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to r1.
int const argc = args->length() - 2;
__ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3983,105 +3693,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &runtime);
-
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(r0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into r3.
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r4, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(r0, Operand::Zero());
- __ b(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r1, r1);
- __ mov(r0, r1);
-
- // Get arguments pointer in r2.
- __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r4, MemOperand(r2, kPointerSize, NegPreIndex));
- __ Push(r4);
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand::Zero());
- __ b(ne, &loop);
- }
-
- __ bind(&args_set_up);
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(r1);
- __ pop(r2);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4116,6 +3727,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(r0);
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r0, FieldMemOperand(r0, Map::kPrototypeOffset));
+ context()->Plug(r0);
+}
+
+
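
EmitGetSuperConstructor above performs two dependent loads: the function's map, then the map's prototype, because the super constructor is the [[Prototype]] of the derived constructor. A standalone model of that lookup (the struct layout is a stand-in, not V8's heap layout):

#include <cassert>

struct JSFunction;

// Two fields, matching the two ldr instructions above.
struct Map {
  const JSFunction* prototype;  // Map::kPrototypeOffset
};

struct JSFunction {
  Map map;  // HeapObject::kMapOffset
};

const JSFunction* GetSuperConstructor(const JSFunction& fn) {
  return fn.map.prototype;
}

int main() {
  JSFunction base{{nullptr}};
  JSFunction derived{{&base}};
  assert(GetSuperConstructor(derived) == &base);
}
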
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
@@ -4229,6 +3851,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ b(vs, &bailout);
__ SmiUntag(string_length);
+ // Bailout for large object allocations.
+ __ cmp(string_length, Operand(Page::kMaxRegularHeapObjectSize));
+ __ b(gt, &bailout);
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ add(element,
@@ -4369,9 +3995,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime, TAG_OBJECT);
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
__ pop(r3);
__ pop(r2);
__ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
@@ -4384,7 +4008,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ b(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(r0);
@@ -4396,9 +4020,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ push(r0);
- __ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ ldr(r0, ContextOperand(r0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), r0);
}
@@ -4406,10 +4028,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ mov(r0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4478,8 +4101,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(r0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4488,10 +4110,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ ldr(r2, GlobalObjectOperand());
+ __ LoadGlobalObject(r2);
__ mov(r1, Operand(var->name()));
__ Push(r2, r1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4503,7 +4125,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(r0);
}
} else {
@@ -4776,12 +4398,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4819,12 +4437,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4896,8 +4510,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(r0, if_false);
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r0, r1, FIRST_JS_RECEIVER_TYPE);
__ b(lt, if_false);
// Check for callable or undetectable objects => false.
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
@@ -4942,7 +4556,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -5013,8 +4627,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmp(r0, Operand(0));
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5043,7 +4657,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
+ __ ldr(dst, ContextMemOperand(cp, context_index));
}
@@ -5053,14 +4667,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ mov(ip, Operand(Smi::FromInt(0)));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ ldr(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -5123,8 +4736,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
@@ -5182,7 +4795,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 1);
switch (target_state) {
case INTERRUPT:
{
@@ -5234,8 +4848,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
// Replace the call address.
- Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
- replacement_code->entry());
+ Assembler::set_target_address_at(isolate, pc_immediate_load_address,
+ unoptimized_code, replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
diff --git a/chromium/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/chromium/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index b53e8ee6cde..e4141bb65f1 100644
--- a/chromium/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/chromium/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -4,15 +4,14 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"
@@ -93,6 +92,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// - x1: the JS function object being called (i.e. ourselves).
+// - x3: the new target value
// - cp: our context.
// - fp: our caller's frame pointer.
// - jssp: stack pointer.
@@ -116,23 +116,14 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
- __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
- __ Poke(x10, receiver_offset);
-
- __ Bind(&ok);
+ __ AssertNotSmi(x10);
+ __ CompareObjectType(x10, x10, x11, FIRST_JS_RECEIVER_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
-
// Open a frame scope to indicate that there is a frame on the stack.
// The MANUAL indicates that the scope shouldn't actually generate code
// to set up the frame because we do it manually below.
@@ -143,8 +134,7 @@ void FullCodeGenerator::Generate() {
// Push(lr, fp, cp, x1);
// Add(fp, jssp, 2 * kPointerSize);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -159,7 +149,7 @@ void FullCodeGenerator::Generate() {
__ Sub(x10, jssp, locals_count * kPointerSize);
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&ok);
}
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
@@ -169,12 +159,12 @@ void FullCodeGenerator::Generate() {
const int kMaxPushes = 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
- __ Mov(x3, loop_iterations);
+ __ Mov(x2, loop_iterations);
Label loop_header;
__ Bind(&loop_header);
// Do pushes.
__ PushMultipleTimes(x10 , kMaxPushes);
- __ Subs(x3, x3, 1);
+ __ Subs(x2, x2, 1);
__ B(ne, &loop_header);
}
int remaining = locals_count % kMaxPushes;
@@ -194,15 +184,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Push(x3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Pop(x3); // Restore new target.
+ }
}
function_in_register_x1 = false;
// Context is returned in x0. It replaces the context passed to us.
@@ -237,10 +238,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_x1| is correct.
+ // Registers holding this function and new target are both trashed if we
+ // bail out here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -254,35 +255,34 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, x1, x0, x2);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- Label check_frame_marker;
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
- function_in_register_x1 = false;
+ SetVar(new_target_var, x3, x0, x2);
+ }
- Label non_construct_frame, done;
+ // Possibly allocate the rest parameter array.
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- __ B(ne, &non_construct_frame);
- __ Ldr(x0,
- MemOperand(x2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ B(&done);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Mov(RestParamAccessDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ Add(RestParamAccessDescriptor::parameter_pointer(), fp,
+ StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Smi::FromInt(rest_index));
- __ Bind(&non_construct_frame);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ function_in_register_x1 = false;
- __ Bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, x0, x2, x3);
+ SetVar(rest_param, x0, x1, x2);
}
Variable* arguments = scope()->arguments();
@@ -316,7 +316,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -442,7 +442,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in x0.
__ Push(result_register());
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
DCHECK(x0.Is(result_register()));
}
// Pretend that the exit is a backwards jump to the entry.
@@ -469,7 +469,6 @@ void FullCodeGenerator::EmitReturnSequence() {
// Nothing ensures 16-byte alignment here.
DCHECK(!current_sp.Is(csp));
__ Mov(current_sp, fp);
- int no_frame_start = masm_->pc_offset();
__ Ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
// Drop the arguments and receiver and return.
// TODO(all): This implementation is overkill as it supports 2**31+1
@@ -480,7 +479,6 @@ void FullCodeGenerator::EmitReturnSequence() {
__ Ret();
int32_t arg_count = info_->scope()->num_parameters() + 1;
__ dc64(kXRegSize * arg_count);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -687,7 +685,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
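
The arm and arm64 DoTest hunks are the same protocol shift: the ToBoolean stub used to hand back an untagged 0/1 that callers compared with zero, and now hands back the canonical true/false heap values, so a single comparison against the true root decides the branch. A standalone model of why one root comparison suffices (the root indices are stand-ins):

#include <cassert>

struct HeapValue { int root_index; };
const HeapValue kTrueRoot{7};   // stand-in for Heap::kTrueValueRootIndex
const HeapValue kFalseRoot{8};  // stand-in for Heap::kFalseValueRootIndex

// New scheme: branch on identity with the true root; anything else goes
// to the false label. (Old scheme: compare an untagged result with 0.)
bool Test(const HeapValue& result) {
  return result.root_index == kTrueRoot.root_index;
}

int main() {
  assert(Test(kTrueRoot));
  assert(!Test(kFalseRoot));
}
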
@@ -855,10 +854,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
// Pushing 0 (xzr) indicates no initial value.
__ Push(x2, xzr);
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -914,7 +911,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(x2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -930,7 +928,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
}
__ Push(x11, flags);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -938,7 +936,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1061,7 +1059,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(x0, &convert);
- __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE, &done_convert, ge);
__ Bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1071,8 +1069,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+ __ JumpIfObjectType(x0, x10, x11, JS_PROXY_TYPE, &call_runtime, eq);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1089,7 +1086,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1127,20 +1124,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
int vector_index = SmiFromSlot(slot)->value();
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
-
- __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
- __ Peek(x10, 0); // Get enumerated object.
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- // TODO(all): similar check was done already. Can we avoid it here?
- __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
- DCHECK(Smi::FromInt(0) == 0);
- __ CzeroX(x1, le); // Zero indicates proxy.
+ __ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
// Smi and array, fixed array length (as smi) and initial index.
__ Push(x1, x0, x2, xzr);
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ Bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1166,16 +1155,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Cmp(x11, x2);
__ B(eq, &update_each);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbz(x2, &update_each);
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(x1, x3);
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ Mov(x3, x0);
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex,
@@ -1191,6 +1175,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and TurboFan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1234,33 +1220,34 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(x0);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), x0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1276,9 +1263,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
+ // Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
@@ -1299,9 +1286,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// Terminate at native context.
__ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
- // Check that extension is NULL.
+ // Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ B(&loop);
@@ -1324,18 +1311,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
+ // Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
__ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
+ // Check that last extension is "the hole".
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return a cp-based operand (the write barrier cannot be allowed to
@@ -1367,7 +1354,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else { // LET || CONST
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ B(done);
@@ -1380,26 +1367,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
- } else {
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadIC(typeof_mode);
- }
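+ // Load the variable through a load IC, with the global object as receiver.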
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ Mov(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ CallLoadIC(typeof_mode);
}
@@ -1438,7 +1410,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// binding in harmony mode.
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ Bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1466,7 +1438,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ Bind(&done);
context()->Plug(x0);
break;
@@ -1477,47 +1449,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // x5 = materialized value (RegExp literal)
- // x4 = JS function, literals array
- // x3 = literal index
- // x2 = RegExp pattern
- // x1 = RegExp flags
- // x0 = RegExp literal clone
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ Ldr(x5, FieldMemOperand(x4, literal_offset));
- __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in x0.
- __ Mov(x3, Smi::FromInt(expr->literal_index()));
- __ Mov(x2, Operand(expr->pattern()));
- __ Mov(x1, Operand(expr->flags()));
- __ Push(x4, x3, x2, x1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Mov(x5, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x10, Smi::FromInt(size));
- __ Push(x5, x10);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(x5);
-
- __ Bind(&allocated);
- // After this, registers are used as follows:
- // x0: Newly allocated regexp.
- // x5: Materialized regexp.
- // x10, x11, x12: temps.
- __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
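+ // Set up the arguments expected by FastCloneRegExpStub:
+ // x3: closure, x2: literal index, x1: pattern, x0: flags.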
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(expr->pattern()));
+ __ Mov(x0, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(x0);
}
@@ -1544,14 +1481,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ Mov(x0, Smi::FromInt(flags));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1590,12 +1526,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1616,7 +1548,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1627,7 +1559,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(value);
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1654,7 +1588,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1682,9 +1616,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1697,7 +1633,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ Mov(x0, Smi::FromInt(NONE));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1710,13 +1646,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ Mov(x0, Smi::FromInt(NONE));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Mov(x0, Smi::FromInt(NONE));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1726,7 +1662,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ Peek(x0, 0);
__ Push(x0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1740,7 +1676,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1753,13 +1688,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ Mov(x0, Smi::FromInt(expr->ComputeFlags()));
__ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1782,26 +1716,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ Mov(x1, Smi::FromInt(expr->literal_index()));
- __ Push(x0, x1);
+ __ Push(x0);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ Peek(x6, kPointerSize); // Copy of array literal.
- __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
- __ Str(result_register(), FieldMemOperand(x1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(x1, offset, result_register(), x10,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ Mov(x3, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
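+ // Store the computed element into the array literal, which is kept on
+ // top of the stack, using a keyed store IC.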
+ __ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
+ __ Peek(StoreDescriptor::ReceiverRegister(), 0);
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1812,7 +1737,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(x0);
result_saved = false;
}
@@ -1826,14 +1750,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(x0);
@@ -2001,7 +1924,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2019,7 +1942,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2166,7 +2089,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read-only property; we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ Push(x0);
}
@@ -2181,19 +2104,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ Mov(x0, Smi::FromInt(DONT_ENUM));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Mov(x0, Smi::FromInt(DONT_ENUM));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2203,12 +2126,12 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2230,7 +2153,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2278,7 +2201,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Mov(StoreDescriptor::NameRegister(), x0);
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::ValueRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2303,35 +2226,16 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(x0));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(x0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to a let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2341,12 +2245,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
__ Mov(x10, Operand(var->name()));
__ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ Bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to a const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2356,11 +2260,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &const_error);
__ Mov(x10, Operand(var->name()));
__ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ Bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2369,11 +2273,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ JumpIfRoot(x10, Heap::kTheHoleValueRootIndex, &uninitialized_this);
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Mov(x11, Operand(var->name()));
@@ -2383,13 +2288,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// jssp[16] : context.
// jssp[24] : value.
__ Push(x0, cp, x11, x10);
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, x1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
__ Ldr(x10, location);
__ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
@@ -2397,14 +2302,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Mov(x1, Operand(var->name()));
__ Push(x0, cp, x1);
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2416,9 +2320,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2435,12 +2339,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
@@ -2458,8 +2358,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(x0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2470,10 +2369,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(x0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2487,12 +2385,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
@@ -2546,13 +2440,12 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitCallWithLoadIC");
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2565,6 +2458,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
__ Push(temp);
}
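+ // The receiver pushed above is known to be undefined.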
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2575,13 +2469,15 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ Pop(x10);
__ Push(x0, x10);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitSuperCallWithLoadIC");
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
@@ -2609,7 +2505,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2617,13 +2513,14 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedCallWithLoadIC");
// Load the key.
VisitForAccumulatorValue(key);
@@ -2640,11 +2537,12 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Pop(x10);
__ Push(x0, x10);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedSuperCallWithLoadIC");
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
@@ -2670,7 +2568,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2678,11 +2576,12 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
+ ASM_LOCATION("FullCodeGenerator::EmitCall");
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2690,9 +2589,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetCallPosition(expr, arg_count);
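+ // Record a bailout point for the call itself, with no registers live.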
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2727,7 +2627,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(x9, x10, x11, x12);
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2746,7 +2646,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// and the object holding it (returned in x1).
__ Mov(x10, Operand(callee->name()));
__ Push(context_register(), x10);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(x0, x1); // Receiver, function.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2773,94 +2673,43 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitPossiblyEvalCall");
+ // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ Peek(x10, (arg_count + 1) * kPointerSize);
- __ Push(x10);
- EmitResolvePossiblyDirectEval(arg_count);
+ PushCalleeAndWithBaseObject(expr);
- // Touch up the stack with the resolved function.
- __ Poke(x0, (arg_count + 1) * kPointerSize);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ Peek(x10, (arg_count + 1) * kPointerSize);
+ __ Push(x10);
+ EmitResolvePossiblyDirectEval(arg_count);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ // Touch up the stack with the resolved function.
+ __ Poke(x0, (arg_count + 1) * kPointerSize);
- // Call the evaluated function.
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ Peek(x1, (arg_count + 1) * kXRegSize);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, x0);
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x1);
- // Emit function call.
- EmitCall(expr);
- }
+ // Record source position for debugger.
+ SetCallPosition(expr);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ // Call the evaluated function.
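+ // The Call builtin expects the target in x1 and the argument count in x0.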
+ __ Peek(x1, (arg_count + 1) * kXRegSize);
+ __ Mov(x0, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, x0);
}
@@ -2895,8 +2744,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(x2);
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2905,12 +2754,20 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitSuperConstructorCall");
SuperCallReference* super_call_ref =
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ Ldr(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ Ldr(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2923,20 +2780,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into x4.
+ // Load new target into x3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ Mov(x4, result_register());
+ __ Mov(x3, result_register());
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
__ Peek(x1, arg_count * kXRegSize);
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(x2);
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -2966,7 +2818,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2980,7 +2832,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -3024,9 +2876,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+ __ CompareObjectType(x0, x10, x11, FIRST_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(hs, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3139,43 +2991,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- Register map = x10;
- Register type_reg = x11;
- __ Ldr(map, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ Cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+ __ CompareObjectType(x0, x10, x11, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3250,28 +3066,17 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(x0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE);
// x10: object's map.
// x11: object's type.
__ B(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ B(eq, &function);
- __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ Cmp(x11, JS_FUNCTION_TYPE);
__ B(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
Register instance_type = x14;
@@ -3347,45 +3152,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = x0;
- Register result = x0;
- Register stamp_addr = x10;
- Register stamp_cache = x11;
-
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(stamp_addr, stamp);
- __ Ldr(stamp_addr, MemOperand(stamp_addr));
- __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(stamp_addr, stamp_cache);
- __ B(ne, &runtime);
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ Bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3486,38 +3252,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(x0, &done_convert);
__ Push(x0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(x0);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into x0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into x0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3531,25 +3271,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &done_convert, ls);
__ Bind(&convert);
__ Push(x0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ Bind(&done_convert);
context()->Plug(x0);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into x0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3663,30 +3390,15 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ Pop(x1);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
-
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
+ ASM_LOCATION("FullCodeGenerator::EmitCall");
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
// Push target, receiver and arguments onto the stack.
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to x1.
int const argc = args->length() - 2;
__ Peek(x1, (argc + 1) * kXRegSize);
@@ -3700,104 +3412,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(x0, &runtime);
- __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime);
-
- // InvokeFunction requires the function in x1. Move it in there.
- __ Mov(x1, x0);
- ParameterCount count(arg_count);
- __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper());
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ B(&done);
-
- __ Bind(&runtime);
- __ Push(x0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ Bind(&done);
-
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into x3.
- __ Peek(x3, 1 * kPointerSize);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ Ldr(x11, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x12, MemOperand(x11, StandardFrameConstants::kContextOffset));
- __ Cmp(x12, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ Mov(x0, Operand(0));
- __ B(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ Ldr(x1, MemOperand(x11, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(x1, x1);
-
- __ Mov(x0, x1);
-
- // Get arguments pointer in x11.
- __ Add(x11, x11, Operand(x1, LSL, kPointerSizeLog2));
- __ Add(x11, x11, StandardFrameConstants::kCallerSPOffset);
- Label loop;
- __ bind(&loop);
- // Pre-decrement x11 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Ldr(x10, MemOperand(x11, -kPointerSize, PreIndex));
- __ Push(x10);
- __ Sub(x1, x1, Operand(1));
- __ Cbnz(x1, &loop);
- }
-
- __ bind(&args_set_up);
- __ Peek(x1, Operand(x0, LSL, kPointerSizeLog2));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(x1, x2);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3832,6 +3446,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(x0);
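+ // The super constructor is the [[Prototype]] of the active function;
+ // read it via the function's map.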
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldr(x0, FieldMemOperand(x0, Map::kPrototypeOffset));
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
@@ -3946,6 +3571,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ Umaddl(string_length, array_length.W(), separator_length.W(),
string_length);
+ // Bail out for large object allocations.
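+ // Such large strings would need large-object space, which the inline
+ // allocation below does not support.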
+ __ Cmp(string_length, Page::kMaxRegularHeapObjectSize);
+ __ B(gt, &bailout);
+
// Get first element in the array.
__ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
// Live values in registers:
@@ -4074,10 +3703,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
- __ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
- __ Ldr(map_reg,
- ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
__ Pop(boolean_done);
__ Pop(result_value);
__ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
@@ -4095,7 +3721,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ B(&done);
__ Bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ Bind(&done);
context()->Plug(x0);
@@ -4107,9 +3733,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
__ Push(x0);
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Ldr(x0, FieldMemOperand(x0, GlobalObject::kNativeContextOffset));
- __ Ldr(x0, ContextMemOperand(x0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), x0);
}
@@ -4117,10 +3741,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ Peek(x1, (arg_count + 1) * kPointerSize);
- __ CallStub(&stub);
+ __ Mov(x0, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4187,8 +3812,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(x0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4197,10 +3821,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ Ldr(x12, GlobalObjectMemOperand());
+ __ LoadGlobalObject(x12);
__ Mov(x11, Operand(var->name()));
__ Push(x12, x11);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(x0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4211,7 +3835,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ Mov(x2, Operand(var->name()));
__ Push(context_register(), x2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(x0);
}
} else {
@@ -4483,12 +4107,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4526,12 +4146,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4607,8 +4223,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
__ JumpIfSmi(x0, if_false);
__ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, if_false, lt);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE, if_false, lt);
// Check for callable or undetectable objects => false.
__ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
__ TestAndSplit(x10, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable),
@@ -4658,7 +4274,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4729,7 +4345,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4884,9 +4501,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ Mov(x0, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -4903,7 +4522,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// The ToBooleanStub argument (result.done) is in x0.
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ Cbz(x0, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(ne, &l_try);
// result.value
__ Pop(load_receiver); // result
@@ -5001,7 +4621,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Mov(x10, Smi::FromInt(resume_mode));
__ Push(generator_object, result_register(), x10);
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Unreachable();
@@ -5021,7 +4641,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ Bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ Bind(&done_allocate);
Register map_reg = x1;
@@ -5029,10 +4649,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
- __ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
- __ Ldr(map_reg,
- ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
__ Pop(result_value);
__ LoadRoot(boolean_done,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
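The LoadNativeContextSlot macro above folds the three explicit loads into one call; a sketch of the expansion it presumably performs (helper names assumed from the surrounding style):

  // Roughly: dst = native_context[index]
  __ Ldr(dst, NativeContextMemOperand());      // load the native context
  __ Ldr(dst, ContextMemOperand(dst, index));  // then the requested slot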
@@ -5087,21 +4704,19 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
+ // code.
DCHECK(kSmiTag == 0);
- __ Push(xzr);
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, x10);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
- __ Push(x10);
} else {
DCHECK(closure_scope->is_function_scope());
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(x10);
}
+ __ Push(x10);
}
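In summary, the rewrite above loads the closure into x10 on every path and pushes it once at the end:

  //   script/module scope -> native context CLOSURE_INDEX (the empty function)
  //   eval scope          -> enclosing context's CLOSURE_INDEX
  //   function scope      -> the function slot of the current frame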
@@ -5156,8 +4771,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
@@ -5171,7 +4786,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
// Turn the jump into a nop.
Address branch_address = pc - 3 * kInstructionSize;
- PatchingAssembler patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ PatchingAssembler patcher(isolate, branch_address, 1);
DCHECK(Instruction::Cast(branch_address)
->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
@@ -5205,25 +4821,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address interrupt_address_pointer =
reinterpret_cast<Address>(load) + load->ImmPCOffset();
DCHECK((Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OnStackReplacement()
- ->entry())) ||
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->InterruptCheck()
- ->entry())) ||
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->InterruptCheck()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OsrAfterStackCheck()
- ->entry())) ||
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OnStackReplacement()
- ->entry())));
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())));
Memory::uint64_at(interrupt_address_pointer) =
reinterpret_cast<uint64_t>(replacement_code->entry());
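A sketch of the back edge site this code patches, reconstructed from the offsets and checks above (pc - 3 * kInstructionSize for the branch, plus a pc-relative load of a pooled 64-bit entry address); the exact layout is an assumption:

  //   b.pl ok / nop            <- PatchAt flips between branch and nop
  //   ldr  x16, <pc-relative>  <- loads entry address from a 64-bit pool slot
  //   blr  x16                 <- InterruptCheck / OnStackReplacement / ...
  //   ok:
  //
  // Retargeting is then just an aligned 64-bit store into the pool slot,
  // which is why only Memory::uint64_at(...) is rewritten.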
diff --git a/chromium/v8/src/full-codegen/full-codegen.cc b/chromium/v8/src/full-codegen/full-codegen.cc
index a29b59cf112..416a69c7089 100644
--- a/chromium/v8/src/full-codegen/full-codegen.cc
+++ b/chromium/v8/src/full-codegen/full-codegen.cc
@@ -4,8 +4,11 @@
#include "src/full-codegen/full-codegen.h"
-#include "src/ast.h"
-#include "src/ast-numbering.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/prettyprinter.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/compiler.h"
@@ -13,9 +16,6 @@
#include "src/debug/liveedit.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
-#include "src/prettyprinter.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -38,7 +38,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
}
CodeGenerator::MakeCodePrologue(info, "full");
const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
+ MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize,
+ CodeObjectRequired::kYes);
if (info->will_serialize()) masm.enable_serializer();
LOG_CODE_EVENT(isolate,
@@ -158,12 +159,12 @@ bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
ArrayLiteral* expr) const {
// TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
return expr->depth() > 1 || expr->is_strong() ||
- expr->values()->length() > JSObject::kInitialMaxFastElementArray;
+ expr->values()->length() > JSArray::kInitialMaxFastElementArray;
}
void FullCodeGenerator::Initialize() {
- InitializeAstVisitor(info_->isolate(), info_->zone());
+ InitializeAstVisitor(info_->isolate());
// The generation of debug code must match between the snapshot code and the
// code that is generated later. This is assumed by the debugger when it is
// calculating PC offsets after generating a debug version of code. Therefore
@@ -482,6 +483,63 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
+ const Callable& callable) {
+ ZoneList<Expression*>* args = expr->arguments();
+ int param_count = callable.descriptor().GetRegisterParameterCount();
+ DCHECK_EQ(args->length(), param_count);
+
+ if (param_count > 0) {
+ int last = param_count - 1;
+ // Put all but last arguments on stack.
+ for (int i = 0; i < last; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ // The last argument goes to the accumulator.
+ VisitForAccumulatorValue(args->at(last));
+
+ // Move the arguments to the registers, as required by the stub.
+ __ Move(callable.descriptor().GetRegisterParameter(last),
+ result_register());
+ for (int i = last; i-- > 0;) {
+ __ Pop(callable.descriptor().GetRegisterParameter(i));
+ }
+ }
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToString(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToNumber(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToNumber(isolate()));
+}
+
+
+void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToObject(isolate()));
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::RegExpConstructResult(isolate()));
+}
+
+
bool RecordStatementPosition(MacroAssembler* masm, int pos) {
if (pos == RelocInfo::kNoPosition) return false;
masm->positions_recorder()->RecordStatementPosition(pos);
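A short worked trace of EmitIntrinsicAsStubCall above, for a hypothetical callable with three register parameters r0..r2 and arguments a0..a2:

  //  VisitForStackValue(a0)        stack: [a0]
  //  VisitForStackValue(a1)        stack: [a0, a1]
  //  VisitForAccumulatorValue(a2)  accumulator: a2
  //  Move(r2, accumulator)         last argument taken from the accumulator
  //  Pop(r1)                       pops a1
  //  Pop(r0)                       pops a0 (pop order mirrors push order)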
@@ -503,7 +561,10 @@ void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- RecordStatementPosition(masm_, fun->end_position() - 1);
+ // For default constructors, start position equals end position, and there
+ // is no source code besides the class literal.
+ int pos = std::max(fun->start_position(), fun->end_position() - 1);
+ RecordStatementPosition(masm_, pos);
if (info_->is_debug()) {
// Always emit a debug break slot before a return.
DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
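A quick check of the std::max clamp above with hypothetical positions: a normal function with start 10 and end 90 records std::max(10, 89) == 89, exactly as before; a synthesized default constructor with start == end == 42 records std::max(42, 41) == 42 instead of the out-of-range 41 the old expression would have produced.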
@@ -541,36 +602,24 @@ void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
}
-void FullCodeGenerator::SetCallPosition(Expression* expr, int argc) {
+void FullCodeGenerator::SetCallPosition(Expression* expr) {
if (expr->position() == RelocInfo::kNoPosition) return;
RecordPosition(masm_, expr->position());
if (info_->is_debug()) {
// Always emit a debug break slot before a call.
- DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL,
- argc);
- }
-}
-
-
-void FullCodeGenerator::SetConstructCallPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- RecordPosition(masm_, expr->position());
- if (info_->is_debug()) {
- // Always emit a debug break slot before a construct call.
- DebugCodegen::GenerateSlot(masm_,
- RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL);
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
}
}
void FullCodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* super) {
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
}
void FullCodeGenerator::VisitSuperCallReference(SuperCallReference* super) {
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
}
@@ -745,6 +794,15 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
}
+void FullCodeGenerator::VisitDoExpression(DoExpression* expr) {
+ Comment cmnt(masm_, "[ Do Expression");
+ NestedStatement nested_block(this);
+ SetExpressionPosition(expr);
+ VisitBlock(expr->block());
+ EmitVariableLoad(expr->result());
+}
+
+
void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
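For orientation, VisitDoExpression above relies on the parser having rewritten the do-expression's completion value into a synthetic variable (a reading of the code, not a spec quote): for something like `var x = do { let t = f(); t + 1 };` the block is emitted as an ordinary Block, and EmitVariableLoad(expr->result()) then loads that synthetic result variable into the accumulator as the expression's value.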
@@ -856,18 +914,12 @@ void FullCodeGenerator::EmitUnwindBeforeReturn() {
void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
BailoutId bailout_id) {
VisitForStackValue(property->key());
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
PrepareForBailoutForId(bailout_id, NO_REGISTERS);
__ Push(result_register());
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperCallReference* ref) {
- VisitForStackValue(ref->this_function_var());
- __ CallRuntime(Runtime::kGetPrototype, 1);
-}
-
-
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
@@ -882,9 +934,14 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
Comment cmnt(masm_, "[ WithStatement");
SetStatementPosition(stmt);
- VisitForStackValue(stmt->expression());
+ VisitForAccumulatorValue(stmt->expression());
+ Callable callable = CodeFactory::ToObject(isolate());
+ __ Move(callable.descriptor().GetRegisterParameter(0), result_register());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(stmt->ToObjectId(), NO_REGISTERS);
+ __ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext, 2);
+ __ CallRuntime(Runtime::kPushWithContext);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1081,7 +1138,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
__ Push(stmt->variable()->name());
__ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext, 3);
+ __ CallRuntime(Runtime::kPushCatchContext);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1151,7 +1208,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// rethrow the exception if it returns.
__ Call(&finally_entry);
__ Push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
+ __ CallRuntime(Runtime::kReThrow);
// Finally block implementation.
__ bind(&finally_entry);
@@ -1268,15 +1325,14 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
__ Push(Smi::FromInt(lit->start_position()));
__ Push(Smi::FromInt(lit->end_position()));
- __ CallRuntime(Runtime::kDefineClass, 5);
+ __ CallRuntime(Runtime::kDefineClass);
PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
EmitClassDefineProperties(lit);
- if (lit->scope() != NULL) {
- DCHECK_NOT_NULL(lit->class_variable_proxy());
- EmitVariableAssignment(lit->class_variable_proxy()->var(),
- Token::INIT_CONST, lit->ProxySlot());
+ if (lit->class_variable_proxy() != nullptr) {
+ EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
+ lit->ProxySlot());
}
}
@@ -1298,9 +1354,9 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
DCHECK(!fun_template.IsEmpty());
// Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Utils::OpenHandle(
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
*fun_template->GetFunction(v8_isolate->GetCurrentContext())
- .ToLocalChecked());
+ .ToLocalChecked()));
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
@@ -1324,7 +1380,7 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
SetExpressionPosition(expr);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kThrow);
// Never returns here.
}
@@ -1357,6 +1413,66 @@ void FullCodeGenerator::ExitTryBlock(int handler_index) {
}
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* callee = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+
+ switch (call_type) {
+ case Call::POSSIBLY_EVAL_CALL:
+ EmitPossiblyEvalCall(expr);
+ break;
+ case Call::GLOBAL_CALL:
+ EmitCallWithLoadIC(expr);
+ break;
+ case Call::LOOKUP_SLOT_CALL:
+ // Call to a lookup slot (dynamically introduced variable).
+ PushCalleeAndWithBaseObject(expr);
+ EmitCall(expr);
+ break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitCallWithLoadIC(expr);
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ EmitSuperCallWithLoadIC(expr);
+ break;
+ case Call::KEYED_SUPER_PROPERTY_CALL:
+ EmitKeyedSuperCallWithLoadIC(expr);
+ break;
+ case Call::SUPER_CALL:
+ EmitSuperConstructorCall(expr);
+ break;
+ case Call::OTHER_CALL:
+ // Call to an arbitrary expression not handled specially above.
+ VisitForStackValue(callee);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Emit function call.
+ EmitCall(expr);
+ break;
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ DCHECK(expr->return_is_recorded_);
+#endif
+}
+
+
void FullCodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
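A rough mapping from source form to the Call::CallType cases handled above (the actual classification lives in Call::GetCallType; the forms are illustrative):

  //  eval(x)        -> POSSIBLY_EVAL_CALL
  //  foo(x)         -> GLOBAL_CALL or LOOKUP_SLOT_CALL, depending on scope
  //  o.m(x)         -> NAMED_PROPERTY_CALL
  //  o[k](x)        -> KEYED_PROPERTY_CALL
  //  super.m(x)     -> NAMED_SUPER_PROPERTY_CALL
  //  super[k](x)    -> KEYED_SUPER_PROPERTY_CALL
  //  super(x)       -> SUPER_CALL
  //  (0, f)(x)      -> OTHER_CALL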
@@ -1365,6 +1481,12 @@ void FullCodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
}
+void FullCodeGenerator::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
int* stack_depth, int* context_length) {
// The macros used here must preserve the result register.
@@ -1520,7 +1642,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
Comment cmnt(masm(), "[ Extend block context");
__ Push(scope->GetScopeInfo(codegen->isolate()));
codegen_->PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ __ CallRuntime(Runtime::kPushBlockContext);
// Replace the context stored in the frame.
codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
diff --git a/chromium/v8/src/full-codegen/full-codegen.h b/chromium/v8/src/full-codegen/full-codegen.h
index 02da16b865a..52eddafa1a1 100644
--- a/chromium/v8/src/full-codegen/full-codegen.h
+++ b/chromium/v8/src/full-codegen/full-codegen.h
@@ -7,14 +7,15 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/bit-vector.h"
+#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/globals.h"
#include "src/objects.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -35,6 +36,8 @@ class FullCodeGenerator: public AstVisitor {
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
: masm_(masm),
info_(info),
+ isolate_(info->isolate()),
+ zone_(info->zone()),
scope_(info->scope()),
nesting_stack_(NULL),
loop_depth_(0),
@@ -80,8 +83,7 @@ class FullCodeGenerator: public AstVisitor {
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 149;
#elif V8_TARGET_ARCH_ARM64
-// TODO(all): Copied ARM value. Check this is sensible for ARM64.
- static const int kCodeSizeMultiplier = 149;
+ static const int kCodeSizeMultiplier = 220;
#elif V8_TARGET_ARCH_PPC64
static const int kCodeSizeMultiplier = 200;
#elif V8_TARGET_ARCH_PPC
@@ -150,10 +152,9 @@ class FullCodeGenerator: public AstVisitor {
Breakable(FullCodeGenerator* codegen, BreakableStatement* statement)
: NestedStatement(codegen), statement_(statement) {
}
- virtual ~Breakable() {}
- virtual Breakable* AsBreakable() { return this; }
- virtual bool IsBreakTarget(Statement* target) {
+ Breakable* AsBreakable() override { return this; }
+ bool IsBreakTarget(Statement* target) override {
return statement() == target;
}
@@ -171,10 +172,9 @@ class FullCodeGenerator: public AstVisitor {
Iteration(FullCodeGenerator* codegen, IterationStatement* statement)
: Breakable(codegen, statement) {
}
- virtual ~Iteration() {}
- virtual Iteration* AsIteration() { return this; }
- virtual bool IsContinueTarget(Statement* target) {
+ Iteration* AsIteration() override { return this; }
+ bool IsContinueTarget(Statement* target) override {
return statement() == target;
}
@@ -190,9 +190,8 @@ class FullCodeGenerator: public AstVisitor {
NestedBlock(FullCodeGenerator* codegen, Block* block)
: Breakable(codegen, block) {
}
- virtual ~NestedBlock() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
auto block_scope = statement()->AsBlock()->scope();
if (block_scope != nullptr) {
if (block_scope->ContextLocalCount() > 0) ++(*context_length);
@@ -207,13 +206,12 @@ class FullCodeGenerator: public AstVisitor {
static const int kElementCount = TryBlockConstant::kElementCount;
explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
- virtual ~TryCatch() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
*stack_depth += kElementCount;
return previous_;
}
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -227,10 +225,9 @@ class FullCodeGenerator: public AstVisitor {
TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
: NestedStatement(codegen), finally_entry_(finally_entry) {
}
- virtual ~TryFinally() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length);
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override;
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -245,13 +242,12 @@ class FullCodeGenerator: public AstVisitor {
static const int kElementCount = 3;
explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
- virtual ~Finally() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
*stack_depth += kElementCount;
return previous_;
}
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -265,13 +261,12 @@ class FullCodeGenerator: public AstVisitor {
ForIn(FullCodeGenerator* codegen, ForInStatement* statement)
: Iteration(codegen, statement) {
}
- virtual ~ForIn() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
*stack_depth += kElementCount;
return previous_;
}
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ NestedStatement* AccumulateDepth(int* stack_depth) override {
*stack_depth += kElementCount;
return previous_;
}
@@ -284,9 +279,8 @@ class FullCodeGenerator: public AstVisitor {
explicit WithOrCatch(FullCodeGenerator* codegen)
: NestedStatement(codegen) {
}
- virtual ~WithOrCatch() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ NestedStatement* Exit(int* stack_depth, int* context_length) override {
++(*context_length);
return previous_;
}
@@ -427,11 +421,6 @@ class FullCodeGenerator: public AstVisitor {
literal()->feedback_vector_spec(), slot));
}
- Smi* SmiFromSlot(FeedbackVectorICSlot slot) const {
- return Smi::FromInt(TypeFeedbackVector::GetIndexFromSpec(
- literal()->feedback_vector_spec(), slot));
- }
-
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
void RecordJSReturnSite(Call* call);
@@ -473,12 +462,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitReturnSequence();
// Platform-specific code sequences for calls
- void EmitCall(Call* expr, CallICState::CallType = CallICState::FUNCTION);
+ void EmitCall(Call* expr, ConvertReceiverMode = ConvertReceiverMode::kAny);
void EmitSuperConstructorCall(Call* expr);
void EmitCallWithLoadIC(Call* expr);
void EmitSuperCallWithLoadIC(Call* expr);
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
void EmitKeyedSuperCallWithLoadIC(Call* expr);
+ void EmitPossiblyEvalCall(Call* expr);
#define FOR_EACH_FULL_CODE_INTRINSIC(F) \
F(IsSmi) \
@@ -486,41 +476,39 @@ class FullCodeGenerator: public AstVisitor {
F(IsTypedArray) \
F(IsRegExp) \
F(IsJSProxy) \
- F(IsConstructCall) \
F(Call) \
- F(CallFunction) \
- F(DefaultConstructorCallSuper) \
F(ArgumentsLength) \
F(Arguments) \
F(ValueOf) \
F(SetValueOf) \
F(IsDate) \
- F(DateField) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
F(TwoByteSeqStringSetChar) \
F(ObjectEquals) \
F(IsFunction) \
- F(IsSpecObject) \
+ F(IsJSReceiver) \
F(IsSimdValue) \
F(MathPow) \
F(IsMinusZero) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
+ F(GetSuperConstructor) \
F(FastOneByteArrayJoin) \
F(GeneratorNext) \
F(GeneratorThrow) \
F(DebugBreakInOptimizedCode) \
F(ClassOf) \
F(StringCharCodeAt) \
- F(StringAdd) \
F(SubString) \
F(RegExpExec) \
F(RegExpConstructResult) \
F(ToInteger) \
F(NumberToString) \
F(ToString) \
+ F(ToLength) \
+ F(ToNumber) \
F(ToName) \
F(ToObject) \
F(DebugIsActive) \
@@ -530,6 +518,8 @@ class FullCodeGenerator: public AstVisitor {
FOR_EACH_FULL_CODE_INTRINSIC(GENERATOR_DECLARATION)
#undef GENERATOR_DECLARATION
+ void EmitIntrinsicAsStubCall(CallRuntime* expr, const Callable& callable);
+
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
Expression *value,
@@ -598,12 +588,12 @@ class FullCodeGenerator: public AstVisitor {
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator. |slot| is the feedback vector slot used
// by the store.
- void EmitAssignment(Expression* expr, FeedbackVectorICSlot slot);
+ void EmitAssignment(Expression* expr, FeedbackVectorSlot slot);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
void EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot);
+ FeedbackVectorSlot slot);
// Helper functions to EmitVariableAssignment
void EmitStoreToStackLocalOrContextSlot(Variable* var,
@@ -634,12 +624,10 @@ class FullCodeGenerator: public AstVisitor {
// The value of the initializer is expected to be at the top of the stack.
// |offset| is the offset in the stack where the home object can be found.
void EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot);
+ FeedbackVectorSlot slot);
void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
- FeedbackVectorICSlot slot);
-
- void EmitLoadSuperConstructor(SuperCallReference* super_call_ref);
+ FeedbackVectorSlot slot);
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
@@ -667,9 +655,12 @@ class FullCodeGenerator: public AstVisitor {
// This is used in loop headers where we want to break for each iteration.
void SetExpressionAsStatementPosition(Expression* expr);
- void SetCallPosition(Expression* expr, int argc);
+ void SetCallPosition(Expression* expr);
- void SetConstructCallPosition(Expression* expr);
+ void SetConstructCallPosition(Expression* expr) {
+    // Currently, call and construct calls are treated the same for debugging.

+ SetCallPosition(expr);
+ }
// Non-local control flow support.
void EnterTryBlock(int handler_index, Label* handler);
@@ -692,6 +683,8 @@ class FullCodeGenerator: public AstVisitor {
const ExpressionContext* context() { return context_; }
void set_new_context(const ExpressionContext* context) { context_ = context; }
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() const { return zone_; }
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
@@ -718,7 +711,7 @@ class FullCodeGenerator: public AstVisitor {
void PushCalleeAndWithBaseObject(Call* expr);
// AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -736,7 +729,7 @@ class FullCodeGenerator: public AstVisitor {
bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
- void EmitLoadStoreICSlot(FeedbackVectorICSlot slot);
+ void EmitLoadStoreICSlot(FeedbackVectorSlot slot);
int NewHandlerTableEntry();
@@ -832,20 +825,18 @@ class FullCodeGenerator: public AstVisitor {
explicit AccumulatorValueContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsAccumulatorValue() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsAccumulatorValue() const override { return true; }
};
class StackValueContext : public ExpressionContext {
@@ -853,20 +844,18 @@ class FullCodeGenerator: public AstVisitor {
explicit StackValueContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsStackValue() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsStackValue() const override { return true; }
};
class TestContext : public ExpressionContext {
@@ -892,20 +881,18 @@ class FullCodeGenerator: public AstVisitor {
Label* false_label() const { return false_label_; }
Label* fall_through() const { return fall_through_; }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsTest() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsTest() const override { return true; }
private:
Expression* condition_;
@@ -919,20 +906,18 @@ class FullCodeGenerator: public AstVisitor {
explicit EffectContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
- virtual void Plug(bool flag) const;
- virtual void Plug(Register reg) const;
- virtual void Plug(Label* materialize_true, Label* materialize_false) const;
- virtual void Plug(Variable* var) const;
- virtual void Plug(Handle<Object> lit) const;
- virtual void Plug(Heap::RootListIndex) const;
- virtual void PlugTOS() const;
- virtual void DropAndPlug(int count, Register reg) const;
- virtual void PrepareTest(Label* materialize_true,
- Label* materialize_false,
- Label** if_true,
- Label** if_false,
- Label** fall_through) const;
- virtual bool IsEffect() const { return true; }
+ void Plug(bool flag) const override;
+ void Plug(Register reg) const override;
+ void Plug(Label* materialize_true, Label* materialize_false) const override;
+ void Plug(Variable* var) const override;
+ void Plug(Handle<Object> lit) const override;
+ void Plug(Heap::RootListIndex) const override;
+ void PlugTOS() const override;
+ void DropAndPlug(int count, Register reg) const override;
+ void PrepareTest(Label* materialize_true, Label* materialize_false,
+ Label** if_true, Label** if_false,
+ Label** fall_through) const override;
+ bool IsEffect() const override { return true; }
};
class EnterBlockScopeIfNeeded {
@@ -953,6 +938,8 @@ class FullCodeGenerator: public AstVisitor {
MacroAssembler* masm_;
CompilationInfo* info_;
+ Isolate* isolate_;
+ Zone* zone_;
Scope* scope_;
Label return_label_;
NestedStatement* nesting_stack_;
@@ -976,28 +963,6 @@ class FullCodeGenerator: public AstVisitor {
};
-// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable: public TemplateHashMap<Literal,
- ObjectLiteral::Accessors,
- ZoneAllocationPolicy> {
- public:
- explicit AccessorTable(Zone* zone) :
- TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy>(Literal::Match,
- ZoneAllocationPolicy(zone)),
- zone_(zone) { }
-
- Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
- if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
- return it;
- }
-
- private:
- Zone* zone_;
-};
-
-
class BackEdgeTable {
public:
BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
@@ -1081,6 +1046,7 @@ class BackEdgeTable {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FULL_CODEGEN_FULL_CODEGEN_H_
diff --git a/chromium/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/chromium/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index cce73579623..4ef3a0984f5 100644
--- a/chromium/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/chromium/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -4,16 +4,15 @@
#if V8_TARGET_ARCH_IA32
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -84,6 +83,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o edi: the JS function object being called (i.e. ourselves)
+// o edx: the new target value
// o esi: our context
// o ebp: our caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -106,24 +106,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
+ __ AssertNotSmi(ecx);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -132,8 +120,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -150,7 +137,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_real_stack_limit(isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
@@ -186,15 +173,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(edx); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(edx); // Restore new target.
+ }
}
function_in_register = false;
// Context is returned in eax. It replaces the context passed to us.
@@ -232,10 +230,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register| is correct.
+  // Registers holding this function and the new target are both trashed in
+  // case we bail out here. But since that can happen only when the new target
+  // is not used and we allocate a context, the value of |function_in_register|
+  // is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -246,37 +244,36 @@ void FullCodeGenerator::Generate() {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, edi, ebx, edx);
+ SetVar(this_function_var, edi, ebx, ecx);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- Label non_adaptor_frame;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &non_adaptor_frame);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- __ bind(&non_adaptor_frame);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-
- Label non_construct_frame, done;
- __ j(not_equal, &non_construct_frame);
-
- // Construct frame
- __ mov(eax,
- Operand(eax, ConstructFrameConstants::kOriginalConstructorOffset));
- __ jmp(&done);
-
- // Non-construct frame
- __ bind(&non_construct_frame);
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
-
- __ bind(&done);
- SetVar(new_target_var, eax, ebx, edx);
+ SetVar(new_target_var, edx, ebx, ecx);
+ }
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+
+ __ mov(RestParamAccessDescriptor::parameter_count(),
+ Immediate(Smi::FromInt(num_parameters)));
+ __ lea(RestParamAccessDescriptor::parameter_pointer(),
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Immediate(Smi::FromInt(rest_index)));
+ function_in_register = false;
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+ SetVar(rest_param, eax, ebx, edx);
}
Variable* arguments = scope()->arguments();
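A quick check of the offset arithmetic in the rest-parameter block above (ia32, kPointerSize == 4, counts hypothetical): with num_parameters == 2 the computed offset is 8, so parameter_pointer is materialized as ebp + StandardFrameConstants::kCallerSPOffset + 8. The stub then receives the parameter count, a pointer into the caller's argument area, and the rest index in the three fixed RestParamAccessDescriptor registers, and returns the rest array in eax.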
@@ -309,7 +306,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -414,7 +411,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
__ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -436,13 +433,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&ok);
SetReturnPosition(literal());
- int no_frame_start = masm_->pc_offset();
__ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -640,9 +635,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ test(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -800,10 +794,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -855,7 +848,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(Immediate(variable->name()));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -866,7 +861,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -874,7 +869,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -996,7 +991,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1007,9 +1002,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime, use_cache, fixed_array;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- __ j(below_equal, &call_runtime);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
+ __ j(equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1023,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -1054,7 +1048,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register eax. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
@@ -1062,22 +1055,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-
- __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
- __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
- __ j(above, &non_proxy);
- __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(ebx); // Smi
+ __ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1100,18 +1084,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK(Smi::FromInt(0) == 0);
- __ test(edx, edx);
- __ j(zero, &update_each);
-
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, loop_statement.continue_label());
@@ -1127,6 +1105,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1167,33 +1147,34 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ push(Immediate(info));
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(eax);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1208,10 +1189,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
@@ -1237,9 +1217,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
Immediate(isolate()->factory()->native_context_map()));
__ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
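Both extension checks in this hunk assume the accompanying refactor where an absent context extension is represented by the-hole value rather than a zero word, so the test becomes a root check:

  //  old: cmp ContextOperand(ctx, EXTENSION_INDEX), Immediate(0); jne slow
  //  new: JumpIfNotRoot(ContextOperand(ctx, EXTENSION_INDEX),
  //                     Heap::kTheHoleValueRootIndex, slow)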
@@ -1261,19 +1241,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
}
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that last extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an esi-based operand (the write barrier cannot be allowed to
@@ -1305,7 +1284,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ mov(eax, isolate()->factory()->undefined_value());
} else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1318,27 +1297,14 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ ContextOperand(LoadDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
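The rewritten load above reaches the global object through the native context instead of the removed GlobalObjectOperand()/global-via-context stubs; a sketch of the presumed sequence:

  //  receiver = native context              (NativeContextOperand())
  //  receiver = receiver[EXTENSION_INDEX]   // the global object
  //  then a normal named LoadIC using the variable's feedback slot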
@@ -1376,7 +1342,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1403,7 +1369,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1414,53 +1380,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
+ __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
+ __ Move(ecx, Immediate(expr->pattern()));
+ __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(eax);
}
@@ -1489,15 +1414,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
@@ -1538,12 +1461,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1561,7 +1480,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1570,7 +1489,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1597,7 +1518,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1624,9 +1545,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1638,7 +1561,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1650,12 +1573,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1664,7 +1587,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
__ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1678,7 +1601,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1691,15 +1613,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1724,31 +1644,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax); // array literal.
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_constant_fast_elements) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ mov(ecx, Immediate(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
-
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(Smi::FromInt(array_index)));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1758,7 +1664,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(eax);
result_saved = false;
}
@@ -1772,14 +1677,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -2059,9 +1963,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ Set(eax, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2077,8 +1983,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ test(eax, eax);
- __ j(zero, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(not_equal, &l_try);
// result.value
__ pop(load_receiver); // result
@@ -2169,7 +2075,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2186,11 +2092,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -2226,7 +2131,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ push(Immediate(key->value()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2243,7 +2148,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2369,7 +2274,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read-only property, we special case this so we do
// not need to do it for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(eax);
}
@@ -2384,24 +2289,24 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2417,7 +2322,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2437,7 +2342,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2485,7 +2390,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), eax);
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2508,35 +2413,18 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
- __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(StoreDescriptor::ReceiverRegister(),
+ ContextOperand(StoreDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(eax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2546,11 +2434,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2560,11 +2448,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &const_error, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2573,24 +2461,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(equal, &uninitialized_this);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2599,15 +2488,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2620,9 +2508,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
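
A recurring change in this hunk: the INIT_LET / INIT_CONST / INIT_CONST_LEGACY tokens are gone, and a single Token::INIT is now paired with the variable's own mode. A tiny self-contained C++ model of the resulting predicate (type names invented for illustration; the real logic is spread across the branches above):

enum class Mode { VAR, LET, CONST, CONST_LEGACY };
enum class Op { ASSIGN, INIT };

// A let/const binding needs the hole (TDZ) check only for ordinary
// assignments; initialization stores are exempt, matching the branches
// in the hunk above.
bool NeedsHoleCheck(Mode mode, Op op) {
  return (mode == Mode::LET || mode == Mode::CONST) && op != Op::INIT;
}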
@@ -2639,12 +2527,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2661,8 +2545,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ push(Immediate(key->value()));
__ push(eax);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2672,10 +2555,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
__ push(eax);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2690,13 +2572,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
-
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2750,10 +2627,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2761,6 +2637,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ push(Immediate(isolate()->factory()->undefined_value()));
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2771,9 +2648,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
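
The CallICState::CallType to ConvertReceiverMode swap above boils down to one decision per call site. A sketch of that mapping (the helper name is invented; the enumerator names are the ones used in the diff):

enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined };

// A bare variable callee gets undefined pushed as its receiver, which the
// callee may have to convert; a property callee keeps the object it was
// loaded from, already known to be neither null nor undefined.
ConvertReceiverMode ModeFor(bool callee_is_variable_proxy) {
  return callee_is_variable_proxy ? ConvertReceiverMode::kNullOrUndefined
                                  : ConvertReceiverMode::kNotNullOrUndefined;
}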
@@ -2802,7 +2680,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2810,7 +2688,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2833,7 +2711,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2860,7 +2738,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2868,11 +2746,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2880,8 +2758,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2915,7 +2794,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2934,7 +2813,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(callee->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ push(eax); // Function.
__ push(edx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2960,88 +2839,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ PushCalleeAndWithBaseObject(expr);
- // Touch up the stack with the resolved function.
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
+ // Touch up the stack with the resolved function.
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ SetCallPosition(expr);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
}
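
The extracted EmitPossiblyEvalCall keeps the old stack protocol; only the final dispatch changes from CallFunctionStub to the Call builtin with eax holding the argument count. A runnable C++ trace of the stack it builds (a model, not V8 code):

#include <string>
#include <vector>

// Mirrors the steps above: push callee and receiver, push the arguments,
// push a copy of the callee for the resolver, then patch the original
// callee slot with the resolved function before dispatching.
std::vector<std::string> EvalCallStack(int arg_count) {
  std::vector<std::string> s{"callee", "receiver"};
  for (int i = 0; i < arg_count; ++i) s.push_back("arg" + std::to_string(i));
  s.push_back("callee copy");  // consumed by ResolvePossiblyDirectEval
  s.pop_back();                // the resolver returns the target in eax
  s[0] = "resolved function";  // touch up the stack with the result
  return s;
}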
@@ -3076,8 +2905,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3090,8 +2919,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ mov(result_register(),
+ FieldOperand(result_register(), HeapObject::kMapOffset));
+ __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3104,20 +2938,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into ecx.
+ // Load new target into edx.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(ecx, result_register());
+ __ mov(edx, result_register());
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(ebx);
- __ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3148,7 +2977,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3162,7 +2991,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -3206,9 +3035,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
+ Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
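
%_IsFunction above switches from an equality test against JS_FUNCTION_TYPE to a range test, presumably so bound functions answer true as well; this relies on the function types sitting at the top of the instance-type enumeration. A sketch with invented constants:

// Hypothetical instance-type values; in V8 they live in the InstanceType
// enum, with all function types ordered at the top of the range.
constexpr int FIRST_FUNCTION_TYPE = 0xFE;
constexpr int JS_BOUND_FUNCTION_TYPE = 0xFE;
constexpr int JS_FUNCTION_TYPE = 0xFF;

bool IsFunctionOld(int t) { return t == JS_FUNCTION_TYPE; }     // equal
bool IsFunctionNew(int t) { return t >= FIRST_FUNCTION_TYPE; }  // above_equal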
@@ -3321,42 +3150,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- Register map = ebx;
- __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
- __ j(less, if_false);
- __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(less_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3433,52 +3227,40 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
- __ JumpIfSmi(eax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
- // Map is now in eax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(eax, &null, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
+ __ j(below, &null, Label::kNear);
+
+ // Return 'Function' for JSFunction objects.
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &function, Label::kNear);
// Check if the constructor in the map is a JS function.
__ GetMapConstructor(eax, eax, ebx);
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor);
+ __ j(not_equal, &non_function_constructor, Label::kNear);
// eax now contains the constructor function. Grab the
// instance class name from there.
__ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, isolate()->factory()->null_value());
+ __ jmp(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
__ mov(eax, isolate()->factory()->Function_string());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
__ mov(eax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
// All done.
__ bind(&done);
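
The reshuffled EmitClassOf is the same decision tree with the null case moved next to its test and every jump shortened to Label::kNear. The tree itself, as plain C++ (a model; the string results match the factory constants used above):

#include <string>

std::string ClassOf(bool is_smi, bool is_js_receiver, bool is_js_function,
                    bool map_constructor_is_js_function,
                    const std::string& instance_class_name) {
  if (is_smi || !is_js_receiver) return "null";  // non-objects
  if (is_js_function) return "Function";         // functions short-circuit
  if (!map_constructor_is_js_function) return "Object";
  return instance_class_name;  // from the constructor's SharedFunctionInfo
}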
@@ -3528,43 +3310,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = eax;
- Register result = eax;
- Register scratch = ecx;
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
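
The deleted EmitDateField was this file's last user of the date-cache stamp fast path: cached date fields stay valid only while the object's stamp matches the isolate-wide one, and fields past kFirstUncachedField always took the runtime path (omitted here). A self-contained sketch of that invalidation pattern, with invented types:

#include <cstdint>

struct DateCache { std::uint64_t stamp = 1; };  // bumped on invalidation
struct JSDate {
  std::uint64_t cache_stamp = 0;  // stamp the cached fields were filled at
  std::int64_t fields[8] = {};    // fields[0] is the raw time value
};

std::int64_t ReadField(const JSDate& d, const DateCache& c, int index,
                       std::int64_t (*slow)(const JSDate&, int)) {
  if (index == 0) return d.fields[0];                   // always valid
  if (d.cache_stamp != c.stamp) return slow(d, index);  // stale: recompute
  return d.fields[index];
}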
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3674,38 +3419,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(eax, &done_convert, Label::kNear);
__ Push(eax);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into eax and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3721,25 +3440,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ j(below_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ Push(eax);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3853,19 +3559,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(edx);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3873,6 +3566,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3886,101 +3580,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; ++i) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in edi. Move it in there.
- __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ebx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(eax, Immediate(0));
- __ jmp(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ mov(ebx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(ebx);
-
- __ mov(eax, ebx);
- __ lea(edx, Operand(edx, ebx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- __ push(Operand(edx, -1 * kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ebx);
- __ j(not_zero, &loop);
- }
-
- __ bind(&args_set_up);
-
- __ mov(edx, Operand(esp, eax, times_pointer_size, 1 * kPointerSize));
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0 * kPointerSize));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(ebx);
- __ pop(ecx);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4019,6 +3618,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(eax);
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
+ context()->Plug(eax);
+}
+
+
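
The new %_GetSuperConstructor intrinsic reads the [[Prototype]] of the active function, which V8 keeps in the prototype field of the object's map; no runtime call is needed. A toy object model of the two loads (not V8's real layout):

struct Map;
struct HeapObject { const Map* map; };        // HeapObject::kMapOffset
struct Map { const HeapObject* prototype; };  // Map::kPrototypeOffset

const HeapObject* GetSuperConstructor(const HeapObject* fn) {
  return fn->map->prototype;  // two field loads, as in the diff above
}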
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4145,6 +3755,11 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ j(overflow, &bailout);
__ shr(string_length, 1);
+
+ // Bailout for large object allocations.
+ __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
+ __ j(greater, &bailout);
+
// Live registers and stack values:
// string_length
// elements
@@ -4302,8 +3917,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -4316,7 +3930,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(eax);
@@ -4327,9 +3941,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as receiver.
__ push(Immediate(isolate()->factory()->undefined_value()));
- __ mov(eax, GlobalObjectOperand());
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(eax, ContextOperand(eax, expr->context_index()));
+ __ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -4337,10 +3949,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4407,8 +4020,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4417,9 +4029,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ push(GlobalObjectOperand());
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4431,7 +4044,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(eax);
}
} else {
@@ -4711,12 +4324,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4754,12 +4363,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4830,8 +4435,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, edx);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, edx);
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
@@ -4876,7 +4481,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4949,8 +4554,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ test(eax, eax);
- Split(not_zero, if_true, if_false, fall_through);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -4989,9 +4594,9 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ push(Immediate(Smi::FromInt(0)));
+ // code.
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
@@ -5058,8 +4663,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Immediate(SmiFromSlot(slot)));
}
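
EmitLoadStoreICSlot now applies unconditionally, since the FLAG_vector_stores checks are gone throughout this file. SmiFromSlot tags the slot index as a Smi; on 32-bit ports that amounts to a left shift by one with a clear tag bit, roughly:

// 32-bit Smi tagging sketch: a 31-bit payload with tag bit 0 clear.
constexpr int SmiFromSlotIndex(int slot_index) { return slot_index << 1; }
static_assert(SmiFromSlotIndex(3) == 6, "tagged value is twice the index");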
@@ -5106,8 +4711,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
- Assembler::set_target_address_at(call_target_address,
- unoptimized_code,
+ Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
+ call_target_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
diff --git a/chromium/v8/src/full-codegen/mips/OWNERS b/chromium/v8/src/full-codegen/mips/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/full-codegen/mips/OWNERS
+++ b/chromium/v8/src/full-codegen/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/full-codegen/mips/full-codegen-mips.cc b/chromium/v8/src/full-codegen/mips/full-codegen-mips.cc
index f38c01bbeaa..07e9fdfc947 100644
--- a/chromium/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/chromium/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -12,15 +12,14 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/mips/code-stubs-mips.h"
#include "src/mips/macro-assembler-mips.h"
@@ -102,6 +101,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o a1: the JS function object being called (i.e. ourselves)
+// o a3: the new target value
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -125,22 +125,13 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ lw(at, MemOperand(sp, receiver_offset));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ lw(a2, GlobalObjectOperand());
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sw(a2, MemOperand(sp, receiver_offset));
-
- __ bind(&ok);
+ __ lw(a2, MemOperand(sp, receiver_offset));
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, a2, a2);
+ __ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
+ Operand(FIRST_JS_RECEIVER_TYPE));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -149,8 +140,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -162,7 +152,7 @@ void FullCodeGenerator::Generate() {
__ Subu(t5, sp, Operand(locals_count * kPointerSize));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t5, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
@@ -201,15 +191,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(a3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(a1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(a3); // Restore new target.
+ }
}
function_in_register_a1 = false;
// Context is returned in v0. It replaces the context passed to us.
@@ -227,13 +228,13 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sw(a0, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), a0, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, a0, &done);
@@ -244,10 +245,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_a1| is correct.
+ // Registers holding this function and the new target are both trashed if
+ // we bail out here. But since that can happen only when the new target is
+ // unused and we allocate a context, |function_in_register| is still correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -258,41 +259,38 @@ void FullCodeGenerator::Generate() {
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, a1, a2, a3);
+ SetVar(this_function_var, a1, a0, a2);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, a3, a0, a2);
+ }
- // Get the frame pointer for the calling frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Possibly allocate the rest parameter array.
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, a1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ li(RestParamAccessDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ Addu(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(RestParamAccessDescriptor::rest_parameter_index(),
+ Operand(Smi::FromInt(rest_index)));
+ DCHECK(a1.is(RestParamAccessDescriptor::rest_parameter_index()));
function_in_register_a1 = false;
- Label non_construct_frame, done;
- __ Branch(&non_construct_frame, ne, a1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-
- __ lw(v0,
- MemOperand(a2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ Branch(&done);
-
- __ bind(&non_construct_frame);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, v0, a2, a3);
+ SetVar(rest_param, v0, a1, a2);
}
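// A hedged sketch of the address arithmetic fed to the stub above: the
// descriptor receives the formal parameter count, a pointer just past the
// last formal on the caller's side of the frame, and the index at which the
// rest parameter begins. The frame offsets below are illustrative, not the
// real MIPS frame layout.
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 4;                    // 32-bit MIPS word size
  const int kCallerSPOffset = 2 * kPointerSize;  // assumed fp -> caller-SP delta
  uintptr_t fp = 0x7ffe0000u;                    // pretend frame pointer
  int num_parameters = 3;
  uintptr_t parameter_pointer =
      fp + kCallerSPOffset + num_parameters * kPointerSize;  // as in the Addu above
  std::printf("parameter_pointer = %#lx\n",
              static_cast<unsigned long>(parameter_pointer));
  return 0;
}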
Variable* arguments = scope()->arguments();
@@ -326,7 +324,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -444,7 +442,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in v0.
__ push(v0);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -474,11 +472,9 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
masm_->mov(sp, fp);
- int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
masm_->Addu(sp, sp, Operand(sp_delta));
masm_->Jump(ra);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -686,8 +682,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ mov(at, zero_reg);
- Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
}
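// A sketch of the control-flow change above: instead of testing the IC
// result against zero, the ToBoolean IC is assumed to hand back one of two
// canonical singletons, so a single pointer comparison against the true
// value picks the branch. The singletons are stand-ins for heap roots.
#include <cstdio>

static const int kTrueValue = 1;   // stand-in for the true root value
static const int kFalseValue = 0;  // stand-in for the false root value

const int* ToBooleanIC(bool truthy) { return truthy ? &kTrueValue : &kFalseValue; }

int main() {
  const int* result = ToBooleanIC(true);
  if (result == &kTrueValue) {
    std::printf("fall through to if_true\n");  // the Split(eq, ...) case
  } else {
    std::printf("branch to if_false\n");
  }
  return 0;
}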
@@ -727,7 +723,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -833,7 +829,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ sw(at, ContextOperand(cp, variable->index()));
+ __ sw(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -855,10 +851,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
}
__ Push(a2, a0);
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -893,7 +887,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ sw(result_register(), ContextOperand(cp, variable->index()));
+ __ sw(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -914,7 +908,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -926,7 +921,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -934,7 +929,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1058,7 +1053,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
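// The comparison above relies on V8's instance-type layout, where all JS
// receiver types occupy the top of the range, so one comparison against
// FIRST_JS_RECEIVER_TYPE decides whether ToObject can be skipped. A sketch
// with illustrative enum values, not the real ones:
#include <cstdio>

enum InstanceType {
  HEAP_NUMBER_TYPE = 10,
  STRING_TYPE = 20,
  FIRST_JS_RECEIVER_TYPE = 100,  // JS_PROXY_TYPE and friends start here
  JS_OBJECT_TYPE = 110
};

bool NeedsToObject(InstanceType type) {
  return type < FIRST_JS_RECEIVER_TYPE;  // ge -> done_convert in the code above
}

int main() {
  std::printf("string: needs ToObject = %d\n", NeedsToObject(STRING_TYPE));
  std::printf("object: needs ToObject = %d\n", NeedsToObject(JS_OBJECT_TYPE));
  return 0;
}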
@@ -1069,9 +1064,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(a0, a1, a1);
- __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+ __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1088,7 +1082,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1121,7 +1115,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register v0. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(a1);
@@ -1129,20 +1122,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
- __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
- __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
__ Push(a1, a0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1169,16 +1155,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, t0, Operand(a2));
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(&update_each, eq, a2, Operand(zero_reg));
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(a1, a3); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(a3, result_register());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1194,6 +1175,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1236,35 +1219,36 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(v0);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1280,12 +1264,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ lw(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1305,11 +1289,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kNativeContextMapRootIndex);
__ Branch(&fast, eq, temp, Operand(t0));
- // Check that extension is NULL.
- __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ lw(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ Branch(&loop);
__ bind(&fast);
}
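// A minimal sketch of the chain walk performed above, assuming a singly
// linked context chain in which sloppy eval may install an extension
// object. A slot equal to the hole sentinel means "no extension"; anything
// else forces the slow path. Types and sentinel are stand-ins.
#include <cstdio>

struct Context {
  const void* extension;    // hole sentinel when no extension is present
  const Context* previous;  // next outer context
};

static const char kTheHole = 0;  // stand-in for the hole root value

// Returns true when the fast path is safe, false when we must go slow.
bool CheckExtensions(const Context* current) {
  for (; current != nullptr; current = current->previous) {
    if (current->extension != &kTheHole) return false;  // JumpIfNotRoot -> slow
  }
  return true;
}

int main() {
  Context native_ctx{&kTheHole, nullptr};
  Context function_ctx{&kTheHole, &native_ctx};
  std::printf("fast path ok: %d\n", CheckExtensions(&function_ctx));
  return 0;
}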
@@ -1330,23 +1314,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ lw(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that last extension is "the hole".
+ __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return a cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
@@ -1376,7 +1360,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ Branch(done);
@@ -1389,27 +1373,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1449,7 +1417,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1477,7 +1445,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
}
@@ -1487,49 +1455,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // t1 = materialized value (RegExp literal)
- // t0 = JS function, literals array
- // a3 = literal index
- // a2 = RegExp pattern
- // a1 = RegExp flags
- // a0 = RegExp literal clone
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ lw(t1, FieldMemOperand(t0, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, t1, Operand(at));
-
- // Create regexp literal using runtime function.
- // Result will be in v0.
- __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a2, Operand(expr->pattern()));
- __ li(a1, Operand(expr->flags()));
- __ Push(t0, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(t1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(t1, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(t1);
-
- __ bind(&allocated);
-
- // After this, registers are used as follows:
- // v0: Newly allocated regexp.
- // t1: Materialized regexp.
- // a2: temp.
- __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->pattern()));
+ __ li(a0, Operand(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(v0);
}
@@ -1556,13 +1487,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1602,12 +1532,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1629,7 +1555,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1640,7 +1566,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1667,7 +1595,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1695,9 +1623,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1710,7 +1640,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1723,13 +1653,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1739,7 +1669,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ lw(a0, MemOperand(sp));
__ push(a0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1753,8 +1683,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
-
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1768,13 +1696,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1798,27 +1725,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(v0); // array literal
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(a1, offset, result_register(), a2,
- kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ li(a3, Operand(Smi::FromInt(array_index)));
- __ mov(a0, result_register());
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
+ __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1829,7 +1747,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(); // literal index
__ Pop(v0);
result_saved = false;
}
@@ -1843,14 +1760,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(v0);
@@ -2135,9 +2051,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ li(a0, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2153,7 +2071,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ Branch(&l_try, eq, v0, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&l_try, ne, result_register(), Operand(at));
// result.value
__ pop(load_receiver); // result
@@ -2246,7 +2165,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
DCHECK(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2263,12 +2182,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ pop(a2);
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
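// The shape EmitCreateIteratorResult assembles: a fresh JSIteratorResult
// whose map comes from the native context and whose two fields are the
// popped value and a canonical boolean. A sketch with plain stand-ins:
#include <cstdio>

struct IteratorResult {
  const char* value;
  bool done;
};

IteratorResult CreateIterResultObject(const char* value, bool done) {
  return IteratorResult{value, done};  // map load and field stores elided
}

int main() {
  IteratorResult r = CreateIterResultObject("payload", false);
  std::printf("{ value: %s, done: %s }\n", r.value, r.done ? "true" : "false");
  return 0;
}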
@@ -2304,7 +2221,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2321,7 +2238,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2381,12 +2298,10 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
+ __ AddBranchOvf(v0, left, Operand(right), &stub_call);
break;
case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
+ __ SubBranchOvf(v0, left, Operand(right), &stub_call);
break;
case Token::MUL: {
__ SmiUntag(scratch1, right);
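// The AddBranchOvf/SubBranchOvf macros above fuse the arithmetic with the
// overflow branch. A sketch of the same fast path in portable C++, assuming
// the GCC/Clang checked-arithmetic builtin:
#include <cstdint>
#include <cstdio>

int32_t AddOrBailout(int32_t left, int32_t right, bool* bailout) {
  int32_t result = 0;
  // Sets *bailout when the 32-bit sum wraps; the codegen branches to the
  // generic binary-op stub (stub_call) in that case.
  *bailout = __builtin_add_overflow(left, right, &result);
  return result;
}

int main() {
  bool bailout = false;
  int32_t sum = AddOrBailout(INT32_MAX, 1, &bailout);
  std::printf("sum=%d bailout=%d\n", sum, bailout);
  return 0;
}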
@@ -2447,7 +2362,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(v0);
}
@@ -2462,19 +2377,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2484,7 +2399,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2501,7 +2416,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2521,7 +2436,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2569,7 +2484,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ mov(StoreDescriptor::NameRegister(), result_register());
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2594,37 +2509,16 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ lw(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(a0));
- __ mov(StoreGlobalViaContextDescriptor::ValueRegister(), result_register());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(a0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2635,12 +2529,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&assign, ne, a3, Operand(t0));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
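// A hedged sketch of the hole check that guards non-initializing stores to
// let/const bindings: the slot is pre-filled with a hole sentinel, and
// seeing it during an assignment means the binding is still in its temporal
// dead zone, so a ReferenceError is raised. Sentinel and error handling are
// stand-ins for the runtime's versions.
#include <cstdio>
#include <stdexcept>

static const char kTheHole = 0;       // stand-in for the hole root value
static const void* slot = &kTheHole;  // the binding's stack/context slot

void AssignToLet(const void* value) {
  if (slot == &kTheHole) {  // binding not yet initialized
    throw std::runtime_error("ReferenceError: assignment before declaration");
  }
  slot = value;  // EmitStoreToStackLocalOrContextSlot
}

int main() {
  try {
    AssignToLet("value");
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}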
@@ -2651,11 +2545,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&const_error, ne, a3, Operand(at));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2665,23 +2559,24 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&uninitialized_this, eq, a3, Operand(at));
__ li(a0, Operand(var->name()));
__ Push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ li(a1, Operand(var->name()));
__ li(a0, Operand(Smi::FromInt(language_mode())));
__ Push(v0, cp, a1, a0); // Value, context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2690,13 +2585,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2709,9 +2604,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2728,12 +2623,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2751,8 +2642,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(v0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2763,10 +2653,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(v0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2783,12 +2672,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2843,11 +2728,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2856,6 +2739,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ push(at);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2867,9 +2751,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ lw(at, MemOperand(sp, 0));
__ push(at);
__ sw(v0, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2899,7 +2784,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2907,7 +2792,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2931,7 +2816,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(at);
__ sw(v0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2959,7 +2844,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2967,11 +2852,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2979,9 +2864,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -3014,7 +2900,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(t3, t2, t1, t0);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -3035,7 +2921,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(callee->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(v0, v1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3063,88 +2949,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call the ResolvePossiblyDirectEval runtime entry
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- // Emit function call.
- EmitCall(expr);
- }
+ // Touch up the stack with the resolved function.
+ __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Record source position for debugger.
+ SetCallPosition(expr);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
}
@@ -3179,8 +3015,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3193,8 +3029,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ lw(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ lw(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3207,20 +3050,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into t0.
+ // Load new target into a3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(t0, result_register());
+ __ mov(a3, result_register());
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(a2);
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3251,7 +3089,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3267,7 +3105,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ Split(ge, a1, Operand(FIRST_JS_RECEIVER_TYPE),
if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3312,7 +3150,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a2);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
__ Branch(if_false);
context()->Plug(if_true, if_false);
@@ -3430,44 +3268,9 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(v0, if_false);
- Register map = a1;
- Register type_reg = a2;
- __ GetObjectType(v0, map, type_reg);
- __ Subu(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne,
- a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
- if_true, if_false, fall_through);
+ Split(eq, a1, Operand(JS_PROXY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3539,26 +3342,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(v0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
- __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&null, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
// Check if the constructor in the map is a JS function.
Register instance_type = a2;
@@ -3635,45 +3426,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = v0;
- Register result = v0;
- Register scratch0 = t5;
- Register scratch1 = a1;
-
- if (index->value() == 0) {
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch1, Operand(stamp));
- __ lw(scratch1, MemOperand(scratch1));
- __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch1, Operand(scratch0));
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ li(a1, Operand(index));
- __ Move(a0, object);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3788,40 +3540,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(v0, &done_convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into a0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3836,26 +3560,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
__ bind(&convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3971,20 +3681,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(a1);
- __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3992,6 +3688,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ lw(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -4005,110 +3702,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(v0, &runtime);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
-
- // InvokeFunction requires the function in a1. Move it in there.
- __ mov(a1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(v0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into a3.
- __ lw(a3, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(t0, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, t0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(a0, zero_reg);
- __ Branch(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(a1, a1);
-
- __ mov(a0, a1);
-
- // Get arguments pointer in a2.
- __ sll(at, a1, kPointerSizeLog2);
- __ addu(a2, a2, at);
- __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ lw(t0, MemOperand(a2));
- __ Push(t0);
- __ Addu(a1, a1, Operand(-1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
- }
-
- __ bind(&args_set_up);
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(at, at, Operand(sp));
- __ lw(a1, MemOperand(at, 0));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, result_register());
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ mov(a0, result_register());
- __ pop(a1);
- __ pop(a2);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4144,6 +3737,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(v0);
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lw(v0, FieldMemOperand(v0, Map::kPrototypeOffset));
+ context()->Plug(v0);
+}
+
+
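
The new EmitGetSuperConstructor is just two dependent loads: follow the function's map, then the map's prototype, which for a class constructor is its parent constructor. The same computation in plain C++ (object layout assumed for illustration):

    struct Map;
    struct HeapObject {
      Map* map;  // every heap object points at its map
    };
    struct Map {
      HeapObject* prototype;
    };

    // lw v0, [v0 + kMapOffset]; lw v0, [v0 + kPrototypeOffset]
    HeapObject* GetSuperConstructor(HeapObject* constructor) {
      return constructor->map->prototype;
    }
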
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4221,8 +3825,7 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
+ __ AddBranchOvf(string_length, string_length, Operand(scratch1), &bailout);
__ Branch(&loop, lt, element, Operand(elements_end));
// If array_length is 1, return elements[0], a string.
@@ -4255,10 +3858,13 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ And(scratch3, scratch2, Operand(0x80000000));
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
+ __ AddBranchOvf(string_length, string_length, Operand(scratch2), &bailout);
__ SmiUntag(string_length);
+ // Bailout for large object allocations.
+ __ Branch(&bailout, gt, string_length,
+ Operand(Page::kMaxRegularHeapObjectSize));
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ Addu(element,
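
Two things change in this string-join fast path: the separate AdduAndCheckForOverflow/BranchOnOverflow pair is fused into AddBranchOvf (freeing scratch3), and lengths above the regular-page limit now bail out instead of attempting an inline allocation. A portable sketch of both checks (assumes GCC/Clang's __builtin_add_overflow; the size limit is an illustrative stand-in):

    #include <cstdint>

    constexpr int32_t kMaxRegularHeapObjectSize = 507136;  // stand-in value

    // Returns false on the bailout path: either the addition overflowed or
    // the accumulated length is too large for a regular-page allocation.
    bool AccumulateStringLength(int32_t* total, int32_t piece) {
      int32_t sum;
      if (__builtin_add_overflow(*total, piece, &sum)) return false;
      if (sum > kMaxRegularHeapObjectSize) return false;
      *total = sum;
      return true;
    }
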
@@ -4396,9 +4002,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -4410,7 +4014,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(v0);
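
LoadNativeContextSlot folds the three dependent loads (context, global object, native context, slot) into one named helper, which both shortens the emitted sequence and makes call sites self-describing. A stand-in C++ analogue (types and layout assumed):

    #include <vector>

    struct NativeContext { std::vector<void*> slots; };
    struct GlobalObject  { NativeContext* native_context; };
    struct Context       { GlobalObject* global_object; };

    // Replaces: ld a1,[cp+GLOBAL_OBJECT]; ld a1,[a1+kNativeContext];
    //           ld a1,[a1+slot] -- one helper hides the whole chain.
    void* LoadNativeContextSlot(Context* cp, int index) {
      return cp->global_object->native_context->slots[index];
    }
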
@@ -4422,9 +4026,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ push(v0);
- __ lw(v0, GlobalObjectOperand());
- __ lw(v0, FieldMemOperand(v0, GlobalObject::kNativeContextOffset));
- __ lw(v0, ContextOperand(v0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -4432,10 +4034,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
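
EmitCallJSRuntimeFunction now goes through the generic Call builtin, parameterized by a ConvertReceiverMode and with the argument count passed in a0, instead of a per-arity CallFunctionStub. The mode tells the callee what receiver fix-up, if any, is still required; a sketch of that dispatch (behavior illustrative):

    #include <cstdio>

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    void CallBuiltin(ConvertReceiverMode mode, int arg_count) {
      switch (mode) {
        case ConvertReceiverMode::kNullOrUndefined:
          // Receiver known to be null/undefined: substitute the global
          // proxy without testing it first.
          std::printf("argc=%d, receiver := global proxy\n", arg_count);
          break;
        case ConvertReceiverMode::kNotNullOrUndefined:
          std::printf("argc=%d, receiver used as-is\n", arg_count);
          break;
        case ConvertReceiverMode::kAny:
          std::printf("argc=%d, receiver checked at runtime\n", arg_count);
          break;
      }
    }
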
@@ -4504,8 +4107,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4514,10 +4116,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ lw(a2, GlobalObjectOperand());
+ __ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4529,7 +4131,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(v0);
}
} else {
@@ -4721,10 +4323,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
Register scratch1 = a1;
- Register scratch2 = t0;
__ li(scratch1, Operand(Smi::FromInt(count_value)));
- __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
- __ BranchOnNoOverflow(&done, scratch2);
+ __ AddBranchNoOvf(v0, v0, Operand(scratch1), &done);
// Call stub. Undo operation first.
__ Move(v0, a0);
__ jmp(&stub_call);
@@ -4805,12 +4405,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4849,12 +4445,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4925,9 +4517,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(v0, if_false);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(if_false, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1,
@@ -4972,7 +4564,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
@@ -5043,7 +4635,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ __ LoadRoot(a1, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5072,7 +4665,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ lw(dst, ContextOperand(cp, context_index));
+ __ lw(dst, ContextMemOperand(cp, context_index));
}
@@ -5082,14 +4675,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ li(at, Operand(Smi::FromInt(0)));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, at);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ lw(at, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -5156,8 +4748,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
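
With vector stores now unconditional, EmitLoadStoreICSlot takes the unified FeedbackVectorSlot type and simply asserts validity; all the FLAG_vector_stores branches elsewhere in this file collapse to the vector path. Roughly, the slot is a small index into a per-function feedback table (layout assumed for illustration):

    #include <cstdint>
    #include <vector>

    struct FeedbackVectorSlot {
      int id = -1;
      bool IsInvalid() const { return id < 0; }
    };

    struct FeedbackVector {
      std::vector<uint32_t> entries;
      // The store IC reads/writes its type feedback through this slot.
      uint32_t& at(FeedbackVectorSlot slot) { return entries.at(slot.id); }
    };
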
@@ -5172,7 +4764,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
Address branch_address = pc - 6 * kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 1);
switch (target_state) {
case INTERRUPT:
@@ -5200,7 +4793,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_immediate_load_address = pc - 4 * kInstrSize;
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_immediate_load_address,
+ Assembler::set_target_address_at(isolate, pc_immediate_load_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
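
CodePatcher and Assembler::set_target_address_at now take the Isolate explicitly rather than re-deriving it internally, making the dependency visible at every patch site. A sketch of the signature shape (stand-in types, not V8's API):

    struct Isolate {};

    class CodePatcher {
     public:
      // The caller supplies the isolate once, at construction, instead of
      // the patcher looking it up from ambient state on every call.
      CodePatcher(Isolate* isolate, unsigned char* address, int instructions)
          : isolate_(isolate), address_(address), count_(instructions) {}

     private:
      Isolate* isolate_;
      unsigned char* address_;
      int count_;
    };
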
diff --git a/chromium/v8/src/full-codegen/mips64/OWNERS b/chromium/v8/src/full-codegen/mips64/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/full-codegen/mips64/OWNERS
+++ b/chromium/v8/src/full-codegen/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/chromium/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index dcdff515ef5..44dd791a59b 100644
--- a/chromium/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/chromium/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -12,15 +12,14 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
@@ -102,6 +101,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o a1: the JS function object being called (i.e. ourselves)
+// o a3: the new target value
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -125,29 +125,21 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ ld(at, MemOperand(sp, receiver_offset));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&ok, ne, a2, Operand(at));
-
- __ ld(a2, GlobalObjectOperand());
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ sd(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ __ ld(a2, MemOperand(sp, receiver_offset));
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, a2, a2);
+ __ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
+ Operand(FIRST_JS_RECEIVER_TYPE));
}
+
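
The prologue no longer rewrites an undefined receiver to the global proxy; that conversion now happens in the Call builtins before entry. What remains here is a --debug-code-only check that the receiver is already a JSReceiver. The equivalent assertion in C++ form (enum values illustrative):

    #include <cassert>

    enum InstanceType { SMI_TYPE, STRING_TYPE, FIRST_JS_RECEIVER_TYPE };

    void CheckReceiver(InstanceType receiver_type, bool debug_code) {
      if (!debug_code) return;  // compiled in only under --debug-code
      // Analogue of AssertNotSmi + Check(ge, FIRST_JS_RECEIVER_TYPE).
      assert(receiver_type >= FIRST_JS_RECEIVER_TYPE &&
             "sloppy function expects JSReceiver receiver");
    }
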
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -159,7 +151,7 @@ void FullCodeGenerator::Generate() {
__ Dsubu(t1, sp, Operand(locals_count * kPointerSize));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t1, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
@@ -198,15 +190,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(a3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(a1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(a3); // Restore new target.
+ }
}
function_in_register_a1 = false;
// Context is returned in v0. It replaces the context passed to us.
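
Because a3 now carries new.target into the function, the context-allocation path saves and restores it around FastNewContextStub, but only when the function actually references new.target. The save/restore discipline, sketched abstractly:

    #include <stack>

    struct MachineState {
      int a3 = 0;             // new.target register (illustrative)
      std::stack<int> stack;  // machine stack
    };

    void AllocateContext(MachineState* m, bool uses_new_target) {
      if (uses_new_target) m->stack.push(m->a3);  // push a3: preserve
      m->a3 = -1;  // the stub may clobber a3 while allocating
      if (uses_new_target) {
        m->a3 = m->stack.top();  // pop a3: restore
        m->stack.pop();
      }
    }
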
@@ -224,13 +227,13 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sd(a0, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), a0, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, a0, &done);
@@ -241,10 +244,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_a1| is correct.
+  // The registers holding this function and the new target are both trashed
+  // in case we bail out here. But since that can happen only when new target
+  // is not used and we allocate a context, the value of |function_in_register|
+  // is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -255,40 +258,36 @@ void FullCodeGenerator::Generate() {
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     // The write barrier clobbers the register again; keep it marked as such.
}
- SetVar(this_function_var, a1, a2, a3);
+ SetVar(this_function_var, a1, a0, a2);
}
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- // Get the frame pointer for the calling frame.
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, a1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
- function_in_register_a1 = false;
+ SetVar(new_target_var, a3, a0, a2);
+ }
- Label non_construct_frame, done;
- __ Branch(&non_construct_frame, ne, a1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  // Possibly allocate rest parameters.
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- __ ld(v0,
- MemOperand(a2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ Branch(&done);
+ __ li(RestParamAccessDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ Daddu(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(RestParamAccessDescriptor::rest_parameter_index(),
+ Operand(Smi::FromInt(rest_index)));
+ function_in_register_a1 = false;
- __ bind(&non_construct_frame);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, v0, a2, a3);
+ SetVar(rest_param, v0, a1, a2);
}
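
new.target now arrives directly in a3 (SetVar(new_target_var, a3, ...)) instead of being fished out of the caller's frame, and rest parameters are materialized by RestParamAccessStub given the formal-parameter count, the parameter pointer, and the rest index. What that stub computes, in plain terms (semantics assumed for illustration):

    #include <vector>

    // rest_index == number of declared formals before `...rest`.
    std::vector<int> MaterializeRest(const std::vector<int>& actual_args,
                                     int rest_index) {
      if (rest_index >= static_cast<int>(actual_args.size())) return {};
      return std::vector<int>(actual_args.begin() + rest_index,
                              actual_args.end());
    }
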
Variable* arguments = scope()->arguments();
@@ -322,7 +321,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -442,7 +441,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in v0.
__ push(v0);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -472,11 +471,9 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
masm_->mov(sp, fp);
- int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
masm_->Daddu(sp, sp, Operand(sp_delta));
masm_->Jump(ra);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -684,8 +681,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ mov(at, zero_reg);
- Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
}
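
The ToBoolean IC now returns one of the canonical true/false heap values, so DoTest branches on identity with the `true` root instead of testing for a nonzero result. As a predicate (stand-in for the root singleton):

    struct HeapValue {};
    HeapValue kTrueValue;  // canonical root singleton (stand-in)

    // Before: branch if result != 0. After: branch if result == true root.
    bool TestResult(const HeapValue* ic_result) {
      return ic_result == &kTrueValue;
    }
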
@@ -725,7 +722,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -831,7 +828,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ sd(at, ContextOperand(cp, variable->index()));
+ __ sd(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -853,10 +850,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
}
__ Push(a2, a0);
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
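
kDeclareReadOnlyLookupSlot and kDeclareLookupSlot merge into one runtime function; the distinction now travels as a pushed PropertyAttributes argument derived from the declaration. A minimal sketch of the consolidated entry point (values illustrative):

    #include <cstdio>

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1 };

    void DeclareLookupSlot(const char* name, PropertyAttributes attrs) {
      std::printf("declare %s as %s\n", name,
                  (attrs & READ_ONLY) ? "read-only" : "writable");
    }

    int main() {
      DeclareLookupSlot("x", NONE);       // was Runtime::kDeclareLookupSlot
      DeclareLookupSlot("k", READ_ONLY);  // was kDeclareReadOnlyLookupSlot
    }
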
@@ -891,7 +886,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ sd(result_register(), ContextOperand(cp, variable->index()));
+ __ sd(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -912,7 +907,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -924,7 +920,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -932,7 +928,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1056,7 +1052,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1067,9 +1063,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(a0, a1, a1);
- __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+ __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1086,7 +1081,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1119,7 +1114,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register v0. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(a1);
@@ -1127,20 +1121,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
- __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ld(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
- __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
__ Push(a1, a0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1167,16 +1154,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, a4, Operand(a2));
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(&update_each, eq, a2, Operand(zero_reg));
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(a1, a3); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(a3, result_register());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1192,6 +1174,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+  // Both Crankshaft and TurboFan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
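
With proxies diverted to the runtime up front, the per-iteration proxy special case disappears; what remains is the ForInFilter step, which revalidates each cached key against the receiver and skips keys deleted mid-iteration. Its contract, roughly (types illustrative):

    #include <optional>
    #include <set>
    #include <string>

    std::optional<std::string> ForInFilter(
        const std::set<std::string>& current_properties,
        const std::string& key) {
      if (current_properties.count(key)) return key;  // still present: keep
      return std::nullopt;  // removed while iterating: caller skips it
    }
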
@@ -1234,35 +1218,36 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(v0);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
__ li(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1278,12 +1263,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ld(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ ld(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ ld(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1303,11 +1288,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ LoadRoot(a4, Heap::kNativeContextMapRootIndex);
__ Branch(&fast, eq, temp, Operand(a4));
- // Check that extension is NULL.
- __ ld(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ ld(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ ld(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ Branch(&loop);
__ bind(&fast);
}
@@ -1328,23 +1313,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ ld(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ld(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that last extension is "the hole".
+ __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
  // return a cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
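
The context-extension fast path changes its sentinel: an absent extension is now the-hole rather than NULL, so all three context walkers test the slot with JumpIfNotRoot against kTheHoleValueRootIndex. As a predicate (stand-in for the root value):

    struct HeapValue {};
    HeapValue kTheHoleValue;  // canonical "no extension" marker (stand-in)

    // Before: slot == nullptr meant "no extension object".
    // After:  slot == the-hole plays that role.
    bool HasNoExtension(const HeapValue* extension_slot) {
      return extension_slot == &kTheHoleValue;
    }
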
@@ -1374,7 +1359,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ Branch(done);
@@ -1387,27 +1372,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ li(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1447,7 +1416,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1475,7 +1444,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
}
@@ -1485,49 +1454,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // a5 = materialized value (RegExp literal)
- // a4 = JS function, literals array
- // a3 = literal index
- // a2 = RegExp pattern
- // a1 = RegExp flags
- // a0 = RegExp literal clone
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ ld(a5, FieldMemOperand(a4, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a5, Operand(at));
-
- // Create regexp literal using runtime function.
- // Result will be in v0.
- __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a2, Operand(expr->pattern()));
- __ li(a1, Operand(expr->flags()));
- __ Push(a4, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(a5, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a5, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(a5);
-
- __ bind(&allocated);
-
- // After this, registers are used as follows:
- // v0: Newly allocated regexp.
- // a5: Materialized regexp.
- // a2: temp.
- __ CopyFields(v0, a5, a2.bit(), size / kPointerSize);
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->pattern()));
+ __ li(a0, Operand(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(v0);
}
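
The open-coded regexp literal sequence (check the literals slot, materialize via the runtime on a miss, allocate, CopyFields) collapses into a single FastCloneRegExpStub call taking the closure, literal index, pattern, and flags. Its caching behavior, modeled in C++ (semantics assumed for illustration):

    #include <map>
    #include <string>

    struct RegExpBoilerplate {
      std::string pattern;
      int flags;
    };

    RegExpBoilerplate CloneRegExp(std::map<int, RegExpBoilerplate>* literals,
                                  int literal_index,
                                  const std::string& pattern, int flags) {
      auto it = literals->find(literal_index);
      if (it == literals->end()) {
        // First evaluation: materialize and cache the boilerplate.
        it = literals->emplace(literal_index,
                               RegExpBoilerplate{pattern, flags}).first;
      }
      return it->second;  // each evaluation yields a fresh copy
    }
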
@@ -1554,13 +1486,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1600,12 +1531,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1627,7 +1554,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1638,7 +1565,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1665,7 +1594,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1693,9 +1622,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1708,7 +1639,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1721,13 +1652,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1737,7 +1668,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ ld(a0, MemOperand(sp));
__ push(a0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1751,8 +1682,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
-
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1766,13 +1695,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(a0, result_register());
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1796,27 +1724,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(v0); // array literal
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset));
- __ sd(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(a1, offset, result_register(), a2,
- kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ li(a3, Operand(Smi::FromInt(array_index)));
- __ mov(a0, result_register());
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
+ __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
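
Array-literal element initialization drops both the hand-written fast-elements store (with its explicit write barrier) and StoreArrayLiteralElementStub; every element now goes through the ordinary keyed store IC with the literal's feedback slot, so elements-kind transitions are tracked the same way as regular stores. The register contract, summarized as a sketch:

    // Inputs to the keyed store IC for element i of the literal (sketch):
    struct KeyedStoreArgs {
      int receiver;       // the array literal, reloaded from MemOperand(sp, 0)
      int name;           // Smi::FromInt(array_index), the element index
      int value;          // result_register(), the evaluated subexpression
      int feedback_slot;  // expr->LiteralFeedbackSlot(), for kind feedback
    };
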
@@ -1827,7 +1746,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
  // (inclusive) and these elements get appended to the array. Note that the
  // number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(); // literal index
__ Pop(v0);
result_saved = false;
}
@@ -1841,14 +1759,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(v0);
@@ -2131,9 +2048,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
__ mov(a1, a0);
__ sd(a1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ li(a0, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2149,7 +2068,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ Branch(&l_try, eq, v0, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&l_try, ne, result_register(), Operand(at));
// result.value
__ pop(load_receiver); // result
@@ -2244,7 +2164,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
DCHECK(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2261,12 +2181,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ pop(a2);
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -2302,7 +2220,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2321,7 +2239,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2446,7 +2364,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(v0);
}
@@ -2461,19 +2379,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2483,7 +2401,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2500,7 +2418,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2520,7 +2438,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2568,7 +2486,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), result_register());
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2593,37 +2511,16 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ ld(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(a0));
- __ mov(StoreGlobalViaContextDescriptor::ValueRegister(), result_register());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(a0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2634,12 +2531,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&assign, ne, a3, Operand(a4));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2650,11 +2547,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&const_error, ne, a3, Operand(at));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2664,11 +2561,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&uninitialized_this, eq, a3, Operand(at));
__ li(a0, Operand(var->name()));
__ Push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ li(a4, Operand(var->name()));
@@ -2678,13 +2576,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// jssp[16] : context.
// jssp[24] : value.
__ Push(v0, cp, a4, a3);
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ ld(a2, location);
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -2693,13 +2591,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2712,9 +2610,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
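
The INIT_LET/INIT_CONST/INIT_CONST_LEGACY tokens are gone; the parser emits a single Token::INIT, and EmitVariableAssignment now pairs it with var->mode() to pick the path. The classification, extracted as a sketch:

    enum class Token { ASSIGN, INIT };
    enum class VariableMode { VAR, LET, CONST, CONST_LEGACY };
    enum class InitKind { kPlainStore, kLetInit, kConstInit, kLegacyConstInit };

    InitKind Classify(VariableMode mode, Token op) {
      if (op != Token::INIT) return InitKind::kPlainStore;
      switch (mode) {
        case VariableMode::LET:          return InitKind::kLetInit;
        case VariableMode::CONST:        return InitKind::kConstInit;
        case VariableMode::CONST_LEGACY: return InitKind::kLegacyConstInit;
        default:                         return InitKind::kPlainStore;
      }
    }
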
@@ -2731,12 +2629,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2754,8 +2648,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(v0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2766,10 +2659,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(v0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2786,12 +2678,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2846,11 +2734,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2859,6 +2745,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ push(at);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2870,9 +2757,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ ld(at, MemOperand(sp, 0));
__ push(at);
__ sd(v0, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
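
A minimal self-contained sketch of the receiver-mode choice made in the hunk above, with an illustrative Callee struct standing in for the AST expression (only the ConvertReceiverMode names come from the diff):

    #include <cassert>

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    struct Callee { bool is_variable_proxy; };  // stand-in for the AST node

    // A bare identifier call such as f() pushes undefined as the receiver, so
    // the mode can promise "null or undefined"; a property call such as o.f()
    // pushes o, which is known to be neither.
    ConvertReceiverMode ModeFor(const Callee& callee) {
      return callee.is_variable_proxy ? ConvertReceiverMode::kNullOrUndefined
                                      : ConvertReceiverMode::kNotNullOrUndefined;
    }

    int main() {
      assert(ModeFor({true}) == ConvertReceiverMode::kNullOrUndefined);
      assert(ModeFor({false}) == ConvertReceiverMode::kNotNullOrUndefined);
    }
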
@@ -2902,7 +2790,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2910,7 +2798,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2934,7 +2822,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(at);
__ sd(v0, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2962,7 +2850,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2970,11 +2858,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2982,9 +2870,10 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -3016,7 +2905,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(a6, a5, a4, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -3037,7 +2926,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(callee->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(v0, v1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3065,88 +2954,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, v0);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- // Emit function call.
- EmitCall(expr);
- }
+ // Touch up the stack with the resolved function.
+ __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Record source position for debugger.
+ SetCallPosition(expr);
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
}
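
A toy model of the stack fixup EmitPossiblyEvalCall performs above: the callee sits arg_count slots below the top of the stack, a copy is handed to the resolver, and the resolved function is written back over the original slot before the call (the vector-backed stack and string values are illustrative only):

    #include <cassert>
    #include <string>
    #include <vector>

    using Value = std::string;

    Value ResolvePossiblyDirectEval(const Value& callee) {
      return callee + "#resolved";  // stands in for the runtime call
    }

    void TouchUpStack(std::vector<Value>& stack, int arg_count) {
      // The function was pushed before its arguments, so it lives arg_count
      // slots below the top of the stack.
      size_t callee_slot = stack.size() - 1 - arg_count;
      stack[callee_slot] = ResolvePossiblyDirectEval(stack[callee_slot]);
    }

    int main() {
      std::vector<Value> stack = {"eval", "arg0", "arg1"};
      TouchUpStack(stack, 2);
      assert(stack[0] == "eval#resolved");
    }
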
@@ -3181,8 +3020,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3195,8 +3034,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ ld(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ ld(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3209,20 +3055,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into a4.
+ // Load new target into a3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(a4, result_register());
+ __ mov(a3, result_register());
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
__ ld(a1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(a2);
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3253,7 +3094,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3269,7 +3110,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ Split(ge, a1, Operand(FIRST_JS_RECEIVER_TYPE),
if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
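
The single ge compare above is sound because, as the STATIC_ASSERT-backed renaming elsewhere in this diff implies, every JSReceiver instance type now sits at the top of the type enum. A sketch with hypothetical enum values:

    #include <cassert>

    enum InstanceType {
      STRING_TYPE,             // non-receiver types first (values illustrative)
      HEAP_NUMBER_TYPE,
      FIRST_JS_RECEIVER_TYPE,  // proxies and JS objects from here upwards
      JS_PROXY_TYPE = FIRST_JS_RECEIVER_TYPE,
      JS_OBJECT_TYPE,
      JS_FUNCTION_TYPE,
      LAST_TYPE = JS_FUNCTION_TYPE,  // receivers end the enum
    };

    bool IsJSReceiver(InstanceType t) {
      return t >= FIRST_JS_RECEIVER_TYPE;  // one branch replaces a range check
    }

    int main() {
      assert(!IsJSReceiver(STRING_TYPE));
      assert(IsJSReceiver(JS_PROXY_TYPE) && IsJSReceiver(JS_FUNCTION_TYPE));
    }
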
@@ -3314,7 +3155,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a2);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
__ Branch(if_false);
context()->Plug(if_true, if_false);
@@ -3432,44 +3273,9 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(v0, if_false);
- Register map = a1;
- Register type_reg = a2;
- __ GetObjectType(v0, map, type_reg);
- __ Subu(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne,
- a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
- if_true, if_false, fall_through);
+ Split(eq, a1, Operand(JS_PROXY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
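
With the proxy types collapsed to one instance type, the old Subu-plus-range compare in EmitIsJSProxy reduces to the single equality shown above. Sketch (enum values hypothetical):

    enum InstanceType { JS_PROXY_TYPE = 1, JS_OBJECT_TYPE, JS_FUNCTION_TYPE };

    // One equality replaces the FIRST/LAST_JS_PROXY_TYPE window test.
    inline bool IsJSProxy(InstanceType t) { return t == JS_PROXY_TYPE; }
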
@@ -3541,26 +3347,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(v0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
- __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&null, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
// Check if the constructor in the map is a JS function.
Register instance_type = a2;
@@ -3637,45 +3431,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = v0;
- Register result = v0;
- Register scratch0 = t1;
- Register scratch1 = a1;
-
- if (index->value() == 0) {
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch1, Operand(stamp));
- __ ld(scratch1, MemOperand(scratch1));
- __ ld(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch1, Operand(scratch0));
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ li(a1, Operand(index));
- __ Move(a0, object);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3791,40 +3546,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(v0, &done_convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into a0 and call the stub.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3839,26 +3566,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
__ bind(&convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(v0);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into a0 and convert it.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a0, result_register());
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3974,20 +3687,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(a1);
- __ mov(a0, result_register()); // StringAddStub requires args in a0, a1.
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3995,6 +3694,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -4008,110 +3708,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(v0, &runtime);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
-
- // InvokeFunction requires the function in a1. Move it in there.
- __ mov(a1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(v0);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into a3.
- __ ld(a3, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a4, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a4,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(a0, zero_reg);
- __ Branch(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(a1, a1);
-
- __ mov(a0, a1);
-
- // Get arguments pointer in a2.
- __ dsll(at, a1, kPointerSizeLog2);
- __ Daddu(a2, a2, Operand(at));
- __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ ld(a4, MemOperand(a2));
- __ Push(a4);
- __ Daddu(a1, a1, Operand(-1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
- }
-
- __ bind(&args_set_up);
- __ dsll(at, a0, kPointerSizeLog2);
- __ Daddu(at, at, Operand(sp));
- __ ld(a1, MemOperand(at, 0));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, result_register());
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ mov(a0, result_register());
- __ pop(a1);
- __ pop(a2);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4147,6 +3743,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(v0);
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ ld(v0, FieldMemOperand(v0, Map::kPrototypeOffset));
+ context()->Plug(v0);
+}
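
Both EmitSuperConstructorCall and the new EmitGetSuperConstructor fetch the super constructor with the same two dependent loads: the function's map, then the map's prototype. A hedged data-model sketch (the struct layout is illustrative, not V8's actual object layout):

    struct Map;
    struct HeapObject { Map* map; };        // load 1: HeapObject::kMapOffset
    struct Map { HeapObject* prototype; };  // load 2: Map::kPrototypeOffset

    HeapObject* GetSuperConstructor(HeapObject* active_function) {
      return active_function->map->prototype;
    }
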
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4262,6 +3869,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
+ // Bail out for large object allocations.
+ __ Branch(&bailout, gt, string_length,
+ Operand(Page::kMaxRegularHeapObjectSize));
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ Daddu(element,
@@ -4399,9 +4010,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
- __ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
__ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -4413,7 +4022,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(v0);
@@ -4425,9 +4034,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ push(v0);
- __ ld(v0, GlobalObjectOperand());
- __ ld(v0, FieldMemOperand(v0, GlobalObject::kNativeContextOffset));
- __ ld(v0, ContextOperand(v0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -4435,10 +4042,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ li(a0, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4506,8 +4114,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4516,10 +4123,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ ld(a2, GlobalObjectOperand());
+ __ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4531,7 +4138,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(v0);
}
} else {
@@ -4807,12 +4414,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4851,12 +4454,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4927,9 +4526,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(v0, if_false);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(if_false, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1,
@@ -4974,7 +4573,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -5045,7 +4644,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ __ LoadRoot(a1, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5076,7 +4676,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ld(dst, ContextOperand(cp, context_index));
+ __ ld(dst, ContextMemOperand(cp, context_index));
}
@@ -5086,14 +4686,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ li(at, Operand(Smi::FromInt(0)));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, at);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ ld(at, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -5158,8 +4757,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
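
EmitLoadStoreICSlot materializes the slot as a smi via SmiFromSlot. As a rough sketch of smi tagging on a 64-bit target, the integer is shifted into the upper word so the low tag bits stay zero; the exact shift is an assumption here, since V8's smi layout varies with the build configuration:

    #include <cassert>
    #include <cstdint>

    int64_t SmiFromInt(int32_t n) { return static_cast<int64_t>(n) << 32; }
    int32_t SmiToInt(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

    int main() { assert(SmiToInt(SmiFromInt(7)) == 7); }
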
@@ -5174,7 +4773,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
Address branch_address = pc - 8 * kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 1);
switch (target_state) {
case INTERRUPT:
@@ -5206,7 +4806,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_immediate_load_address = pc - 6 * kInstrSize;
// Replace the stack check address in the load-immediate (6-instr sequence)
// with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_immediate_load_address,
+ Assembler::set_target_address_at(isolate, pc_immediate_load_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
diff --git a/chromium/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/chromium/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 03def66d5f7..d9c324c4245 100644
--- a/chromium/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/chromium/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -4,15 +4,14 @@
#if V8_TARGET_ARCH_PPC
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/ppc/code-stubs-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -90,6 +89,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o r4: the JS function object being called (i.e., ourselves)
+// o r6: the new target value
// o cp: our context
// o fp: our caller's frame pointer (aka r31)
// o sp: stack pointer
@@ -114,22 +114,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
- __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bne(&ok);
-
- __ LoadP(r5, GlobalObjectOperand());
- __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
-
- __ StoreP(r5, MemOperand(sp, receiver_offset), r0);
-
- __ bind(&ok);
+ __ AssertNotSmi(r5);
+ __ CompareObjectType(r5, r5, no_reg, FIRST_JS_RECEIVER_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -145,8 +135,7 @@ void FullCodeGenerator::Generate() {
__ addi(ip, ip, Operand(prologue_offset));
}
info->set_prologue_offset(prologue_offset);
- __ Prologue(info->IsCodePreAgingActive(), prologue_offset);
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
{
Comment cmnt(masm_, "[ Allocate locals");
@@ -160,7 +149,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
__ cmpl(ip, r5);
__ bc_short(ge, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -197,15 +186,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(r4);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(r4);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(r6); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r4);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(r6); // Restore new target.
+ }
}
function_in_register_r4 = false;
// Context is returned in r3. It replaces the context passed to us.
@@ -223,12 +223,12 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ LoadP(r3, MemOperand(fp, parameter_offset), r0);
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ StoreP(r3, target, r0);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+ __ RecordWriteContextSlot(cp, target.offset(), r3, r5,
kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
@@ -240,10 +240,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_r4| is correct.
+ // The registers holding this function and the new target are both trashed
+ // in case we bail out here. But since that can happen only when the new
+ // target is not used and we allocate a context, the value of
+ // |function_in_register_r4| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -257,37 +257,34 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, r4, r3, r5);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, r6, r3, r5);
+ }
- // Get the frame pointer for the calling frame.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Possibly allocate rest parameters.
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- // Skip the arguments adaptor frame if it exists.
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- Label skip;
- __ bne(&skip);
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ bind(&skip);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- // Check the marker in the calling frame.
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
- Label non_construct_frame, done;
+ __ LoadSmiLiteral(RestParamAccessDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ addi(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ LoadSmiLiteral(RestParamAccessDescriptor::rest_parameter_index(),
+ Smi::FromInt(rest_index));
function_in_register_r4 = false;
- __ bne(&non_construct_frame);
- __ LoadP(r3, MemOperand(
- r5, ConstructFrameConstants::kOriginalConstructorOffset));
- __ b(&done);
-
- __ bind(&non_construct_frame);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, r3, r5, r6);
+ SetVar(rest_param, r3, r4, r5);
}
Variable* arguments = scope()->arguments();
@@ -321,7 +318,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -440,7 +437,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r3
__ push(r3);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -467,9 +464,8 @@ void FullCodeGenerator::EmitReturnSequence() {
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
__ blr();
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
@@ -661,8 +657,8 @@ void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ cmpi(result_register(), Operand::Zero());
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
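
The DoTest rewrite above trades a zero test for a comparison against the canonical true value, i.e. the ToBoolean IC is now expected to return one of the two boolean oddballs. A pointer-identity sketch (the singletons stand in for V8's root objects):

    struct Oddball {};
    static Oddball kTrueValue, kFalseValue;

    // Mirrors CompareRoot(result, kTrueValueRootIndex) + Split(eq, ...).
    bool TestResult(const Oddball* to_boolean_result) {
      return to_boolean_result == &kTrueValue;
    }

    int main() {
      return TestResult(&kTrueValue) && !TestResult(&kFalseValue) ? 0 : 1;
    }
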
@@ -698,7 +694,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -798,7 +794,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ StoreP(ip, ContextOperand(cp, variable->index()), r0);
+ __ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -815,15 +811,12 @@ void FullCodeGenerator::VisitVariableDeclaration(
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ Push(r5, r3);
} else {
__ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
- __ Push(r5, r3);
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(r5, r3);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
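
The declaration hunks above fold the old kDeclareReadOnlyLookupSlot/kDeclareLookupSlot split into one runtime call that takes explicit property attributes. A hedged guess at what that extra smi argument encodes; the bit values are assumptions, not V8's actual constants:

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1 << 0 };

    // Immutable (const-like) bindings would carry READ_ONLY, others NONE.
    PropertyAttributes DeclarationAttributes(bool is_immutable_mode) {
      return is_immutable_mode ? READ_ONLY : NONE;
    }
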
@@ -858,7 +851,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ StoreP(result_register(), ContextOperand(cp, variable->index()), r0);
+ __ StoreP(result_register(), ContextMemOperand(cp, variable->index()),
+ r0);
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp, offset, result_register(), r5,
@@ -874,7 +868,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(r5);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -886,7 +881,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r4, Operand(pairs));
__ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
__ Push(r4, r3);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -894,7 +889,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1020,7 +1015,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r3, &convert);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
__ bge(&done_convert);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1031,9 +1026,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
- __ ble(&call_runtime);
+ __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
+ __ beq(&call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1050,7 +1044,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r3); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1087,7 +1081,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ b(&exit);
// We got a fixed array in register r3. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(r4);
@@ -1095,21 +1088,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ StoreP(
r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
-
- __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi indicates slow check
- __ LoadP(r5, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r5, r6, r6, LAST_JS_PROXY_TYPE);
- __ bgt(&non_proxy);
- __ LoadSmiLiteral(r4, Smi::FromInt(0)); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ LoadSmiLiteral(r3, Smi::FromInt(0));
__ Push(r4, r3); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1137,16 +1122,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r7, r5);
__ beq(&update_each);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ CmpSmiLiteral(r5, Smi::FromInt(0), r0);
- __ beq(&update_each);
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(r4, r6); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mr(r6, r3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -1164,6 +1144,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1203,35 +1185,36 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(r3);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), r3);
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
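
A pattern repeated throughout this diff: every FLAG_vector_stores branch collapses into an unconditional load-slot-then-call-IC sequence, and an invalid slot becomes a hard error. A small simulation of the surviving path (the emitted strings are illustrative, not real assembly):

    #include <cassert>
    #include <string>
    #include <vector>

    struct FeedbackVectorSlot {
      int index;
      bool IsInvalid() const { return index < 0; }
    };

    std::vector<std::string> emitted;

    void EmitStoreWithSlot(FeedbackVectorSlot slot) {
      assert(!slot.IsInvalid());  // mirrors the tightened DCHECK
      emitted.push_back("li slot_reg, Smi(" + std::to_string(slot.index) + ")");
      emitted.push_back("CallStoreIC");
    }

    int main() {
      EmitStoreWithSlot({3});
      assert(emitted.size() == 2);
    }
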
@@ -1247,13 +1230,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ LoadP(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ LoadP(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1274,12 +1256,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ beq(&fast);
- // Check that extension is NULL.
- __ LoadP(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ LoadP(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
@@ -1300,25 +1281,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ LoadP(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that last extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return a cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
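
All three extension checks in this region switch the "no extension" sentinel from a NULL slot (a cmpi against zero) to the the-hole root tested with JumpIfNotRoot. A pointer-identity sketch of the new test (the singleton is illustrative):

    struct Object {};
    static Object the_hole;

    // A context whose extension slot still holds the hole has no extension.
    bool ContextHasExtension(const Object* extension_slot) {
      return extension_slot != &the_hole;  // previously: != nullptr
    }
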
@@ -1346,7 +1325,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else { // LET || CONST
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ b(done);
@@ -1359,26 +1338,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
- } else {
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1417,7 +1381,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// binding in harmony mode.
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1444,7 +1408,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r3);
}
@@ -1454,49 +1418,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r8 = materialized value (RegExp literal)
- // r7 = JS function, literals array
- // r6 = literal index
- // r5 = RegExp pattern
- // r4 = RegExp flags
- // r3 = RegExp literal clone
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ LoadP(r8, FieldMemOperand(r7, literal_offset), r0);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r8, ip);
- __ bne(&materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r3.
- __ LoadSmiLiteral(r6, Smi::FromInt(expr->literal_index()));
- __ mov(r5, Operand(expr->pattern()));
- __ mov(r4, Operand(expr->flags()));
- __ Push(r7, r6, r5, r4);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mr(r8, r3);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
- __ b(&allocated);
-
- __ bind(&runtime_allocate);
- __ LoadSmiLiteral(r3, Smi::FromInt(size));
- __ Push(r8, r3);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r8);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r3: Newly allocated regexp.
- // r8: Materialized regexp.
- // r5: temp.
- __ CopyFields(r3, r8, r5.bit(), size / kPointerSize);
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ mov(r4, Operand(expr->pattern()));
+ __ LoadSmiLiteral(r3, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(r3);
}
@@ -1523,14 +1450,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
__ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
__ mov(r4, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ LoadSmiLiteral(r3, Smi::FromInt(flags));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(r6, r5, r4, r3);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1569,12 +1495,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r3));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1596,7 +1518,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
__ push(r3);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1607,7 +1529,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r3);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1633,7 +1557,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1661,9 +1585,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1676,7 +1602,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1689,13 +1615,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ mov(r3, Operand(Smi::FromInt(NONE)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r3, Operand(Smi::FromInt(NONE)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1705,7 +1631,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ LoadP(r3, MemOperand(sp));
__ push(r3);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1719,7 +1645,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1734,13 +1659,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
__ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
__ mov(r4, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ LoadSmiLiteral(r3, Smi::FromInt(expr->ComputeFlags()));
__ Push(r6, r5, r4, r3);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1763,25 +1687,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(r3);
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ LoadP(r8, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
- __ StoreP(result_register(), FieldMemOperand(r4, offset), r0);
- // Update the write barrier for the array store.
- __ RecordWriteField(r4, offset, result_register(), r5, kLRHasBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- __ LoadSmiLiteral(r6, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
+ Smi::FromInt(array_index));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1792,7 +1708,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(r3);
result_saved = false;
}
@@ -1806,14 +1721,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(r3);
@@ -2096,9 +2010,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mr(r4, r3);
__ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ li(r3, Operand(1));
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2113,8 +2029,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // r3=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ cmpi(r3, Operand::Zero());
- __ beq(&l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ bne(&l_try);
// result.value
__ pop(load_receiver); // result
@@ -2224,7 +2140,7 @@ void FullCodeGenerator::EmitGeneratorResume(
DCHECK(!result_register().is(r4));
__ Push(r4, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2241,12 +2157,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
__ pop(r5);
__ LoadRoot(r6,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
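
LoadNativeContextSlot, used here and in several later hunks, collapses the old three-load chain (global object, then its native context, then the slot). Assuming the native context became reachable directly from the current context, a minimal sketch with stand-in operand helpers:

    struct Register {};
    struct MemOperand {};
    Register cp;                                  // current context register
    MemOperand ContextMemOperand(Register, int);  // assumed helper
    void LoadP(Register, MemOperand);
    const int kNativeContextIndex = 0;            // stand-in index

    void LoadNativeContextSlot(int index, Register dst) {
      LoadP(dst, ContextMemOperand(cp, kNativeContextIndex));  // -> native ctx
      LoadP(dst, ContextMemOperand(dst, index));               // -> slot
    }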
@@ -2280,7 +2194,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2297,7 +2211,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2456,7 +2370,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read-only property, we special-case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(r3);
}
@@ -2471,19 +2385,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2493,7 +2407,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2509,7 +2423,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2529,7 +2443,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2577,7 +2491,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r3);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2602,34 +2516,15 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- const int slot = var->index();
- const int depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r3));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ push(r3);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2640,12 +2535,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bne(&assign);
__ mov(r6, Operand(var->name()));
__ push(r6);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2656,11 +2551,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bne(&const_error);
__ mov(r6, Operand(var->name()));
__ push(r6);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2670,24 +2565,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ beq(&uninitialized_this);
__ mov(r4, Operand(var->name()));
__ push(r4);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(r3); // Value.
__ mov(r4, Operand(var->name()));
__ mov(r3, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r4, r3); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r4);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
@@ -2695,15 +2591,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r3);
__ mov(r3, Operand(var->name()));
__ Push(cp, r3); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2716,9 +2611,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2734,12 +2629,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
@@ -2757,8 +2648,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(r3);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2769,10 +2659,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(r3);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2783,12 +2672,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
@@ -2842,11 +2727,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
-
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{
StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
@@ -2856,6 +2739,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// is a sloppy mode method.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ push(r0);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2867,9 +2751,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
__ LoadP(r0, MemOperand(sp, 0));
__ push(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize));
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2899,7 +2784,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2907,7 +2792,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2930,7 +2815,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
__ push(ip);
__ StoreP(r3, MemOperand(sp, kPointerSize));
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2958,7 +2843,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2966,11 +2851,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2978,8 +2863,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
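
CallICState::CallType (FUNCTION versus METHOD) gives way to ConvertReceiverMode, which tells the call machinery whether the receiver just pushed still needs sloppy-mode conversion. The choice the call sites above make, modeled as plain C++:

    enum class ConvertReceiverMode {
      kNullOrUndefined,     // pushed undefined: convert to the global proxy
      kNotNullOrUndefined,  // loaded from an object: no conversion needed
      kAny                  // statically unknown: decide at run time
    };

    ConvertReceiverMode ModeFor(bool callee_is_variable_proxy) {
      return callee_is_variable_proxy
                 ? ConvertReceiverMode::kNullOrUndefined
                 : ConvertReceiverMode::kNotNullOrUndefined;
    }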
@@ -3012,7 +2898,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(r7, r6, r5, r4);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -3032,7 +2918,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(r5));
__ mov(r5, Operand(callee->name()));
__ Push(context_register(), r5);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(r3, r4); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3060,91 +2946,40 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ push(r4);
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the stack with the resolved function.
- __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r4);
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Touch up the stack with the resolved function.
+ __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- // Record source position for debugger.
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r3);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ push(r4);
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ // Record source position for debugger.
+ SetCallPosition(expr);
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r3, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r3);
}
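
With CallFunctionStub retired, the resolved eval callee is invoked through the generic Call builtin: the argument count travels in r3, and the callee slot below the arguments is patched in place. The fix-up as a toy over an index-addressed stack; the real code writes through MemOperand(sp, (arg_count + 1) * kPointerSize):

    struct Object;
    void TouchUpCallee(Object** sp, int arg_count, Object* resolved) {
      sp[arg_count + 1] = resolved;  // replace the pre-resolution callee
    }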
@@ -3179,8 +3014,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(r5);
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3193,8 +3028,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3207,20 +3049,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into r7.
+ // Load new target into r6.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mr(r7, result_register());
+ __ mr(r6, result_register());
// Load function and argument count into r4 and r3.
__ mov(r3, Operand(arg_count));
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(r5);
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3251,7 +3088,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3265,7 +3102,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -3309,9 +3146,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r3, r4, r5, FIRST_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3434,44 +3271,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- Register map = r4;
- Register type_reg = r5;
- __ LoadP(map, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ subi(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ cmpli(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&check_frame_marker);
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
- STATIC_ASSERT(StackFrame::CONSTRUCT < 0x4000);
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+ __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3546,26 +3346,16 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(r3, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r3, r4, FIRST_JS_RECEIVER_TYPE);
// Map is now in r3.
__ blt(&null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ beq(&function);
- __ cmpi(r4, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_SPEC_OBJECT_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ cmpi(r4, Operand(JS_FUNCTION_TYPE));
__ beq(&function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
Register instance_type = r5;
@@ -3641,47 +3431,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = r3;
- Register result = r3;
- Register scratch0 = r11;
- Register scratch1 = r4;
-
- if (index->value() == 0) {
- __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ LoadP(scratch1, MemOperand(scratch1));
- __ LoadP(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ bne(&runtime);
- __ LoadP(result,
- FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()),
- scratch0);
- __ b(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ LoadSmiLiteral(r4, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3785,37 +3534,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(r3, &done_convert);
__ Push(r3);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(r3);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
- // Load the argument into r3 and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r3 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3830,24 +3554,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ ble(&done_convert);
__ bind(&convert);
__ Push(r3);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(r3);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
- // Load the argument into r3 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3949,19 +3661,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(r4);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3969,6 +3668,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to r4.
int const argc = args->length() - 2;
__ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3982,107 +3682,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r3, &runtime);
- __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
- __ bne(&runtime);
-
- // InvokeFunction requires the function in r4. Move it in there.
- __ mr(r4, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r4, count, CALL_FUNCTION, NullCallWrapper());
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ b(&done);
-
- __ bind(&runtime);
- __ push(r3);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target.
- VisitForStackValue(args->at(0));
-
- // Evaluate super constructor (to stack and r4).
- VisitForAccumulatorValue(args->at(1));
- __ push(result_register());
- __ mr(r4, result_register());
-
- // Load original constructor into r6.
- __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r7, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
-
- // default constructor has no arguments, so no adaptor frame means no args.
- __ li(r3, Operand::Zero());
- __ b(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r3);
-
- // Get arguments pointer in r5.
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ add(r5, r5, r0);
- __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
- Label loop;
- __ mtctr(r3);
- __ bind(&loop);
- // Pre-decrement in order to skip receiver.
- __ LoadPU(r7, MemOperand(r5, -kPointerSize));
- __ Push(r7);
- __ bdnz(&loop);
- }
-
- __ bind(&args_set_up);
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r3);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(r5, r4);
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -4119,6 +3718,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(r3);
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadP(r3, FieldMemOperand(r3, Map::kPrototypeOffset));
+ context()->Plug(r3);
+}
+
+
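
The new EmitGetSuperConstructor intrinsic is just two dependent loads: the super constructor is the [[Prototype]] of the active function, reached through its map. In toy form, with types that are not V8's:

    struct Map { void* prototype; };
    struct JSFunction { Map* map; };

    void* GetSuperConstructor(JSFunction* fn) {
      return fn->map->prototype;  // kMapOffset load, then kPrototypeOffset load
    }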
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
@@ -4252,6 +3862,10 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ BranchOnOverflow(&bailout);
__ SmiUntag(string_length);
+  // Bail out on large object allocations.
+ __ Cmpi(string_length, Operand(Page::kMaxRegularHeapObjectSize), r0);
+ __ bgt(&bailout);
+
// Get first element in the array to free up the elements register to be used
// for the result.
__ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4390,9 +4004,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
- __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
__ Pop(r5, r6);
__ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
__ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
@@ -4404,7 +4016,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ b(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(r3);
@@ -4416,9 +4028,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ push(r3);
- __ LoadP(r3, GlobalObjectOperand());
- __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ LoadP(r3, ContextOperand(r3, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), r3);
}
@@ -4426,10 +4036,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ CallStub(&stub);
+ __ mov(r3, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4498,8 +4109,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4508,10 +4118,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ LoadP(r5, GlobalObjectOperand());
+ __ LoadGlobalObject(r5);
__ mov(r4, Operand(var->name()));
__ Push(r5, r4);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r3);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4523,7 +4133,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(r5));
__ mov(r5, Operand(var->name()));
__ Push(context_register(), r5);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(r3);
}
} else {
@@ -4793,12 +4403,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4836,12 +4442,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4914,8 +4516,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(r3, if_false);
__ CompareRoot(r3, Heap::kNullValueRootIndex);
__ beq(if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r3, r4, FIRST_JS_RECEIVER_TYPE);
__ blt(if_false);
// Check for callable or undetectable objects => false.
__ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
@@ -4961,7 +4563,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -5032,8 +4634,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmpi(r3, Operand::Zero());
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -5058,7 +4660,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ LoadP(dst, ContextOperand(cp, context_index), r0);
+ __ LoadP(dst, ContextMemOperand(cp, context_index), r0);
}
@@ -5068,14 +4670,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ LoadP(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -5143,8 +4744,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
@@ -5158,7 +4759,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
Code* replacement_code) {
Address mov_address = Assembler::target_address_from_return_address(pc);
Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
- CodePatcher patcher(cmp_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, cmp_address, 1);
switch (target_state) {
case INTERRUPT: {
@@ -5191,7 +4793,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
// Replace the stack check address in the mov sequence with the
// entry address of the replacement code.
- Assembler::set_target_address_at(mov_address, unoptimized_code,
+ Assembler::set_target_address_at(isolate, mov_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
diff --git a/chromium/v8/src/full-codegen/x64/full-codegen-x64.cc b/chromium/v8/src/full-codegen/x64/full-codegen-x64.cc
index 0133c09d6e7..615eb67ba64 100644
--- a/chromium/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/chromium/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,15 +4,14 @@
#if V8_TARGET_ARCH_X64
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -83,6 +82,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o rdi: the JS function object being called (i.e. ourselves)
+// o rdx: the new target value
// o rsi: our context
// o rbp: our caller's frame pointer
// o rsp: stack pointer (pointing to return address)
@@ -105,24 +105,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
-
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &ok, Label::kNear);
-
- __ movp(rcx, GlobalObjectOperand());
- __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
-
- __ movp(args.GetReceiverOperand(), rcx);
-
- __ bind(&ok);
+ __ AssertNotSmi(rcx);
+ __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -131,8 +119,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -147,10 +134,10 @@ void FullCodeGenerator::Generate() {
__ subp(rcx, Immediate(locals_count * kPointerSize));
__ CompareRoot(rcx, Heap::kRealStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
const int kMaxPushes = 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
@@ -159,7 +146,7 @@ void FullCodeGenerator::Generate() {
__ bind(&loop_header);
// Do pushes.
for (int i = 0; i < kMaxPushes; i++) {
- __ Push(rdx);
+ __ Push(rax);
}
// Continue loop if not done.
__ decp(rcx);
@@ -168,7 +155,7 @@ void FullCodeGenerator::Generate() {
int remaining = locals_count % kMaxPushes;
// Emit the remaining pushes.
for (int i = 0; i < remaining; i++) {
- __ Push(rdx);
+ __ Push(rax);
}
}
}
@@ -184,15 +171,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+    // The new target value is not used, so clobbering it is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ Push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Push(rdx); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Pop(rdx); // Restore new target.
+ }
}
function_in_register = false;
// Context is returned in rax. It replaces the context passed to us.
@@ -227,10 +225,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register| is correct.
+  // Registers holding this function and the new target are both trashed in
+  // case we bail out here. But since that can happen only when the new
+  // target is not used and we allocate a context, the value of
+  // |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -241,38 +239,37 @@ void FullCodeGenerator::Generate() {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, rdi, rbx, rdx);
+ SetVar(this_function_var, rdi, rbx, rcx);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, rdx, rbx, rcx);
+ }
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- Label non_adaptor_frame;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &non_adaptor_frame);
- __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- __ bind(&non_adaptor_frame);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+  // Possibly allocate rest parameters.
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- Label non_construct_frame, done;
- __ j(not_equal, &non_construct_frame);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- // Construct frame
- __ movp(rax,
- Operand(rax, ConstructFrameConstants::kOriginalConstructorOffset));
- __ jmp(&done);
+ __ Move(RestParamAccessDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ leap(RestParamAccessDescriptor::parameter_pointer(),
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ Move(RestParamAccessDescriptor::rest_parameter_index(),
+ Smi::FromInt(rest_index));
+ function_in_register = false;
- // Non-construct frame
- __ bind(&non_construct_frame);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- __ bind(&done);
- SetVar(new_target_var, rax, rbx, rdx);
+ SetVar(rest_param, rax, rbx, rdx);
}
// Possibly allocate an arguments object.
@@ -307,7 +304,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -417,7 +414,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
__ Push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -439,14 +436,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&ok);
SetReturnPosition(literal());
- int no_frame_start = masm_->pc_offset();
__ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, rcx);
-
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -653,9 +647,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ testp(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -814,10 +807,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -870,7 +861,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
__ Push(variable->name());
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -881,7 +873,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -889,7 +881,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1014,7 +1006,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert, Label::kNear);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1025,9 +1017,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- __ j(below_equal, &call_runtime);
+ __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
+ __ j(equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1044,7 +1035,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1080,7 +1071,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
@@ -1088,21 +1078,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
- __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
- __ j(above, &non_proxy);
- __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
- __ bind(&non_proxy);
- __ Push(rbx); // Smi
+ __ Push(Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1129,17 +1112,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ Cmp(rdx, Smi::FromInt(0));
- __ j(equal, &update_each, Label::kNear);
-
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ Push(rcx); // Enumerable.
__ Push(rbx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, loop_statement.continue_label());
@@ -1155,6 +1133,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
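Three for-in changes are visible above. First, the receiver check uses the renamed FIRST_JS_RECEIVER_TYPE bound (formerly FIRST_SPEC_OBJECT_TYPE). Second, proxies collapse from a type range into the single JS_PROXY_TYPE, so the range check and the per-iteration Smi(0) proxy marker go away; the fixed-array path now always pushes Smi(1) for the slow check, and proxies simply take the runtime path:

    // Proxies are a single instance type now; an equality check suffices.
    __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
    __ j(equal, &call_runtime);

Third, the BodyId bailout point moves from loop setup to just before Visit(stmt->body()), since, per the new comment, both Crankshaft and TurboFan expect it there.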
@@ -1195,35 +1175,36 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(rax);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ Move(StoreDescriptor::NameRegister(),
isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ movp(StoreDescriptor::ReceiverRegister(), rax);
__ Move(StoreDescriptor::NameRegister(),
isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
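The signature change from FeedbackVectorICSlot to FeedbackVectorSlot runs through all of the Emit* helpers below: the dedicated IC-slot type is folded into the plain feedback slot type, and FLAG_vector_stores is gone, making vector-based store ICs unconditional. Every store site now follows the same shape:

    // old: if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
    // new: the slot is always loaded before the IC call.
    EmitLoadStoreICSlot(slot);
    CallStoreIC();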
@@ -1238,10 +1219,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
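The sentinel for "context has no extension object" changes from NULL to the hole value, and the cmpp/j pair is fused into a single JumpIfNotRoot. The same rewrite repeats later in this function and in ContextSlotOperandCheckExtensions below. Before and after:

    // old sentinel: NULL
    __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
    __ j(not_equal, slow);
    // new sentinel: the hole
    __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
                     Heap::kTheHoleValueRootIndex, slow);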
@@ -1268,9 +1248,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// Terminate at native context.
__ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
@@ -1292,19 +1272,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
__ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
}
- // Check that last extension is NULL.
- __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that last extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an rsi-based operand (the write barrier cannot be allowed to
@@ -1336,7 +1315,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
} else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1349,27 +1328,11 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Set(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ Move(LoadDescriptor::NameRegister(), var->name());
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadIC(typeof_mode);
- }
+ __ Move(LoadDescriptor::NameRegister(), var->name());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(proxy->VariableFeedbackSlot()));
+ CallLoadIC(typeof_mode);
}
@@ -1407,7 +1370,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1434,7 +1397,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1445,53 +1408,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // rdi = JS function.
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ movp(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ Push(rcx);
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->pattern());
- __ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movp(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ Push(rbx);
- __ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movp(rdx, FieldOperand(rbx, i));
- __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movp(FieldOperand(rax, i), rdx);
- __ movp(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movp(FieldOperand(rax, size - kPointerSize), rdx);
- }
+ __ Move(rax, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, expr->pattern());
+ __ Move(rdx, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(rax);
}
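Roughly forty lines of open-coded regexp materialization (probe the literals array, call Runtime::kMaterializeRegExpLiteral on a miss, then Allocate plus an unrolled field-copy loop for the clone) collapse into FastCloneRegExpStub. Its register interface, as used above: closure in rdi, literal index in rax, pattern in rcx, flags in rdx, result in rax:

    __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
    __ Move(rax, Smi::FromInt(expr->literal_index()));
    __ Move(rcx, expr->pattern());
    __ Move(rdx, Smi::FromInt(expr->flags()));
    FastCloneRegExpStub stub(isolate());
    __ CallStub(&stub);  // regexp literal (clone) returned in rax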
@@ -1518,15 +1440,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->ComputeFlags();
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
@@ -1567,12 +1487,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
__ Move(StoreDescriptor::NameRegister(), key->value());
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1591,7 +1507,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ Push(Smi::FromInt(SLOPPY)); // Language mode
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1600,7 +1516,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1625,7 +1543,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1652,9 +1570,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1666,7 +1586,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1678,12 +1598,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1692,7 +1612,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
__ Push(Operand(rsp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1706,7 +1626,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1719,15 +1638,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(expr->ComputeFlags()));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1752,30 +1669,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ Push(rax); // array literal
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_constant_fast_elements) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
- __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ movp(FieldOperand(rbx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(rbx, offset, result_register(), rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ Move(rcx, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
+ __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
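Array literal initialization stops special-casing fast elements. The raw elements write plus RecordWriteField (fast case) and StoreArrayLiteralElementStub (transitioning case) are both replaced by the generic keyed store IC fed from the literal's feedback slot, which also means the literal index no longer has to be kept on the stack. One path for every element kind:

    __ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
    __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
    EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
    CallIC(CodeFactory::KeyedStoreIC(isolate(), language_mode()).code());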
@@ -1786,7 +1689,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(rax);
result_saved = false;
}
@@ -1800,14 +1702,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -2088,9 +1989,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ Set(rax, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
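This is the first of several sites where CallFunctionStub gives way to the Call builtin: the argument count travels in rax, and the builtin is parameterized by a ConvertReceiverMode so the receiver-to-object conversion can be skipped when the receiver is statically known, here kNotNullOrUndefined because the receiver is the iterator object itself. The pattern:

    __ Set(rax, 1);  // argument count
    __ Call(
        isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
        RelocInfo::CODE_TARGET);

Sites with no static knowledge (the bare EmitCall(expr) calls below) presumably fall back to a conservative default mode.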
@@ -2105,8 +2008,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // rax=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testp(result_register(), result_register());
- __ j(zero, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(not_equal, &l_try);
// result.value
__ Pop(load_receiver); // result
@@ -2196,7 +2099,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Push(rbx);
__ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2213,12 +2116,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ movp(rbx, GlobalObjectOperand());
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
__ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
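The three-load chain from the global object through the native context to a context slot becomes a single macro; the identical rewrite appears again in EmitCreateIterResultObject and EmitLoadJSRuntimeFunction near the end of the file. Before and after:

    // old: three dependent loads
    __ movp(rbx, GlobalObjectOperand());
    __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
    __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
    // new: one macro
    __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);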
@@ -2251,7 +2152,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2268,7 +2169,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2360,7 +2261,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ Push(rax);
}
@@ -2375,17 +2276,17 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ Push(Smi::FromInt(DONT_ENUM));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Push(Smi::FromInt(DONT_ENUM));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2395,7 +2296,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2411,7 +2312,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2431,7 +2332,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2479,7 +2380,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), rax);
__ Pop(StoreDescriptor::ReceiverRegister());
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2502,35 +2403,15 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(StoreDescriptor::NameRegister(), var->name());
- __ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Set(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(rax));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(rax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2540,11 +2421,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2554,11 +2435,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &const_error, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2567,24 +2448,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(equal, &uninitialized_this);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(rax); // Value.
__ Push(rsi); // Context.
__ Push(var->name());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2593,15 +2475,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Push(rax);
__ Push(rsi);
__ Push(var->name());
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2614,9 +2495,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
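EmitVariableAssignment is reworked because the parser no longer encodes the initialization kind in the token: INIT_LET, INIT_CONST and INIT_CONST_LEGACY all become the single Token::INIT, and the variable's own mode() disambiguates. The resulting dispatch, reduced to its conditions (exactly as in the diff):

    if (var->mode() == LET && op != Token::INIT) {
      // non-initializing write to a let: hole check, then store
    } else if (var->mode() == CONST && op != Token::INIT) {
      // write to a const: hole check (for the error kind), then throw
    } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
      // initializing 'this': the slot must still hold the hole
    } else if (!var->is_const_mode() ||
               (var->mode() == CONST && op == Token::INIT)) {
      // ordinary var/let stores and const initialization
    } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
      // legacy const initialization
    } else {
      // legacy const re-assignment: throw in strict mode, ignore in sloppy
    }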
@@ -2631,12 +2512,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2654,8 +2531,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(rax);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2666,10 +2542,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(rax);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2680,12 +2555,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2741,17 +2612,17 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
}
- // Push undefined as receiver. This is patched in the method prologue if it
+ // Push undefined as receiver. This is patched in the Call builtin if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2762,9 +2633,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2794,7 +2666,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2802,7 +2674,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2825,7 +2697,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2853,7 +2725,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2861,11 +2733,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2873,8 +2745,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2908,7 +2781,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2926,7 +2799,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// the object holding it (returned in rdx).
__ Push(context_register());
__ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(rax); // Function.
__ Push(rdx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2953,87 +2826,37 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ PushCalleeAndWithBaseObject(expr);
- // Push a copy of the function (found below the arguments) and resolve
- // eval.
- __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Touch up the callee.
- __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
+ // Push a copy of the function (found below the arguments) and resolve
+ // eval.
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Touch up the callee.
+ __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
-
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ SetCallPosition(expr);
+ __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Set(rax, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, rax);
}
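VisitCall, together with its DEBUG-only return_is_recorded_ bookkeeping and the switch over Call::CallType, disappears from this file; presumably the dispatch moves to the platform-independent full-codegen code, leaving only the eval-specific tail here as EmitPossiblyEvalCall. The resolved callee is then invoked through the Call builtin rather than CallFunctionStub:

    SetCallPosition(expr);
    __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));  // callee
    __ Set(rax, arg_count);
    __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);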
@@ -3068,8 +2891,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(rbx);
__ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -3082,8 +2905,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ movp(result_register(),
+ FieldOperand(result_register(), HeapObject::kMapOffset));
+ __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3096,20 +2924,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into rcx.
+ // Load new target into rdx.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ movp(rcx, result_register());
+ __ movp(rdx, result_register());
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(rbx);
- __ Move(rdx, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
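Super calls stop recording call targets (no feedback vector load, no SUPER_CALL_RECORD_TARGET CallConstructStub). The super constructor is instead read straight off the active function, as its map's prototype, and the generic Construct builtin is invoked with the target in rdi, the argument count in rax and new.target in rdx:

    // [[Prototype]] of the active function is the super constructor.
    VisitForAccumulatorValue(super_call_ref->this_function_var());
    __ AssertFunction(result_register());
    __ movp(result_register(),
            FieldOperand(result_register(), HeapObject::kMapOffset));
    __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
    // after pushing the arguments:
    VisitForAccumulatorValue(super_call_ref->new_target_var());
    __ movp(rdx, result_register());                       // new.target
    __ Set(rax, arg_count);                                // argument count
    __ movp(rdi, Operand(rsp, arg_count * kPointerSize));  // target
    __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);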
@@ -3141,7 +2964,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3155,7 +2978,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -3199,9 +3022,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_FUNCTION_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
+ Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3313,43 +3136,9 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(rax, if_false);
- Register map = rbx;
- __ movp(map, FieldOperand(rax, HeapObject::kMapOffset));
- __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
- __ j(less, if_false);
- __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(less_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
- // Get the frame pointer for the calling frame.
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_PROXY_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3426,52 +3215,40 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
- __ JumpIfSmi(rax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
- // Map is now in rax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(rax, &null, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rax);
+ __ j(below, &null, Label::kNear);
+
+ // Return 'Function' for JSFunction objects.
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &function, Label::kNear);
// Check if the constructor in the map is a JS function.
__ GetMapConstructor(rax, rax, rbx);
__ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor);
+ __ j(not_equal, &non_function_constructor, Label::kNear);
// rax now contains the constructor function. Grab the
// instance class name from there.
__ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
__ movp(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(rax, Heap::kNullValueRootIndex);
+ __ jmp(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, isolate()->factory()->Function_string());
- __ jmp(&done);
+ __ LoadRoot(rax, Heap::kFunction_stringRootIndex);
+ __ jmp(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
+ __ LoadRoot(rax, Heap::kObject_stringRootIndex);
// All done.
__ bind(&done);
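EmitClassOf simplifies for the same instance-type reasons: receivers now occupy the top of the type range (the STATIC_ASSERT above pins LAST_JS_RECEIVER_TYPE == LAST_TYPE), the old callable-types-bracket-the-range trick is replaced by a direct JS_FUNCTION_TYPE equality check, and the 'Function'/'Object' strings come from the root list instead of factory handles, which also lets every branch use Label::kNear. The receiver check reduces to one bound test:

    __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rax);
    __ j(below, &null, Label::kNear);  // non-receivers have class null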
@@ -3521,51 +3298,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = rax;
- Register result = rax;
- Register scratch = rcx;
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Check(equal, kOperandIsNotADate);
- }
-
- if (index->value() == 0) {
- __ movp(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- Operand stamp_operand = __ ExternalOperand(stamp);
- __ movp(scratch, stamp_operand);
- __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movp(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
- __ movp(arg_reg_1, object);
- __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&done);
- }
-
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3671,38 +3403,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(rax, &done_convert, Label::kNear);
__ Push(rax);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into rax and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into rax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3718,25 +3424,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ j(below_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ Push(rax);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(rax);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into rax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3850,19 +3543,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ Pop(rdx);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3870,6 +3550,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to rdi.
int const argc = args->length() - 2;
__ movp(rdi, Operand(rsp, (argc + 1) * kPointerSize));
@@ -3883,99 +3564,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in rdi. Move it in there.
- __ movp(rdi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION, NullCallWrapper());
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ Push(rax);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rbx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rbx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ movp(rax, Immediate(0));
- __ jmp(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ movp(rbx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger64(rbx, rbx);
-
- __ movp(rax, rbx);
- __ leap(rdx, Operand(rdx, rbx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- __ Push(Operand(rdx, -1 * kPointerSize));
- __ subp(rdx, Immediate(kPointerSize));
- __ decp(rbx);
- __ j(not_zero, &loop);
- }
-
- __ bind(&args_set_up);
- __ movp(rdx, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ movp(rdi, Operand(rsp, rax, times_pointer_size, 0 * kPointerSize));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ Pop(rbx);
- __ Pop(rcx);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4014,6 +3602,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(rax);
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, Map::kPrototypeOffset));
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, return_result, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4152,6 +3751,11 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ j(overflow, &bailout);
__ addl(string_length, scratch);
__ j(overflow, &bailout);
+ __ jmp(&bailout);
+
+ // Bailout for large object allocations.
+ __ cmpl(string_length, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &bailout);
// Live registers and stack values:
// string_length: Total length of result string.
@@ -4319,9 +3923,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime, TAG_OBJECT);
- __ movp(rbx, GlobalObjectOperand());
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
__ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
@@ -4332,7 +3934,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(rax);
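// Review sketch (not part of the patch): the recurring CallRuntime(id, n) ->
// CallRuntime(id) rewrite throughout this diff works because the macro
// assembler now reads the arity from the runtime function's descriptor. A
// hedged sketch of the overload being relied on (the real declaration lives
// in the per-port macro-assembler headers):
//
//   void MacroAssembler::CallRuntime(Runtime::FunctionId fid) {
//     const Runtime::Function* function = Runtime::FunctionForId(fid);
//     CallRuntime(function, function->nargs);  // arity from the descriptor
//   }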
@@ -4343,9 +3945,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
__ PushRoot(Heap::kUndefinedValueRootIndex);
- __ movp(rax, GlobalObjectOperand());
- __ movp(rax, FieldOperand(rax, GlobalObject::kNativeContextOffset));
- __ movp(rax, ContextOperand(rax, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), rax);
}
@@ -4353,10 +3953,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ Set(rax, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
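// Review sketch (not part of the patch): CallFunctionStub is replaced by the
// Call builtin, whose contract in the sequence above is rax = argument count,
// rdi = target. The receiver hint is the ConvertReceiverMode enum; from
// memory of the sources of this period (hedged):
//
//   enum class ConvertReceiverMode : unsigned {
//     kNullOrUndefined,     // receiver known null/undefined: use global proxy
//     kNotNullOrUndefined,  // receiver already a JSReceiver: leave it as is
//     kAny                  // nothing known: the builtin checks at run time
//   };
//
// JS runtime functions are always invoked with an undefined receiver, hence
// kNullOrUndefined here.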
@@ -4424,8 +4025,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4434,9 +4034,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ Push(GlobalObjectOperand());
+ __ movp(rax, NativeContextOperand());
+ __ Push(ContextOperand(rax, Context::EXTENSION_INDEX));
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4448,7 +4049,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(rax);
}
} else {
@@ -4726,12 +4327,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4769,12 +4366,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4845,8 +4438,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rdx);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rdx);
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
@@ -4891,7 +4484,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -4963,8 +4556,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ testp(rax, rax);
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
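// Review sketch (not part of the patch): CompareNilIC and ToBooleanStub used
// to return a zero/non-zero integer that callers probed with a test and
// Split. After this change they return the actual true/false heap values, so
// every consumer switches to the pattern above:
//
//   __ CompareRoot(rax, Heap::kTrueValueRootIndex);
//   Split(equal, if_true, if_false, fall_through);
//
// The same mechanical rewrite appears in DoTest and VisitYield in the x87
// file below.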
@@ -5003,9 +4596,9 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ Push(Smi::FromInt(0));
+ // code.
+ __ movp(rax, NativeContextOperand());
+ __ Push(ContextOperand(rax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
@@ -5075,8 +4668,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
@@ -5121,8 +4714,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
- Assembler::set_target_address_at(call_target_address,
- unoptimized_code,
+ Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
+ call_target_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
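// Review sketch (not part of the patch): Assembler::set_target_address_at
// gains an explicit Isolate* so the back-edge patcher no longer recovers it
// from the code object. A hedged sketch of the overload used above (the real
// declaration is in the per-port assembler headers):
//
//   static void set_target_address_at(
//       Isolate* isolate, Address pc, Code* code, Address target,
//       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);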
diff --git a/chromium/v8/src/full-codegen/x87/full-codegen-x87.cc b/chromium/v8/src/full-codegen/x87/full-codegen-x87.cc
index a85152d7a98..c38230ad1e5 100644
--- a/chromium/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/chromium/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -4,15 +4,14 @@
#if V8_TARGET_ARCH_X87
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/x87/frames-x87.h"
namespace v8 {
@@ -84,6 +83,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o edi: the JS function object being called (i.e. ourselves)
+// o edx: the new target value
// o esi: our context
// o ebp: our caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -106,24 +106,12 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Sloppy mode functions and builtins need to replace the receiver with the
- // global proxy when called as functions (without an explicit receiver
- // object).
- if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
- Label ok;
- // +1 for return address.
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
-
- __ cmp(ecx, isolate()->factory()->undefined_value());
- __ j(not_equal, &ok, Label::kNear);
-
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-
- __ mov(Operand(esp, receiver_offset), ecx);
-
- __ bind(&ok);
+ __ AssertNotSmi(ecx);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
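// Review sketch (not part of the patch): the removed fix-up is not lost.
// Under the new calling convention the Call builtin itself converts an
// undefined receiver to the global proxy before entering sloppy-mode code
// (see the ConvertReceiverMode sketch earlier in this diff), so the prologue
// keeps only a debug-mode assertion that the receiver is a JSReceiver.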
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -132,8 +120,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
- info->AddNoFrameRange(0, masm_->pc_offset());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -150,7 +137,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_real_stack_limit(isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
@@ -186,15 +173,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(edx); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(edx); // Restore new target.
+ }
}
function_in_register = false;
// Context is returned in eax. It replaces the context passed to us.
@@ -229,10 +227,10 @@ void FullCodeGenerator::Generate() {
}
}
- PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register| is correct.
+ // Registers holding this function and the new target are both trashed in
+ // case we bail out here. But since that can happen only when the new target
+ // is not used and we allocate a context, the value of |function_in_register|
+ // is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -241,39 +239,38 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, edi, ebx, edx);
+ SetVar(this_function_var, edi, ebx, ecx);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- Label non_adaptor_frame;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &non_adaptor_frame);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- __ bind(&non_adaptor_frame);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-
- Label non_construct_frame, done;
- __ j(not_equal, &non_construct_frame);
-
- // Construct frame
- __ mov(eax,
- Operand(eax, ConstructFrameConstants::kOriginalConstructorOffset));
- __ jmp(&done);
-
- // Non-construct frame
- __ bind(&non_construct_frame);
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
-
- __ bind(&done);
- SetVar(new_target_var, eax, ebx, edx);
+ SetVar(new_target_var, edx, ebx, ecx);
+ }
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+
+ __ mov(RestParamAccessDescriptor::parameter_count(),
+ Immediate(Smi::FromInt(num_parameters)));
+ __ lea(RestParamAccessDescriptor::parameter_pointer(),
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Immediate(Smi::FromInt(rest_index)));
+ function_in_register = false;
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+ SetVar(rest_param, eax, ebx, edx);
}
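// Review sketch (not part of the patch): rest-parameter allocation now
// happens in the prologue. RestParamAccessDescriptor names the three stub
// inputs loaded above: the formal parameter count (as a Smi), a pointer into
// the caller's argument area, and the rest parameter's index. A hedged
// JS-level picture of what the stub materializes:
//
//   function f(a, b, ...rest) {}   // rest_index == 2
//   f(1, 2, 3, 4);                 // RestParamAccessStub builds rest == [3, 4]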
Variable* arguments = scope()->arguments();
@@ -306,7 +303,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -411,7 +408,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
__ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -433,13 +430,11 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&ok);
SetReturnPosition(literal());
- int no_frame_start = masm_->pc_offset();
__ leave();
int arg_count = info_->scope()->num_parameters() + 1;
int arguments_bytes = arg_count * kPointerSize;
__ Ret(arguments_bytes, ecx);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
@@ -637,9 +632,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ test(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -797,16 +791,14 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
}
- __ CallRuntime(IsImmutableVariableMode(mode)
- ? Runtime::kDeclareReadOnlyLookupSlot
- : Runtime::kDeclareLookupSlot,
- 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
}
-
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
@@ -848,7 +840,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(Immediate(variable->name()));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareLookupSlot, 2);
+ __ push(
+ Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -859,7 +853,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -867,7 +861,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -989,7 +983,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1000,9 +994,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime, use_cache, fixed_array;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- __ j(below_equal, &call_runtime);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
+ __ j(equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1016,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -1047,7 +1040,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register eax. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
@@ -1055,22 +1047,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-
- __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
- __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
- __ j(above, &non_proxy);
- __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(ebx); // Smi
+ __ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
@@ -1093,18 +1076,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK(Smi::FromInt(0) == 0);
- __ test(edx, edx);
- __ j(zero, &update_each);
-
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, loop_statement.continue_label());
@@ -1120,6 +1097,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
}
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1160,33 +1139,34 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ push(Immediate(info));
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(eax);
}
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
-void FullCodeGenerator::EmitSetHomeObjectAccumulator(
- Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ mov(StoreDescriptor::ReceiverRegister(), eax);
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1201,10 +1181,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
@@ -1230,9 +1209,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
Immediate(isolate()->factory()->native_context_map()));
__ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
@@ -1254,19 +1233,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
}
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that last extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an esi-based operand (the write barrier cannot be allowed to
@@ -1298,7 +1276,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ mov(eax, isolate()->factory()->undefined_value());
} else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1311,27 +1289,14 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- if (var->IsGlobalSlot()) {
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
- __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- LoadGlobalViaContextStub stub(isolate(), depth);
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
- }
-
- } else {
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
- }
+ __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ ContextOperand(LoadDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
+ __ mov(LoadDescriptor::NameRegister(), var->name());
+ __ mov(LoadDescriptor::SlotRegister(),
+ Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
}
@@ -1369,7 +1334,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1396,7 +1361,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1407,53 +1372,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
+ __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
+ __ Move(ecx, Immediate(expr->pattern()));
+ __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(eax);
}
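// Review sketch (not part of the patch): the removed code looked the literal
// up in the closure's literals array, called the runtime to materialize it on
// a miss, and then cloned it by copying JSRegExp::kSize bytes word by word.
// FastCloneRegExpStub folds the lookup, materialization, and clone into one
// call; the register inputs assumed by the sequence above (hedged) are
// edi = closure, eax = literal index (Smi), ecx = pattern, edx = flags (Smi).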
@@ -1482,15 +1406,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
@@ -1531,12 +1453,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1554,7 +1472,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1563,7 +1481,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1590,7 +1510,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1617,9 +1537,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1631,7 +1553,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1643,12 +1565,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1657,7 +1579,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
__ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1671,7 +1593,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- expr->BuildConstantElements(isolate());
Handle<FixedArray> constant_elements = expr->constant_elements();
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1684,15 +1605,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1717,29 +1636,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax); // array literal.
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (has_constant_fast_elements) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ mov(ecx, Immediate(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
-
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(Smi::FromInt(array_index)));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
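// Review sketch (not part of the patch): array literal elements are no longer
// written with a hand-rolled store plus write barrier (fast case) or
// StoreArrayLiteralElementStub (transitioning case). Both collapse into a
// keyed store IC fed from the literal's feedback slot, which also removes the
// need to keep the literal index on the stack (the Drop(1) removals below).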
@@ -1749,7 +1656,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(eax);
result_saved = false;
}
@@ -1763,14 +1669,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -2050,9 +1955,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
- SetCallPosition(expr, 1);
- CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
- __ CallStub(&stub);
+ SetCallPosition(expr);
+ __ Set(eax, 1);
+ __ Call(
+ isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
@@ -2068,8 +1975,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ test(eax, eax);
- __ j(zero, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(not_equal, &l_try);
// result.value
__ pop(load_receiver); // result
@@ -2160,7 +2067,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2177,11 +2084,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -2217,7 +2123,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ push(Immediate(key->value()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2234,7 +2140,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2360,7 +2266,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(eax);
}
@@ -2375,24 +2281,24 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2408,7 +2314,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitAssignment(Expression* expr,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
Property* prop = expr->AsProperty();
@@ -2428,7 +2334,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2476,7 +2382,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), eax);
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2499,35 +2405,18 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
- __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(StoreDescriptor::ReceiverRegister(),
+ ContextOperand(StoreDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->IsGlobalSlot()) {
- // Global var, const, or let.
- DCHECK(var->index() > 0);
- DCHECK(var->IsStaticGlobalObjectProperty());
- int const slot = var->index();
- int const depth = scope()->ContextChainLength(var->scope());
- if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
- __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
- DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
- StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
- __ CallStub(&stub);
- } else {
- __ Push(Smi::FromInt(slot));
- __ Push(eax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2537,11 +2426,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2551,11 +2440,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &const_error, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2564,24 +2453,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(equal, &uninitialized_this);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2590,15 +2480,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2611,9 +2500,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2630,12 +2519,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2652,8 +2537,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ push(Immediate(key->value()));
__ push(eax);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2663,10 +2547,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
__ push(eax);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2681,13 +2564,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
-
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2741,10 +2619,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
- CallICState::CallType call_type =
- callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
- if (call_type == CallICState::FUNCTION) {
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2752,6 +2629,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ push(Immediate(isolate()->factory()->undefined_value()));
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2762,9 +2640,10 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push the target function under the receiver.
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
- EmitCall(expr, call_type);
+ EmitCall(expr, convert_mode);
}
@@ -2793,7 +2672,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2801,7 +2680,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
@@ -2824,7 +2703,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2851,7 +2730,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2859,11 +2738,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// Stack here:
// - target function
// - this (receiver)
- EmitCall(expr, CallICState::METHOD);
+ EmitCall(expr);
}
-void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2871,8 +2750,9 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
VisitForStackValue(args->at(i));
}
- SetCallPosition(expr, arg_count);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr);
+ Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
@@ -2906,7 +2786,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2925,7 +2805,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(callee->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ push(eax); // Function.
__ push(edx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2951,88 +2831,38 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
-
- if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ PushCalleeAndWithBaseObject(expr);
- // Touch up the stack with the resolved function.
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(arg_count);
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
+ // Touch up the stack with the resolved function.
+ __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- } else if (call_type == Call::GLOBAL_CALL) {
- EmitCallWithLoadIC(expr);
- } else if (call_type == Call::LOOKUP_SLOT_CALL) {
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- } else if (call_type == Call::PROPERTY_CALL) {
- Property* property = callee->AsProperty();
- bool is_named_call = property->key()->IsPropertyName();
- if (property->IsSuperAccess()) {
- if (is_named_call) {
- EmitSuperCallWithLoadIC(expr);
- } else {
- EmitKeyedSuperCallWithLoadIC(expr);
- }
- } else {
- VisitForStackValue(property->obj());
- if (is_named_call) {
- EmitCallWithLoadIC(expr);
- } else {
- EmitKeyedCallWithLoadIC(expr, property->key());
- }
- }
- } else if (call_type == Call::SUPER_CALL) {
- EmitSuperConstructorCall(expr);
- } else {
- DCHECK(call_type == Call::OTHER_CALL);
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Emit function call.
- EmitCall(expr);
- }
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- DCHECK(expr->return_is_recorded_);
-#endif
+ SetCallPosition(expr);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, eax);
}
@@ -3067,8 +2897,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3081,8 +2911,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ mov(result_register(),
+ FieldOperand(result_register(), HeapObject::kMapOffset));
+ __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3095,20 +2930,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into ecx.
+ // Load new target into edx.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(ecx, result_register());
+ __ mov(edx, result_register());
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(ebx);
- __ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3139,7 +2969,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3153,7 +2983,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -3197,9 +3027,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
+ Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3312,42 +3142,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- Register map = ebx;
- __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
- __ j(less, if_false);
- __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(less_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3424,52 +3219,40 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
- __ JumpIfSmi(eax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
- // Map is now in eax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(eax, &null, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
+ __ j(below, &null, Label::kNear);
+
+ // Return 'Function' for JSFunction objects.
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &function, Label::kNear);
// Check if the constructor in the map is a JS function.
__ GetMapConstructor(eax, eax, ebx);
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor);
+ __ j(not_equal, &non_function_constructor, Label::kNear);
// eax now contains the constructor function. Grab the
// instance class name from there.
__ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, isolate()->factory()->null_value());
+ __ jmp(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
__ mov(eax, isolate()->factory()->Function_string());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
__ mov(eax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
// All done.
__ bind(&done);
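For reference, a toy standalone restatement of the classification order the rewritten EmitClassOf encodes (plain C++, not V8 code; the instance-type list and fields are made-up stand-ins): non-receivers yield null, functions yield "Function", a receiver whose map records a function constructor yields that constructor's class name, and anything else yields "Object".

#include <cassert>
#include <string>

enum InstanceType { SMI, JS_OBJECT, JS_FUNCTION };  // toy stand-in list

struct Obj {
  InstanceType type;
  const Obj* map_constructor;  // constructor recorded on the object's map
  std::string class_name;      // instance class name, if a constructor
};

std::string ClassOf(const Obj* o) {
  if (o == nullptr || o->type == SMI) return "null";  // not a JSReceiver
  if (o->type == JS_FUNCTION) return "Function";      // functions come first
  if (o->map_constructor != nullptr &&
      o->map_constructor->type == JS_FUNCTION) {
    return o->map_constructor->class_name;            // constructor's name
  }
  return "Object";                                    // fallback class
}

int main() {
  Obj ctor{JS_FUNCTION, nullptr, "Date"};
  Obj with_ctor{JS_OBJECT, &ctor, ""};
  Obj plain{JS_OBJECT, nullptr, ""};
  assert(ClassOf(nullptr) == "null");
  assert(ClassOf(&ctor) == "Function");
  assert(ClassOf(&with_ctor) == "Date");
  assert(ClassOf(&plain) == "Object");
}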
@@ -3519,43 +3302,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = eax;
- Register result = eax;
- Register scratch = ecx;
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3665,38 +3411,12 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(eax, &done_convert, Label::kNear);
__ Push(eax);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(args->length(), 1);
-
- // Load the argument into eax and call the stub.
- VisitForAccumulatorValue(args->at(0));
-
- NumberToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToStringStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitToName(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3712,25 +3432,12 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ j(below_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ Push(eax);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(eax);
}
-void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3844,19 +3551,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- __ pop(edx);
- StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3864,6 +3558,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3877,101 +3572,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; ++i) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in edi. Move it in there.
- __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION, NullCallWrapper());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCallFunction, args->length());
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ebx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(eax, Immediate(0));
- __ jmp(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ mov(ebx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(ebx);
-
- __ mov(eax, ebx);
- __ lea(edx, Operand(edx, ebx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- __ push(Operand(edx, -1 * kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ebx);
- __ j(not_zero, &loop);
- }
-
- __ bind(&args_set_up);
-
- __ mov(edx, Operand(esp, eax, times_pointer_size, 1 * kPointerSize));
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0 * kPointerSize));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub(isolate());
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(2));
- __ pop(ebx);
- __ pop(ecx);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4010,6 +3610,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(eax);
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4136,6 +3747,11 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ j(overflow, &bailout);
__ shr(string_length, 1);
+
+ // Bail out for large object allocations.
+ __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
+ __ j(greater, &bailout);
+
// Live registers and stack values:
// string_length
// elements
@@ -4293,8 +3909,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -4307,7 +3922,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(eax);
@@ -4318,9 +3933,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as receiver.
__ push(Immediate(isolate()->factory()->undefined_value()));
- __ mov(eax, GlobalObjectOperand());
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(eax, ContextOperand(eax, expr->context_index()));
+ __ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -4328,10 +3941,11 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
- CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+ SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ Set(eax, arg_count);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
}
@@ -4398,8 +4012,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4408,9 +4021,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ push(GlobalObjectOperand());
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4422,7 +4036,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(eax);
}
} else {
@@ -4702,12 +4316,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4745,12 +4355,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4821,8 +4427,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, edx);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, edx);
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
@@ -4867,7 +4473,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4940,8 +4546,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ test(eax, eax);
- Split(not_zero, if_true, if_false, fall_through);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
@@ -4980,9 +4586,9 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ push(Immediate(Smi::FromInt(0)));
+ // code.
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
@@ -5049,8 +4655,8 @@ void FullCodeGenerator::ClearPendingMessage() {
}
-void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorICSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Immediate(SmiFromSlot(slot)));
}
@@ -5097,8 +4703,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
- Assembler::set_target_address_at(call_target_address,
- unoptimized_code,
+ Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
+ call_target_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
diff --git a/chromium/v8/src/futex-emulation.cc b/chromium/v8/src/futex-emulation.cc
index b0e514e8af2..991e4c37116 100644
--- a/chromium/v8/src/futex-emulation.cc
+++ b/chromium/v8/src/futex-emulation.cc
@@ -262,7 +262,8 @@ Object* FutexEmulation::NumWaitersForTesting(Isolate* isolate,
int waiters = 0;
FutexWaitListNode* node = wait_list_.Pointer()->head_;
while (node) {
- if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
+ if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
+ node->waiting_) {
waiters++;
}
diff --git a/chromium/v8/src/futex-emulation.h b/chromium/v8/src/futex-emulation.h
index e7e2230da20..9949bdf44ff 100644
--- a/chromium/v8/src/futex-emulation.h
+++ b/chromium/v8/src/futex-emulation.h
@@ -125,7 +125,7 @@ class FutexEmulation : public AllStatic {
static base::LazyMutex mutex_;
static base::LazyInstance<FutexWaitList>::type wait_list_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FUTEX_EMULATION_H_
diff --git a/chromium/v8/src/gdb-jit.cc b/chromium/v8/src/gdb-jit.cc
index d0fd8223e14..819bd69e070 100644
--- a/chromium/v8/src/gdb-jit.cc
+++ b/chromium/v8/src/gdb-jit.cc
@@ -1187,7 +1187,7 @@ class DebugInfoSection : public DebugSection {
DCHECK(Context::CLOSURE_INDEX == 0);
DCHECK(Context::PREVIOUS_INDEX == 1);
DCHECK(Context::EXTENSION_INDEX == 2);
- DCHECK(Context::GLOBAL_OBJECT_INDEX == 3);
+ DCHECK(Context::NATIVE_CONTEXT_INDEX == 3);
w->WriteULEB128(current_abbreviation++);
w->WriteString(".closure");
w->WriteULEB128(current_abbreviation++);
@@ -1195,7 +1195,7 @@ class DebugInfoSection : public DebugSection {
w->WriteULEB128(current_abbreviation++);
w->WriteString(".extension");
w->WriteULEB128(current_abbreviation++);
- w->WriteString(".global");
+ w->WriteString(".native_context");
for (int context_slot = 0;
context_slot < context_slots;
diff --git a/chromium/v8/src/global-handles.cc b/chromium/v8/src/global-handles.cc
index 650999f394c..f9c4a4f5802 100644
--- a/chromium/v8/src/global-handles.cc
+++ b/chromium/v8/src/global-handles.cc
@@ -54,6 +54,8 @@ class GlobalHandles::Node {
Internals::kNodeIsIndependentShift);
STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
Internals::kNodeIsPartiallyDependentShift);
+ STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
+ Internals::kNodeIsActiveShift);
}
#ifdef ENABLE_HANDLE_ZAPPING
@@ -64,7 +66,11 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
set_independent(false);
- set_partially_dependent(false);
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ set_active(false);
+ } else {
+ set_partially_dependent(false);
+ }
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
weak_callback_ = NULL;
@@ -86,7 +92,11 @@ class GlobalHandles::Node {
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
- set_partially_dependent(false);
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ set_active(false);
+ } else {
+ set_partially_dependent(false);
+ }
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
weak_callback_ = NULL;
@@ -106,7 +116,11 @@ class GlobalHandles::Node {
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
- set_partially_dependent(false);
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ set_active(false);
+ } else {
+ set_partially_dependent(false);
+ }
weak_callback_ = NULL;
DecreaseBlockUses();
}
@@ -140,12 +154,23 @@ class GlobalHandles::Node {
}
bool is_partially_dependent() {
+ CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
return IsPartiallyDependent::decode(flags_);
}
void set_partially_dependent(bool v) {
+ CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
flags_ = IsPartiallyDependent::update(flags_, v);
}
+ bool is_active() {
+ CHECK(FLAG_scavenge_reclaim_unmodified_objects);
+ return IsActive::decode(flags_);
+ }
+ void set_active(bool v) {
+ CHECK(FLAG_scavenge_reclaim_unmodified_objects);
+ flags_ = IsActive::update(flags_, v);
+ }
+
bool is_in_new_space_list() {
return IsInNewSpaceList::decode(flags_);
}
@@ -349,6 +374,8 @@ class GlobalHandles::Node {
// in_new_space_list) and a State.
class NodeState : public BitField<State, 0, 3> {};
class IsIndependent : public BitField<bool, 3, 1> {};
+ // The following two fields are mutually exclusive: they share bit 4.
+ class IsActive : public BitField<bool, 4, 1> {};
class IsPartiallyDependent : public BitField<bool, 4, 1> {};
class IsInNewSpaceList : public BitField<bool, 5, 1> {};
class NodeWeaknessType : public BitField<WeaknessType, 6, 2> {};
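IsActive and IsPartiallyDependent deliberately occupy the same bit, selected at runtime by --scavenge_reclaim_unmodified_objects, which is why the accessors above CHECK that flag. A minimal stand-in for the BitField template (a sketch, not V8's actual implementation) makes the aliasing visible:

#include <cassert>
#include <cstdint>

// Minimal stand-in for v8::internal::BitField (illustration only).
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
  static uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | (static_cast<uint32_t>(value) << shift);
  }
};

using IsActive = BitField<bool, 4, 1>;
using IsPartiallyDependent = BitField<bool, 4, 1>;  // same bit, by design

int main() {
  uint32_t flags = 0;
  flags = IsActive::update(flags, true);
  // The fields alias bit 4, so a write through one is visible through the
  // other; the runtime flag decides which interpretation is legal.
  assert(IsPartiallyDependent::decode(flags));
}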
@@ -506,10 +533,10 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
}
void RunInternal() override {
- isolate_->heap()->CallGCPrologueCallbacks(
+ isolate()->heap()->CallGCPrologueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
- InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate_);
- isolate_->heap()->CallGCEpilogueCallbacks(
+ InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
+ isolate()->heap()->CallGCEpilogueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
}
@@ -646,10 +673,18 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- !node->is_partially_dependent())) {
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ node->is_active())) {
+ v->VisitPointer(node->location());
+ }
+ } else {
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ !node->is_partially_dependent())) {
v->VisitPointer(node->location());
+ }
}
}
}
@@ -687,6 +722,49 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
}
+void GlobalHandles::IdentifyWeakUnmodifiedObjects(
+ WeakSlotCallback is_unmodified) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsWeak() && !is_unmodified(node->location())) {
+ node->set_active(true);
+ }
+ }
+}
+
+
+void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
+ WeakSlotCallbackWithHeap is_unscavenged) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ DCHECK(node->is_in_new_space_list());
+ if ((node->is_independent() || !node->is_active()) && node->IsWeak() &&
+ is_unscavenged(isolate_->heap(), node->location())) {
+ node->MarkPending();
+ }
+ }
+}
+
+
+void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ DCHECK(node->is_in_new_space_list());
+ if ((node->is_independent() || !node->is_active()) &&
+ node->IsWeakRetainer()) {
+ // Pending weak phantom handles die immediately. Everything else survives.
+ if (node->state() == Node::PENDING &&
+ node->weakness_type() != NORMAL_WEAK) {
+ node->CollectPhantomCallbackData(isolate(),
+ &pending_phantom_callbacks_);
+ } else {
+ v->VisitPointer(node->location());
+ }
+ }
+ }
+}
+
+
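A toy model of the reclamation rule these iterators implement (standalone C++, not V8 code): during a scavenge, a weak handle is considered for reclamation only if it is independent or its object was never flagged active, i.e. never modified by the embedder.

#include <cassert>

// One new-space global handle, reduced to the three bits the rule reads.
struct Node {
  bool weak = true;
  bool independent = false;
  bool active = false;  // set when the embedder modified the object
};

bool EligibleForScavengeReclaim(const Node& n) {
  // Mirrors (node->is_independent() || !node->is_active()) on a weak
  // retainer.
  return n.weak && (n.independent || !n.active);
}

int main() {
  Node unmodified;  // weak, never touched by the embedder
  Node modified;
  modified.active = true;
  assert(EligibleForScavengeReclaim(unmodified));
  assert(!EligibleForScavengeReclaim(modified));
}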
bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
WeakSlotCallbackWithHeap can_skip) {
ComputeObjectGroupsAndImplicitReferences();
@@ -757,13 +835,23 @@ int GlobalHandles::PostScavengeProcessing(
// the freed_nodes.
continue;
}
- // Skip dependent handles. Their weak callbacks might expect to be
+ // Skip dependent or unmodified handles. Their weak callbacks might expect
+ // to be called between two global garbage collection callbacks which
// are not called for minor collections.
- if (!node->is_independent() && !node->is_partially_dependent()) {
- continue;
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ if (!node->is_independent() && (node->is_active())) {
+ node->set_active(false);
+ continue;
+ }
+ node->set_active(false);
+ } else {
+ if (!node->is_independent() && !node->is_partially_dependent()) {
+ continue;
+ }
+ node->clear_partially_dependent();
}
- node->clear_partially_dependent();
+
if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
@@ -790,7 +878,11 @@ int GlobalHandles::PostMarkSweepProcessing(
// the freed_nodes.
continue;
}
- it.node()->clear_partially_dependent();
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ it.node()->set_active(false);
+ } else {
+ it.node()->clear_partially_dependent();
+ }
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
@@ -955,6 +1047,16 @@ void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
}
+void GlobalHandles::IterateWeakRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->has_wrapper_class_id() && node->IsWeak()) {
+ v->VisitEmbedderReference(node->location(), node->wrapper_class_id());
+ }
+ }
+}
+
+
int GlobalHandles::NumberOfWeakHandles() {
int count = 0;
for (NodeIterator it(this); !it.done(); it.Advance()) {
diff --git a/chromium/v8/src/global-handles.h b/chromium/v8/src/global-handles.h
index 0ee8c20a375..7047d8ca015 100644
--- a/chromium/v8/src/global-handles.h
+++ b/chromium/v8/src/global-handles.h
@@ -197,6 +197,10 @@ class GlobalHandles {
// class ID.
void IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v);
+ // Iterate over all handles in the new space that are weak, unmodified,
+ // and have class IDs.
+ void IterateWeakRootsInNewSpaceWithClassIds(ObjectVisitor* v);
+
// Iterates over all weak roots in heap.
void IterateWeakRoots(ObjectVisitor* v);
@@ -204,7 +208,7 @@ class GlobalHandles {
// them as pending.
void IdentifyWeakHandles(WeakSlotCallback f);
- // NOTE: Three ...NewSpace... functions below are used during
+ // NOTE: Five ...NewSpace... functions below are used during
// scavenge collections and iterate over sets of handles that are
// guaranteed to contain all handles holding new space objects (but
// may also include old space objects).
@@ -220,6 +224,19 @@ class GlobalHandles {
// See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+ // Finds weak handles that are independent or unmodified and that satisfy
+ // the callback predicate, and marks them as pending. See the note above.
+ void MarkNewSpaceWeakUnmodifiedObjectsPending(
+ WeakSlotCallbackWithHeap is_unscavenged);
+
+ // Iterates over weak handles that are independent or unmodified.
+ // See the note above.
+ void IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v);
+
+ // Identifies unmodified objects that are in a weak state and marks them
+ // as unmodified.
+ void IdentifyWeakUnmodifiedObjects(WeakSlotCallback is_unmodified);
+
// Iterate over objects in object groups that have at least one object
// which requires visiting. The callback has to return true if objects
// can be skipped and false otherwise.
@@ -438,6 +455,7 @@ class EternalHandles {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_GLOBAL_HANDLES_H_
diff --git a/chromium/v8/src/globals.h b/chromium/v8/src/globals.h
index 9d4bafb6ffd..67bdb63b86e 100644
--- a/chromium/v8/src/globals.h
+++ b/chromium/v8/src/globals.h
@@ -217,6 +217,20 @@ F FUNCTION_CAST(Address addr) {
}
+// Determine whether the architecture uses function descriptors
+// which provide a level of indirection between the function pointer
+// and the function entrypoint.
+#if V8_HOST_ARCH_PPC && \
+ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define USES_FUNCTION_DESCRIPTORS 1
+#define FUNCTION_ENTRYPOINT_ADDRESS(f) \
+ (reinterpret_cast<v8::internal::Address*>( \
+ &(reinterpret_cast<intptr_t*>(f)[0])))
+#else
+#define USES_FUNCTION_DESCRIPTORS 0
+#endif
+
+
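A standalone toy of the indirection this macro encodes (a hand-built fake descriptor stands in for a real AIX/PPC64 function descriptor, since reading word 0 of an ordinary function pointer is undefined on other platforms):

#include <cstdint>
#include <cstdio>

// Word 0 of a function descriptor holds the real code entrypoint; word 1
// typically holds the TOC pointer.
struct FakeDescriptor {
  intptr_t entrypoint;
  intptr_t toc;
};

int main() {
  FakeDescriptor d = {0x100030, 0x200040};
  // FUNCTION_ENTRYPOINT_ADDRESS(f) performs exactly this read of word 0:
  intptr_t entry = reinterpret_cast<intptr_t*>(&d)[0];
  std::printf("entrypoint = %#lx\n", static_cast<unsigned long>(entry));
}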
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
@@ -443,6 +457,7 @@ class String;
class Symbol;
class Name;
class Struct;
+class TypeFeedbackVector;
class Variable;
class RelocInfo;
class Deserializer;
@@ -580,33 +595,7 @@ enum InlineCacheState {
// A generic handler is installed and no extra typefeedback is recorded.
GENERIC,
// Special state for debug break or step in prepare stubs.
- DEBUG_STUB,
- // Type-vector-based ICs have a default state, with the full calculation
- // of IC state only determined by a look at the IC and the typevector
- // together.
- DEFAULT
-};
-
-
-enum CallFunctionFlags {
- NO_CALL_FUNCTION_FLAGS,
- CALL_AS_METHOD,
- // Always wrap the receiver and call to the JSFunction. Only use this flag
- // both the receiver type and the target method are statically known.
- WRAP_AND_CALL
-};
-
-
-enum CallConstructorFlags {
- NO_CALL_CONSTRUCTOR_FLAGS = 0,
- // The call target is cached in the instruction stream.
- RECORD_CONSTRUCTOR_TARGET = 1,
- // TODO(bmeurer): Kill these SUPER_* modes and use the Construct builtin
- // directly instead; also there's no point in collecting any "targets" for
- // super constructor calls, since these are known when we optimize the
- // constructor that contains the super call.
- SUPER_CONSTRUCTOR_CALL = 1 << 1,
- SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
+ DEBUG_STUB
};
@@ -741,6 +730,31 @@ enum CpuFeature {
};
+// Defines hints about receiver values based on structural knowledge.
+enum class ConvertReceiverMode : unsigned {
+ kNullOrUndefined, // Guaranteed to be null or undefined.
+ kNotNullOrUndefined, // Guaranteed to never be null or undefined.
+ kAny // No specific knowledge about receiver.
+};
+
+inline size_t hash_value(ConvertReceiverMode mode) {
+ return bit_cast<unsigned>(mode);
+}
+
+inline std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return os << "NULL_OR_UNDEFINED";
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return os << "NOT_NULL_OR_UNDEFINED";
+ case ConvertReceiverMode::kAny:
+ return os << "ANY";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
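A minimal standalone sketch of the enum and its stream operator (stubbed locally rather than pulled from V8 headers, so it compiles on its own); callers such as EmitCallJSRuntimeFunction above use the mode to pick a variant of the Call builtin:

#include <iostream>

enum class ConvertReceiverMode : unsigned {
  kNullOrUndefined,
  kNotNullOrUndefined,
  kAny
};

std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
  switch (mode) {
    case ConvertReceiverMode::kNullOrUndefined:
      return os << "NULL_OR_UNDEFINED";
    case ConvertReceiverMode::kNotNullOrUndefined:
      return os << "NOT_NULL_OR_UNDEFINED";
    case ConvertReceiverMode::kAny:
      return os << "ANY";
  }
  return os;  // unreachable, mirrors the UNREACHABLE() pattern
}

int main() {
  std::cout << ConvertReceiverMode::kNullOrUndefined << "\n";
}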
// Used to specify if a macro instruction must perform a smi check on tagged
// values.
enum SmiCheckType {
@@ -756,8 +770,7 @@ enum ScopeType {
SCRIPT_SCOPE, // The top-level scope for a script or a top-level eval.
CATCH_SCOPE, // The scope introduced by catch.
BLOCK_SCOPE, // The scope introduced by a new block.
- WITH_SCOPE, // The scope introduced by with.
- ARROW_SCOPE // The top-level scope for an arrow function literal.
+ WITH_SCOPE // The scope introduced by with.
};
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
@@ -935,6 +948,8 @@ enum FunctionKind {
kInObjectLiteral = 1 << 7,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
+ kClassConstructor =
+ kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
kConciseGeneratorMethodInObjectLiteral =
kConciseGeneratorMethod | kInObjectLiteral,
@@ -1003,9 +1018,16 @@ inline bool IsSubclassConstructor(FunctionKind kind) {
inline bool IsClassConstructor(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
- return kind &
- (FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
- FunctionKind::kDefaultConstructor);
+ return kind & FunctionKind::kClassConstructor;
+}
+
+
+inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
+ if (IsAccessorFunction(kind)) return false;
+ if (IsConciseMethod(kind) && !IsGeneratorFunction(kind)) return false;
+ if (IsArrowFunction(kind)) return false;
+ if (is_strong(mode)) return IsClassConstructor(kind);
+ return true;
}
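A toy restatement of the mask trick (standalone C++ with hypothetical bit positions; the real FunctionKind layout is defined earlier in this header): kClassConstructor OR's the three constructor bits once, so IsClassConstructor becomes a single AND.

#include <cassert>

// Hypothetical bit positions, for illustration only.
enum FunctionKind : unsigned {
  kNormalFunction = 0,
  kBaseConstructor = 1u << 0,
  kSubclassConstructor = 1u << 1,
  kDefaultConstructor = 1u << 2,
  kClassConstructor =
      kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
};

bool IsClassConstructor(FunctionKind kind) {
  return (kind & kClassConstructor) != 0;  // one AND instead of three tests
}

int main() {
  assert(IsClassConstructor(kBaseConstructor));
  assert(IsClassConstructor(kDefaultConstructor));
  assert(!IsClassConstructor(kNormalFunction));
}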
@@ -1020,7 +1042,8 @@ inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
namespace i = v8::internal;
diff --git a/chromium/v8/src/handles-inl.h b/chromium/v8/src/handles-inl.h
index 8c547e1b9c3..cfaf4fb6eb6 100644
--- a/chromium/v8/src/handles-inl.h
+++ b/chromium/v8/src/handles-inl.h
@@ -14,15 +14,23 @@ namespace v8 {
namespace internal {
HandleBase::HandleBase(Object* object, Isolate* isolate)
- : location_(HandleScope::CreateHandle(isolate, object)) {}
+ : location_(HandleScope::GetHandle(isolate, object)) {}
+
+
+// Allocate a new handle for the object; do not canonicalize.
+template <typename T>
+Handle<T> Handle<T>::New(T* object, Isolate* isolate) {
+ return Handle(
+ reinterpret_cast<T**>(HandleScope::CreateHandle(isolate, object)));
+}
HandleScope::HandleScope(Isolate* isolate) {
- HandleScopeData* current = isolate->handle_scope_data();
+ HandleScopeData* data = isolate->handle_scope_data();
isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
+ prev_next_ = data->next;
+ prev_limit_ = data->limit;
+ data->level++;
}
@@ -76,7 +84,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
// Throw away all handles in the current scope.
CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
- DCHECK(current->level > 0);
+ DCHECK(current->level > current->sealed_level);
Handle<T> result(value, isolate_);
// Reinitialize the current scope (so that it's ready
// to be used or closed again).
@@ -87,24 +95,30 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
}
-template <typename T>
-T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
+Object** HandleScope::CreateHandle(Isolate* isolate, Object* value) {
DCHECK(AllowHandleAllocation::IsAllowed());
- HandleScopeData* current = isolate->handle_scope_data();
+ HandleScopeData* data = isolate->handle_scope_data();
- Object** cur = current->next;
- if (cur == current->limit) cur = Extend(isolate);
+ Object** result = data->next;
+ if (result == data->limit) result = Extend(isolate);
// Update the current next field, set the value in the created
// handle, and return the result.
- DCHECK(cur < current->limit);
- current->next = cur + 1;
+ DCHECK(result < data->limit);
+ data->next = result + 1;
- T** result = reinterpret_cast<T**>(cur);
*result = value;
return result;
}
+Object** HandleScope::GetHandle(Isolate* isolate, Object* value) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ HandleScopeData* data = isolate->handle_scope_data();
+ CanonicalHandleScope* canonical = data->canonical_scope;
+ return canonical ? canonical->Lookup(value) : CreateHandle(isolate, value);
+}
+
+
#ifdef DEBUG
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
@@ -112,10 +126,10 @@ inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
HandleScopeData* current = isolate_->handle_scope_data();
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
- limit_ = current->limit;
+ prev_limit_ = current->limit;
current->limit = current->next;
- level_ = current->level;
- current->level = 0;
+ prev_sealed_level_ = current->sealed_level;
+ current->sealed_level = current->level;
}
@@ -123,10 +137,10 @@ inline SealHandleScope::~SealHandleScope() {
// Restore state in current handle scope to re-enable handle
// allocations.
HandleScopeData* current = isolate_->handle_scope_data();
- DCHECK_EQ(0, current->level);
- current->level = level_;
DCHECK_EQ(current->next, current->limit);
- current->limit = limit_;
+ current->limit = prev_limit_;
+ DCHECK_EQ(current->level, current->sealed_level);
+ current->sealed_level = prev_sealed_level_;
}
#endif
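A toy model of the reworked sealing (not V8 code, and simplified: the real SealHandleScope also pinches the scope's limit): rather than zeroing level, sealing records the current depth in sealed_level, and handle creation is refused only while level == sealed_level, so scopes opened under a seal can still allocate.

#include <cassert>

// The per-isolate scope bookkeeping, reduced to the two counters involved.
struct ScopeData {
  int level = 0;         // number of open HandleScopes
  int sealed_level = 0;  // depth at which the last SealHandleScope sealed
};

bool CanCreateHandle(const ScopeData& d) {
  // Mirrors the check in HandleScope::Extend:
  // current->level != current->sealed_level.
  return d.level != d.sealed_level;
}

int main() {
  ScopeData d;
  d.level = 3;         // three nested scopes are open
  d.sealed_level = 3;  // SealHandleScope seals at the current depth
  assert(!CanCreateHandle(d));
  d.level++;  // opening an inner HandleScope lifts the seal
  assert(CanCreateHandle(d));
}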
diff --git a/chromium/v8/src/handles.cc b/chromium/v8/src/handles.cc
index ae6fac89d3f..b162ba8645a 100644
--- a/chromium/v8/src/handles.cc
+++ b/chromium/v8/src/handles.cc
@@ -4,7 +4,9 @@
#include "src/handles.h"
+#include "src/address-map.h"
#include "src/base/logging.h"
+#include "src/identity-map.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -55,7 +57,7 @@ Object** HandleScope::Extend(Isolate* isolate) {
DCHECK(result == current->limit);
// Make sure there's at least one scope on the stack and that the
// top of the scope stack isn't a barrier.
- if (!Utils::ApiCheck(current->level != 0,
+ if (!Utils::ApiCheck(current->level != current->sealed_level,
"v8::HandleScope::CreateHandle()",
"Cannot create a handle without a HandleScope")) {
return NULL;
@@ -117,6 +119,48 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
}
+CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
+ : isolate_(isolate) {
+ HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
+ prev_canonical_scope_ = handle_scope_data->canonical_scope;
+ handle_scope_data->canonical_scope = this;
+ root_index_map_ = new RootIndexMap(isolate);
+ identity_map_ = new IdentityMap<Object**>(isolate->heap(), &zone_);
+ canonical_level_ = handle_scope_data->level;
+}
+
+
+CanonicalHandleScope::~CanonicalHandleScope() {
+ delete root_index_map_;
+ delete identity_map_;
+ isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
+}
+
+
+Object** CanonicalHandleScope::Lookup(Object* object) {
+ DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
+ if (isolate_->handle_scope_data()->level != canonical_level_) {
+ // We are in an inner handle scope. Do not canonicalize since we will leave
+ // this handle scope while still being in the canonical scope.
+ return HandleScope::CreateHandle(isolate_, object);
+ }
+ if (object->IsHeapObject()) {
+ int index = root_index_map_->Lookup(HeapObject::cast(object));
+ if (index != RootIndexMap::kInvalidRootIndex) {
+ return isolate_->heap()
+ ->root_handle(static_cast<Heap::RootListIndex>(index))
+ .location();
+ }
+ }
+ Object*** entry = identity_map_->Get(object);
+ if (*entry == nullptr) {
+ // Allocate new handle location.
+ *entry = HandleScope::CreateHandle(isolate_, object);
+ }
+ return reinterpret_cast<Object**>(*entry);
+}
+
+
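A toy standalone model of this lookup (not V8 code: std::unordered_map stands in for IdentityMap and the root-table fast path is dropped): within one canonical scope, every lookup of the same object returns the same slot, which is exactly the behavior Handle<T>::New is there to bypass.

#include <cassert>
#include <unordered_map>
#include <vector>

struct Object {};

class ToyCanonicalScope {
 public:
  Object** Lookup(Object* object) {
    auto it = map_.find(object);
    if (it != map_.end()) return it->second;  // reuse the canonical slot
    slots_.push_back(new Object*(object));    // allocate a fresh slot
    map_[object] = slots_.back();
    return slots_.back();
  }
  ~ToyCanonicalScope() {
    for (Object** slot : slots_) delete slot;
  }

 private:
  std::unordered_map<Object*, Object**> map_;
  std::vector<Object**> slots_;
};

int main() {
  Object o;
  ToyCanonicalScope scope;
  assert(scope.Lookup(&o) == scope.Lookup(&o));  // canonicalized: same slot
}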
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
diff --git a/chromium/v8/src/handles.h b/chromium/v8/src/handles.h
index 85fa839f3f3..1f97d6ff7e6 100644
--- a/chromium/v8/src/handles.h
+++ b/chromium/v8/src/handles.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/checks.h"
#include "src/globals.h"
+#include "src/zone.h"
namespace v8 {
namespace internal {
@@ -92,6 +93,9 @@ class Handle final : public HandleBase {
V8_INLINE explicit Handle(T* object) : Handle(object, object->GetIsolate()) {}
V8_INLINE Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
+ // Allocate a new handle for the object; do not canonicalize.
+ V8_INLINE static Handle<T> New(T* object, Isolate* isolate);
+
// Constructor for handling automatic up casting.
// Ex. Handle<JSFunction> can be passed when Handle<Object> is expected.
template <typename S>
@@ -167,8 +171,6 @@ V8_INLINE Handle<T> handle(T* object) {
// into a Handle requires checking that it does not point to NULL. This
// ensures NULL checks before use.
//
-// Do not use MaybeHandle as argument type.
-//
// Also note that Handles do not provide default equality comparison or hashing
// operators on purpose. Such operators would be misleading, because intended
// semantics is ambiguous between Handle location and object identity.
@@ -254,9 +256,11 @@ class HandleScope {
// Counts the number of allocated handles.
static int NumberOfHandles(Isolate* isolate);
+ // Create a new handle or lookup a canonical handle.
+ V8_INLINE static Object** GetHandle(Isolate* isolate, Object* value);
+
// Creates a new handle with the given value.
- template <typename T>
- static inline T** CreateHandle(Isolate* isolate, T* value);
+ V8_INLINE static Object** CreateHandle(Isolate* isolate, Object* value);
// Deallocates any extensions used by the current scope.
static void DeleteExtensions(Isolate* isolate);
@@ -305,11 +309,44 @@ class HandleScope {
friend class v8::HandleScope;
friend class DeferredHandles;
+ friend class DeferredHandleScope;
friend class HandleScopeImplementer;
friend class Isolate;
};
+// Forward declarations for CanonicalHandleScope.
+template <typename V>
+class IdentityMap;
+class RootIndexMap;
+
+
+// A CanonicalHandleScope does not open a new HandleScope. It changes the
+// existing HandleScope so that Handles created within are canonicalized.
+// This does not apply to nested inner HandleScopes unless a nested
+// CanonicalHandleScope is introduced. Handles are only canonicalized within
+// the same CanonicalHandleScope, but not across nested ones.
+class CanonicalHandleScope final {
+ public:
+ explicit CanonicalHandleScope(Isolate* isolate);
+ ~CanonicalHandleScope();
+
+ private:
+ Object** Lookup(Object* object);
+
+ Isolate* isolate_;
+ Zone zone_;
+ RootIndexMap* root_index_map_;
+ IdentityMap<Object**>* identity_map_;
+ // Ordinary nested handle scopes within the current one are not canonical.
+ int canonical_level_;
+ // We may have nested canonical scopes. Handles are canonical within each one.
+ CanonicalHandleScope* prev_canonical_scope_;
+
+ friend class HandleScope;
+};
+
+
class DeferredHandleScope final {
public:
explicit DeferredHandleScope(Isolate* isolate);
@@ -345,8 +382,8 @@ class SealHandleScope final {
inline ~SealHandleScope();
private:
Isolate* isolate_;
- Object** limit_;
- int level_;
+ Object** prev_limit_;
+ int prev_sealed_level_;
#endif
};
@@ -355,10 +392,13 @@ struct HandleScopeData final {
Object** next;
Object** limit;
int level;
+ int sealed_level;
+ CanonicalHandleScope* canonical_scope;
void Initialize() {
next = limit = NULL;
- level = 0;
+ sealed_level = level = 0;
+ canonical_scope = NULL;
}
};
diff --git a/chromium/v8/src/harmony-array-includes.js b/chromium/v8/src/harmony-array-includes.js
deleted file mode 100644
index a6b59137d21..00000000000
--- a/chromium/v8/src/harmony-array-includes.js
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-var GlobalArray = global.Array;
-
-// -------------------------------------------------------------------
-
-// Proposed for ES7
-// https://github.com/tc39/Array.prototype.includes
-// 46c7532ec8499dea3e51aeb940d09e07547ed3f5
-function InnerArrayIncludes(searchElement, fromIndex, array, length) {
- if (length === 0) {
- return false;
- }
-
- var n = TO_INTEGER(fromIndex);
-
- var k;
- if (n >= 0) {
- k = n;
- } else {
- k = length + n;
- if (k < 0) {
- k = 0;
- }
- }
-
- while (k < length) {
- var elementK = array[k];
- if ($sameValueZero(searchElement, elementK)) {
- return true;
- }
-
- ++k;
- }
-
- return false;
-}
-
-
-function ArrayIncludes(searchElement, fromIndex) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayIncludes(searchElement, fromIndex, array, length);
-}
-
-
-function TypedArrayIncludes(searchElement, fromIndex) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayIncludes(searchElement, fromIndex, this, length);
-}
-
-// -------------------------------------------------------------------
-
-%FunctionSetLength(ArrayIncludes, 1);
-%FunctionSetLength(TypedArrayIncludes, 1);
-
-// Set up the non-enumerable function on the Array prototype object.
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
- "includes", ArrayIncludes
-]);
-
-// Set up the non-enumerable function on the typed array prototypes.
-// This duplicates some of the machinery in harmony-typedarray.js in order to
-// keep includes behind the separate --harmony-array-includes flag.
-// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
-
-macro TYPED_ARRAYS(FUNCTION)
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-FUNCTION(Uint8Array)
-FUNCTION(Int8Array)
-FUNCTION(Uint16Array)
-FUNCTION(Int16Array)
-FUNCTION(Uint32Array)
-FUNCTION(Int32Array)
-FUNCTION(Float32Array)
-FUNCTION(Float64Array)
-FUNCTION(Uint8ClampedArray)
-endmacro
-
-macro DECLARE_GLOBALS(NAME)
-var GlobalNAME = global.NAME;
-endmacro
-
-macro EXTEND_TYPED_ARRAY(NAME)
-// Set up non-enumerable functions on the prototype object.
-utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- "includes", TypedArrayIncludes
-]);
-endmacro
-
-TYPED_ARRAYS(DECLARE_GLOBALS)
-TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
-
-})
diff --git a/chromium/v8/src/harmony-array.js b/chromium/v8/src/harmony-array.js
deleted file mode 100644
index 0867f7cd463..00000000000
--- a/chromium/v8/src/harmony-array.js
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GetIterator;
-var GetMethod;
-var GlobalArray = global.Array;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MathMax;
-var MathMin;
-var ObjectIsFrozen;
-var ObjectDefineProperty;
-var ToNumber;
-
-utils.Import(function(from) {
- GetIterator = from.GetIterator;
- GetMethod = from.GetMethod;
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- ObjectIsFrozen = from.ObjectIsFrozen;
- ObjectDefineProperty = from.ObjectDefineProperty;
- ToNumber = from.ToNumber;
-});
-
-// -------------------------------------------------------------------
-
-function InnerArrayCopyWithin(target, start, end, array, length) {
- target = TO_INTEGER(target);
- var to;
- if (target < 0) {
- to = MathMax(length + target, 0);
- } else {
- to = MathMin(target, length);
- }
-
- start = TO_INTEGER(start);
- var from;
- if (start < 0) {
- from = MathMax(length + start, 0);
- } else {
- from = MathMin(start, length);
- }
-
- end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
- var final;
- if (end < 0) {
- final = MathMax(length + end, 0);
- } else {
- final = MathMin(end, length);
- }
-
- var count = MathMin(final - from, length - to);
- var direction = 1;
- if (from < to && to < (from + count)) {
- direction = -1;
- from = from + count - 1;
- to = to + count - 1;
- }
-
- while (count > 0) {
- if (from in array) {
- array[to] = array[from];
- } else {
- delete array[to];
- }
- from = from + direction;
- to = to + direction;
- count--;
- }
-
- return array;
-}
-
-// ES6 draft 03-17-15, section 22.1.3.3
-function ArrayCopyWithin(target, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayCopyWithin(target, start, end, array, length);
-}
-
-function InnerArrayFind(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw MakeTypeError(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return element;
- }
- }
-
- return;
-}
-
-// ES6 draft 07-15-13, section 15.4.3.23
-function ArrayFind(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
-
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFind(predicate, thisArg, array, length);
-}
-
-function InnerArrayFindIndex(predicate, thisArg, array, length) {
- if (!IS_CALLABLE(predicate)) {
- throw MakeTypeError(kCalledNonCallable, predicate);
- }
-
- for (var i = 0; i < length; i++) {
- var element = array[i];
- if (%_Call(predicate, thisArg, element, i, array)) {
- return i;
- }
- }
-
- return -1;
-}
-
-// ES6 draft 07-15-13, section 15.4.3.24
-function ArrayFindIndex(predicate, thisArg) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
-
- var array = TO_OBJECT(this);
- var length = TO_INTEGER(array.length);
-
- return InnerArrayFindIndex(predicate, thisArg, array, length);
-}
-
-// ES6, draft 04-05-14, section 22.1.3.6
-function InnerArrayFill(value, start, end, array, length) {
- var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
- var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
-
- if (i < 0) {
- i += length;
- if (i < 0) i = 0;
- } else {
- if (i > length) i = length;
- }
-
- if (end < 0) {
- end += length;
- if (end < 0) end = 0;
- } else {
- if (end > length) end = length;
- }
-
- if ((end - i) > 0 && ObjectIsFrozen(array)) {
- throw MakeTypeError(kArrayFunctionsOnFrozen);
- }
-
- for (; i < end; i++)
- array[i] = value;
- return array;
-}
-
-// ES6, draft 04-05-14, section 22.1.3.6
-function ArrayFill(value, start, end) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
-
- return InnerArrayFill(value, start, end, array, length);
-}
-
-function AddArrayElement(constructor, array, i, value) {
- if (constructor === GlobalArray) {
- %AddElement(array, i, value);
- } else {
- ObjectDefineProperty(array, i, {
- value: value, writable: true, configurable: true, enumerable: true
- });
- }
-}
-
-// ES6, draft 10-14-14, section 22.1.2.1
-function ArrayFrom(arrayLike, mapfn, receiver) {
- var items = TO_OBJECT(arrayLike);
- var mapping = !IS_UNDEFINED(mapfn);
-
- if (mapping) {
- if (!IS_CALLABLE(mapfn)) {
- throw MakeTypeError(kCalledNonCallable, mapfn);
- }
- }
-
- var iterable = GetMethod(items, iteratorSymbol);
- var k;
- var result;
- var mappedValue;
- var nextValue;
-
- if (!IS_UNDEFINED(iterable)) {
- result = %IsConstructor(this) ? new this() : [];
-
- var iterator = GetIterator(items, iterable);
-
- k = 0;
- while (true) {
- var next = iterator.next();
-
- if (!IS_OBJECT(next)) {
- throw MakeTypeError(kIteratorResultNotAnObject, next);
- }
-
- if (next.done) {
- result.length = k;
- return result;
- }
-
- nextValue = next.value;
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- AddArrayElement(this, result, k, mappedValue);
- k++;
- }
- } else {
- var len = TO_LENGTH(items.length);
- result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
-
- for (k = 0; k < len; ++k) {
- nextValue = items[k];
- if (mapping) {
- mappedValue = %_Call(mapfn, receiver, nextValue, k);
- } else {
- mappedValue = nextValue;
- }
- AddArrayElement(this, result, k, mappedValue);
- }
-
- result.length = k;
- return result;
- }
-}
-
-// ES6, draft 05-22-14, section 22.1.2.3
-function ArrayOf() {
- var length = %_ArgumentsLength();
- var constructor = this;
- // TODO: Implement IsConstructor (ES6 section 7.2.5)
- var array = %IsConstructor(constructor) ? new constructor(length) : [];
- for (var i = 0; i < length; i++) {
- AddArrayElement(constructor, array, i, %_Arguments(i));
- }
- array.length = length;
- return array;
-}
-
-// -------------------------------------------------------------------
-
-%FunctionSetLength(ArrayCopyWithin, 2);
-%FunctionSetLength(ArrayFrom, 1);
-%FunctionSetLength(ArrayFill, 1);
-%FunctionSetLength(ArrayFind, 1);
-%FunctionSetLength(ArrayFindIndex, 1);
-
-// Set up non-enumerable functions on the Array object.
-utils.InstallFunctions(GlobalArray, DONT_ENUM, [
- "from", ArrayFrom,
- "of", ArrayOf
-]);
-
-// Set up the non-enumerable functions on the Array prototype object.
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
- "copyWithin", ArrayCopyWithin,
- "find", ArrayFind,
- "findIndex", ArrayFindIndex,
- "fill", ArrayFill
-]);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.ArrayFrom = ArrayFrom;
- to.InnerArrayCopyWithin = InnerArrayCopyWithin;
- to.InnerArrayFill = InnerArrayFill;
- to.InnerArrayFind = InnerArrayFind;
- to.InnerArrayFindIndex = InnerArrayFindIndex;
-});
-
-})
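The file removed above installed Array.prototype.copyWithin/find/findIndex/fill
and Array.from/Array.of. A rough sketch of the observable semantics, in plain
ES6 rather than the natives dialect (any modern engine runs this):

  // copyWithin copies a region of the array over itself; the direction flip
  // in InnerArrayCopyWithin handles overlapping source/target ranges.
  console.log([1, 2, 3, 4, 5].copyWithin(0, 3));        // [4, 5, 3, 4, 5]

  // find/findIndex return the first match / its index, else undefined / -1.
  console.log([5, 12, 8].find(x => x > 10));            // 12
  console.log([5, 12, 8].findIndex(x => x > 10));       // 1

  // fill writes value into [start, end), clamping negative indices against
  // the length, as InnerArrayFill does.
  console.log([0, 0, 0, 0].fill(7, 1, 3));              // [0, 7, 7, 0]

  // Array.from walks an iterable (or array-like) and applies an optional
  // mapping function; Array.of collects its arguments, unlike new Array(3).
  console.log(Array.from("abc", c => c.toUpperCase())); // ["A", "B", "C"]
  console.log(Array.of(3));                             // [3]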
diff --git a/chromium/v8/src/harmony-concat-spreadable.js b/chromium/v8/src/harmony-concat-spreadable.js
deleted file mode 100644
index c5d906a6422..00000000000
--- a/chromium/v8/src/harmony-concat-spreadable.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-var isConcatSpreadableSymbol =
- utils.ImportNow("is_concat_spreadable_symbol");
-
-utils.InstallConstants(global.Symbol, [
- // TODO(littledan): Move to symbol.js when shipping
- "isConcatSpreadable", isConcatSpreadableSymbol
-]);
-
-})
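The only observable effect of this file was exposing Symbol.isConcatSpreadable,
which lets an object opt in to being flattened by Array.prototype.concat; a
small plain-ES6 illustration:

  const arrayLike = {length: 2, 0: "a", 1: "b"};
  console.log([1].concat(arrayLike).length);  // 2: the object is appended whole

  // Opting in makes concat spread the object's indexed elements instead.
  arrayLike[Symbol.isConcatSpreadable] = true;
  console.log([1].concat(arrayLike));         // [1, "a", "b"]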
diff --git a/chromium/v8/src/harmony-object-observe.js b/chromium/v8/src/harmony-object-observe.js
deleted file mode 100644
index 44006cd2e92..00000000000
--- a/chromium/v8/src/harmony-object-observe.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-utils.InstallFunctions(global.Object, DONT_ENUM, $observeObjectMethods);
-utils.InstallFunctions(global.Array, DONT_ENUM, $observeArrayMethods);
-
-})
diff --git a/chromium/v8/src/harmony-reflect.js b/chromium/v8/src/harmony-reflect.js
deleted file mode 100644
index f1fe8605e54..00000000000
--- a/chromium/v8/src/harmony-reflect.js
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-var GlobalReflect = global.Reflect;
-var ReflectApply = utils.ImportNow("reflect_apply");
-var ReflectConstruct = utils.ImportNow("reflect_construct");
-
-utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
- "apply", ReflectApply,
- "construct", ReflectConstruct
-]);
-
-})
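Reflect.apply and Reflect.construct, installed above, are the functional forms
of calling and of `new`; in plain ES6 they behave like:

  function sum(a, b) { return a + b; }

  // Reflect.apply(f, thisArg, argsList): call f with an explicit receiver
  // and an argument list.
  console.log(Reflect.apply(sum, null, [2, 3]));  // 5

  // Reflect.construct(C, argsList): equivalent to new C(...argsList).
  console.log(Reflect.construct(Date, [2015, 0, 1]) instanceof Date);  // true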
diff --git a/chromium/v8/src/harmony-regexp.js b/chromium/v8/src/harmony-regexp.js
deleted file mode 100644
index 1ab76fad4ae..00000000000
--- a/chromium/v8/src/harmony-regexp.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-
-// -------------------------------------------------------------------
-
-// ES6 draft 12-06-13, section 21.2.5.3
-// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
-function RegExpGetFlags() {
- if (!IS_SPEC_OBJECT(this)) {
- throw MakeTypeError(kFlagsGetterNonObject, TO_STRING(this));
- }
- var result = '';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- if (this.unicode) result += 'u';
- if (this.sticky) result += 'y';
- return result;
-}
-
-%DefineAccessorPropertyUnchecked(GlobalRegExp.prototype, 'flags',
- RegExpGetFlags, null, DONT_ENUM);
-%SetNativeFlag(RegExpGetFlags);
-
-})
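RegExpGetFlags rebuilds the flags string from the individual boolean getters in
the fixed order g, i, m, u, y (this predates the later s, d, and v flags), so:

  console.log(/abc/gim.flags);               // "gim"
  console.log(new RegExp("x", "yu").flags);  // "uy": u sorts before y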
diff --git a/chromium/v8/src/harmony-sharedarraybuffer.js b/chromium/v8/src/harmony-sharedarraybuffer.js
deleted file mode 100644
index 3a72d6c353b..00000000000
--- a/chromium/v8/src/harmony-sharedarraybuffer.js
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
-var GlobalObject = global.Object;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-// -------------------------------------------------------------------
-
-function SharedArrayBufferConstructor(length) { // length = 1
- if (%_IsConstructCall()) {
- var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
- %ArrayBufferInitialize(this, byteLength, kShared);
- } else {
- throw MakeTypeError(kConstructorNotFunction, "SharedArrayBuffer");
- }
-}
-
-function SharedArrayBufferGetByteLen() {
- if (!IS_SHAREDARRAYBUFFER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'SharedArrayBuffer.prototype.byteLength', this);
- }
- return %_ArrayBufferGetByteLength(this);
-}
-
-function SharedArrayBufferIsViewJS(obj) {
- return %ArrayBufferIsView(obj);
-}
-
-
-// Set up the SharedArrayBuffer constructor function.
-%SetCode(GlobalSharedArrayBuffer, SharedArrayBufferConstructor);
-%FunctionSetPrototype(GlobalSharedArrayBuffer, new GlobalObject());
-
-// Set up the constructor property on the SharedArrayBuffer prototype object.
-%AddNamedProperty(GlobalSharedArrayBuffer.prototype, "constructor",
- GlobalSharedArrayBuffer, DONT_ENUM);
-
-%AddNamedProperty(GlobalSharedArrayBuffer.prototype,
- toStringTagSymbol, "SharedArrayBuffer", DONT_ENUM | READ_ONLY);
-
-utils.InstallGetter(GlobalSharedArrayBuffer.prototype, "byteLength",
- SharedArrayBufferGetByteLen);
-
-utils.InstallFunctions(GlobalSharedArrayBuffer, DONT_ENUM, [
- "isView", SharedArrayBufferIsViewJS
-]);
-
-})
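Assuming a host with SharedArrayBuffer enabled (Node, or a cross-origin
isolated page), the surface wired up above is observable as follows; note that
the isView static installed here predates the final standard, which kept only
ArrayBuffer.isView:

  // The constructor allocates a shared backing store of the requested size,
  // reported through the installed byteLength getter.
  const sab = new SharedArrayBuffer(16);
  console.log(sab.byteLength);                       // 16

  // The @@toStringTag property drives Object.prototype.toString.
  console.log(Object.prototype.toString.call(sab));  // "[object SharedArrayBuffer]"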
diff --git a/chromium/v8/src/harmony-tostring.js b/chromium/v8/src/harmony-tostring.js
deleted file mode 100644
index 8e76c3a5bbd..00000000000
--- a/chromium/v8/src/harmony-tostring.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalSymbol = global.Symbol;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.InstallConstants(GlobalSymbol, [
- // TODO(dslomov, caitp): Move to symbol.js when shipping
- "toStringTag", toStringTagSymbol
-]);
-
-})
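Symbol.toStringTag, exposed here, lets any object customize the tag that
Object.prototype.toString reports:

  const bag = {get [Symbol.toStringTag]() { return "Bag"; }};
  // Object.prototype.toString consults @@toStringTag when it is a string.
  console.log(Object.prototype.toString.call(bag));  // "[object Bag]"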
diff --git a/chromium/v8/src/harmony-typedarray.js b/chromium/v8/src/harmony-typedarray.js
deleted file mode 100644
index 9d66e211e98..00000000000
--- a/chromium/v8/src/harmony-typedarray.js
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-macro TYPED_ARRAYS(FUNCTION)
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-FUNCTION(Uint8Array)
-FUNCTION(Int8Array)
-FUNCTION(Uint16Array)
-FUNCTION(Int16Array)
-FUNCTION(Uint32Array)
-FUNCTION(Int32Array)
-FUNCTION(Float32Array)
-FUNCTION(Float64Array)
-FUNCTION(Uint8ClampedArray)
-endmacro
-
-macro DECLARE_GLOBALS(NAME)
-var GlobalNAME = global.NAME;
-endmacro
-
-TYPED_ARRAYS(DECLARE_GLOBALS)
-DECLARE_GLOBALS(Array)
-
-var ArrayFrom;
-var ArrayToString;
-var InnerArrayCopyWithin;
-var InnerArrayEvery;
-var InnerArrayFill;
-var InnerArrayFilter;
-var InnerArrayFind;
-var InnerArrayFindIndex;
-var InnerArrayForEach;
-var InnerArrayIndexOf;
-var InnerArrayJoin;
-var InnerArrayLastIndexOf;
-var InnerArrayMap;
-var InnerArrayReduce;
-var InnerArrayReduceRight;
-var InnerArraySome;
-var InnerArraySort;
-var InnerArrayToLocaleString;
-var IsNaN;
-var MathMax;
-var MathMin;
-var PackedArrayReverse;
-var ToNumber;
-
-utils.Import(function(from) {
- ArrayFrom = from.ArrayFrom;
- ArrayToString = from.ArrayToString;
- InnerArrayCopyWithin = from.InnerArrayCopyWithin;
- InnerArrayEvery = from.InnerArrayEvery;
- InnerArrayFill = from.InnerArrayFill;
- InnerArrayFilter = from.InnerArrayFilter;
- InnerArrayFind = from.InnerArrayFind;
- InnerArrayFindIndex = from.InnerArrayFindIndex;
- InnerArrayForEach = from.InnerArrayForEach;
- InnerArrayIndexOf = from.InnerArrayIndexOf;
- InnerArrayJoin = from.InnerArrayJoin;
- InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
- InnerArrayMap = from.InnerArrayMap;
- InnerArrayReduce = from.InnerArrayReduce;
- InnerArrayReduceRight = from.InnerArrayReduceRight;
- InnerArraySome = from.InnerArraySome;
- InnerArraySort = from.InnerArraySort;
- InnerArrayToLocaleString = from.InnerArrayToLocaleString;
- IsNaN = from.IsNaN;
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- PackedArrayReverse = from.PackedArrayReverse;
- ToNumber = from.ToNumber;
-});
-
-// -------------------------------------------------------------------
-
-function ConstructTypedArray(constructor, arg) {
- // TODO(littledan): This is an approximation of the spec, which requires
- // that only real TypedArray classes should be accepted (22.2.2.1.1)
- if (!%IsConstructor(constructor) || IS_UNDEFINED(constructor.prototype) ||
- !%HasOwnProperty(constructor.prototype, "BYTES_PER_ELEMENT")) {
- throw MakeTypeError(kNotTypedArray);
- }
-
- // TODO(littledan): The spec requires that, rather than directly calling
- // the constructor, a TypedArray is created with the proper proto and
- // underlying size and element size, and elements are put in one by one.
- // By contrast, this would allow subclasses to make a radically different
- // constructor with different semantics.
- return new constructor(arg);
-}
-
-function ConstructTypedArrayLike(typedArray, arg) {
- // TODO(littledan): The spec requires that we actually use
- // typedArray.constructor[Symbol.species] (bug v8:4093)
- // Also, it should default to the default constructor from
- // table 49 if typedArray.constructor doesn't exist.
- return ConstructTypedArray(typedArray.constructor, arg);
-}
-
-function TypedArrayCopyWithin(target, start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- // TODO(littledan): Replace with a memcpy for better performance
- return InnerArrayCopyWithin(target, start, end, this, length);
-}
-%FunctionSetLength(TypedArrayCopyWithin, 2);
-
-// ES6 draft 05-05-15, section 22.2.3.7
-function TypedArrayEvery(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayEvery(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArrayEvery, 1);
-
-// ES6 draft 08-24-14, section 22.2.3.12
-function TypedArrayForEach(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- InnerArrayForEach(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArrayForEach, 1);
-
-// ES6 draft 04-05-14 section 22.2.3.8
-function TypedArrayFill(value, start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFill(value, start, end, this, length);
-}
-%FunctionSetLength(TypedArrayFill, 1);
-
-// ES6 draft 07-15-13, section 22.2.3.9
-function TypedArrayFilter(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- var array = InnerArrayFilter(predicate, thisArg, this, length);
- return ConstructTypedArrayLike(this, array);
-}
-%FunctionSetLength(TypedArrayFilter, 1);
-
-// ES6 draft 07-15-13, section 22.2.3.10
-function TypedArrayFind(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFind(predicate, thisArg, this, length);
-}
-%FunctionSetLength(TypedArrayFind, 1);
-
-// ES6 draft 07-15-13, section 22.2.3.11
-function TypedArrayFindIndex(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayFindIndex(predicate, thisArg, this, length);
-}
-%FunctionSetLength(TypedArrayFindIndex, 1);
-
-// ES6 draft 05-18-15, section 22.2.3.21
-function TypedArrayReverse() {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return PackedArrayReverse(this, length);
-}
-
-
-function TypedArrayComparefn(x, y) {
- if (IsNaN(x) && IsNaN(y)) {
- return 0;
- }
- if (IsNaN(x)) {
- return 1;
- }
- if (x === 0 && x === y) {
- if (%_IsMinusZero(x)) {
- if (!%_IsMinusZero(y)) {
- return -1;
- }
- } else if (%_IsMinusZero(y)) {
- return 1;
- }
- }
- return x - y;
-}
-
-
-// ES6 draft 05-18-15, section 22.2.3.25
-function TypedArraySort(comparefn) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- if (IS_UNDEFINED(comparefn)) {
- comparefn = TypedArrayComparefn;
- }
-
- return InnerArraySort(this, length, comparefn);
-}
-
-
-// ES6 section 22.2.3.13
-function TypedArrayIndexOf(element, index) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayIndexOf(this, element, index, length);
-}
-%FunctionSetLength(TypedArrayIndexOf, 1);
-
-
-// ES6 section 22.2.3.16
-function TypedArrayLastIndexOf(element, index) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayLastIndexOf(this, element, index, length,
- %_ArgumentsLength());
-}
-%FunctionSetLength(TypedArrayLastIndexOf, 1);
-
-
-// ES6 draft 07-15-13, section 22.2.3.18
-function TypedArrayMap(predicate, thisArg) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- // TODO(littledan): Preallocate rather than making an intermediate
- // InternalArray, for better performance.
- var length = %_TypedArrayGetLength(this);
- var array = InnerArrayMap(predicate, thisArg, this, length);
- return ConstructTypedArrayLike(this, array);
-}
-%FunctionSetLength(TypedArrayMap, 1);
-
-
-// ES6 draft 05-05-15, section 22.2.3.24
-function TypedArraySome(f, receiver) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArraySome(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArraySome, 1);
-
-
-// ES6 section 22.2.3.27
-function TypedArrayToLocaleString() {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayToLocaleString(this, length);
-}
-
-
-// ES6 section 22.2.3.28
-function TypedArrayToString() {
- return %_Call(ArrayToString, this);
-}
-
-
-// ES6 section 22.2.3.14
-function TypedArrayJoin(separator) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayJoin(separator, this, length);
-}
-
-
-// ES6 draft 07-15-13, section 22.2.3.19
-function TypedArrayReduce(callback, current) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayReduce(callback, current, this, length,
- %_ArgumentsLength());
-}
-%FunctionSetLength(TypedArrayReduce, 1);
-
-
-// ES6 draft 07-15-13, section 22.2.3.19
-function TypedArrayReduceRight(callback, current) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayReduceRight(callback, current, this, length,
- %_ArgumentsLength());
-}
-%FunctionSetLength(TypedArrayReduceRight, 1);
-
-
-function TypedArraySlice(start, end) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
- var len = %_TypedArrayGetLength(this);
-
- var relativeStart = TO_INTEGER(start);
-
- var k;
- if (relativeStart < 0) {
- k = MathMax(len + relativeStart, 0);
- } else {
- k = MathMin(relativeStart, len);
- }
-
- var relativeEnd;
- if (IS_UNDEFINED(end)) {
- relativeEnd = len;
- } else {
- relativeEnd = TO_INTEGER(end);
- }
-
- var final;
- if (relativeEnd < 0) {
- final = MathMax(len + relativeEnd, 0);
- } else {
- final = MathMin(relativeEnd, len);
- }
-
- var count = MathMax(final - k, 0);
- var array = ConstructTypedArrayLike(this, count);
- // The code below is the 'then' branch; the 'else' branch specifies
- // a memcpy. Because V8 doesn't canonicalize NaN, the difference is
- // unobservable.
- var n = 0;
- while (k < final) {
- var kValue = this[k];
- // TODO(littledan): The spec says to throw on an error in setting;
- // does this throw?
- array[n] = kValue;
- k++;
- n++;
- }
- return array;
-}
-
-
-// ES6 draft 08-24-14, section 22.2.2.2
-function TypedArrayOf() {
- var length = %_ArgumentsLength();
- var array = new this(length);
- for (var i = 0; i < length; i++) {
- array[i] = %_Arguments(i);
- }
- return array;
-}
-
-
-function TypedArrayFrom(source, mapfn, thisArg) {
- // TODO(littledan): Investigate if there is a receiver which could be
- // faster to accumulate on than Array, e.g., a TypedVector.
- var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
- return ConstructTypedArray(this, array);
-}
-%FunctionSetLength(TypedArrayFrom, 1);
-
-// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
-macro EXTEND_TYPED_ARRAY(NAME)
- // Set up non-enumerable functions on the object.
- utils.InstallFunctions(GlobalNAME, DONT_ENUM | DONT_DELETE | READ_ONLY, [
- "from", TypedArrayFrom,
- "of", TypedArrayOf
- ]);
-
- // Set up non-enumerable functions on the prototype object.
- utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- "copyWithin", TypedArrayCopyWithin,
- "every", TypedArrayEvery,
- "fill", TypedArrayFill,
- "filter", TypedArrayFilter,
- "find", TypedArrayFind,
- "findIndex", TypedArrayFindIndex,
- "indexOf", TypedArrayIndexOf,
- "join", TypedArrayJoin,
- "lastIndexOf", TypedArrayLastIndexOf,
- "forEach", TypedArrayForEach,
- "map", TypedArrayMap,
- "reduce", TypedArrayReduce,
- "reduceRight", TypedArrayReduceRight,
- "reverse", TypedArrayReverse,
- "slice", TypedArraySlice,
- "some", TypedArraySome,
- "sort", TypedArraySort,
- "toString", TypedArrayToString,
- "toLocaleString", TypedArrayToLocaleString
- ]);
-endmacro
-
-TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
-
-})
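Taken together, the methods installed by this file give every typed array most
of the Array.prototype surface. A quick plain-ES6 tour of the behaviors defined
above (output comments approximate Node's formatting):

  // TypedArrayOf collects its arguments into a new same-type array.
  console.log(Int8Array.of(3, -1, 2));                    // Int8Array [3, -1, 2]

  // TypedArraySlice clamps negative indices like Array#slice and allocates
  // the result via the source's constructor.
  console.log(Int8Array.of(1, 2, 3, 4).slice(1, -1));     // Int8Array [2, 3]

  // TypedArrayFrom funnels through ArrayFrom, so iterables and the optional
  // mapping function both work.
  console.log(Float64Array.from([1, 2, 3], x => x / 2));  // Float64Array [0.5, 1, 1.5]

  // The default sort comparator (TypedArrayComparefn) orders numerically,
  // places -0 before +0, and pushes NaN to the end.
  console.log(Float64Array.of(NaN, 1, -0, 0).sort());     // Float64Array [-0, 0, 1, NaN]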
diff --git a/chromium/v8/src/hashmap.h b/chromium/v8/src/hashmap.h
index ee3797fe594..f94def7c3c7 100644
--- a/chromium/v8/src/hashmap.h
+++ b/chromium/v8/src/hashmap.h
@@ -350,6 +350,7 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HASHMAP_H_
diff --git a/chromium/v8/src/heap/array-buffer-tracker.h b/chromium/v8/src/heap/array-buffer-tracker.h
index c12557a9fce..7ba22fb5732 100644
--- a/chromium/v8/src/heap/array-buffer-tracker.h
+++ b/chromium/v8/src/heap/array-buffer-tracker.h
@@ -68,6 +68,6 @@ class ArrayBufferTracker {
std::map<void*, size_t> live_array_buffers_for_scavenge_;
std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
diff --git a/chromium/v8/src/heap/gc-idle-time-handler.cc b/chromium/v8/src/heap/gc-idle-time-handler.cc
index e1f9ef43e72..4e6e6081d7b 100644
--- a/chromium/v8/src/heap/gc-idle-time-handler.cc
+++ b/chromium/v8/src/heap/gc-idle-time-handler.cc
@@ -180,5 +180,5 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
}
-}
-}
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index e27fa27d83c..ec1ad653910 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -21,6 +21,19 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
}
+GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
+ : tracer_(tracer), scope_(scope) {
+ start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+}
+
+
+GCTracer::Scope::~Scope() {
+ DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned.
+ tracer_->current_.scopes[scope_] +=
+ tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+}
+
+
GCTracer::AllocationEvent::AllocationEvent(double duration,
size_t allocation_in_bytes) {
duration_ = duration;
@@ -45,6 +58,7 @@ GCTracer::Event::Event(Type type, const char* gc_reason,
collector_reason(collector_reason),
start_time(0.0),
end_time(0.0),
+ reduce_memory(false),
start_object_size(0),
end_object_size(0),
start_memory_size(0),
@@ -99,6 +113,9 @@ GCTracer::GCTracer(Heap* heap)
cumulative_incremental_marking_duration_(0.0),
cumulative_pure_incremental_marking_duration_(0.0),
longest_incremental_marking_step_(0.0),
+ cumulative_incremental_marking_finalization_steps_(0),
+ cumulative_incremental_marking_finalization_duration_(0.0),
+ longest_incremental_marking_finalization_step_(0.0),
cumulative_marking_duration_(0.0),
cumulative_sweeping_duration_(0.0),
allocation_time_ms_(0.0),
@@ -110,7 +127,7 @@ GCTracer::GCTracer(Heap* heap)
combined_mark_compact_speed_cache_(0.0),
start_counter_(0) {
current_ = Event(Event::START, NULL, NULL);
- current_.end_time = base::OS::TimeCurrentMillis();
+ current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
previous_ = previous_incremental_mark_compactor_event_ = current_;
}
@@ -138,6 +155,7 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
}
}
+ current_.reduce_memory = heap_->ShouldReduceMemory();
current_.start_time = start_time;
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
@@ -259,6 +277,10 @@ void GCTracer::Stop(GarbageCollector collector) {
if (FLAG_trace_gc) {
heap_->PrintShortHeapStatistics();
}
+
+ longest_incremental_marking_finalization_step_ = 0.0;
+ cumulative_incremental_marking_finalization_steps_ = 0;
+ cumulative_incremental_marking_finalization_duration_ = 0.0;
}
@@ -307,6 +329,13 @@ void GCTracer::AddContextDisposalTime(double time) {
}
+void GCTracer::AddCompactionEvent(double duration,
+ intptr_t live_bytes_compacted) {
+ compaction_events_.push_front(
+ CompactionEvent(duration, live_bytes_compacted));
+}
+
+
void GCTracer::AddSurvivalRatio(double promotion_ratio) {
survival_events_.push_front(SurvivalEvent(promotion_ratio));
}
@@ -325,6 +354,14 @@ void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
}
+void GCTracer::AddIncrementalMarkingFinalizationStep(double duration) {
+ cumulative_incremental_marking_finalization_steps_++;
+ cumulative_incremental_marking_finalization_duration_ += duration;
+ longest_incremental_marking_finalization_step_ =
+ Max(longest_incremental_marking_finalization_step_, duration);
+}
+
+
void GCTracer::Output(const char* format, ...) const {
if (FLAG_trace_gc) {
va_list arguments;
@@ -391,104 +428,208 @@ void GCTracer::Print() const {
void GCTracer::PrintNVP() const {
- PrintIsolate(heap_->isolate(), "[I:%p] %8.0f ms: ", heap_->isolate(),
- heap_->isolate()->time_millis_since_init());
-
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = current_.start_time - previous_.end_time;
-
- PrintF("pause=%.1f ", duration);
- PrintF("mutator=%.1f ", spent_in_mutator);
- PrintF("gc=%s ", current_.TypeName(true));
+ intptr_t allocated_since_last_gc =
+ current_.start_object_size - previous_.end_object_size;
switch (current_.type) {
case Event::SCAVENGER:
- PrintF("scavenge=%.2f ", current_.scopes[Scope::SCAVENGER_SCAVENGE]);
- PrintF("old_new=%.2f ",
- current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS]);
- PrintF("weak=%.2f ", current_.scopes[Scope::SCAVENGER_WEAK]);
- PrintF("roots=%.2f ", current_.scopes[Scope::SCAVENGER_ROOTS]);
- PrintF("code=%.2f ",
- current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES]);
- PrintF("semispace=%.2f ", current_.scopes[Scope::SCAVENGER_SEMISPACE]);
- PrintF("object_groups=%.2f ",
- current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS]);
- PrintF("steps_count=%d ", current_.incremental_marking_steps);
- PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
- PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
- ScavengeSpeedInBytesPerMillisecond());
+ PrintIsolate(heap_->isolate(),
+ "%8.0f ms: "
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d "
+ "scavenge=%.2f "
+ "old_new=%.2f "
+ "weak=%.2f "
+ "roots=%.2f "
+ "code=%.2f "
+ "semispace=%.2f "
+ "object_groups=%.2f "
+ "steps_count=%d "
+ "steps_took=%.1f "
+ "scavenge_throughput=%" V8_PTR_PREFIX
+ "d "
+ "total_size_before=%" V8_PTR_PREFIX
+ "d "
+ "total_size_after=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_before=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_after=%" V8_PTR_PREFIX
+ "d "
+ "allocated=%" V8_PTR_PREFIX
+ "d "
+ "promoted=%" V8_PTR_PREFIX
+ "d "
+ "semi_space_copied=%" V8_PTR_PREFIX
+ "d "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "semi_space_copy_rate=%.1f%% "
+ "new_space_allocation_throughput=%" V8_PTR_PREFIX
+ "d "
+ "context_disposal_rate=%.1f\n",
+ heap_->isolate()->time_millis_since_init(), duration,
+ spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory,
+ current_.scopes[Scope::SCAVENGER_SCAVENGE],
+ current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
+ current_.scopes[Scope::SCAVENGER_WEAK],
+ current_.scopes[Scope::SCAVENGER_ROOTS],
+ current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
+ current_.scopes[Scope::SCAVENGER_SEMISPACE],
+ current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
+ current_.incremental_marking_steps,
+ current_.incremental_marking_duration,
+ ScavengeSpeedInBytesPerMillisecond(),
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
+ heap_->semi_space_copied_object_size(),
+ heap_->nodes_died_in_new_space_,
+ heap_->nodes_copied_in_new_space_, heap_->nodes_promoted_,
+ heap_->promotion_ratio_, AverageSurvivalRatio(),
+ heap_->promotion_rate_, heap_->semi_space_copied_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ ContextDisposalRateInMilliseconds());
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
- PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
- PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
- PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
- PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
- PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
- PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
- PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
- PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
- PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
- PrintF("new_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
- PrintF("root_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
- PrintF("old_new=%.1f ",
- current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
- PrintF("compaction_ptrs=%.1f ",
- current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
- PrintF("intracompaction_ptrs=%.1f ",
- current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
- PrintF("misc_compaction=%.1f ",
- current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
- PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
- PrintF("inc_weak_closure=%.1f ",
- current_.scopes[Scope::MC_INCREMENTAL_WEAKCLOSURE]);
- PrintF("weakcollection_process=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
- PrintF("weakcollection_clear=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
- PrintF("weakcollection_abort=%.1f ",
- current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
- PrintF("weakcells=%.1f ", current_.scopes[Scope::MC_WEAKCELL]);
- PrintF("nonlive_refs=%.1f ",
- current_.scopes[Scope::MC_NONLIVEREFERENCES]);
-
- PrintF("steps_count=%d ", current_.incremental_marking_steps);
- PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
- PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
- PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
- IncrementalMarkingSpeedInBytesPerMillisecond());
+ PrintIsolate(
+ heap_->isolate(),
+ "%8.0f ms: "
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d "
+ "external=%.1f "
+ "clear=%1.f "
+ "clear.code_flush=%.1f "
+ "clear.dependent_code=%.1f "
+ "clear.global_handles=%.1f "
+ "clear.maps=%.1f "
+ "clear.slots_buffer=%.1f "
+ "clear.store_buffer=%.1f "
+ "clear.string_table=%.1f "
+ "clear.weak_cells=%.1f "
+ "clear.weak_collections=%.1f "
+ "clear.weak_lists=%.1f "
+ "evacuate=%.1f "
+ "evacuate.candidates=%.1f "
+ "evacuate.clean_up=%.1f "
+ "evacuate.new_space=%.1f "
+ "evacuate.update_pointers=%.1f "
+ "evacuate.update_pointers.between_evacuated=%.1f "
+ "evacuate.update_pointers.to_evacuated=%.1f "
+ "evacuate.update_pointers.to_new=%.1f "
+ "evacuate.update_pointers.weak=%.1f "
+ "finish=%.1f "
+ "mark=%.1f "
+ "mark.finish_incremental=%.1f "
+ "mark.prepare_code_flush=%.1f "
+ "mark.roots=%.1f "
+ "mark.weak_closure=%.1f "
+ "sweep=%.1f "
+ "sweep.code=%.1f "
+ "sweep.map=%.1f "
+ "sweep.old=%.1f "
+ "incremental_finalize=%.1f "
+ "steps_count=%d "
+ "steps_took=%.1f "
+ "longest_step=%.1f "
+ "finalization_steps_count=%d "
+ "finalization_steps_took=%.1f "
+ "finalization_longest_step=%.1f "
+ "incremental_marking_throughput=%" V8_PTR_PREFIX
+ "d "
+ "total_size_before=%" V8_PTR_PREFIX
+ "d "
+ "total_size_after=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_before=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_after=%" V8_PTR_PREFIX
+ "d "
+ "allocated=%" V8_PTR_PREFIX
+ "d "
+ "promoted=%" V8_PTR_PREFIX
+ "d "
+ "semi_space_copied=%" V8_PTR_PREFIX
+ "d "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "semi_space_copy_rate=%.1f%% "
+ "new_space_allocation_throughput=%" V8_PTR_PREFIX
+ "d "
+ "context_disposal_rate=%.1f "
+ "compaction_speed=%" V8_PTR_PREFIX "d\n",
+ heap_->isolate()->time_millis_since_init(), duration,
+ spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
+ current_.scopes[Scope::EXTERNAL], current_.scopes[Scope::MC_CLEAR],
+ current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
+ current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
+ current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
+ current_.scopes[Scope::MC_CLEAR_MAPS],
+ current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
+ current_.scopes[Scope::MC_CLEAR_STORE_BUFFER],
+ current_.scopes[Scope::MC_CLEAR_STRING_TABLE],
+ current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
+ current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
+ current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MC_EVACUATE],
+ current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
+ current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
+ current_.scopes[Scope::MC_EVACUATE_NEW_SPACE],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
+ current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
+ current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
+ current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
+ current_.scopes[Scope::MC_MARK_ROOTS],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
+ current_.scopes[Scope::MC_SWEEP],
+ current_.scopes[Scope::MC_SWEEP_CODE],
+ current_.scopes[Scope::MC_SWEEP_MAP],
+ current_.scopes[Scope::MC_SWEEP_OLD],
+ current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
+ current_.incremental_marking_steps,
+ current_.incremental_marking_duration,
+ current_.longest_incremental_marking_step,
+ cumulative_incremental_marking_finalization_steps_,
+ cumulative_incremental_marking_finalization_duration_,
+ longest_incremental_marking_finalization_step_,
+ IncrementalMarkingSpeedInBytesPerMillisecond(),
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
+ heap_->semi_space_copied_object_size(),
+ heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
+ heap_->nodes_promoted_, heap_->promotion_ratio_,
+ AverageSurvivalRatio(), heap_->promotion_rate_,
+ heap_->semi_space_copied_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ ContextDisposalRateInMilliseconds(),
+ CompactionSpeedInBytesPerMillisecond());
break;
case Event::START:
break;
default:
UNREACHABLE();
}
-
- PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size);
- PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size);
- PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", current_.start_holes_size);
- PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", current_.end_holes_size);
-
- intptr_t allocated_since_last_gc =
- current_.start_object_size - previous_.end_object_size;
- PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc);
- PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size());
- PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
- heap_->semi_space_copied_object_size());
- PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
- PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
- PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
- PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
- PrintF("average_survival_ratio=%.1f%% ", AverageSurvivalRatio());
- PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
- PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
- PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
- NewSpaceAllocationThroughputInBytesPerMillisecond());
- PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
-
- PrintF("\n");
}
@@ -603,6 +744,23 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
}
+intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
+ if (compaction_events_.size() == 0) return 0;
+ intptr_t bytes = 0;
+ double durations = 0.0;
+ CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
+ while (iter != compaction_events_.end()) {
+ bytes += iter->live_bytes_compacted;
+ durations += iter->duration;
+ ++iter;
+ }
+
+ if (durations == 0.0) return 0;
+ // Make sure the result is at least 1.
+ return Max<intptr_t>(static_cast<intptr_t>(bytes / durations + 0.5), 1);
+}
+
+
intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
intptr_t bytes = 0;
double durations = 0.0;
@@ -720,7 +878,7 @@ size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
double GCTracer::ContextDisposalRateInMilliseconds() const {
if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
- double begin = base::OS::TimeCurrentMillis();
+ double begin = heap_->MonotonicallyIncreasingTimeInMs();
double end = 0.0;
ContextDisposalEventBuffer::const_iterator iter =
context_disposal_events_.begin();
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index de48d23997b..e8ec1681876 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -98,28 +98,37 @@ class GCTracer {
public:
enum ScopeId {
EXTERNAL,
+ MC_CLEAR,
+ MC_CLEAR_CODE_FLUSH,
+ MC_CLEAR_DEPENDENT_CODE,
+ MC_CLEAR_GLOBAL_HANDLES,
+ MC_CLEAR_MAPS,
+ MC_CLEAR_SLOTS_BUFFER,
+ MC_CLEAR_STORE_BUFFER,
+ MC_CLEAR_STRING_TABLE,
+ MC_CLEAR_WEAK_CELLS,
+ MC_CLEAR_WEAK_COLLECTIONS,
+ MC_CLEAR_WEAK_LISTS,
+ MC_EVACUATE,
+ MC_EVACUATE_CANDIDATES,
+ MC_EVACUATE_CLEAN_UP,
+ MC_EVACUATE_NEW_SPACE,
+ MC_EVACUATE_UPDATE_POINTERS,
+ MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED,
+ MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED,
+ MC_EVACUATE_UPDATE_POINTERS_TO_NEW,
+ MC_EVACUATE_UPDATE_POINTERS_WEAK,
+ MC_FINISH,
+ MC_INCREMENTAL_FINALIZE,
MC_MARK,
+ MC_MARK_FINISH_INCREMENTAL,
+ MC_MARK_PREPARE_CODE_FLUSH,
+ MC_MARK_ROOTS,
+ MC_MARK_WEAK_CLOSURE,
MC_SWEEP,
- MC_SWEEP_NEWSPACE,
- MC_SWEEP_OLDSPACE,
MC_SWEEP_CODE,
- MC_SWEEP_CELL,
MC_SWEEP_MAP,
- MC_EVACUATE_PAGES,
- MC_UPDATE_NEW_TO_NEW_POINTERS,
- MC_UPDATE_ROOT_TO_NEW_POINTERS,
- MC_UPDATE_OLD_TO_NEW_POINTERS,
- MC_UPDATE_POINTERS_TO_EVACUATED,
- MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
- MC_UPDATE_MISC_POINTERS,
- MC_INCREMENTAL_WEAKCLOSURE,
- MC_WEAKCLOSURE,
- MC_WEAKCOLLECTION_PROCESS,
- MC_WEAKCOLLECTION_CLEAR,
- MC_WEAKCOLLECTION_ABORT,
- MC_WEAKCELL,
- MC_NONLIVEREFERENCES,
- MC_FLUSH_CODE,
+ MC_SWEEP_OLD,
SCAVENGER_CODE_FLUSH_CANDIDATES,
SCAVENGER_OBJECT_GROUPS,
SCAVENGER_OLD_TO_NEW_POINTERS,
@@ -130,15 +139,8 @@ class GCTracer {
NUMBER_OF_SCOPES
};
- Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) {
- start_time_ = base::OS::TimeCurrentMillis();
- }
-
- ~Scope() {
- DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned.
- tracer_->current_.scopes[scope_] +=
- base::OS::TimeCurrentMillis() - start_time_;
- }
+ Scope(GCTracer* tracer, ScopeId scope);
+ ~Scope();
private:
GCTracer* tracer_;
@@ -166,6 +168,18 @@ class GCTracer {
};
+ class CompactionEvent {
+ public:
+ CompactionEvent() : duration(0), live_bytes_compacted(0) {}
+
+ CompactionEvent(double duration, intptr_t live_bytes_compacted)
+ : duration(duration), live_bytes_compacted(live_bytes_compacted) {}
+
+ double duration;
+ intptr_t live_bytes_compacted;
+ };
+
+
class ContextDisposalEvent {
public:
// Default constructor leaves the event uninitialized.
@@ -218,6 +232,9 @@ class GCTracer {
// Timestamp set in the destructor.
double end_time;
+ // Memory reduction flag set.
+ bool reduce_memory;
+
// Size of objects in heap set in constructor.
intptr_t start_object_size;
@@ -299,6 +316,8 @@ class GCTracer {
typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
ContextDisposalEventBuffer;
+ typedef RingBuffer<CompactionEvent, kRingBufferMaxSize> CompactionEventBuffer;
+
typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
static const int kThroughputTimeFrameMs = 5000;
@@ -321,11 +340,15 @@ class GCTracer {
void AddContextDisposalTime(double time);
+ void AddCompactionEvent(double duration, intptr_t live_bytes_compacted);
+
void AddSurvivalRatio(double survival_ratio);
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+ void AddIncrementalMarkingFinalizationStep(double duration);
+
// Log time spent in marking.
void AddMarkingTime(double duration) {
cumulative_marking_duration_ += duration;
@@ -391,6 +414,10 @@ class GCTracer {
intptr_t ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode = kForAllObjects) const;
+ // Compute the average compaction speed in bytes/millisecond.
+ // Returns 0 if not enough events have been recorded.
+ intptr_t CompactionSpeedInBytesPerMillisecond() const;
+
// Compute the average mark-sweep speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
@@ -472,6 +499,9 @@ class GCTracer {
cumulative_incremental_marking_duration_ = 0;
cumulative_pure_incremental_marking_duration_ = 0;
longest_incremental_marking_step_ = 0;
+ cumulative_incremental_marking_finalization_steps_ = 0;
+ cumulative_incremental_marking_finalization_duration_ = 0;
+ longest_incremental_marking_finalization_step_ = 0;
cumulative_marking_duration_ = 0;
cumulative_sweeping_duration_ = 0;
}
@@ -505,6 +535,9 @@ class GCTracer {
// RingBuffer for context disposal events.
ContextDisposalEventBuffer context_disposal_events_;
+ // RingBuffer for compaction events.
+ CompactionEventBuffer compaction_events_;
+
// RingBuffer for survival events.
SurvivalEventBuffer survival_events_;
@@ -525,6 +558,17 @@ class GCTracer {
// Longest incremental marking step since start of marking.
double longest_incremental_marking_step_;
+ // Cumulative number of incremental marking finalization steps since creation
+ // of tracer.
+ int cumulative_incremental_marking_finalization_steps_;
+
+ // Cumulative duration of incremental marking finalization steps since
+ // creation of tracer.
+ double cumulative_incremental_marking_finalization_duration_;
+
+ // Longest incremental marking finalization step since start of marking.
+ double longest_incremental_marking_finalization_step_;
+
// Total marking time.
// This timer is precise when run with --print-cumulative-gc-stat
double cumulative_marking_duration_;
@@ -555,7 +599,7 @@ class GCTracer {
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_GC_TRACER_H_
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index cff69b1e17d..a723b3bdae7 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -11,6 +11,7 @@
#include "src/counters.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/store-buffer-inl.h"
@@ -19,6 +20,7 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/type-feedback-vector-inl.h"
namespace v8 {
namespace internal {
@@ -68,6 +70,7 @@ PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#define SYMBOL_ACCESSOR(name, description) \
Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
#define ROOT_ACCESSOR(type, name, camel_name) \
@@ -254,20 +257,21 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
profiler->AllocationEvent(object->address(), size_in_bytes);
}
- ++allocations_count_;
-
if (FLAG_verify_predictable) {
+ ++allocations_count_;
+ // Advance synthetic time by making a time request.
+ MonotonicallyIncreasingTimeInMs();
+
UpdateAllocationsHash(object);
UpdateAllocationsHash(size_in_bytes);
- if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
- (--dump_allocations_hash_countdown_ == 0)) {
- dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAlloctionsHash();
}
}
if (FLAG_trace_allocation_stack_interval > 0) {
+ if (!FLAG_verify_predictable) ++allocations_count_;
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
@@ -289,14 +293,14 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
if (FLAG_verify_predictable) {
++allocations_count_;
+ // Advance synthetic time by making a time request.
+ MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(source);
UpdateAllocationsHash(target);
UpdateAllocationsHash(size_in_bytes);
- if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
- (--dump_allocations_hash_countdown_ == 0)) {
- dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAlloctionsHash();
}
}
@@ -503,21 +507,39 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
- ScratchpadSlotMode mode) {
- Heap* heap = object->GetHeap();
- DCHECK(heap->InFromSpace(object));
-
+void Heap::UpdateAllocationSite(HeapObject* object,
+ HashMap* pretenuring_feedback) {
+ DCHECK(InFromSpace(object));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
return;
+ AllocationMemento* memento = FindAllocationMemento(object);
+ if (memento == nullptr) return;
+
+ AllocationSite* key = memento->GetAllocationSite();
+ DCHECK(!key->IsZombie());
+
+ if (pretenuring_feedback == global_pretenuring_feedback_) {
+ // For inserting in the global pretenuring storage we need to first
+ // increment the memento found count on the allocation site.
+ if (key->IncrementMementoFoundCount()) {
+ global_pretenuring_feedback_->LookupOrInsert(
+ key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+ }
+ } else {
+ // Any pretenuring storage other than the global one is used as a cache,
+ // where the count is later merged into the allocation site.
+ HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
+ key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+ DCHECK(e != nullptr);
+ (*bit_cast<intptr_t*>(&e->value))++;
+ }
+}
- AllocationMemento* memento = heap->FindAllocationMemento(object);
- if (memento == NULL) return;
- if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
- heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
- }
+void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
+ global_pretenuring_feedback_->Remove(
+ site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}
@@ -700,7 +722,7 @@ void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
CHECK((*current)->IsSmi());
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_HEAP_INL_H_
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index e04f99ff7ee..84b3c79b3e9 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
@@ -31,8 +32,8 @@
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
-#include "src/scopeinfo.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
@@ -52,6 +53,19 @@ struct Heap::StrongRootsList {
StrongRootsList* next;
};
+class IdleScavengeObserver : public InlineAllocationObserver {
+ public:
+ IdleScavengeObserver(Heap& heap, intptr_t step_size)
+ : InlineAllocationObserver(step_size), heap_(heap) {}
+
+ void Step(int bytes_allocated, Address, size_t) override {
+ heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ }
+
+ private:
+ Heap& heap_;
+};
+
Heap::Heap()
: amount_of_external_allocated_memory_(0),
@@ -78,6 +92,7 @@ Heap::Heap()
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
contexts_disposed_(0),
+ number_of_disposed_maps_(0),
global_ic_age_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
@@ -89,7 +104,6 @@ Heap::Heap()
gc_post_processing_depth_(0),
allocations_count_(0),
raw_allocations_hash_(0),
- dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
@@ -129,13 +143,14 @@ Heap::Heap()
memory_reducer_(nullptr),
object_stats_(nullptr),
scavenge_job_(nullptr),
+ idle_scavenge_observer_(nullptr),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
old_generation_allocation_counter_(0),
old_generation_size_at_last_gc_(0),
gcs_since_last_deopt_(0),
- allocation_sites_scratchpad_length_(0),
+ global_pretenuring_feedback_(nullptr),
ring_buffer_full_(false),
ring_buffer_end_(0),
promotion_queue_(this),
@@ -148,9 +163,10 @@ Heap::Heap()
pending_unmapping_tasks_semaphore_(0),
gc_callbacks_depth_(0),
deserialization_complete_(false),
- concurrent_sweeping_enabled_(false),
strong_roots_list_(NULL),
- array_buffer_tracker_(NULL) {
+ array_buffer_tracker_(NULL),
+ heap_iterator_depth_(0),
+ force_oom_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -166,6 +182,7 @@ Heap::Heap()
set_allocation_sites_list(Smi::FromInt(0));
set_encountered_weak_collections(Smi::FromInt(0));
set_encountered_weak_cells(Smi::FromInt(0));
+ set_encountered_transition_arrays(Smi::FromInt(0));
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
@@ -412,10 +429,6 @@ void Heap::GarbageCollectionPrologue() {
AllowHeapAllocation for_the_first_part_of_prologue;
gc_count_++;
- if (FLAG_flush_code) {
- mark_compact_collector()->EnableCodeFlushing(true);
- }
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -486,37 +499,60 @@ const char* Heap::GetSpaceName(int idx) {
}
-void Heap::ClearAllKeyedStoreICs() {
- if (FLAG_vector_stores) {
- TypeFeedbackVector::ClearAllKeyedStoreICs(isolate_);
- return;
+void Heap::RepairFreeListsAfterDeserialization() {
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next(); space != NULL;
+ space = spaces.next()) {
+ space->RepairFreeListsAfterDeserialization();
}
+}
- // TODO(mvstanton): Remove this function when FLAG_vector_stores is turned on
- // permanently, and divert all callers to KeyedStoreIC::ClearAllKeyedStoreICs.
- HeapObjectIterator it(code_space());
- for (Object* object = it.Next(); object != NULL; object = it.Next()) {
- Code* code = Code::cast(object);
- Code::Kind current_kind = code->kind();
- if (current_kind == Code::FUNCTION ||
- current_kind == Code::OPTIMIZED_FUNCTION) {
- code->ClearInlineCaches(Code::KEYED_STORE_IC);
+void Heap::MergeAllocationSitePretenuringFeedback(
+ const HashMap& local_pretenuring_feedback) {
+ AllocationSite* site = nullptr;
+ for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
+ local_entry != nullptr;
+ local_entry = local_pretenuring_feedback.Next(local_entry)) {
+ site = reinterpret_cast<AllocationSite*>(local_entry->key);
+ MapWord map_word = site->map_word();
+ if (map_word.IsForwardingAddress()) {
+ site = AllocationSite::cast(map_word.ToForwardingAddress());
+ }
+ DCHECK(site->IsAllocationSite());
+ int value =
+ static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
+ DCHECK_GT(value, 0);
+
+ {
+ // TODO(mlippautz): For parallel processing we need synchronization here.
+ if (site->IncrementMementoFoundCount(value)) {
+ global_pretenuring_feedback_->LookupOrInsert(
+ site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
+ }
}
}
}
-void Heap::RepairFreeListsAfterDeserialization() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != NULL;
- space = spaces.next()) {
- space->RepairFreeListsAfterDeserialization();
+class Heap::PretenuringScope {
+ public:
+ explicit PretenuringScope(Heap* heap) : heap_(heap) {
+ heap_->global_pretenuring_feedback_ =
+ new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
}
-}
+
+ ~PretenuringScope() {
+ delete heap_->global_pretenuring_feedback_;
+ heap_->global_pretenuring_feedback_ = nullptr;
+ }
+
+ private:
+ Heap* heap_;
+};
-bool Heap::ProcessPretenuringFeedback() {
+void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
@@ -525,48 +561,43 @@ bool Heap::ProcessPretenuringFeedback() {
int allocation_sites = 0;
int active_allocation_sites = 0;
- // If the scratchpad overflowed, we have to iterate over the allocation
- // sites list.
- // TODO(hpayer): We iterate over the whole list of allocation sites when
- // we grew to the maximum semi-space size to deopt maybe tenured
- // allocation sites. We could hold the maybe tenured allocation sites
- // in a separate data structure if this is a performance problem.
- bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
- bool use_scratchpad =
- allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
- !deopt_maybe_tenured;
+ AllocationSite* site = nullptr;
- int i = 0;
- Object* list_element = allocation_sites_list();
+ // Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
- while (use_scratchpad ? i < allocation_sites_scratchpad_length_
- : list_element->IsAllocationSite()) {
- AllocationSite* site =
- use_scratchpad
- ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
- : AllocationSite::cast(list_element);
- allocation_mementos_found += site->memento_found_count();
- if (site->memento_found_count() > 0) {
- active_allocation_sites++;
- if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
- trigger_deoptimization = true;
- }
- if (site->GetPretenureMode() == TENURED) {
- tenure_decisions++;
- } else {
- dont_tenure_decisions++;
- }
- allocation_sites++;
- }
-
- if (deopt_maybe_tenured && site->IsMaybeTenure()) {
- site->set_deopt_dependent_code(true);
+ for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
+ e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
+ site = reinterpret_cast<AllocationSite*>(e->key);
+ int found_count = site->memento_found_count();
+ // The fact that we have an entry in the storage means that we've found
+ // the site at least once.
+ DCHECK_GT(found_count, 0);
+ DCHECK(site->IsAllocationSite());
+ allocation_sites++;
+ active_allocation_sites++;
+ allocation_mementos_found += found_count;
+ if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
trigger_deoptimization = true;
}
-
- if (use_scratchpad) {
- i++;
+ if (site->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
} else {
+ dont_tenure_decisions++;
+ }
+ }
+
+ // Step 2: Deopt maybe tenured allocation sites if necessary.
+ bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+ if (deopt_maybe_tenured) {
+ Object* list_element = allocation_sites_list();
+ while (list_element->IsAllocationSite()) {
+ site = AllocationSite::cast(list_element);
+ DCHECK(site->IsAllocationSite());
+ allocation_sites++;
+ if (site->IsMaybeTenure()) {
+ site->set_deopt_dependent_code(true);
+ trigger_deoptimization = true;
+ }
list_element = site->weak_next();
}
}
@@ -575,28 +606,24 @@ bool Heap::ProcessPretenuringFeedback() {
isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
- FlushAllocationSitesScratchpad();
-
if (FLAG_trace_pretenuring_statistics &&
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
- PrintF(
- "GC: (mode, #visited allocation sites, #active allocation sites, "
- "#mementos, #tenure decisions, #donttenure decisions) "
- "(%s, %d, %d, %d, %d, %d)\n",
- use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
- active_allocation_sites, allocation_mementos_found, tenure_decisions,
- dont_tenure_decisions);
+ PrintIsolate(isolate(),
+ "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
+ "active_sites=%d "
+ "mementos=%d tenured=%d not_tenured=%d\n",
+ deopt_maybe_tenured ? 1 : 0, allocation_sites,
+ active_allocation_sites, allocation_mementos_found,
+ tenure_decisions, dont_tenure_decisions);
}
}
- return trigger_deoptimization;
}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
- // performance issue, use a cache heap data structure instead (similar to the
- // allocation sites scratchpad).
+  // performance issue, use a cached data structure in the heap instead.
Object* list_element = allocation_sites_list();
while (list_element->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(list_element);
@@ -772,11 +799,9 @@ void Heap::HandleGCRequest() {
IncrementalMarking::COMPLETE_MARKING) {
CollectAllGarbage(current_gc_flags_, "GC interrupt",
current_gc_callback_flags_);
- return;
- }
- DCHECK(FLAG_overapproximate_weak_closure);
- if (!incremental_marking()->weak_closure_was_overapproximated()) {
- OverApproximateWeakClosure("GC interrupt");
+ } else if (incremental_marking()->IsMarking() &&
+ !incremental_marking()->finalize_marking_completed()) {
+ FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
}
}
@@ -786,14 +811,14 @@ void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
}
-void Heap::OverApproximateWeakClosure(const char* gc_reason) {
+void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
- gc_reason);
+ PrintF("[IncrementalMarking] (%s).\n", gc_reason);
}
- GCTracer::Scope gc_scope(tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
+ GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
+ HistogramTimerScope incremental_marking_scope(
+ isolate()->counters()->gc_incremental_marking_finalize());
{
GCCallbacksScope scope(this);
@@ -805,7 +830,7 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
}
}
- incremental_marking()->MarkObjectGroups();
+ incremental_marking()->FinalizeIncrementally();
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
@@ -819,6 +844,23 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
}
+HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
+ if (collector == SCAVENGER) {
+ return isolate_->counters()->gc_scavenger();
+ } else {
+ if (!incremental_marking()->IsStopped()) {
+ if (ShouldReduceMemory()) {
+ return isolate_->counters()->gc_finalize_reduce_memory();
+ } else {
+ return isolate_->counters()->gc_finalize();
+ }
+ } else {
+ return isolate_->counters()->gc_compactor();
+ }
+ }
+}
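// Hedged decision table for the timer selection above:
//   SCAVENGER                                     -> gc_scavenger
//   MARK_COMPACTOR, marking active, reduce memory -> gc_finalize_reduce_memory
//   MARK_COMPACTOR, marking active                -> gc_finalize
//   MARK_COMPACTOR, marking stopped               -> gc_compactor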
+
+
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
@@ -935,7 +977,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
!ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() && FLAG_incremental_marking) {
+ !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
+ OldGenerationAllocationLimitReached()) {
// Make progress in incremental marking.
const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
@@ -965,9 +1008,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
GarbageCollectionPrologue();
{
- HistogramTimerScope histogram_timer_scope(
- (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor());
+ HistogramTimerScope histogram_timer_scope(GCTypeTimer(collector));
+
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
}
@@ -1019,18 +1061,18 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kContextDisposed;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_->NotifyContextDisposed(event);
}
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compile_dispatcher()->Flush();
}
AgeInlineCaches();
- set_retained_maps(ArrayList::cast(empty_fixed_array()));
- tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
- MemoryReducer::Event event;
- event.type = MemoryReducer::kContextDisposed;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyContextDisposed(event);
+ number_of_disposed_maps_ = retained_maps()->Length();
+ tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
@@ -1075,7 +1117,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
@@ -1255,22 +1297,27 @@ bool Heap::PerformGarbageCollection(
incremental_marking()->NotifyOfHighPromotionRate();
}
- if (collector == MARK_COMPACTOR) {
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
- MarkCompact();
- old_gen_exhausted_ = false;
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which can
- // cause another GC. Take into account the objects promoted during GC.
- old_generation_allocation_counter_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
- } else {
- Scavenge();
+ {
+ Heap::PretenuringScope pretenuring_scope(this);
+
+ if (collector == MARK_COMPACTOR) {
+ UpdateOldGenerationAllocationCounter();
+ // Perform mark-sweep with optional compaction.
+ MarkCompact();
+ old_gen_exhausted_ = false;
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during GC.
+ old_generation_allocation_counter_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
+ } else {
+ Scavenge();
+ }
+
+ ProcessPretenuringFeedback();
}
- ProcessPretenuringFeedback();
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
@@ -1373,6 +1420,8 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
void Heap::MarkCompact() {
+ PauseInlineAllocationObserversScope pause_observers(new_space());
+
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1435,7 +1484,8 @@ void Heap::MarkCompactPrologue() {
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
public:
explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(Object** start, Object** end) {
+
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
@@ -1485,6 +1535,23 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
}
+static bool IsUnmodifiedHeapObject(Object** p) {
+ Object* object = *p;
+ if (object->IsSmi()) return false;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!object->IsJSObject()) return false;
+ Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
+ if (!obj_constructor->IsJSFunction()) return false;
+ JSFunction* constructor = JSFunction::cast(obj_constructor);
+ if (!constructor->shared()->IsApiFunction()) return false;
+  // |constructor| is non-null after the cast above; only the initial-map
+  // check remains.
+  return constructor->initial_map() == heap_object->map();
+}
+
+
void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
StoreBufferEvent event) {
heap->store_buffer_rebuilder_.Callback(page, event);
@@ -1558,6 +1625,10 @@ void Heap::Scavenge() {
  // trigger one during scavenge: allocations during scavenge should always
  // succeed.
AlwaysAllocateScope scope(isolate());
+ // Bump-pointer allocations done during scavenge are not real allocations.
+ // Pause the inline allocation steps.
+ PauseInlineAllocationObserversScope pause_observers(new_space());
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1603,6 +1674,12 @@ void Heap::Scavenge() {
promotion_queue_.Initialize();
ScavengeVisitor scavenge_visitor(this);
+
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &IsUnmodifiedHeapObject);
+ }
+
{
// Copy roots.
GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
@@ -1641,7 +1718,14 @@ void Heap::Scavenge() {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
- {
+ if (FLAG_scavenge_reclaim_unmodified_objects) {
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
+
+ isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ } else {
GCTracer::Scope gc_scope(tracer(),
GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
while (isolate()->global_handles()->IterateObjectGroups(
@@ -1650,14 +1734,14 @@ void Heap::Scavenge() {
}
isolate()->global_handles()->RemoveObjectGroups();
isolate()->global_handles()->RemoveImplicitRefGroups();
- }
- isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
- &IsUnscavengedHeapObject);
+ isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+ &IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
- &scavenge_visitor);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ }
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1674,9 +1758,6 @@ void Heap::Scavenge() {
// Set age mark.
new_space_.set_age_mark(new_space_.top());
- new_space_.LowerInlineAllocationLimit(
- new_space_.inline_allocation_limit_step());
-
array_buffer_tracker()->FreeDead(true);
// Update how much has survived scavenge.
@@ -1789,6 +1870,7 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
casted->ResetPretenureDecision();
casted->set_deopt_dependent_code(true);
marked = true;
+ RemoveAllocationSitePretenuringFeedback(casted);
}
cur = casted->weak_next();
}
@@ -1876,42 +1958,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// for pointers to from semispace instead of looking for pointers
// to new space.
DCHECK(!target->IsMap());
- Address obj_address = target->address();
-
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- record_slots = Marking::IsBlack(mark_bit);
- }
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(target->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
-
- if (!has_only_tagged_fields) {
- for (int offset = 0; offset < size;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, size, &end_of_region_offset)) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + offset,
- obj_address + end_of_region_offset, record_slots,
- &Scavenger::ScavengeObject);
- }
- offset = end_of_region_offset;
- }
- } else {
-#endif
- IterateAndMarkPointersToFromSpace(target, obj_address,
- obj_address + size, record_slots,
- &Scavenger::ScavengeObject);
-#if V8_DOUBLE_FIELDS_UNBOXING
- }
-#endif
+
+ IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
}
}
@@ -2037,7 +2085,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
- Map::Counter::encode(Map::kRetainingCounterStart);
+ Map::ConstructionCounter::encode(Map::kNoSlackTracking);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
return result;
@@ -2076,9 +2124,10 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
- Map::Counter::encode(Map::kRetainingCounterStart);
+ Map::ConstructionCounter::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
+ map->set_new_target_is_base(true);
return map;
}
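// Hedged sketch of the BitField encoding used for bit_field3 above (v8's
// BitField template; the shift and size here are illustrative, not the real
// Map layout):
//
//   using ConstructionCounter = BitField<int, /*shift=*/24, /*size=*/3>;
//   uint32_t bits = ConstructionCounter::encode(Map::kNoSlackTracking);
//   int counter = ConstructionCounter::decode(bits);  // kNoSlackTracking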
@@ -2325,6 +2374,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+ ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
for (unsigned i = 0; i < arraysize(struct_table); i++) {
const StructTable& entry = struct_table[i];
@@ -2481,11 +2531,26 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
}
result->set_map_no_write_barrier(weak_cell_map());
WeakCell::cast(result)->initialize(value);
- WeakCell::cast(result)->clear_next(this);
+ WeakCell::cast(result)->clear_next(the_hole_value());
return result;
}
+AllocationResult Heap::AllocateTransitionArray(int capacity) {
+ DCHECK(capacity > 0);
+ HeapObject* raw_array = nullptr;
+ {
+ AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
+ if (!allocation.To(&raw_array)) return allocation;
+ }
+ raw_array->set_map_no_write_barrier(transition_array_map());
+ TransitionArray* array = TransitionArray::cast(raw_array);
+ array->set_length(capacity);
+ MemsetPointer(array->data_start(), undefined_value(), capacity);
+ return array;
+}
+
+
void Heap::CreateApiObjects() {
HandleScope scope(isolate());
Factory* factory = isolate()->factory();
@@ -2648,11 +2713,12 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(name) \
- { \
- Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
- Handle<Object> symbol(isolate()->factory()->NewPrivateSymbol(name##d)); \
- roots_[k##name##RootIndex] = *symbol; \
+#define SYMBOL_INIT(name) \
+ { \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
+ Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
+ symbol->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *symbol; \
}
PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
@@ -2667,6 +2733,15 @@ void Heap::CreateInitialObjects() {
roots_[k##name##RootIndex] = *name;
PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
+
+#define SYMBOL_INIT(name, description) \
+ Handle<Symbol> name = factory->NewSymbol(); \
+ Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+ name->set_is_well_known_symbol(true); \
+ name->set_name(*name##d); \
+ roots_[k##name##RootIndex] = *name;
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
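// Hedged expansion of one list entry above. For
// V(to_string_tag_symbol, Symbol.toStringTag), the SYMBOL_INIT macro emits
// (after ## pasting and stringification):
//
//   Handle<Symbol> to_string_tag_symbol = factory->NewSymbol();
//   Handle<String> to_string_tag_symbold =
//       factory->NewStringFromStaticChars("Symbol.toStringTag");
//   to_string_tag_symbol->set_is_well_known_symbol(true);
//   to_string_tag_symbol->set_name(*to_string_tag_symbold);
//   roots_[kto_string_tag_symbolRootIndex] = *to_string_tag_symbol;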
}
CreateFixedStubs();
@@ -2677,6 +2752,11 @@ void Heap::CreateInitialObjects() {
Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
set_intrinsic_function_names(*intrinsic_names);
+ Handle<NameDictionary> empty_properties_dictionary =
+ NameDictionary::New(isolate(), 0, TENURED);
+ empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
+ set_empty_properties_dictionary(*empty_properties_dictionary);
+
set_number_string_cache(
*factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
@@ -2703,9 +2783,6 @@ void Heap::CreateInitialObjects() {
set_experimental_extra_natives_source_cache(
*factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
- set_code_stub_natives_source_cache(
- *factory->NewFixedArray(CodeStubNatives::GetBuiltinsCount()));
-
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// The symbol registry is initialized lazily.
@@ -2720,21 +2797,49 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
{
- FeedbackVectorSlotKind kinds[] = {FeedbackVectorSlotKind::LOAD_IC,
- FeedbackVectorSlotKind::KEYED_LOAD_IC,
- FeedbackVectorSlotKind::STORE_IC,
- FeedbackVectorSlotKind::KEYED_STORE_IC};
- StaticFeedbackVectorSpec spec(0, 4, kinds);
+ StaticFeedbackVectorSpec spec;
+ FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
+ FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
+ FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
+ FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
+
+ DCHECK_EQ(load_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
+ DCHECK_EQ(keyed_load_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ DCHECK_EQ(store_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
+ DCHECK_EQ(keyed_store_ic_slot,
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+
+ Handle<TypeFeedbackMetadata> dummy_metadata =
+ TypeFeedbackMetadata::New(isolate(), &spec);
Handle<TypeFeedbackVector> dummy_vector =
- factory->NewTypeFeedbackVector(&spec);
- for (int i = 0; i < 4; i++) {
- dummy_vector->Set(FeedbackVectorICSlot(0),
- *TypeFeedbackVector::MegamorphicSentinel(isolate()),
- SKIP_WRITE_BARRIER);
- }
+ TypeFeedbackVector::New(isolate(), dummy_metadata);
+
+ Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
+ dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+ dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+ dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+ dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+
set_dummy_vector(*dummy_vector);
}
+ {
+ Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
+ set_empty_weak_cell(*cell);
+ cell->clear();
+
+ Handle<FixedArray> cleared_optimized_code_map =
+ factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
+ cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
+ *cell);
+ STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
+ SharedFunctionInfo::kSharedCodeIndex == 0);
+ set_cleared_optimized_code_map(*cleared_optimized_code_map);
+ }
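// Layout implied by the STATIC_ASSERT above (hedged illustration):
//   cleared_optimized_code_map[0] = empty_weak_cell  // kSharedCodeIndex == 0
// Real entries would begin at index 1 (kEntriesStart), so an array of length
// kEntriesStart represents "no optimized code cached".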
+
set_detached_contexts(empty_fixed_array());
set_retained_maps(ArrayList::cast(empty_fixed_array()));
@@ -2769,15 +2874,13 @@ void Heap::CreateInitialObjects() {
set_weak_stack_trace_list(Smi::FromInt(0));
+ set_noscript_shared_function_infos(Smi::FromInt(0));
+
// Will be filled in by Interpreter::Initialize().
set_interpreter_table(
*interpreter::Interpreter::CreateUninitializedInterpreterTable(
isolate()));
- set_allocation_sites_scratchpad(
- *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
- InitializeAllocationSitesScratchpad();
-
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -2806,11 +2909,11 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kSymbolRegistryRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
- case kAllocationSitesScratchpadRootIndex:
case kMicrotaskQueueRootIndex:
case kDetachedContextsRootIndex:
case kWeakObjectToCodeTableRootIndex:
case kRetainedMapsRootIndex:
+ case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
@@ -2854,48 +2957,6 @@ void Heap::FlushNumberStringCache() {
}
-void Heap::FlushAllocationSitesScratchpad() {
- for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
- allocation_sites_scratchpad()->set_undefined(i);
- }
- allocation_sites_scratchpad_length_ = 0;
-}
-
-
-void Heap::InitializeAllocationSitesScratchpad() {
- DCHECK(allocation_sites_scratchpad()->length() ==
- kAllocationSiteScratchpadSize);
- for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
- allocation_sites_scratchpad()->set_undefined(i);
- }
-}
-
-
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
- ScratchpadSlotMode mode) {
- if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
- // We cannot use the normal write-barrier because slots need to be
- // recorded with non-incremental marking as well. We have to explicitly
- // record the slot to take evacuation candidates into account.
- allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
- site, SKIP_WRITE_BARRIER);
- Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
- allocation_sites_scratchpad_length_);
-
- if (mode == RECORD_SCRATCHPAD_SLOT) {
- // We need to allow slots buffer overflow here since the evacuation
- // candidates are not part of the global list of old space pages and
- // releasing an evacuation candidate due to a slots buffer overflow
- // results in lost pages.
- mark_compact_collector()->ForceRecordSlot(allocation_sites_scratchpad(),
- slot, *slot);
- }
- allocation_sites_scratchpad_length_++;
- }
-}
-
-
-
Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}
@@ -3011,6 +3072,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
filler->set_map_no_write_barrier(
reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
} else {
+ DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_no_write_barrier(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
FreeSpace::cast(filler)->nobarrier_set_size(size);
@@ -3041,7 +3103,12 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
- if (incremental_marking()->IsMarking() &&
+ // As long as the inspected object is black and we are currently not iterating
+ // the heap using HeapIterator, we can update the live byte count. We cannot
+ // update while using HeapIterator because the iterator is temporarily
+  // marking the whole object graph without updating live bytes.
+ if (!in_heap_iterator() &&
+ !mark_compact_collector()->sweeping_in_progress() &&
Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
if (mode == SEQUENTIAL_TO_SWEEPER) {
MemoryChunk::IncrementLiveBytesFromGC(object, by);
@@ -3055,6 +3122,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
DCHECK(!object->IsFixedTypedArrayBase());
+ DCHECK(!object->IsByteArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
Map* map = object->map();
@@ -3111,7 +3179,8 @@ template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
template<Heap::InvocationMode mode>
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
const int len = object->length();
- DCHECK(elements_to_trim < len);
+ DCHECK_LE(elements_to_trim, len);
+ DCHECK_GE(elements_to_trim, 0);
int bytes_to_trim;
if (object->IsFixedTypedArrayBase()) {
@@ -3119,12 +3188,17 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
bytes_to_trim =
FixedTypedArrayBase::TypedArraySize(type, len) -
FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
+ } else if (object->IsByteArray()) {
+ int new_size = ByteArray::SizeFor(len - elements_to_trim);
+ bytes_to_trim = ByteArray::SizeFor(len) - new_size;
+ DCHECK_GE(bytes_to_trim, 0);
} else {
const int element_size =
object->IsFixedArray() ? kPointerSize : kDoubleSize;
bytes_to_trim = elements_to_trim * element_size;
}
+
// For now this trick is only applied to objects in new and paged space.
DCHECK(object->map() != fixed_cow_array_map());
@@ -3388,6 +3462,14 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects (see,
  // for example, JSArray::JSArrayVerify).
+ InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
+}
+
+
+void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
+ if (start_offset == map->instance_size()) return;
+ DCHECK_LT(start_offset, map->instance_size());
+
Object* filler;
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their internal fields to be initialized
@@ -3395,16 +3477,18 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// Pre-allocated fields need to be initialized with undefined_value as well
// so that object accesses before the constructor completes (e.g. in the
// debugger) will not cause a crash.
- Object* constructor = map->GetConstructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->IsInobjectSlackTrackingInProgress()) {
+
+  // In case of Array subclassing, the |map| could already have been
+  // transitioned to a different elements kind from the initial map on which
+  // we track slack.
+ Map* initial_map = map->FindRootMap();
+ if (initial_map->IsInobjectSlackTrackingInProgress()) {
// We might want to shrink the object later.
- DCHECK(obj->GetInternalFieldCount() == 0);
filler = Heap::one_pointer_filler_map();
} else {
filler = Heap::undefined_value();
}
- obj->InitializeBody(map, Heap::undefined_value(), filler);
+ obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
+ initial_map->InobjectSlackTrackingStep();
}
@@ -3417,7 +3501,6 @@ AllocationResult Heap::AllocateJSObjectFromMap(
// Both types of global objects should be allocated using
// AllocateGlobalObject to be properly initialized.
DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
- DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
FixedArray* properties = empty_fixed_array();
@@ -3446,7 +3529,7 @@ AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
HeapObject* obj = nullptr;
- DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
+ DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
#endif
return allocation;
}
@@ -3456,9 +3539,10 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Make the clone.
Map* map = source->map();
- // We can only clone normal objects or arrays. Copying anything else
+ // We can only clone regexps, normal objects or arrays. Copying anything else
// will break invariants.
- CHECK(map->instance_type() == JS_OBJECT_TYPE ||
+ CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+ map->instance_type() == JS_OBJECT_TYPE ||
map->instance_type() == JS_ARRAY_TYPE);
int object_size = map->instance_size();
@@ -4033,11 +4117,11 @@ void Heap::ReduceNewSpaceSize() {
void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
- if (FLAG_overapproximate_weak_closure && incremental_marking()->IsMarking() &&
+ if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
- (!incremental_marking()->weak_closure_was_overapproximated() &&
+ (!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_deque()->IsEmpty()))) {
- OverApproximateWeakClosure(comment);
+ FinalizeIncrementalMarking(comment);
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_deque()->IsEmpty())) {
CollectAllGarbage(current_gc_flags_, comment);
@@ -4050,14 +4134,13 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
static_cast<size_t>(
tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
- if (FLAG_overapproximate_weak_closure &&
- (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
- (!incremental_marking()->weak_closure_was_overapproximated() &&
- mark_compact_collector()->marking_deque()->IsEmpty() &&
- gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
- static_cast<size_t>(idle_time_in_ms))))) {
- OverApproximateWeakClosure(
- "Idle notification: overapproximate weak closure");
+ if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+ (!incremental_marking()->finalize_marking_completed() &&
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
+ static_cast<size_t>(idle_time_in_ms)))) {
+ FinalizeIncrementalMarking(
+ "Idle notification: finalize incremental marking");
return true;
} else if (incremental_marking()->IsComplete() ||
(mark_compact_collector()->marking_deque()->IsEmpty() &&
@@ -4065,7 +4148,7 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(current_gc_flags_,
- "idle notification: finalize incremental");
+ "idle notification: finalize incremental marking");
return true;
}
return false;
@@ -4170,22 +4253,6 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
}
-void Heap::CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
- double now_ms) {
- if (idle_time_in_ms >= GCIdleTimeHandler::kMinBackgroundIdleTime) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kBackgroundIdleNotification;
- event.time_ms = now_ms;
- event.can_start_incremental_gc = incremental_marking()->IsStopped() &&
- incremental_marking()->CanBeActivated();
- memory_reducer_->NotifyBackgroundIdleNotification(event);
- optimize_for_memory_usage_ = true;
- } else {
- optimize_for_memory_usage_ = false;
- }
-}
-
-
double Heap::MonotonicallyIncreasingTimeInMs() {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
static_cast<double>(base::Time::kMillisecondsPerSecond);
@@ -4210,8 +4277,6 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
double start_ms = MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
- CheckAndNotifyBackgroundIdleNotification(idle_time_in_ms, start_ms);
-
tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
OldGenerationAllocationCounter());
@@ -4343,11 +4408,16 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
bool Heap::RootIsImmortalImmovable(int root_index) {
switch (root_index) {
-#define CASE(name) \
- case Heap::k##name##RootIndex: \
+#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
+ IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
+#undef IMMORTAL_IMMOVABLE_ROOT
+#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
+ INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#undef INTERNALIZED_STRING
+#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
+ STRING_TYPE_LIST(STRING_TYPE)
+#undef STRING_TYPE
return true;
- IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
-#undef CASE
default:
return false;
}
@@ -4438,6 +4508,52 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
}
+class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+ public:
+ IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
+ bool record_slots,
+ ObjectSlotCallback callback)
+ : heap_(heap),
+ target_(target),
+ record_slots_(record_slots),
+ callback_(callback) {}
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ heap_->IterateAndMarkPointersToFromSpace(
+ target_, reinterpret_cast<Address>(start),
+ reinterpret_cast<Address>(end), record_slots_, callback_);
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+
+ private:
+ Heap* heap_;
+ HeapObject* target_;
+ bool record_slots_;
+ ObjectSlotCallback callback_;
+};
+
+
+void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
+ ObjectSlotCallback callback) {
+  // We are not collecting slots on new space objects during mutation, so we
+  // have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects: a grey
+  // object's slots will be rescanned, and a white object might not survive
+  // until the end of the collection, so recording its slots would violate
+  // the invariant.
+ bool record_slots = false;
+ if (incremental_marking()->IsCompacting()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(target);
+ record_slots = Marking::IsBlack(mark_bit);
+ }
+
+ IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
+ callback);
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
+}
+
+
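// Hedged illustration of the tri-colour rule documented above (toy decision
// helper, not V8 API):
//
//   enum Colour { kWhite, kGrey, kBlack };
//   bool ShouldRecordSlots(Colour colour, bool compacting) {
//     // Only black objects are safe to record: grey objects get rescanned
//     // anyway, and white objects may die before the collection ends.
//     return compacting && colour == kBlack;
//   }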
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
@@ -4682,7 +4798,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +
- FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+ FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
code_range_size_ = code_range_size * MB;
@@ -4753,7 +4869,11 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
if (stats->js_stacktrace != NULL) {
FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
- isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ if (gc_state() == Heap::NOT_IN_GC) {
+ isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+ } else {
+ accumulator.Add("Cannot get stack trace in GC.");
+ }
}
}
@@ -4879,6 +4999,10 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
factor = kMinHeapGrowingFactor;
}
+ if (FLAG_heap_growing_percent > 0) {
+ factor = 1.0 + FLAG_heap_growing_percent / 100.0;
+ }
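// Worked example (hedged; the factor feeds
// CalculateOldGenerationAllocationLimit below): --heap_growing_percent=20
// gives factor = 1.0 + 20 / 100.0 = 1.2, i.e. the next old-generation limit
// sits roughly 20% above the current old-generation size.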
+
old_generation_allocation_limit_ =
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
@@ -4935,17 +5059,6 @@ void Heap::DisableInlineAllocation() {
}
-void Heap::LowerInlineAllocationLimit(intptr_t step) {
- new_space()->LowerInlineAllocationLimit(step);
-}
-
-
-void Heap::ResetInlineAllocationLimit() {
- new_space()->LowerInlineAllocationLimit(
- ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
-}
-
-
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
@@ -4972,8 +5085,6 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
- concurrent_sweeping_enabled_ = FLAG_concurrent_sweeping;
-
base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
// Set up memory allocator.
@@ -5054,7 +5165,9 @@ bool Heap::SetUp() {
mark_compact_collector()->SetUp();
- ResetInlineAllocationLimit();
+ idle_scavenge_observer_ = new IdleScavengeObserver(
+ *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+ new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
return true;
}
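// Hedged sketch of the observer interface adopted here (simplified; the real
// InlineAllocationObserver and IdleScavengeObserver signatures may differ):
//
//   class InlineAllocationObserver {
//    public:
//     explicit InlineAllocationObserver(intptr_t step_size)
//         : step_size_(step_size) {}
//     virtual ~InlineAllocationObserver() = default;
//     // Invoked by NewSpace once roughly step_size_ bytes have been
//     // allocated since the previous step.
//     virtual void Step(int bytes_allocated) = 0;
//    private:
//     intptr_t step_size_;
//   };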
@@ -5071,6 +5184,7 @@ bool Heap::CreateHeapObjects() {
set_native_contexts_list(undefined_value());
set_allocation_sites_list(undefined_value());
+
return true;
}
@@ -5153,6 +5267,10 @@ void Heap::TearDown() {
PrintAlloctionsHash();
}
+ new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
+ delete idle_scavenge_observer_;
+ idle_scavenge_observer_ = nullptr;
+
delete scavenge_collector_;
scavenge_collector_ = nullptr;
@@ -5291,9 +5409,11 @@ DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
void Heap::AddRetainedMap(Handle<Map> map) {
- if (FLAG_retain_maps_for_n_gc == 0) return;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
Handle<ArrayList> array(retained_maps(), isolate());
+ if (array->IsFull()) {
+ CompactRetainedMaps(*array);
+ }
array = ArrayList::Add(
array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
ArrayList::kReloadLengthAfterAllocation);
@@ -5303,6 +5423,35 @@ void Heap::AddRetainedMap(Handle<Map> map) {
}
+void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
+ DCHECK_EQ(retained_maps, this->retained_maps());
+ int length = retained_maps->Length();
+ int new_length = 0;
+ int new_number_of_disposed_maps = 0;
+ // This loop compacts the array by removing cleared weak cells.
+ for (int i = 0; i < length; i += 2) {
+ DCHECK(retained_maps->Get(i)->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+ Object* age = retained_maps->Get(i + 1);
+ if (cell->cleared()) continue;
+ if (i != new_length) {
+ retained_maps->Set(new_length, cell);
+ retained_maps->Set(new_length + 1, age);
+ }
+ if (i < number_of_disposed_maps_) {
+ new_number_of_disposed_maps += 2;
+ }
+ new_length += 2;
+ }
+ number_of_disposed_maps_ = new_number_of_disposed_maps;
+ Object* undefined = undefined_value();
+ for (int i = new_length; i < length; i++) {
+ retained_maps->Clear(i, undefined);
+ }
+ if (new_length != length) retained_maps->SetLength(new_length);
+}
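// Hedged worked example of the compaction above. retained_maps holds
// (WeakCell, age) pairs:
//   before: [cellA(cleared), 3, cellB(live), 2, cellC(live), 5]   length 6
//   after : [cellB(live), 2, cellC(live), 5]                      length 4
// The two tail slots are cleared to undefined, and surviving pairs from the
// disposed-context prefix are re-counted into number_of_disposed_maps_.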
+
+
void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}
@@ -5311,7 +5460,7 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
class PrintHandleVisitor : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++)
PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
@@ -5330,10 +5479,10 @@ void Heap::PrintHandles() {
class CheckHandleCountVisitor : public ObjectVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
- ~CheckHandleCountVisitor() {
+ ~CheckHandleCountVisitor() override {
CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
handle_count_ += end - start;
}
@@ -5480,7 +5629,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
MarkingVisitor() : marking_stack_(10) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
@@ -5523,6 +5672,7 @@ HeapIterator::HeapIterator(Heap* heap,
filter_(nullptr),
space_iterator_(nullptr),
object_iterator_(nullptr) {
+ heap_->heap_iterator_start();
// Start the iteration.
space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
@@ -5537,6 +5687,7 @@ HeapIterator::HeapIterator(Heap* heap,
HeapIterator::~HeapIterator() {
+ heap_->heap_iterator_end();
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
@@ -5589,7 +5740,8 @@ Object* const PathTracer::kAnyGlobalObject = NULL;
class PathTracer::MarkVisitor : public ObjectVisitor {
public:
explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
+
+ void VisitPointers(Object** start, Object** end) override {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; !tracer_->found() && (p < end); p++) {
if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
@@ -5604,7 +5756,8 @@ class PathTracer::MarkVisitor : public ObjectVisitor {
class PathTracer::UnmarkVisitor : public ObjectVisitor {
public:
explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
- void VisitPointers(Object** start, Object** end) {
+
+ void VisitPointers(Object** start, Object** end) override {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
@@ -5961,9 +6114,14 @@ void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
void Heap::FreeQueuedChunks() {
if (chunks_queued_for_free_ != NULL) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
- v8::Platform::kShortRunningTask);
+ if (FLAG_concurrent_sweeping) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
+ v8::Platform::kShortRunningTask);
+ } else {
+ FreeQueuedChunks(chunks_queued_for_free_);
+ pending_unmapping_tasks_semaphore_.Signal();
+ }
chunks_queued_for_free_ = NULL;
} else {
// If we do not have anything to unmap, we just signal the semaphore
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 0e427de1c93..af9d0a6235f 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -61,6 +61,7 @@ namespace internal {
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, weak_cell_map, WeakCellMap) \
+ V(Map, transition_array_map, TransitionArrayMap) \
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, function_context_map, FunctionContextMap) \
@@ -166,9 +167,9 @@ namespace internal {
V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \
V(FixedArray, experimental_extra_natives_source_cache, \
ExperimentalExtraNativesSourceCache) \
- V(FixedArray, code_stub_natives_source_cache, CodeStubNativesSourceCache) \
V(Script, empty_script, EmptyScript) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
+ V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
V(Cell, undefined_cell, UndefinedCell) \
V(JSObject, observation_state, ObservationState) \
V(Object, symbol_registry, SymbolRegistry) \
@@ -176,19 +177,19 @@ namespace internal {
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
- V(FixedArray, dummy_vector, DummyVector) \
+ V(TypeFeedbackVector, dummy_vector, DummyVector) \
+ V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(FixedArray, detached_contexts, DetachedContexts) \
V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
V(PropertyCell, array_protector, ArrayProtector) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
- V(Object, code_stub_context, CodeStubContext) \
- V(JSObject, code_stub_exports_object, CodeStubExportsObject) \
+ V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, interpreter_table, InterpreterTable) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
@@ -208,102 +209,129 @@ namespace internal {
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
-#define INTERNALIZED_STRING_LIST(V) \
- V(Object_string, "Object") \
- V(proto_string, "__proto__") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(caller_string, "caller") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(callee_string, "callee") \
- V(constructor_string, "constructor") \
- V(default_string, "default") \
- V(dot_result_string, ".result") \
- V(eval_string, "eval") \
- V(float32x4_string, "float32x4") \
- V(Float32x4_string, "Float32x4") \
- V(int32x4_string, "int32x4") \
- V(Int32x4_string, "Int32x4") \
- V(uint32x4_string, "uint32x4") \
- V(Uint32x4_string, "Uint32x4") \
- V(bool32x4_string, "bool32x4") \
- V(Bool32x4_string, "Bool32x4") \
- V(int16x8_string, "int16x8") \
- V(Int16x8_string, "Int16x8") \
- V(uint16x8_string, "uint16x8") \
- V(Uint16x8_string, "Uint16x8") \
- V(bool16x8_string, "bool16x8") \
- V(Bool16x8_string, "Bool16x8") \
- V(int8x16_string, "int8x16") \
- V(Int8x16_string, "Int8x16") \
- V(uint8x16_string, "uint8x16") \
- V(Uint8x16_string, "Uint8x16") \
- V(bool8x16_string, "bool8x16") \
- V(Bool8x16_string, "Bool8x16") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(length_string, "length") \
- V(name_string, "name") \
- V(null_string, "null") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(nan_string, "NaN") \
- V(source_string, "source") \
- V(source_url_string, "source_url") \
- V(source_mapping_url_string, "source_mapping_url") \
- V(this_string, "this") \
- V(global_string, "global") \
- V(ignore_case_string, "ignoreCase") \
- V(multiline_string, "multiline") \
- V(sticky_string, "sticky") \
- V(unicode_string, "unicode") \
- V(harmony_tolength_string, "harmony_tolength") \
- V(input_string, "input") \
- V(index_string, "index") \
- V(last_index_string, "lastIndex") \
- V(object_string, "object") \
- V(prototype_string, "prototype") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(Map_string, "Map") \
- V(Set_string, "Set") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(for_string, "for") \
- V(for_api_string, "for_api") \
- V(Date_string, "Date") \
- V(char_at_string, "CharAt") \
- V(undefined_string, "undefined") \
- V(valueOf_string, "valueOf") \
- V(stack_string, "stack") \
- V(toString_string, "toString") \
- V(toJSON_string, "toJSON") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(illegal_access_string, "illegal access") \
- V(cell_value_string, "%cell_value") \
- V(illegal_argument_string, "illegal argument") \
- V(closure_string, "(closure)") \
- V(dot_string, ".") \
- V(compare_ic_string, "==") \
- V(strict_compare_ic_string, "===") \
- V(infinity_string, "Infinity") \
- V(minus_infinity_string, "-Infinity") \
- V(query_colon_string, "(?:)") \
- V(Generator_string, "Generator") \
- V(throw_string, "throw") \
- V(done_string, "done") \
- V(value_string, "value") \
- V(next_string, "next") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(minus_zero_string, "-0") \
- V(Array_string, "Array") \
- V(Error_string, "Error") \
- V(RegExp_string, "RegExp")
+#define INTERNALIZED_STRING_LIST(V) \
+ V(anonymous_string, "anonymous") \
+ V(apply_string, "apply") \
+ V(assign_string, "assign") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(Array_string, "Array") \
+ V(bind_string, "bind") \
+ V(bool16x8_string, "bool16x8") \
+ V(Bool16x8_string, "Bool16x8") \
+ V(bool32x4_string, "bool32x4") \
+ V(Bool32x4_string, "Bool32x4") \
+ V(bool8x16_string, "bool8x16") \
+ V(Bool8x16_string, "Bool8x16") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(bound__string, "bound ") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(call_string, "call") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(compare_ic_string, "==") \
+ V(configurable_string, "configurable") \
+ V(constructor_string, "constructor") \
+ V(construct_string, "construct") \
+ V(create_string, "create") \
+ V(Date_string, "Date") \
+ V(default_string, "default") \
+ V(defineProperty_string, "defineProperty") \
+ V(deleteProperty_string, "deleteProperty") \
+ V(display_name_string, "displayName") \
+ V(done_string, "done") \
+ V(dot_result_string, ".result") \
+ V(dot_string, ".") \
+ V(enumerable_string, "enumerable") \
+ V(enumerate_string, "enumerate") \
+ V(Error_string, "Error") \
+ V(eval_string, "eval") \
+ V(false_string, "false") \
+ V(float32x4_string, "float32x4") \
+ V(Float32x4_string, "Float32x4") \
+ V(for_api_string, "for_api") \
+ V(for_string, "for") \
+ V(function_string, "function") \
+ V(Function_string, "Function") \
+ V(Generator_string, "Generator") \
+ V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(getPrototypeOf_string, "getPrototypeOf") \
+ V(get_string, "get") \
+ V(global_string, "global") \
+ V(has_string, "has") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(infinity_string, "Infinity") \
+ V(input_string, "input") \
+ V(int16x8_string, "int16x8") \
+ V(Int16x8_string, "Int16x8") \
+ V(int32x4_string, "int32x4") \
+ V(Int32x4_string, "Int32x4") \
+ V(int8x16_string, "int8x16") \
+ V(Int8x16_string, "Int8x16") \
+ V(isExtensible_string, "isExtensible") \
+ V(isView_string, "isView") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(last_index_string, "lastIndex") \
+ V(length_string, "length") \
+ V(Map_string, "Map") \
+ V(minus_infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(name_string, "name") \
+ V(nan_string, "NaN") \
+ V(next_string, "next") \
+ V(null_string, "null") \
+ V(null_to_string, "[object Null]") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(object_string, "object") \
+ V(Object_string, "Object") \
+ V(ownKeys_string, "ownKeys") \
+ V(preventExtensions_string, "preventExtensions") \
+ V(private_api_string, "private_api") \
+ V(Promise_string, "Promise") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(Proxy_string, "Proxy") \
+ V(query_colon_string, "(?:)") \
+ V(RegExp_string, "RegExp") \
+ V(setPrototypeOf_string, "setPrototypeOf") \
+ V(set_string, "set") \
+ V(Set_string, "Set") \
+ V(source_mapping_url_string, "source_mapping_url") \
+ V(source_string, "source") \
+ V(source_url_string, "source_url") \
+ V(stack_string, "stack") \
+ V(strict_compare_ic_string, "===") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(uint16x8_string, "uint16x8") \
+ V(Uint16x8_string, "Uint16x8") \
+ V(uint32x4_string, "uint32x4") \
+ V(Uint32x4_string, "Uint32x4") \
+ V(uint8x16_string, "uint8x16") \
+ V(Uint8x16_string, "Uint8x16") \
+ V(undefined_string, "undefined") \
+ V(undefined_to_string, "[object Undefined]") \
+ V(valueOf_string, "valueOf") \
+ V(value_string, "value") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(writable_string, "writable")
#define PRIVATE_SYMBOL_LIST(V) \
V(array_iteration_kind_symbol) \
@@ -327,12 +355,17 @@ namespace internal {
V(internal_error_symbol) \
V(intl_impl_object_symbol) \
V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
V(nonexistent_symbol) \
V(nonextensible_symbol) \
V(normal_ic_symbol) \
+ V(not_mapped_symbol) \
V(observed_symbol) \
V(premonomorphic_symbol) \
+ V(promise_combined_deferred_symbol) \
V(promise_debug_marker_symbol) \
V(promise_has_handler_symbol) \
V(promise_on_resolve_symbol) \
@@ -342,19 +375,31 @@ namespace internal {
V(promise_value_symbol) \
V(sealed_symbol) \
V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
V(string_iterator_iterated_string_symbol) \
V(string_iterator_next_index_symbol) \
+ V(strong_function_transition_symbol) \
V(uninitialized_symbol)
-#define PUBLIC_SYMBOL_LIST(V) \
- V(has_instance_symbol, Symbol.hasInstance) \
- V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
- V(is_regexp_symbol, Symbol.isRegExp) \
- V(iterator_symbol, Symbol.iterator) \
- V(to_primitive_symbol, Symbol.toPrimitive) \
- V(to_string_tag_symbol, Symbol.toStringTag) \
+#define PUBLIC_SYMBOL_LIST(V) \
+ V(has_instance_symbol, Symbol.hasInstance) \
+ V(iterator_symbol, Symbol.iterator) \
+ V(match_symbol, Symbol.match) \
+ V(replace_symbol, Symbol.replace) \
+ V(search_symbol, Symbol.search) \
+ V(species_symbol, Symbol.species) \
+ V(split_symbol, Symbol.split) \
+ V(to_primitive_symbol, Symbol.toPrimitive) \
V(unscopables_symbol, Symbol.unscopables)
+// Well-Known Symbols are "Public" symbols, which have a bit set that causes
+// them to produce an undefined value when a load results in a failed access
+// check. Because this behaviour is not yet properly specified, it only
+// applies to a subset of the spec-defined Well-Known Symbols.
+#define WELL_KNOWN_SYMBOL_LIST(V) \
+ V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+ V(to_string_tag_symbol, Symbol.toStringTag)
+
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
@@ -392,6 +437,7 @@ namespace internal {
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
V(WeakCellMap) \
+ V(TransitionArrayMap) \
V(NoInterceptorResultSentinel) \
V(HashTableMap) \
V(OrderedHashTableMap) \
@@ -417,6 +463,7 @@ namespace internal {
V(JSMessageObjectMap) \
V(ForeignMap) \
V(NeanderMap) \
+ V(EmptyWeakCell) \
V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
@@ -428,6 +475,7 @@ class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
+class HistogramTimer;
class Isolate;
class MemoryReducer;
class ObjectStats;
@@ -565,13 +613,14 @@ class Heap {
#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
+ kStringTableRootIndex,
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
@@ -587,7 +636,7 @@ class Heap {
// - or mutator code (CONCURRENT_TO_SWEEPER).
enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
- enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
+ enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
@@ -605,25 +654,6 @@ class Heap {
Heap* heap_;
};
- // An optional version of the above lock that can be used for some critical
- // sections on the mutator thread; only safe since the GC currently does not
- // do concurrent compaction.
- class OptionalRelocationLock {
- public:
- OptionalRelocationLock(Heap* heap, bool concurrent)
- : heap_(heap), concurrent_(concurrent) {
- if (concurrent_) heap_->relocation_mutex_.Lock();
- }
-
- ~OptionalRelocationLock() {
- if (concurrent_) heap_->relocation_mutex_.Unlock();
- }
-
- private:
- Heap* heap_;
- bool concurrent_;
- };
-
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
struct Chunk {
@@ -732,12 +762,6 @@ class Heap {
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
- // An object may have an AllocationSite associated with it through a trailing
- // AllocationMemento. Its feedback should be updated when objects are found
- // in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object,
- ScratchpadSlotMode mode);
-
// Generated code can embed direct references to non-writable roots if
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
@@ -795,16 +819,14 @@ class Heap {
// TODO(hpayer): There is still a mismatch between capacity and actual
// committed memory size.
- bool CanExpandOldGeneration(int size) {
+ bool CanExpandOldGeneration(int size = 0) {
+ if (force_oom_) return false;
return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
}
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
- // Iterates the whole code space to clear all keyed store ICs.
- void ClearAllKeyedStoreICs();
-
// FreeSpace objects have a null map after deserialization. Update the map.
void RepairFreeListsAfterDeserialization();
@@ -877,6 +899,13 @@ class Heap {
}
Object* encountered_weak_cells() const { return encountered_weak_cells_; }
+ void set_encountered_transition_arrays(Object* transition_array) {
+ encountered_transition_arrays_ = transition_array;
+ }
+ Object* encountered_transition_arrays() const {
+ return encountered_transition_arrays_;
+ }
+
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
@@ -889,10 +918,6 @@ class Heap {
// Number of "runtime allocations" done so far.
uint32_t allocations_count() { return allocations_count_; }
- // Returns deterministic "time" value in ms. Works only with
- // FLAG_verify_predictable.
- double synthetic_time() { return allocations_count() / 2.0; }
-
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -944,8 +969,6 @@ class Heap {
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
-
inline bool OldGenerationAllocationLimitReached();
void QueueMemoryChunkForFree(MemoryChunk* chunk);
@@ -1015,6 +1038,8 @@ class Heap {
bool HasHighFragmentation();
bool HasHighFragmentation(intptr_t used, intptr_t committed);
+ void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
+ void SetOptimizeForMemoryUsage() { optimize_for_memory_usage_ = true; }
bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
// ===========================================================================
@@ -1125,6 +1150,7 @@ class Heap {
#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
Object* root(RootListIndex index) { return roots_[index]; }
@@ -1149,14 +1175,6 @@ class Heap {
roots_[kMaterializedObjectsRootIndex] = objects;
}
- void SetRootCodeStubContext(Object* value) {
- roots_[kCodeStubContextRootIndex] = value;
- }
-
- void SetRootCodeStubExportsObject(JSObject* value) {
- roots_[kCodeStubExportsObjectRootIndex] = value;
- }
-
void SetRootScriptList(Object* value) {
roots_[kScriptListRootIndex] = value;
}
@@ -1165,6 +1183,10 @@ class Heap {
roots_[kStringTableRootIndex] = value;
}
+ void SetRootNoScriptSharedFunctionInfos(Object* value) {
+ roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
+ }
+
// Set the stack limit in the roots_ array. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
@@ -1237,6 +1259,9 @@ class Heap {
// Iterate pointers to from semispace of new space found in memory interval
// from start to end within |object|.
+ void IteratePointersToFromSpace(HeapObject* target, int size,
+ ObjectSlotCallback callback);
+
void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
Address end, bool record_slots,
ObjectSlotCallback callback);
@@ -1384,13 +1409,13 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
inline void IncrementPromotedObjectsSize(int object_size) {
- DCHECK(object_size > 0);
+ DCHECK_GE(object_size, 0);
promoted_objects_size_ += object_size;
}
inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
- DCHECK(object_size > 0);
+ DCHECK_GE(object_size, 0);
semi_space_copied_object_size_ += object_size;
}
inline intptr_t semi_space_copied_object_size() {
@@ -1513,6 +1538,27 @@ class Heap {
return array_buffer_tracker_;
}
+ // ===========================================================================
+ // Allocation site tracking. =================================================
+ // ===========================================================================
+
+ // Updates the AllocationSite of a given {object}. If the global pretenuring
+ // storage is passed as {pretenuring_feedback}, the memento found count on
+ // the corresponding allocation site is immediately updated and an entry
+ // in the hash map is created. Otherwise the entry (including the count
+ // value) is cached on the local pretenuring feedback.
+ inline void UpdateAllocationSite(HeapObject* object,
+ HashMap* pretenuring_feedback);
+
+ // Removes an entry from the global pretenuring storage.
+ inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
+
+ // Merges local pretenuring feedback into the global one. Note that this
+ // method needs to be called after evacuation, as allocation sites may be
+ // evacuated and this method resolves forward pointers accordingly.
+ void MergeAllocationSitePretenuringFeedback(
+ const HashMap& local_pretenuring_feedback);
+
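As a mental model of the local/global split described in the comments above — count mementos in a per-visitor map during evacuation, then fold the counts into the global storage afterwards — here is a simplified sketch using a standard container (V8's HashMap API differs; all names here are illustrative):

    #include <unordered_map>

    using Site = const void*;  // illustrative stand-in for AllocationSite*
    using FeedbackMap = std::unordered_map<Site, int>;

    // During evacuation, each visitor counts found mementos locally...
    void RecordMemento(FeedbackMap& local, Site site) { ++local[site]; }

    // ...and after evacuation the local counts are folded into the global map.
    void Merge(FeedbackMap& global, const FeedbackMap& local) {
      for (const auto& entry : local) global[entry.first] += entry.second;
    }

Keeping feedback local during evacuation avoids synchronizing on the global table from parallel evacuation tasks.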
// =============================================================================
#ifdef VERIFY_HEAP
@@ -1536,6 +1582,7 @@ class Heap {
#endif
private:
+ class PretenuringScope;
class UnmapFreeMemoryTask;
// External strings table is a place where all external strings are
@@ -1630,7 +1677,7 @@ class Heap {
static const int kMaxMarkCompactsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
- static const int kAllocationSiteScratchpadSize = 256;
+ static const int kInitialFeedbackCapacity = 256;
Heap();
@@ -1672,12 +1719,6 @@ class Heap {
void PreprocessStackTraces();
- // Pretenuring decisions are made based on feedback collected during new
- // space evacuation. Note that between feedback collection and calling this
- // method object in old space must not move.
- // Right now we only process pretenuring feedback in high promotion mode.
- bool ProcessPretenuringFeedback();
-
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
@@ -1711,6 +1752,10 @@ class Heap {
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
Map* map);
+
+ // Initializes JSObject body starting at given offset.
+ void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
+
void InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site);
@@ -1753,16 +1798,6 @@ class Heap {
// Flush the number to string cache.
void FlushNumberStringCache();
- // Sets used allocation sites entries to undefined.
- void FlushAllocationSitesScratchpad();
-
- // Initializes the allocation sites scratchpad with undefined values.
- void InitializeAllocationSitesScratchpad();
-
- // Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site,
- ScratchpadSlotMode mode);
-
// TODO(hpayer): Allocation site pretenuring may make this method obsolete.
// Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
@@ -1789,8 +1824,6 @@ class Heap {
void IdleNotificationEpilogue(GCIdleTimeAction action,
GCIdleTimeHeapState heap_state, double start_ms,
double deadline_in_ms);
- void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
- double now_ms);
inline void UpdateAllocationsHash(HeapObject* object);
inline void UpdateAllocationsHash(uint32_t value);
@@ -1799,11 +1832,30 @@ class Heap {
void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);
+ void CompactRetainedMaps(ArrayList* retained_maps);
+
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
// objects that die later.
- void OverApproximateWeakClosure(const char* gc_reason);
+ void FinalizeIncrementalMarking(const char* gc_reason);
+
+ // Returns the timer used for a given GC type.
+ // - GCScavenger: young generation GC
+ // - GCCompactor: full GC
+ // - GCFinalizeMC: finalization of incremental full GC
+ // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
+ // memory reduction
+ HistogramTimer* GCTypeTimer(GarbageCollector collector);
+
+ // ===========================================================================
+ // Pretenuring. ==============================================================
+ // ===========================================================================
+
+ // Pretenuring decisions are made based on feedback collected during new space
+ // evacuation. Note that between feedback collection and calling this method,
+ // objects in old space must not move.
+ void ProcessPretenuringFeedback();
// ===========================================================================
// Actual GC. ================================================================
@@ -1881,18 +1933,21 @@ class Heap {
double mutator_speed);
// ===========================================================================
- // Inline allocation. ========================================================
+ // Idle notification. ========================================================
// ===========================================================================
- void LowerInlineAllocationLimit(intptr_t step);
- void ResetInlineAllocationLimit();
+ bool RecentIdleNotificationHappened();
+ void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
// ===========================================================================
- // Idle notification. ========================================================
+ // HeapIterator helpers. =====================================================
// ===========================================================================
- bool RecentIdleNotificationHappened();
- void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
+ void heap_iterator_start() { heap_iterator_depth_++; }
+
+ void heap_iterator_end() { heap_iterator_depth_--; }
+
+ bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
// ===========================================================================
// Allocation methods. =======================================================
@@ -2081,6 +2136,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
+ MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
+
// Allocates a new utility object in the old generation.
MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
@@ -2095,6 +2152,10 @@ class Heap {
MUST_USE_RESULT AllocationResult InternalizeString(String* str);
+ // ===========================================================================
+
+ void set_force_oom(bool value) { force_oom_ = value; }
+
// The amount of external memory registered through the API kept alive
// by global handles
int64_t amount_of_external_allocated_memory_;
@@ -2133,6 +2194,11 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
+ // The length of the retained_maps array at the time of context disposal.
+ // This separates maps in the retained_maps array that were created before
+ // and after context disposal.
+ int number_of_disposed_maps_;
+
int global_ic_age_;
int scan_on_scavenge_pages_;
@@ -2155,9 +2221,6 @@ class Heap {
// Running hash over allocations performed.
uint32_t raw_allocations_hash_;
- // Countdown counter, dumps allocation hash when 0.
- uint32_t dump_allocations_hash_countdown_;
-
// How many mark-sweep collections happened.
unsigned int ms_count_;
@@ -2205,6 +2268,8 @@ class Heap {
Object* encountered_weak_cells_;
+ Object* encountered_transition_arrays_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
List<GCCallbackPair> gc_epilogue_callbacks_;
@@ -2274,6 +2339,8 @@ class Heap {
ScavengeJob* scavenge_job_;
+ InlineAllocationObserver* idle_scavenge_observer_;
+
// These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
size_t crankshaft_codegen_bytes_generated_;
@@ -2296,7 +2363,12 @@ class Heap {
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
- int allocation_sites_scratchpad_length_;
+ // The feedback storage is used to store allocation sites (keys) and how often
+ // they have been visited (values) by finding a memento behind an object. The
+ // storage is only alive temporarily during a GC. The invariant is that all
+ // pointers in this map are already fixed, i.e., they do not point to
+ // forwarding pointers.
+ HashMap* global_pretenuring_feedback_;
char trace_ring_buffer_[kTraceRingBufferSize];
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
@@ -2333,18 +2405,24 @@ class Heap {
bool deserialization_complete_;
- bool concurrent_sweeping_enabled_;
-
StrongRootsList* strong_roots_list_;
ArrayBufferTracker* array_buffer_tracker_;
+ // The depth of HeapIterator nestings.
+ int heap_iterator_depth_;
+
+ // Used for testing purposes.
+ bool force_oom_;
+
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapIterator;
+ friend class IdleScavengeObserver;
friend class IncrementalMarking;
+ friend class IteratePointersToFromSpaceVisitor;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
@@ -2414,14 +2492,14 @@ class AlwaysAllocateScope {
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
public:
- inline void VisitPointers(Object** start, Object** end);
+ inline void VisitPointers(Object** start, Object** end) override;
};
// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
public:
- inline void VisitPointers(Object** start, Object** end);
+ inline void VisitPointers(Object** start, Object** end) override;
};
@@ -2682,7 +2760,7 @@ class PathTracer : public ObjectVisitor {
object_stack_(20),
no_allocation() {}
- virtual void VisitPointers(Object** start, Object** end);
+ void VisitPointers(Object** start, Object** end) override;
void Reset();
void TracePathFrom(Object** root);
@@ -2712,7 +2790,7 @@ class PathTracer : public ObjectVisitor {
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif // DEBUG
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_HEAP_H_
diff --git a/chromium/v8/src/heap/incremental-marking-inl.h b/chromium/v8/src/heap/incremental-marking-inl.h
index 5988426fd5d..0d55b83a9d5 100644
--- a/chromium/v8/src/heap/incremental-marking-inl.h
+++ b/chromium/v8/src/heap/incremental-marking-inl.h
@@ -6,42 +6,11 @@
#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
#include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
- Object* value) {
- HeapObject* value_heap_obj = HeapObject::cast(value);
- MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- if (chunk->IsLeftOfProgressBar(slot)) {
- WhiteToGreyAndPush(value_heap_obj, value_bit);
- RestartIfNotMarking();
- } else {
- return false;
- }
- } else {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- return false;
- }
- } else {
- return false;
- }
- }
- if (!is_compacting_) return false;
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- return Marking::IsBlack(obj_bit);
-}
-
-
void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
Object* value) {
if (IsMarking() && value->IsHeapObject()) {
@@ -52,7 +21,9 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value) {
- if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+ if (IsMarking()) {
+ RecordWriteOfCodeEntrySlow(host, slot, value);
+ }
}
@@ -64,57 +35,7 @@ void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
}
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- chunk->set_progress_bar(0);
- }
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
- MarkBit mark_bit) {
- DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
- DCHECK(obj->Size() >= 2 * kPointerSize);
- DCHECK(IsMarking());
- Marking::BlackToGrey(mark_bit);
- int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
- bytes_scanned_ -= obj_size;
- int64_t old_bytes_rescanned = bytes_rescanned_;
- bytes_rescanned_ = old_bytes_rescanned + obj_size;
- if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
- // If we have queued twice the heap size for rescanning then we are
- // going around in circles, scanning the same objects again and again
- // as the program mutates the heap faster than we can incrementally
- // trace it. In this case we switch to non-incremental marking in
- // order to finish off this marking phase.
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(
- heap()->isolate(),
- "Hurrying incremental marking because of lack of progress\n");
- }
- marking_speed_ = kMaxMarkingSpeed;
- }
- }
-
- heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- Marking::WhiteToGrey(mark_bit);
- heap_->mark_compact_collector()->marking_deque()->Push(obj);
-}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_INCREMENTAL_MARKING_INL_H_
diff --git a/chromium/v8/src/heap/incremental-marking-job.cc b/chromium/v8/src/heap/incremental-marking-job.cc
index 43e8b7628f6..a69dfac2fa6 100644
--- a/chromium/v8/src/heap/incremental-marking-job.cc
+++ b/chromium/v8/src/heap/incremental-marking-job.cc
@@ -53,7 +53,7 @@ void IncrementalMarkingJob::ScheduleIdleTask(Heap* heap) {
void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
- if (!delayed_task_pending_) {
+ if (!delayed_task_pending_ && FLAG_memory_reducer) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
delayed_task_pending_ = true;
made_progress_since_last_delayed_task_ = false;
@@ -91,7 +91,7 @@ void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
- Heap* heap = isolate_->heap();
+ Heap* heap = isolate()->heap();
double start_ms = heap->MonotonicallyIncreasingTimeInMs();
job_->NotifyIdleTask();
job_->NotifyIdleTaskProgress();
@@ -102,7 +102,7 @@ void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
double current_time_ms = heap->MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
double deadline_difference = deadline_in_ms - current_time_ms;
- PrintIsolate(isolate_, "%8.0f ms: ", isolate_->time_millis_since_init());
+ PrintIsolate(isolate(), "%8.0f ms: ", isolate()->time_millis_since_init());
PrintF(
"Idle task: requested idle time %.2f ms, used idle time %.2f "
"ms, deadline usage %.2f ms\n",
@@ -127,7 +127,7 @@ void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
void IncrementalMarkingJob::DelayedTask::RunInternal() {
- Heap* heap = isolate_->heap();
+ Heap* heap = isolate()->heap();
job_->NotifyDelayedTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
if (!incremental_marking->IsStopped()) {
diff --git a/chromium/v8/src/heap/incremental-marking-job.h b/chromium/v8/src/heap/incremental-marking-job.h
index fad46c12468..c998139a923 100644
--- a/chromium/v8/src/heap/incremental-marking-job.h
+++ b/chromium/v8/src/heap/incremental-marking-job.h
@@ -75,7 +75,7 @@ class IncrementalMarkingJob {
bool delayed_task_pending_;
bool made_progress_since_last_delayed_task_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_INCREMENTAL_MARKING_JOB_H_
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index cbc26516bb6..52d0ca4e51b 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -17,7 +17,6 @@
namespace v8 {
namespace internal {
-
IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_MARKING,
@@ -27,6 +26,7 @@ IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
+ observer_(*this, kAllocatedThreshold),
state_(STOPPED),
is_compacting_(false),
steps_count_(0),
@@ -42,19 +42,33 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
no_marking_scope_depth_(0),
unscanned_bytes_of_large_object_(0),
was_activated_(false),
- weak_closure_was_overapproximated_(false),
- weak_closure_approximation_rounds_(0),
+ finalize_marking_completed_(false),
+ incremental_marking_finalization_rounds_(0),
request_type_(COMPLETE_MARKING) {}
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
+ HeapObject* value_heap_obj = HeapObject::cast(value);
+ MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+ DCHECK(!Marking::IsImpossible(value_bit));
+
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ DCHECK(!Marking::IsImpossible(obj_bit));
+ bool is_black = Marking::IsBlack(obj_bit);
+
+ if (is_black && Marking::IsWhite(value_bit)) {
+ WhiteToGreyAndPush(value_heap_obj, value_bit);
+ RestartIfNotMarking();
+ }
+ return is_compacting_ && is_black;
+}
+
+
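The rewritten BaseRecordWrite above is the classic tri-color write-barrier rule: a black (fully scanned) object must never point at a white (unvisited) one, so the barrier greys the white target. A standalone sketch of just that rule (illustrative types; V8 keeps colors in per-page mark bitmaps, not in the objects):

    enum class Color { kWhite, kGrey, kBlack };

    struct Obj {
      Color color = Color::kWhite;
    };

    // Greying the value re-exposes it to the marker, preserving the
    // invariant that no black->white edge survives a mutator write.
    void WriteBarrier(Obj* host, Obj* value) {
      if (host->color == Color::kBlack && value->color == Color::kWhite) {
        value->color = Color::kGrey;  // real code also pushes it on the deque
      }
    }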
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
Object* value) {
- if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
- }
+ if (BaseRecordWrite(obj, value) && slot != NULL) {
+ // Object is not going to be rescanned, so we need to record the slot.
+ heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
}
}
@@ -81,7 +95,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
HeapObject* value) {
if (IsMarking()) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordWriteIntoCode(host, &rinfo, value);
}
}
@@ -92,7 +106,7 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
Code* host = heap_->isolate()
->inner_pointer_to_code_cache()
->GcSafeFindCodeForInnerPointer(pc);
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordWriteIntoCode(host, &rinfo, value);
}
}
@@ -101,7 +115,7 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
Object** slot,
Code* value) {
- if (BaseRecordWrite(host, slot, value)) {
+ if (BaseRecordWrite(host, value)) {
DCHECK(slot != NULL);
heap_->mark_compact_collector()->RecordCodeEntrySlot(
host, reinterpret_cast<Address>(slot), value);
@@ -112,25 +126,63 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
RelocInfo* rinfo,
Object* value) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
+ if (BaseRecordWrite(obj, value)) {
+ // Object is not going to be rescanned. We need to record the slot.
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+ Code::cast(value));
+ }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+ if (IsMarking()) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ chunk->set_progress_bar(0);
+ }
BlackToGreyAndUnshift(obj, obj_bit);
RestartIfNotMarking();
}
- // Object is either grey or white. It will be scanned if survives.
- return;
}
+}
- if (is_compacting_) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- // Object is not going to be rescanned. We need to record the slot.
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
- Code::cast(value));
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+ MarkBit mark_bit) {
+ DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+ DCHECK(obj->Size() >= 2 * kPointerSize);
+ DCHECK(IsMarking());
+ Marking::BlackToGrey(mark_bit);
+ int obj_size = obj->Size();
+ MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
+ bytes_scanned_ -= obj_size;
+ int64_t old_bytes_rescanned = bytes_rescanned_;
+ bytes_rescanned_ = old_bytes_rescanned + obj_size;
+ if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+ if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+ // If we have queued twice the heap size for rescanning then we are
+ // going around in circles, scanning the same objects again and again
+ // as the program mutates the heap faster than we can incrementally
+ // trace it. In this case we switch to non-incremental marking in
+ // order to finish off this marking phase.
+ if (FLAG_trace_incremental_marking) {
+ PrintIsolate(
+ heap()->isolate(),
+ "Hurrying incremental marking because of lack of progress\n");
+ }
+ marking_speed_ = kMaxMarkingSpeed;
}
}
+
+ heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
+}
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+ Marking::WhiteToGrey(mark_bit);
+ heap_->mark_compact_collector()->marking_deque()->Push(obj);
}
@@ -270,9 +322,9 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
IncrementalMarking* incremental_marking)
: heap_(incremental_marking->heap()) {}
- void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+ void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -474,6 +526,8 @@ void IncrementalMarking::Start(const char* reason) {
DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
DCHECK(!heap_->isolate()->serializer_enabled());
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking_start());
ResetStepCounters();
was_activated_ = true;
@@ -487,7 +541,8 @@ void IncrementalMarking::Start(const char* reason) {
state_ = SWEEPING;
}
- heap_->LowerInlineAllocationLimit(kAllocatedThreshold);
+ heap_->new_space()->AddInlineAllocationObserver(&observer_);
+
incremental_marking_job()->Start(heap_);
}
@@ -541,33 +596,177 @@ void IncrementalMarking::StartMarking() {
}
-void IncrementalMarking::MarkObjectGroups() {
- DCHECK(FLAG_overapproximate_weak_closure);
- DCHECK(!weak_closure_was_overapproximated_);
+void IncrementalMarking::MarkRoots() {
+ DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
- int old_marking_deque_top =
- heap_->mark_compact_collector()->marking_deque()->top();
+ IncrementalMarkingRootMarkingVisitor visitor(this);
+ heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
- heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
+
+void IncrementalMarking::MarkObjectGroups() {
+ DCHECK(!finalize_marking_completed_);
+ DCHECK(IsMarking());
IncrementalMarkingRootMarkingVisitor visitor(this);
+ heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
heap_->isolate()->global_handles()->IterateObjectGroups(
&visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
+ heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
+ heap_->isolate()->global_handles()->RemoveObjectGroups();
+}
+
+
+void IncrementalMarking::ProcessWeakCells() {
+ DCHECK(!finalize_marking_completed_);
+ DCHECK(IsMarking());
+
+ Object* the_hole_value = heap()->the_hole_value();
+ Object* weak_cell_obj = heap()->encountered_weak_cells();
+ Object* weak_cell_head = Smi::FromInt(0);
+ WeakCell* prev_weak_cell_obj = NULL;
+ while (weak_cell_obj != Smi::FromInt(0)) {
+ WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+ // We do not insert cleared weak cells into the list, so the value
+ // cannot be a Smi here.
+ HeapObject* value = HeapObject::cast(weak_cell->value());
+ // Remove weak cells with live objects from the list; they do not need
+ // clearing.
+ if (MarkCompactCollector::IsMarked(value)) {
+ // Record the slot if the value points to an evacuation candidate.
+ Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+ heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
+ // Unlink the entry; the list head is re-established after the loop.
+ if (prev_weak_cell_obj != NULL) {
+ prev_weak_cell_obj->set_next(weak_cell->next());
+ }
+ weak_cell_obj = weak_cell->next();
+ weak_cell->clear_next(the_hole_value);
+ } else {
+ if (weak_cell_head == Smi::FromInt(0)) {
+ weak_cell_head = weak_cell;
+ }
+ prev_weak_cell_obj = weak_cell;
+ weak_cell_obj = weak_cell->next();
+ }
+ }
+ // Top may have changed.
+ heap()->set_encountered_weak_cells(weak_cell_head);
+}
+
+
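ProcessWeakCells above is an instance of the usual unlink-while-walking pattern over an intrusive singly linked list, with the extra wrinkle that the head is re-established after the loop. A generic sketch under assumed types (not V8's WeakCell layout):

    struct Node {
      bool keep;   // here: keep cells whose value is NOT marked live
      Node* next;
    };

    // Single pass that unlinks nodes with keep == false and returns the
    // (possibly new) head.
    Node* FilterList(Node* head) {
      Node* new_head = nullptr;
      Node* prev = nullptr;
      for (Node* n = head; n != nullptr; n = n->next) {
        if (!n->keep) {
          if (prev != nullptr) prev->next = n->next;
          continue;
        }
        if (new_head == nullptr) new_head = n;
        prev = n;
      }
      return new_head;
    }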
+bool ShouldRetainMap(Map* map, int age) {
+ if (age == 0) {
+ // The map has aged. Do not retain this map.
+ return false;
+ }
+ Object* constructor = map->GetConstructor();
+ if (!constructor->IsHeapObject() ||
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
+ // The constructor is dead, no new objects with this map can
+ // be created. Do not retain this map.
+ return false;
+ }
+ return true;
+}
+
+
+void IncrementalMarking::RetainMaps() {
+ // Do not retain dead maps if the flag disables it, or if there is
+ // - memory pressure (reduce_memory_footprint_), or
+ // - a GC requested by tests or dev-tools (abort_incremental_marking_).
+ bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
+ heap()->ShouldAbortIncrementalMarking() ||
+ FLAG_retain_maps_for_n_gc == 0;
+ ArrayList* retained_maps = heap()->retained_maps();
+ int length = retained_maps->Length();
+ // The number_of_disposed_maps separates maps in the retained_maps
+ // array that were created before and after context disposal.
+ // We do not age and retain disposed maps to avoid memory leaks.
+ int number_of_disposed_maps = heap()->number_of_disposed_maps_;
+ for (int i = 0; i < length; i += 2) {
+ DCHECK(retained_maps->Get(i)->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+ if (cell->cleared()) continue;
+ int age = Smi::cast(retained_maps->Get(i + 1))->value();
+ int new_age;
+ Map* map = Map::cast(cell->value());
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
+ Marking::IsWhite(map_mark)) {
+ if (ShouldRetainMap(map, age)) {
+ MarkObject(heap(), map);
+ }
+ Object* prototype = map->prototype();
+ if (age > 0 && prototype->IsHeapObject() &&
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
+ // The prototype is not marked, age the map.
+ new_age = age - 1;
+ } else {
+ // The prototype and the constructor are marked, this map keeps only
+ // transition tree alive, not JSObjects. Do not age the map.
+ new_age = age;
+ }
+ } else {
+ new_age = FLAG_retain_maps_for_n_gc;
+ }
+ // Update the age if it changed.
+ if (new_age != age) {
+ retained_maps->Set(i + 1, Smi::FromInt(new_age));
+ }
+ }
+}
+
+
+void IncrementalMarking::FinalizeIncrementally() {
+ DCHECK(!finalize_marking_completed_);
+ DCHECK(IsMarking());
+
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
+
+ int old_marking_deque_top =
+ heap_->mark_compact_collector()->marking_deque()->top();
+
+ // After finishing incremental marking, we try to discover all unmarked
+ // objects to reduce the marking load in the final pause.
+ // 1) We scan and mark the roots again to find all changes to the root set.
+ // 2) We mark the object groups.
+ // 3) Age and retain maps embedded in optimized code.
+ // 4) Remove weak cells with live values from the list of weak cells; they
+ // do not need processing during GC.
+ MarkRoots();
+ MarkObjectGroups();
+ if (incremental_marking_finalization_rounds_ == 0) {
+ // Map retaining is needed for performance, not correctness,
+ // so we can do it only once at the beginning of the finalization.
+ RetainMaps();
+ }
+ ProcessWeakCells();
int marking_progress =
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
- ++weak_closure_approximation_rounds_;
- if ((weak_closure_approximation_rounds_ >=
- FLAG_max_object_groups_marking_rounds) ||
- (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
- weak_closure_was_overapproximated_ = true;
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
+ double delta = end - start;
+ heap_->tracer()->AddMarkingTime(delta);
+ heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
+ if (FLAG_trace_incremental_marking) {
+ PrintF(
+ "[IncrementalMarking] Finalize incrementally round %d, "
+ "spent %d ms, marking progress %d.\n",
+ incremental_marking_finalization_rounds_, static_cast<int>(delta),
+ marking_progress);
}
- heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
- heap_->isolate()->global_handles()->RemoveObjectGroups();
+ ++incremental_marking_finalization_rounds_;
+ if ((incremental_marking_finalization_rounds_ >=
+ FLAG_max_incremental_marking_finalization_rounds) ||
+ (marking_progress <
+ FLAG_min_progress_during_incremental_marking_finalization)) {
+ finalize_marking_completed_ = true;
+ }
}
@@ -689,7 +888,7 @@ void IncrementalMarking::Hurry() {
if (state() == MARKING) {
double start = 0.0;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- start = base::OS::TimeCurrentMillis();
+ start = heap_->MonotonicallyIncreasingTimeInMs();
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Hurry\n");
}
@@ -699,7 +898,7 @@ void IncrementalMarking::Hurry() {
ProcessMarkingDeque();
state_ = COMPLETE;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- double end = base::OS::TimeCurrentMillis();
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
@@ -739,7 +938,8 @@ void IncrementalMarking::Stop() {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Stopping.\n");
}
- heap_->ResetInlineAllocationLimit();
+
+ heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
@@ -767,7 +967,8 @@ void IncrementalMarking::Finalize() {
Hurry();
state_ = STOPPED;
is_compacting_ = false;
- heap_->ResetInlineAllocationLimit();
+
+ heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -778,13 +979,14 @@ void IncrementalMarking::Finalize() {
}
-void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
- DCHECK(FLAG_overapproximate_weak_closure);
- DCHECK(!weak_closure_was_overapproximated_);
+void IncrementalMarking::FinalizeMarking(CompletionAction action) {
+ DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
+ PrintF(
+ "[IncrementalMarking] requesting finalization of incremental "
+ "marking.\n");
}
- request_type_ = OVERAPPROXIMATION;
+ request_type_ = FINALIZATION;
if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
@@ -811,8 +1013,8 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
void IncrementalMarking::Epilogue() {
was_activated_ = false;
- weak_closure_was_overapproximated_ = false;
- weak_closure_approximation_rounds_ = 0;
+ finalize_marking_completed_ = false;
+ incremental_marking_finalization_rounds_ = 0;
}
@@ -957,7 +1159,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
{
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
- double start = base::OS::TimeCurrentMillis();
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
// The marking speed is driven either by the allocation rate or by the rate
// at which we are having to check the color of objects in the write
@@ -978,7 +1180,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (state_ == SWEEPING) {
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(heap_->mark_compact_collector()->IsSweepingCompleted() ||
- !heap_->concurrent_sweeping_enabled())) {
+ !FLAG_concurrent_sweeping)) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -990,9 +1192,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
- if (FLAG_overapproximate_weak_closure &&
- !weak_closure_was_overapproximated_) {
- OverApproximateWeakClosure(action);
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
} else {
MarkingComplete(action);
}
@@ -1008,7 +1209,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
// with marking.
SpeedUp();
- double end = base::OS::TimeCurrentMillis();
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
// Note that we report zero bytes here when sweeping was in progress or
// when we just started incremental marking. In these cases we did not
diff --git a/chromium/v8/src/heap/incremental-marking.h b/chromium/v8/src/heap/incremental-marking.h
index 010392875e6..be630213ac5 100644
--- a/chromium/v8/src/heap/incremental-marking.h
+++ b/chromium/v8/src/heap/incremental-marking.h
@@ -8,6 +8,7 @@
#include "src/cancelable-task.h"
#include "src/execution.h"
#include "src/heap/incremental-marking-job.h"
+#include "src/heap/spaces.h"
#include "src/objects.h"
namespace v8 {
@@ -27,7 +28,7 @@ class IncrementalMarking {
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
- enum GCRequestType { COMPLETE_MARKING, OVERAPPROXIMATION };
+ enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
struct StepActions {
StepActions(CompletionAction complete_action_,
@@ -56,12 +57,12 @@ class IncrementalMarking {
bool should_hurry() { return should_hurry_; }
void set_should_hurry(bool val) { should_hurry_ = val; }
- bool weak_closure_was_overapproximated() const {
- return weak_closure_was_overapproximated_;
+ bool finalize_marking_completed() const {
+ return finalize_marking_completed_;
}
void SetWeakClosureWasOverApproximatedForTesting(bool val) {
- weak_closure_was_overapproximated_ = val;
+ finalize_marking_completed_ = val;
}
inline bool IsStopped() { return state() == STOPPED; }
@@ -73,8 +74,7 @@ class IncrementalMarking {
inline bool IsComplete() { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
- return request_type_ == OVERAPPROXIMATION &&
- !weak_closure_was_overapproximated_;
+ return request_type_ == FINALIZATION && !finalize_marking_completed_;
}
GCRequestType request_type() const { return request_type_; }
@@ -87,7 +87,7 @@ class IncrementalMarking {
void Start(const char* reason = nullptr);
- void MarkObjectGroups();
+ void FinalizeIncrementally();
void UpdateMarkingDequeAfterScavenge();
@@ -97,7 +97,7 @@ class IncrementalMarking {
void Stop();
- void OverApproximateWeakClosure(CompletionAction action);
+ void FinalizeMarking(CompletionAction action);
void MarkingComplete(CompletionAction action);
@@ -159,7 +159,7 @@ class IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
- INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+ INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value));
@@ -174,11 +174,11 @@ class IncrementalMarking {
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
- inline void RecordWrites(HeapObject* obj);
+ void RecordWrites(HeapObject* obj);
- inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+ void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
- inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+ void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
@@ -215,6 +215,21 @@ class IncrementalMarking {
}
private:
+ class Observer : public InlineAllocationObserver {
+ public:
+ Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+ : InlineAllocationObserver(step_size),
+ incremental_marking_(incremental_marking) {}
+
+ void Step(int bytes_allocated, Address, size_t) override {
+ incremental_marking_.Step(bytes_allocated,
+ IncrementalMarking::GC_VIA_STACK_GUARD);
+ }
+
+ private:
+ IncrementalMarking& incremental_marking_;
+ };
+
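The Observer above replaces the old inline-allocation-limit mechanism: new space notifies registered observers once roughly step_size bytes have been allocated, and this observer turns each notification into an incremental marking step. A reduced sketch of that wiring (assumed names; V8's NewSpace/InlineAllocationObserver interfaces differ in detail):

    #include <algorithm>
    #include <vector>

    class AllocationObserver {
     public:
      explicit AllocationObserver(long step_size) : step_size_(step_size) {}
      virtual ~AllocationObserver() = default;
      // Invoked by the space once roughly step_size_ bytes were allocated.
      virtual void Step(long bytes_allocated) = 0;
      long step_size() const { return step_size_; }
     private:
      long step_size_;
    };

    class Space {
     public:
      void AddObserver(AllocationObserver* o) { observers_.push_back(o); }
      void RemoveObserver(AllocationObserver* o) {
        observers_.erase(std::remove(observers_.begin(), observers_.end(), o),
                         observers_.end());
      }
      // Called from the allocation path; a real space batches notifications
      // by lowering the bump-pointer limit to the nearest observer step.
      void Allocated(long bytes) {
        for (AllocationObserver* o : observers_) o->Step(bytes);
      }
     private:
      std::vector<AllocationObserver*> observers_;
    };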
int64_t SpaceLeftInOldSpace();
void SpeedUp();
@@ -223,6 +238,13 @@ class IncrementalMarking {
void StartMarking();
+ void MarkRoots();
+ void MarkObjectGroups();
+ void ProcessWeakCells();
+ // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
+ // increase chances of reusing of map transition tree in future.
+ void RetainMaps();
+
void ActivateIncrementalWriteBarrier(PagedSpace* space);
static void ActivateIncrementalWriteBarrier(NewSpace* space);
void ActivateIncrementalWriteBarrier();
@@ -246,6 +268,8 @@ class IncrementalMarking {
Heap* heap_;
+ Observer observer_;
+
State state_;
bool is_compacting_;
@@ -266,9 +290,9 @@ class IncrementalMarking {
bool was_activated_;
- bool weak_closure_was_overapproximated_;
+ bool finalize_marking_completed_;
- int weak_closure_approximation_rounds_;
+ int incremental_marking_finalization_rounds_;
GCRequestType request_type_;
@@ -276,7 +300,7 @@ class IncrementalMarking {
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_INCREMENTAL_MARKING_H_
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index 6e3ebd7fc7f..a59d36bfa1c 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -99,14 +99,6 @@ void CodeFlusher::AddCandidate(JSFunction* function) {
}
-void CodeFlusher::AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
- SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
- optimized_code_map_holder_head_ = code_map_holder;
- }
-}
-
-
JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
return reinterpret_cast<JSFunction**>(
HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
@@ -149,23 +141,52 @@ void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
}
-SharedFunctionInfo* CodeFlusher::GetNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
- return reinterpret_cast<SharedFunctionInfo*>(next_map);
-}
-
-
-void CodeFlusher::SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
-}
-
-
-void CodeFlusher::ClearNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+template <LiveObjectIterationMode T>
+HeapObject* LiveObjectIterator<T>::Next() {
+ while (!it_.Done()) {
+ HeapObject* object = nullptr;
+ while (current_cell_ != 0) {
+ uint32_t trailing_zeros = base::bits::CountTrailingZeros32(current_cell_);
+ Address addr = cell_base_ + trailing_zeros * kPointerSize;
+
+ // Clear the first bit of the found object.
+ current_cell_ &= ~(1u << trailing_zeros);
+
+ uint32_t second_bit_index = 0;
+ if (trailing_zeros < Bitmap::kBitIndexMask) {
+ second_bit_index = 1u << (trailing_zeros + 1);
+ } else {
+ second_bit_index = 0x1;
+ // The overlapping case; there has to exist a cell after the current
+ // cell.
+ DCHECK(!it_.Done());
+ it_.Advance();
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ }
+ if (T == kBlackObjects && (current_cell_ & second_bit_index)) {
+ object = HeapObject::FromAddress(addr);
+ } else if (T == kGreyObjects && !(current_cell_ & second_bit_index)) {
+ object = HeapObject::FromAddress(addr);
+ } else if (T == kAllLiveObjects) {
+ object = HeapObject::FromAddress(addr);
+ }
+ // Clear the second bit of the found object.
+ current_cell_ &= ~second_bit_index;
+
+ // We found a live object.
+ if (object != nullptr) break;
+ }
+ if (current_cell_ == 0) {
+ if (!it_.Done()) {
+ it_.Advance();
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ }
+ }
+ if (object != nullptr) return object;
+ }
+ return nullptr;
}
} // namespace internal
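The LiveObjectIterator added above scans the mark bitmap cell by cell with a count-trailing-zeros loop, consuming two bits per object. A minimal sketch of just the bit-scanning core, using GCC/Clang's __builtin_ctz as a stand-in for base::bits::CountTrailingZeros32 (addresses and the cell value are illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      uintptr_t cell_base = 0x10000;
      uint32_t cell = 0x92;  // bits 1, 4 and 7 set: three marked words
      while (cell != 0) {
        int tz = __builtin_ctz(cell);  // index of the lowest set bit
        uintptr_t addr = cell_base + tz * kPointerSize;
        std::printf("live object starts at %#lx\n",
                    static_cast<unsigned long>(addr));
        cell &= ~(1u << tz);  // consume the bit we just handled
      }
      return 0;
    }

The real iterator additionally reads the second mark bit (possibly from the next cell) to distinguish black from grey objects, as the template parameter selects.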
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 9e317e7d082..65bfdd92d87 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -33,8 +33,8 @@ namespace internal {
const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kBlackBitPattern = "11";
+const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";
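Note the encoding swap: the two mark bits per object now decode as 00 = white (unvisited), 10 = grey (on the marking deque), 11 = black (fully scanned), and 01 = impossible, so blackness can be tested as both bits set; the DCHECKs added to MarkCompactCollector::SetUp() below pin these patterns down.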
@@ -59,14 +59,13 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
heap_(heap),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
- code_flusher_(NULL),
+ code_flusher_(nullptr),
have_code_to_deoptimize_(false),
compacting_(false),
sweeping_in_progress_(false),
compaction_in_progress_(false),
pending_sweeper_tasks_semaphore_(0),
- pending_compaction_tasks_semaphore_(0),
- concurrent_compaction_tasks_active_(0) {
+ pending_compaction_tasks_semaphore_(0) {
}
#ifdef VERIFY_HEAP
@@ -74,7 +73,7 @@ class VerifyMarkingVisitor : public ObjectVisitor {
public:
explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -83,7 +82,7 @@ class VerifyMarkingVisitor : public ObjectVisitor {
}
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
@@ -91,7 +90,7 @@ class VerifyMarkingVisitor : public ObjectVisitor {
}
}
- void VisitCell(RelocInfo* rinfo) {
+ void VisitCell(RelocInfo* rinfo) override {
Code* code = rinfo->host();
DCHECK(rinfo->rmode() == RelocInfo::CELL);
if (!code->IsWeakObject(rinfo->target_cell())) {
@@ -116,6 +115,8 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
+ // The next word certainly belongs to the current object; jump over it.
+ current += kPointerSize;
}
}
}
@@ -168,7 +169,7 @@ static void VerifyMarking(Heap* heap) {
class VerifyEvacuationVisitor : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -237,12 +238,24 @@ static void VerifyEvacuation(Heap* heap) {
void MarkCompactCollector::SetUp() {
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
free_list_code_space_.Reset(new FreeList(heap_->code_space()));
free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
slots_buffer_allocator_ = new SlotsBufferAllocator();
+
+ if (FLAG_flush_code) {
+ code_flusher_ = new CodeFlusher(isolate());
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing is now on]\n");
+ }
+ }
}
@@ -250,6 +263,7 @@ void MarkCompactCollector::TearDown() {
AbortCompaction();
delete marking_deque_memory_;
delete slots_buffer_allocator_;
+ delete code_flusher_;
}
@@ -286,8 +300,8 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
TraceFragmentation(heap()->map_space());
}
- heap()->old_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
+ heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
compacting_ = evacuation_candidates_.length() > 0;
}
@@ -297,13 +311,26 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
- heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
+ heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+ }
- int number_of_pages = evacuation_candidates_.length();
- for (int i = 0; i < number_of_pages; i++) {
- Page* p = evacuation_candidates_[i];
- SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
+ int number_of_pages = evacuation_candidates_.length();
+ for (int i = 0; i < number_of_pages; i++) {
+ Page* p = evacuation_candidates_[i];
+ SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+ }
}
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ VerifyValidStoreAndSlotsBufferEntries();
+ }
+#endif
}
@@ -342,41 +369,19 @@ void MarkCompactCollector::CollectGarbage() {
DCHECK(heap_->incremental_marking()->IsStopped());
- // ClearNonLiveReferences can deoptimize code in dependent code arrays.
- // Process weak cells before so that weak cells in dependent code
- // arrays are cleared or contain only live code objects.
- ProcessAndClearWeakCells();
-
ClearNonLiveReferences();
- ClearWeakCollections();
-
- heap_->set_encountered_weak_cells(Smi::FromInt(0));
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarking(heap_);
}
#endif
- ClearInvalidStoreAndSlotsBufferEntries();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyValidStoreAndSlotsBufferEntries();
- }
-#endif
-
SweepSpaces();
- Finish();
+ EvacuateNewSpaceAndCandidates();
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- DCHECK(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
+ Finish();
}
@@ -473,24 +478,24 @@ void MarkCompactCollector::ClearMarkbits() {
}
-class MarkCompactCollector::CompactionTask : public v8::Task {
+class MarkCompactCollector::CompactionTask : public CancelableTask {
public:
explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
- : heap_(heap), spaces_(spaces) {}
+ : CancelableTask(heap->isolate()), spaces_(spaces) {}
virtual ~CompactionTask() {}
private:
- // v8::Task overrides.
- void Run() override {
- MarkCompactCollector* mark_compact = heap_->mark_compact_collector();
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override {
+ MarkCompactCollector* mark_compact =
+ isolate()->heap()->mark_compact_collector();
SlotsBuffer* evacuation_slots_buffer = nullptr;
mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
mark_compact->pending_compaction_tasks_semaphore_.Signal();
}
- Heap* heap_;
CompactionSpaceCollection* spaces_;
DISALLOW_COPY_AND_ASSIGN(CompactionTask);
@@ -547,18 +552,26 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
}
+void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
+ if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
+ SweepInParallel(heap()->paged_space(space->identity()), 0);
+ space->RefillFreeList();
+ }
+}
+
+
void MarkCompactCollector::EnsureSweepingCompleted() {
DCHECK(sweeping_in_progress_ == true);
// If sweeping is not completed or not running at all, we try to complete it
// here.
- if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
+ if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
- if (heap()->concurrent_sweeping_enabled()) {
+ if (FLAG_concurrent_sweeping) {
pending_sweeper_tasks_semaphore_.Wait();
pending_sweeper_tasks_semaphore_.Wait();
pending_sweeper_tasks_semaphore_.Wait();
@@ -566,12 +579,9 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
- RefillFreeList(heap()->paged_space(OLD_SPACE));
- RefillFreeList(heap()->paged_space(CODE_SPACE));
- RefillFreeList(heap()->paged_space(MAP_SPACE));
- heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
- heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
- heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
+ heap()->old_space()->RefillFreeList();
+ heap()->code_space()->RefillFreeList();
+ heap()->map_space()->RefillFreeList();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
@@ -591,27 +601,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
}
-void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
- FreeList* free_list;
-
- if (space == heap()->old_space()) {
- free_list = free_list_old_space_.get();
- } else if (space == heap()->code_space()) {
- free_list = free_list_code_space_.get();
- } else if (space == heap()->map_space()) {
- free_list = free_list_map_space_.get();
- } else {
- // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
- // to only refill them for the old space.
- return;
- }
-
- intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
- space->AddToAccountingStats(freed_bytes);
- space->DecrementUnsweptFreeBytes(freed_bytes);
-}
-
-
void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
@@ -669,6 +658,48 @@ const char* AllocationSpaceName(AllocationSpace space) {
}
+void MarkCompactCollector::ComputeEvacuationHeuristics(
+ int area_size, int* target_fragmentation_percent,
+ int* max_evacuated_bytes) {
+ // For memory-reducing mode we define both constants directly.
+ const int kTargetFragmentationPercentForReduceMemory = 20;
+ const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+
+ // For regular mode (which is latency-critical) we start with less
+ // aggressive defaults and switch to a trace-based approach (using the
+ // measured compaction speed) as soon as enough samples are available.
+ const int kTargetFragmentationPercent = 70;
+ const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+ // Target time for compacting a single area (= the payload of a page).
+ // Used as soon as enough compaction speed samples exist.
+ const int kTargetMsPerArea = 1;
+
+ if (heap()->ShouldReduceMemory()) {
+ *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+ *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+ } else {
+ const intptr_t estimated_compaction_speed =
+ heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ if (estimated_compaction_speed != 0) {
+ // Estimate the target fragmentation based on traced compaction speed
+ // and a goal for a single page.
+ const intptr_t estimated_ms_per_area =
+ 1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
+ *target_fragmentation_percent =
+ 100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+ if (*target_fragmentation_percent <
+ kTargetFragmentationPercentForReduceMemory) {
+ *target_fragmentation_percent =
+ kTargetFragmentationPercentForReduceMemory;
+ }
+ } else {
+ *target_fragmentation_percent = kTargetFragmentationPercent;
+ }
+ *max_evacuated_bytes = kMaxEvacuatedBytes;
+ }
+}
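
To make the trace-based branch concrete, here is a self-contained sketch with hypothetical inputs; the area size and speeds are invented for illustration, and only the arithmetic mirrors the function above:

    #include <cstdint>
    #include <cstdio>

    // Mirrors ComputeEvacuationHeuristics' non-memory-reducing branch.
    int TargetFragmentationPercent(int area_size, int64_t speed /* bytes/ms */) {
      const int kTargetMsPerArea = 1;
      const int kFloorPercent = 20;  // kTargetFragmentationPercentForReduceMemory
      if (speed == 0) return 70;     // kTargetFragmentationPercent default
      int64_t estimated_ms_per_area = 1 + area_size / speed;
      int percent =
          static_cast<int>(100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      return percent < kFloorPercent ? kFloorPercent : percent;
    }

    int main() {
      const int area_size = 500 * 1024;  // assumed ~page payload
      // Fast compactor: one area fits the 1 ms budget -> clamped to the floor.
      printf("%d\n", TargetFragmentationPercent(area_size, 1000000));  // 20
      // Slow compactor: ~6 ms per area -> only heavily fragmented pages qualify.
      printf("%d\n", TargetFragmentationPercent(area_size, 100000));   // 84
      return 0;
    }
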
+
+
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
@@ -676,7 +707,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int area_size = space->AreaSize();
// Pairs of (live_bytes_in_page, page).
- std::vector<std::pair<int, Page*> > pages;
+ typedef std::pair<int, Page*> LiveBytesPagePair;
+ std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
PageIterator it(space);
@@ -702,8 +734,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int candidate_count = 0;
int total_live_bytes = 0;
- bool reduce_memory =
- heap()->ShouldReduceMemory() || heap()->HasLowAllocationRate();
+ const bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
@@ -724,23 +755,25 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
}
} else {
- const int kTargetFragmentationPercent = 50;
- const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
-
- const int kTargetFragmentationPercentForReduceMemory = 20;
- const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
-
+ // The following approach determines the pages that should be evacuated.
+ //
+ // We use two conditions to decide whether a page qualifies as an evacuation
+ // candidate:
+ // * Target fragmentation: How fragmented is the page, i.e., what is the
+ //   ratio of live bytes to the capacity of this page (= area).
+ // * Evacuation quota: A global quota determining how many bytes should be
+ //   compacted.
+ //
+ // The algorithm sorts all pages by live bytes and then iterates over them,
+ // starting with the page that has the most free memory, adding pages to the
+ // set of evacuation candidates as long as both conditions (fragmentation
+ // and quota) hold.
int max_evacuated_bytes;
int target_fragmentation_percent;
+ ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+ &max_evacuated_bytes);
- if (reduce_memory) {
- target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
- max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
- } else {
- target_fragmentation_percent = kTargetFragmentationPercent;
- max_evacuated_bytes = kMaxEvacuatedBytes;
- }
- intptr_t free_bytes_threshold =
+ const intptr_t free_bytes_threshold =
target_fragmentation_percent * (area_size / 100);
// Sort pages from the most free to the least free, then select
@@ -748,25 +781,28 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
// - the total size of evacuated objects does not exceed the specified
// limit.
// - fragmentation of (n+1)-th page does not exceed the specified limit.
- std::sort(pages.begin(), pages.end());
+ std::sort(pages.begin(), pages.end(),
+ [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+ return a.first < b.first;
+ });
for (size_t i = 0; i < pages.size(); i++) {
int live_bytes = pages[i].first;
int free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
- (free_bytes >= free_bytes_threshold &&
- total_live_bytes + live_bytes <= max_evacuated_bytes)) {
+ ((free_bytes >= free_bytes_threshold) &&
+ ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
candidate_count++;
total_live_bytes += live_bytes;
}
if (FLAG_trace_fragmentation_verbose) {
- PrintF(
- "Page in %s: %d KB free [fragmented if this >= %d KB], "
- "sum of live bytes in fragmented pages %d KB [max is %d KB]\n",
- AllocationSpaceName(space->identity()),
- static_cast<int>(free_bytes / KB),
- static_cast<int>(free_bytes_threshold / KB),
- static_cast<int>(total_live_bytes / KB),
- static_cast<int>(max_evacuated_bytes / KB));
+ PrintIsolate(isolate(),
+ "compaction-selection-page: space=%s free_bytes_page=%d "
+ "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
+ "sum_compaction_kb=%d "
+ "compaction_limit_kb=%d\n",
+ AllocationSpaceName(space->identity()), free_bytes / KB,
+ free_bytes_threshold / KB, target_fragmentation_percent,
+ total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocate for the evacuated objects
@@ -775,20 +811,20 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
- if (estimated_released_pages == 0 && !FLAG_always_compact)
+ if ((estimated_released_pages == 0) && !FLAG_always_compact) {
candidate_count = 0;
+ }
for (int i = 0; i < candidate_count; i++) {
AddEvacuationCandidate(pages[i].second);
}
}
if (FLAG_trace_fragmentation) {
- PrintF(
- "Collected %d evacuation candidates [%d KB live] for space %s "
- "[mode %s]\n",
- candidate_count, static_cast<int>(total_live_bytes / KB),
- AllocationSpaceName(space->identity()),
- (reduce_memory ? "reduce memory footprint" : "normal"));
+ PrintIsolate(isolate(),
+ "compaction-selection: space=%s reduce_memory=%d pages=%d "
+ "total_live_bytes=%d\n",
+ AllocationSpaceName(space->identity()), reduce_memory,
+ candidate_count, total_live_bytes / KB);
}
}
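
A runnable miniature of the selection loop above, with invented page sizes; the 70% threshold and 4-page quota follow the non-memory-reducing defaults from ComputeEvacuationHeuristics:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      using LiveBytesPagePair = std::pair<int, int>;  // (live bytes, page id)
      const int area_size = 500 * 1024;                         // assumed payload
      const int free_bytes_threshold = 70 * (area_size / 100);  // 70% target
      const int max_evacuated_bytes = 4 * area_size;            // evacuation quota
      std::vector<LiveBytesPagePair> pages = {
          {400 * 1024, 1}, {40 * 1024, 2}, {120 * 1024, 3}, {90 * 1024, 4}};
      std::sort(pages.begin(), pages.end(),
                [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                  return a.first < b.first;
                });
      int total_live_bytes = 0;
      for (const LiveBytesPagePair& p : pages) {
        const int free_bytes = area_size - p.first;
        if (free_bytes >= free_bytes_threshold &&
            total_live_bytes + p.first <= max_evacuated_bytes) {
          total_live_bytes += p.first;
          printf("candidate: page %d, live %d KB\n", p.second, p.first / 1024);
        }
      }
      // Selects pages 2, 4, 3 (most free first); page 1 is too full to evacuate.
      return 0;
    }
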
@@ -834,6 +870,7 @@ void MarkCompactCollector::Prepare() {
ClearMarkbits();
AbortWeakCollections();
AbortWeakCells();
+ AbortTransitionArrays();
AbortCompaction();
was_marked_incrementally_ = false;
}
@@ -859,10 +896,21 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+
+ // The hashing of weak_object_to_code_table is no longer valid.
+ heap()->weak_object_to_code_table()->Rehash(
+ heap()->isolate()->factory()->undefined_value());
+
+ // Clear the marking state of live large objects.
+ heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
+ heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
// The stub cache is not traversed during GC; clear the cache to
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
@@ -876,6 +924,13 @@ void MarkCompactCollector::Finish() {
}
heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
+
+ if (marking_parity_ == EVEN_MARKING_PARITY) {
+ marking_parity_ = ODD_MARKING_PARITY;
+ } else {
+ DCHECK(marking_parity_ == ODD_MARKING_PARITY);
+ marking_parity_ = EVEN_MARKING_PARITY;
+ }
}
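
The parity flip that used to sit at the collector's top level now runs at the end of Finish(); it is just a two-state toggle, presumably so per-object bookkeeping can tell successive full GCs apart. An equivalent sketch:

    MarkingParity Flip(MarkingParity p) {
      return (p == EVEN_MARKING_PARITY) ? ODD_MARKING_PARITY
                                        : EVEN_MARKING_PARITY;
    }
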
@@ -928,7 +983,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
PrintF(" - age: %d]\n", code->GetAge());
}
// Always flush the optimized code map if there is one.
- if (!shared->optimized_code_map()->IsSmi()) {
+ if (!shared->OptimizedCodeMapIsCleared()) {
shared->ClearOptimizedCodeMap();
}
shared->set_code(lazy_compile);
@@ -975,7 +1030,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
PrintF(" - age: %d]\n", code->GetAge());
}
// Always flush the optimized code map if there is one.
- if (!candidate->optimized_code_map()->IsSmi()) {
+ if (!candidate->OptimizedCodeMapIsCleared()) {
candidate->ClearOptimizedCodeMap();
}
candidate->set_code(lazy_compile);
@@ -993,85 +1048,6 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
}
-void CodeFlusher::ProcessOptimizedCodeMaps() {
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
-
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
-
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
- ClearNextCodeMap(holder);
-
- // Process context-dependent entries in the optimized code map.
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- int new_length = SharedFunctionInfo::kEntriesStart;
- int old_length = code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
- i += SharedFunctionInfo::kEntryLength) {
- // Each entry contains [ context, code, literals, ast-id ] as fields.
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
- Context* context =
- Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
- HeapObject* code = HeapObject::cast(
- code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
- FixedArray* literals = FixedArray::cast(
- code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
- Smi* ast_id =
- Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
- if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
- if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
- if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
- // Move every slot in the entry and record slots when needed.
- code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
- code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
- code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
- code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
- Object** code_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kCachedCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_map, code_slot, *code_slot);
- Object** context_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kContextOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_map, context_slot, *context_slot);
- Object** literals_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kLiteralsOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(
- code_map, literals_slot, *literals_slot);
- new_length += SharedFunctionInfo::kEntryLength;
- }
-
- // Process context-independent entry in the optimized code map.
- Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
- if (shared_object->IsCode()) {
- Code* shared_code = Code::cast(shared_object);
- if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
- code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
- } else {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
- Object** slot =
- code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
- isolate_->heap()->mark_compact_collector()->RecordSlot(code_map, slot,
- *slot);
- }
- }
-
- // Trim the optimized code map if entries have been removed.
- if (new_length < old_length) {
- holder->TrimOptimizedCodeMap(old_length - new_length);
- }
-
- holder = next_holder;
- }
-
- optimized_code_map_holder_head_ = NULL;
-}
-
-
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1142,80 +1118,6 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
}
-void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- FixedArray* code_map =
- FixedArray::cast(code_map_holder->optimized_code_map());
- DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(code_map);
- isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons code-map: ");
- code_map_holder->ShortPrint();
- PrintF("]\n");
- }
-
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
- if (holder == code_map_holder) {
- next_holder = GetNextCodeMap(code_map_holder);
- optimized_code_map_holder_head_ = next_holder;
- ClearNextCodeMap(code_map_holder);
- } else {
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
-
- if (next_holder == code_map_holder) {
- next_holder = GetNextCodeMap(code_map_holder);
- SetNextCodeMap(holder, next_holder);
- ClearNextCodeMap(code_map_holder);
- break;
- }
-
- holder = next_holder;
- }
- }
-}
-
-
-void CodeFlusher::EvictJSFunctionCandidates() {
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- DCHECK(jsfunction_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- DCHECK(shared_function_info_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictOptimizedCodeMaps() {
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
- EvictOptimizedCodeMap(holder);
- holder = next_holder;
- }
- DCHECK(optimized_code_map_holder_head_ == NULL);
-}
-
-
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
Heap* heap = isolate_->heap();
@@ -1231,14 +1133,6 @@ void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
}
-MarkCompactCollector::~MarkCompactCollector() {
- if (code_flusher_ != NULL) {
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
@@ -1437,11 +1331,11 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) VisitPointer(p);
}
- void VisitPointer(Object** slot) {
+ void VisitPointer(Object** slot) override {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
@@ -1469,8 +1363,9 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
- MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
- frame->LookupCode());
+ Code* optimized_code = frame->LookupCode();
+ MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
+ MarkObject(optimized_code, optimized_code_mark);
}
}
}
@@ -1511,15 +1406,15 @@ class RootMarkingVisitor : public ObjectVisitor {
explicit RootMarkingVisitor(Heap* heap)
: collector_(heap->mark_compact_collector()) {}
- void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+ void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
// Skip the weak next code link in a code object, which is visited in
// ProcessTopOptimizedFrame.
- void VisitNextCodeLink(Object** p) {}
+ void VisitNextCodeLink(Object** p) override {}
private:
void MarkObjectByPointer(Object** p) {
@@ -1554,7 +1449,7 @@ class StringTableCleaner : public ObjectVisitor {
public:
explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
Object* o = *p;
@@ -1592,8 +1487,9 @@ typedef StringTableCleaner<true> ExternalStringTableCleaner;
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- if (Marking::IsBlackOrGrey(
- Marking::MarkBitFrom(HeapObject::cast(object)))) {
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
+ DCHECK(!Marking::IsGrey(mark_bit));
+ if (Marking::IsBlack(mark_bit)) {
return object;
} else if (object->IsAllocationSite() &&
!(AllocationSite::cast(object)->IsZombie())) {
@@ -1631,117 +1527,224 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
}
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
DCHECK(!marking_deque()->IsFull());
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ LiveObjectIterator<kGreyObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ MarkBit markbit = Marking::MarkBitFrom(object);
+ DCHECK(Marking::IsGrey(markbit));
+ Marking::GreyToBlack(markbit);
+ PushBlack(object);
+ if (marking_deque()->IsFull()) return;
+ }
+}
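
The deleted DCHECKs recorded the two-bit mark encoding that LiveObjectIterator now hides: white = "00", black = "10", grey = "11", and "01" cannot occur. A tiny sketch of the grey-to-black step under that encoding (treating the first listed bit as the low bit is my assumption):

    enum MarkPattern { kWhite = 0x0, kBlack = 0x1, kGrey = 0x3 };
    // Clearing the second bit turns grey (11) into black (10); white stays white.
    inline MarkPattern GreyToBlack(MarkPattern m) {
      return static_cast<MarkPattern>(m & 0x1);
    }
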
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
- const MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+ virtual ~HeapObjectVisitor() {}
+ virtual bool Visit(HeapObject* object) = 0;
+};
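
HeapObjectVisitor is the callback interface the new evacuation code is built on: Visit() is called once per live object and returns whether the object was handled. A trivial illustrative implementation (ignoring that the real class is nested and private):

    // Counts the live objects it is shown; always reports success.
    class CountingVisitor final : public MarkCompactCollector::HeapObjectVisitor {
     public:
      bool Visit(HeapObject* object) override {
        count_++;
        return true;
      }
      int count() const { return count_; }

     private:
      int count_ = 0;
    };
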
- MarkBit::CellType grey_objects;
- if (it.HasNext()) {
- const MarkBit::CellType next_cell = *(cell + 1);
- grey_objects = current_cell & ((current_cell >> 1) |
- (next_cell << (Bitmap::kBitsPerCell - 1)));
- } else {
- grey_objects = current_cell & (current_cell >> 1);
- }
- int offset = 0;
- while (grey_objects != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
- grey_objects >>= trailing_zeros;
- offset += trailing_zeros;
- MarkBit markbit(cell, 1 << offset);
- DCHECK(Marking::IsGrey(markbit));
- Marking::GreyToBlack(markbit);
- Address addr = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(addr);
- PushBlack(object);
- if (marking_deque()->IsFull()) return;
- offset += 2;
- grey_objects >>= 2;
+class MarkCompactCollector::EvacuateVisitorBase
+ : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+ EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
+ : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+ bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+ HeapObject** target_object) {
+ int size = object->Size();
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+ if (allocation.To(target_object)) {
+ heap_->mark_compact_collector()->MigrateObject(
+ *target_object, object, size, target_space->identity(),
+ evacuation_slots_buffer_);
+ return true;
}
-
- grey_objects >>= (Bitmap::kBitsPerCell - 1);
+ return false;
}
-}
-
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
- NewSpace* new_space, NewSpacePage* p) {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ protected:
+ Heap* heap_;
+ SlotsBuffer** evacuation_slots_buffer_;
+};
- MarkBit::CellType* cells = p->markbits()->cells();
- int survivors_size = 0;
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
+ : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+ static const intptr_t kLabSize = 4 * KB;
+ static const intptr_t kMaxLabObjectSize = 256;
+
+ explicit EvacuateNewSpaceVisitor(Heap* heap,
+ SlotsBuffer** evacuation_slots_buffer,
+ HashMap* local_pretenuring_feedback)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ buffer_(LocalAllocationBuffer::InvalidBuffer()),
+ space_to_allocate_(NEW_SPACE),
+ promoted_size_(0),
+ semispace_copied_size_(0),
+ local_pretenuring_feedback_(local_pretenuring_feedback) {}
+
+ bool Visit(HeapObject* object) override {
+ heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
+ int size = object->Size();
+ HeapObject* target_object = nullptr;
+ if (heap_->ShouldBePromoted(object->address(), size) &&
+ TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+ // If we end up needing more special cases, we should factor this out.
+ if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
+ heap_->array_buffer_tracker()->Promote(
+ JSArrayBuffer::cast(target_object));
+ }
+ promoted_size_ += size;
+ return true;
+ }
+ HeapObject* target = nullptr;
+ AllocationSpace space = AllocateTargetObject(object, &target);
+ heap_->mark_compact_collector()->MigrateObject(
+ HeapObject::cast(target), object, size, space,
+ (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+ if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+ heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+ }
+ semispace_copied_size_ += size;
+ return true;
+ }
- MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
+ intptr_t promoted_size() { return promoted_size_; }
+ intptr_t semispace_copied_size() { return semispace_copied_size_; }
- int offset = 0;
- while (current_cell != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
- current_cell >>= trailing_zeros;
- offset += trailing_zeros;
- Address address = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(address);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ private:
+ enum NewSpaceAllocationMode {
+ kNonstickyBailoutOldSpace,
+ kStickyBailoutOldSpace,
+ };
+
+ inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+ HeapObject** target_object) {
+ const int size = old_object->Size();
+ AllocationAlignment alignment = old_object->RequiredAlignment();
+ AllocationResult allocation;
+ if (space_to_allocate_ == NEW_SPACE) {
+ if (size > kMaxLabObjectSize) {
+ allocation =
+ AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
+ } else {
+ allocation = AllocateInLab(size, alignment);
+ }
+ }
+ if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
+ allocation = AllocateInOldSpace(size, alignment);
+ }
+ bool ok = allocation.To(target_object);
+ DCHECK(ok);
+ USE(ok);
+ return space_to_allocate_;
+ }
- int size = object->Size();
- survivors_size += size;
+ inline bool NewLocalAllocationBuffer() {
+ AllocationResult result =
+ AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
+ LocalAllocationBuffer saved_old_buffer = buffer_;
+ buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (buffer_.IsValid()) {
+ buffer_.TryMerge(&saved_old_buffer);
+ return true;
+ }
+ return false;
+ }
- Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+ inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+ AllocationAlignment alignment,
+ NewSpaceAllocationMode mode) {
+ AllocationResult allocation =
+ heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ if (!heap_->new_space()->AddFreshPageSynchronized()) {
+ if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+ } else {
+ allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+ alignment);
+ if (allocation.IsRetry()) {
+ if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+ }
+ }
+ }
+ return allocation;
+ }
- offset += 2;
- current_cell >>= 2;
+ inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+ AllocationAlignment alignment) {
+ AllocationResult allocation =
+ heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ FatalProcessOutOfMemory(
+ "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+ }
+ return allocation;
+ }
- // TODO(hpayer): Refactor EvacuateObject and call this function instead.
- if (heap()->ShouldBePromoted(object->address(), size) &&
- TryPromoteObject(object, size)) {
- continue;
+ inline AllocationResult AllocateInLab(int size_in_bytes,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!buffer_.IsValid()) {
+ if (!NewLocalAllocationBuffer()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
}
-
- AllocationAlignment alignment = object->RequiredAlignment();
- AllocationResult allocation = new_space->AllocateRaw(size, alignment);
- if (allocation.IsRetry()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
+ }
+ allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
}
- allocation = new_space->AllocateRaw(size, alignment);
- DCHECK(!allocation.IsRetry());
}
- Object* target = allocation.ToObjectChecked();
+ }
+ return allocation;
+ }
- MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
- }
- heap()->IncrementSemiSpaceCopiedObjectSize(size);
+ LocalAllocationBuffer buffer_;
+ AllocationSpace space_to_allocate_;
+ intptr_t promoted_size_;
+ intptr_t semispace_copied_size_;
+ HashMap* local_pretenuring_feedback_;
+};
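
EvacuateNewSpaceVisitor's allocation logic reads most easily as a fallback chain; a comment-only summary of the code above:

    // size > kMaxLabObjectSize: allocate in new space directly; on failure fall
    //   back to old space for this one object (kNonstickyBailoutOldSpace).
    // size <= kMaxLabObjectSize: bump-allocate from the 4 KB LAB; refilling the
    //   LAB uses kStickyBailoutOldSpace, so once new space is exhausted,
    //   space_to_allocate_ flips to OLD_SPACE for every later object.
    // Old-space allocation failure during evacuation is a fatal OOM.
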
+
+
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
+ : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+ EvacuateOldSpaceVisitor(Heap* heap,
+ CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ compaction_spaces_(compaction_spaces) {}
+
+ bool Visit(HeapObject* object) override {
+ CompactionSpace* target_space = compaction_spaces_->Get(
+ Page::FromAddress(object->address())->owner()->identity());
+ HeapObject* target_object = nullptr;
+ if (TryEvacuateObject(target_space, object, &target_object)) {
+ DCHECK(object->map_word().IsForwardingAddress());
+ return true;
}
- *cells = 0;
+ return false;
}
- return survivors_size;
-}
+
+ private:
+ CompactionSpaceCollection* compaction_spaces_;
+};
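
Both visitors are presumably driven by a loop over the live objects of each page; a hedged sketch (the iterator mode and the abort handling are assumptions, not the patch's actual call site):

    LiveObjectIterator<kBlackObjects> it(page);
    HeapObject* object = nullptr;
    while ((object = it.Next()) != nullptr) {
      if (!visitor->Visit(object)) {
        // EvacuateOldSpaceVisitor returns false when the compaction space
        // cannot allocate; the caller must abort evacuation of this page.
        break;
      }
    }
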
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
@@ -1946,7 +1949,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
- code->CodeIterateBody(visitor);
+ Code::BodyDescriptor::IterateBody(code, visitor);
}
ProcessMarkingDeque();
return;
@@ -1955,71 +1958,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
-void MarkCompactCollector::RetainMaps() {
- if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() ||
- FLAG_retain_maps_for_n_gc == 0) {
- // Do not retain dead maps if flag disables it or there is
- // - memory pressure (reduce_memory_footprint_),
- // - GC is requested by tests or dev-tools (abort_incremental_marking_).
- return;
- }
-
- ArrayList* retained_maps = heap()->retained_maps();
- int length = retained_maps->Length();
- int new_length = 0;
- for (int i = 0; i < length; i += 2) {
- DCHECK(retained_maps->Get(i)->IsWeakCell());
- WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
- if (cell->cleared()) continue;
- int age = Smi::cast(retained_maps->Get(i + 1))->value();
- int new_age;
- Map* map = Map::cast(cell->value());
- MarkBit map_mark = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark)) {
- if (age == 0) {
- // The map has aged. Do not retain this map.
- continue;
- }
- Object* constructor = map->GetConstructor();
- if (!constructor->IsHeapObject() || Marking::IsWhite(Marking::MarkBitFrom(
- HeapObject::cast(constructor)))) {
- // The constructor is dead, no new objects with this map can
- // be created. Do not retain this map.
- continue;
- }
- Object* prototype = map->prototype();
- if (prototype->IsHeapObject() &&
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
- // The prototype is not marked, age the map.
- new_age = age - 1;
- } else {
- // The prototype and the constructor are marked, this map keeps only
- // transition tree alive, not JSObjects. Do not age the map.
- new_age = age;
- }
- MarkObject(map, map_mark);
- } else {
- new_age = FLAG_retain_maps_for_n_gc;
- }
- if (i != new_length) {
- retained_maps->Set(new_length, cell);
- Object** slot = retained_maps->Slot(new_length);
- RecordSlot(retained_maps, slot, cell);
- retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
- } else if (new_age != age) {
- retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
- }
- new_length += 2;
- }
- Object* undefined = heap()->undefined_value();
- for (int i = new_length; i < length; i++) {
- retained_maps->Clear(i, undefined);
- }
- if (new_length != length) retained_maps->SetLength(new_length);
- ProcessMarkingDeque();
-}
-
-
void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
DCHECK(!marking_deque_.in_use());
if (marking_deque_memory_ == NULL) {
@@ -2105,21 +2043,25 @@ void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
+ start_time = heap_->MonotonicallyIncreasingTimeInMs();
}
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
PostponeInterruptsScope postpone(isolate());
- IncrementalMarking* incremental_marking = heap_->incremental_marking();
- if (was_marked_incrementally_) {
- incremental_marking->Finalize();
- } else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Stop();
- if (marking_deque_.in_use()) {
- marking_deque_.Uninitialize(true);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
+ IncrementalMarking* incremental_marking = heap_->incremental_marking();
+ if (was_marked_incrementally_) {
+ incremental_marking->Finalize();
+ } else {
+ // Abort any pending incremental activities e.g. incremental sweeping.
+ incremental_marking->Stop();
+ if (marking_deque_.in_use()) {
+ marking_deque_.Uninitialize(true);
+ }
}
}
@@ -2131,20 +2073,23 @@ void MarkCompactCollector::MarkLiveObjects() {
EnsureMarkingDequeIsCommittedAndInitialize(
MarkCompactCollector::kMaxMarkingDequeSize);
- PrepareForCodeFlushing();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
+ PrepareForCodeFlushing();
+ }
RootMarkingVisitor root_visitor(heap());
- MarkRoots(&root_visitor);
-
- ProcessTopOptimizedFrame(&root_visitor);
- // Retaining dying maps should happen before or during ephemeral marking
- // because a map could keep the key of an ephemeron alive. Note that map
- // aging is imprecise: maps that are kept alive only by ephemerons will age.
- RetainMaps();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
+ MarkRoots(&root_visitor);
+ ProcessTopOptimizedFrame(&root_visitor);
+ }
{
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
@@ -2172,72 +2117,92 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessEphemeralMarking(&root_visitor, true);
}
- AfterMarking();
-
if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+ heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
+ start_time);
+ }
+ if (FLAG_track_gc_object_stats) {
+ if (FLAG_trace_gc_object_stats) {
+ heap()->object_stats_->TraceObjectStats();
+ }
+ heap()->object_stats_->CheckpointObjectStats();
}
}
-void MarkCompactCollector::AfterMarking() {
- // Prune the string table removing all strings only pointed to by the
- // string table. Cannot use string_table() here because the string
- // table is marked.
- StringTable* string_table = heap()->string_table();
- InternalizedStringTableCleaner internalized_visitor(heap());
- string_table->IterateElements(&internalized_visitor);
- string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+void MarkCompactCollector::ClearNonLiveReferences() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
- ExternalStringTableCleaner external_visitor(heap());
- heap()->external_string_table_.Iterate(&external_visitor);
- heap()->external_string_table_.CleanUp();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_STRING_TABLE);
- // Process the weak references.
- MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
+ // Prune the string table removing all strings only pointed to by the
+ // string table. Cannot use string_table() here because the string
+ // table is marked.
+ StringTable* string_table = heap()->string_table();
+ InternalizedStringTableCleaner internalized_visitor(heap());
+ string_table->IterateElements(&internalized_visitor);
+ string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
- // Remove object groups after marking phase.
- heap()->isolate()->global_handles()->RemoveObjectGroups();
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.Iterate(&external_visitor);
+ heap()->external_string_table_.CleanUp();
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
+ // Process the weak references.
+ MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+ heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
+
+ // Remove object groups after marking phase.
+ heap()->isolate()->global_handles()->RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+ }
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
code_flusher_->ProcessCandidates();
}
- if (FLAG_track_gc_object_stats) {
- if (FLAG_trace_gc_object_stats) {
- heap()->object_stats_->TraceObjectStats();
- }
- heap()->object_stats_->CheckpointObjectStats();
+
+ DependentCode* dependent_code_list;
+ Object* non_live_map_list;
+ ClearWeakCells(&non_live_map_list, &dependent_code_list);
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
+ ClearSimpleMapTransitions(non_live_map_list);
+ ClearFullMapTransitions();
}
-}
+ MarkDependentCodeForDeoptimization(dependent_code_list);
-void MarkCompactCollector::ClearNonLiveReferences() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_NONLIVEREFERENCES);
- // Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. This action
- // is carried out only on maps of JSObjects and related subtypes.
- HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next(); obj != NULL;
- obj = map_iterator.Next()) {
- Map* map = Map::cast(obj);
+ ClearWeakCollections();
- if (!map->CanTransition()) continue;
+ ClearInvalidStoreAndSlotsBufferEntries();
+}
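
The restructured ClearNonLiveReferences is now a fixed pipeline; a comment-only outline in the order wired above:

    // 1. Prune the internalized and external string tables.
    // 2. Process weak lists and drop dead allocation sites (weak retainer).
    // 3. Remove object groups / implicit ref groups from global handles.
    // 4. Flush code for collected candidates (if code flushing is enabled).
    // 5. ClearWeakCells: split out the non-live-map list + dependent-code list.
    // 6. Clear simple (WeakCell) and full (TransitionArray) map transitions.
    // 7. Deoptimize code on the collected dependent-code list.
    // 8. Clear weak collections, then invalid store/slots buffer entries.
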
- MarkBit map_mark = Marking::MarkBitFrom(map);
- ClearNonLivePrototypeTransitions(map);
- ClearNonLiveMapTransitions(map, map_mark);
- if (Marking::IsWhite(map_mark)) {
- have_code_to_deoptimize_ |=
- map->dependent_code()->MarkCodeForDeoptimization(
- isolate(), DependentCode::kWeakCodeGroup);
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
- }
+void MarkCompactCollector::MarkDependentCodeForDeoptimization(
+ DependentCode* list_head) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
+ Isolate* isolate = this->isolate();
+ DependentCode* current = list_head;
+ while (current->length() > 0) {
+ have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
+ isolate, DependentCode::kWeakCodeGroup);
+ current = current->next_link();
}
WeakHashTable* table = heap_->weak_object_to_code_table();
@@ -2252,7 +2217,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (WeakCell::cast(key)->cleared()) {
have_code_to_deoptimize_ |=
DependentCode::cast(value)->MarkCodeForDeoptimization(
- isolate(), DependentCode::kWeakCodeGroup);
+ isolate, DependentCode::kWeakCodeGroup);
table->set(key_index, heap_->the_hole_value());
table->set(value_index, heap_->the_hole_value());
table->ElementRemoved();
@@ -2261,165 +2226,145 @@ void MarkCompactCollector::ClearNonLiveReferences() {
}
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
- FixedArray* prototype_transitions =
- TransitionArray::GetPrototypeTransitions(map);
- int number_of_transitions =
- TransitionArray::NumberOfPrototypeTransitions(prototype_transitions);
-
- const int header = TransitionArray::kProtoTransitionHeaderSize;
- int new_number_of_transitions = 0;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* cell = prototype_transitions->get(header + i);
- if (!WeakCell::cast(cell)->cleared()) {
- if (new_number_of_transitions != i) {
- prototype_transitions->set(header + new_number_of_transitions, cell);
- Object** slot = prototype_transitions->RawFieldOfElementAt(
- header + new_number_of_transitions);
- RecordSlot(prototype_transitions, slot, cell);
+void MarkCompactCollector::ClearSimpleMapTransitions(
+ Object* non_live_map_list) {
+ Object* the_hole_value = heap()->the_hole_value();
+ Object* weak_cell_obj = non_live_map_list;
+ while (weak_cell_obj != Smi::FromInt(0)) {
+ WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
+ Map* map = Map::cast(weak_cell->value());
+ DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
+ Object* potential_parent = map->constructor_or_backpointer();
+ if (potential_parent->IsMap()) {
+ Map* parent = Map::cast(potential_parent);
+ if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
+ parent->raw_transitions() == weak_cell) {
+ ClearSimpleMapTransition(parent, map);
}
- new_number_of_transitions++;
}
- }
-
- if (new_number_of_transitions != number_of_transitions) {
- TransitionArray::SetNumberOfPrototypeTransitions(prototype_transitions,
- new_number_of_transitions);
- }
-
- // Fill slots that became free with undefined value.
- for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
- prototype_transitions->set_undefined(header + i);
- }
-}
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
- MarkBit map_mark) {
- Object* potential_parent = map->GetBackPointer();
- if (!potential_parent->IsMap()) return;
- Map* parent = Map::cast(potential_parent);
-
- // Follow back pointer, check whether we are dealing with a map transition
- // from a live map to a dead path and in case clear transitions of parent.
- bool current_is_alive = Marking::IsBlackOrGrey(map_mark);
- bool parent_is_alive = Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
- if (!current_is_alive && parent_is_alive) {
- ClearMapTransitions(parent, map);
+ weak_cell->clear();
+ weak_cell_obj = weak_cell->next();
+ weak_cell->clear_next(the_hole_value);
}
}
-// Clear a possible back pointer in case the transition leads to a dead map.
-// Return true in case a back pointer has been cleared and false otherwise.
-bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
- if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(target))) return false;
- target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
- return true;
-}
-
-
-void MarkCompactCollector::ClearMapTransitions(Map* map, Map* dead_transition) {
- Object* transitions = map->raw_transitions();
- int num_transitions = TransitionArray::NumberOfTransitions(transitions);
-
+void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
+ Map* dead_transition) {
+ // A previously existing simple transition (stored in a WeakCell) is going
+ // to be cleared. Clear the useless cell pointer, and take ownership
+ // of the descriptor array.
+ map->set_raw_transitions(Smi::FromInt(0));
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
-
- // A previously existing simple transition (stored in a WeakCell) may have
- // been cleared. Clear the useless cell pointer, and take ownership
- // of the descriptor array.
- if (transitions->IsWeakCell() && WeakCell::cast(transitions)->cleared()) {
- map->set_raw_transitions(Smi::FromInt(0));
- }
- if (num_transitions == 0 &&
- descriptors == dead_transition->instance_descriptors() &&
+ if (descriptors == dead_transition->instance_descriptors() &&
number_of_own_descriptors > 0) {
- TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
+ TrimDescriptorArray(map, descriptors);
DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
map->set_owns_descriptors(true);
- return;
}
+}
- int transition_index = 0;
- bool descriptors_owner_died = false;
+void MarkCompactCollector::ClearFullMapTransitions() {
+ HeapObject* undefined = heap()->undefined_value();
+ Object* obj = heap()->encountered_transition_arrays();
+ while (obj != Smi::FromInt(0)) {
+ TransitionArray* array = TransitionArray::cast(obj);
+ int num_transitions = array->number_of_entries();
+ DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
+ if (num_transitions > 0) {
+ Map* map = array->GetTarget(0);
+ Map* parent = Map::cast(map->constructor_or_backpointer());
+ bool parent_is_alive =
+ Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
+ DescriptorArray* descriptors =
+ parent_is_alive ? parent->instance_descriptors() : nullptr;
+ bool descriptors_owner_died =
+ CompactTransitionArray(parent, array, descriptors);
+ if (descriptors_owner_died) {
+ TrimDescriptorArray(parent, descriptors);
+ }
+ }
+ obj = array->next_link();
+ array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+ }
+ heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
- // Compact all live descriptors to the left.
+bool MarkCompactCollector::CompactTransitionArray(
+ Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+ int num_transitions = transitions->number_of_entries();
+ bool descriptors_owner_died = false;
+ int transition_index = 0;
+ // Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
- Map* target = TransitionArray::GetTarget(transitions, i);
- if (ClearMapBackPointer(target)) {
- if (target->instance_descriptors() == descriptors) {
+ Map* target = transitions->GetTarget(i);
+ DCHECK_EQ(target->constructor_or_backpointer(), map);
+ if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
+ if (descriptors != nullptr &&
+ target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- DCHECK(TransitionArray::IsFullTransitionArray(transitions));
- TransitionArray* t = TransitionArray::cast(transitions);
- Name* key = t->GetKey(i);
- t->SetKey(transition_index, key);
- Object** key_slot = t->GetKeySlot(transition_index);
- RecordSlot(t, key_slot, key);
+ Name* key = transitions->GetKey(i);
+ transitions->SetKey(transition_index, key);
+ Object** key_slot = transitions->GetKeySlot(transition_index);
+ RecordSlot(transitions, key_slot, key);
// Target slots do not need to be recorded since maps are not compacted.
- t->SetTarget(transition_index, t->GetTarget(i));
+ transitions->SetTarget(transition_index, transitions->GetTarget(i));
}
transition_index++;
}
}
-
// If there are no transitions to be cleared, return.
- // TODO(verwaest) Should be an assert, otherwise back pointers are not
- // properly cleared.
- if (transition_index == num_transitions) return;
-
- if (descriptors_owner_died) {
- if (number_of_own_descriptors > 0) {
- TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- map->set_owns_descriptors(true);
- } else {
- DCHECK(descriptors == heap_->empty_descriptor_array());
- }
+ if (transition_index == num_transitions) {
+ DCHECK(!descriptors_owner_died);
+ return false;
}
-
// Note that we never eliminate a transition array, though we might right-trim
// such that number_of_transitions() == 0. If this assumption changes,
// TransitionArray::Insert() will need to deal with the case that a transition
// array disappeared during GC.
int trim = TransitionArray::Capacity(transitions) - transition_index;
if (trim > 0) {
- // Non-full-TransitionArray cases can never reach this point.
- DCHECK(TransitionArray::IsFullTransitionArray(transitions));
- TransitionArray* t = TransitionArray::cast(transitions);
heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- t, trim * TransitionArray::kTransitionSize);
- t->SetNumberOfTransitions(transition_index);
- // The map still has a full transition array.
- DCHECK(TransitionArray::IsFullTransitionArray(map->raw_transitions()));
+ transitions, trim * TransitionArray::kTransitionSize);
+ transitions->SetNumberOfTransitions(transition_index);
}
+ return descriptors_owner_died;
}
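
CompactTransitionArray follows the classic compact-then-trim idiom; the same shape in miniature (std::vector stands in for TransitionArray, and is_live is a hypothetical predicate):

    #include <vector>

    template <typename T, typename Pred>
    int CompactLeft(std::vector<T>& entries, Pred is_live) {
      int write = 0;
      for (int i = 0; i < static_cast<int>(entries.size()); i++) {
        if (is_live(entries[i])) entries[write++] = entries[i];  // keep live
      }
      entries.resize(write);  // analogous to the RightTrimFixedArray call above
      return write;
    }
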
void MarkCompactCollector::TrimDescriptorArray(Map* map,
- DescriptorArray* descriptors,
- int number_of_own_descriptors) {
+ DescriptorArray* descriptors) {
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) {
+ DCHECK(descriptors == heap_->empty_descriptor_array());
+ return;
+ }
+
int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
- if (to_trim == 0) return;
-
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- descriptors, to_trim * DescriptorArray::kDescriptorSize);
- descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+ if (to_trim > 0) {
+ heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ descriptors, to_trim * DescriptorArray::kDescriptorSize);
+ descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
- if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
- descriptors->Sort();
+ if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+ descriptors->Sort();
- if (FLAG_unbox_double_fields) {
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
- layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
- number_of_own_descriptors);
- SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ if (FLAG_unbox_double_fields) {
+ LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
+ number_of_own_descriptors);
+ SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ }
}
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ map->set_owns_descriptors(true);
}
@@ -2427,7 +2372,8 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
- live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+ live_enum =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -2446,8 +2392,6 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2474,7 +2418,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
void MarkCompactCollector::ClearWeakCollections() {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+ GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2497,8 +2441,6 @@ void MarkCompactCollector::ClearWeakCollections() {
void MarkCompactCollector::AbortWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2510,11 +2452,21 @@ void MarkCompactCollector::AbortWeakCollections() {
}
-void MarkCompactCollector::ProcessAndClearWeakCells() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCELL);
- Object* weak_cell_obj = heap()->encountered_weak_cells();
+void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
+ DependentCode** dependent_code_list) {
+ Heap* heap = this->heap();
+ GCTracer::Scope gc_scope(heap->tracer(),
+ GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
+ Object* weak_cell_obj = heap->encountered_weak_cells();
+ Object* the_hole_value = heap->the_hole_value();
+ DependentCode* dependent_code_head =
+ DependentCode::cast(heap->empty_fixed_array());
+ Object* non_live_map_head = Smi::FromInt(0);
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+ Object* next_weak_cell = weak_cell->next();
+ bool clear_value = true;
+ bool clear_next = true;
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
HeapObject* value = HeapObject::cast(weak_cell->value());
@@ -2535,34 +2487,71 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
RecordSlot(value, slot, *slot);
slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
RecordSlot(weak_cell, slot, *slot);
- } else {
- weak_cell->clear();
+ clear_value = false;
}
- } else {
- weak_cell->clear();
+ }
+ if (value->IsMap()) {
+ // The map is non-live.
+ Map* map = Map::cast(value);
+ // Add dependent code to the dependent_code_list.
+ DependentCode* candidate = map->dependent_code();
+ // We rely on the fact that the weak code group comes first.
+ STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
+ if (candidate->length() > 0 &&
+ candidate->group() == DependentCode::kWeakCodeGroup) {
+ candidate->set_next_link(dependent_code_head);
+ dependent_code_head = candidate;
+ }
+ // Add the weak cell to the non_live_map list.
+ weak_cell->set_next(non_live_map_head);
+ non_live_map_head = weak_cell;
+ clear_value = false;
+ clear_next = false;
}
} else {
+ // The value of the weak cell is alive.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
RecordSlot(weak_cell, slot, *slot);
+ clear_value = false;
}
- weak_cell_obj = weak_cell->next();
- weak_cell->clear_next(heap());
+ if (clear_value) {
+ weak_cell->clear();
+ }
+ if (clear_next) {
+ weak_cell->clear_next(the_hole_value);
+ }
+ weak_cell_obj = next_weak_cell;
}
- heap()->set_encountered_weak_cells(Smi::FromInt(0));
+ heap->set_encountered_weak_cells(Smi::FromInt(0));
+ *non_live_map_list = non_live_map_head;
+ *dependent_code_list = dependent_code_head;
}
void MarkCompactCollector::AbortWeakCells() {
+ Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
- weak_cell->clear_next(heap());
+ weak_cell->clear_next(the_hole_value);
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
+void MarkCompactCollector::AbortTransitionArrays() {
+ HeapObject* undefined = heap()->undefined_value();
+ Object* obj = heap()->encountered_transition_arrays();
+ while (obj != Smi::FromInt(0)) {
+ TransitionArray* array = TransitionArray::cast(obj);
+ obj = array->next_link();
+ array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+ }
+ heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
+
void MarkCompactCollector::RecordMigratedSlot(
Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
// When parallel compaction is in progress, store and slots buffer entries
@@ -2648,6 +2637,40 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
}
+class RecordMigratedSlotVisitor final : public ObjectVisitor {
+ public:
+ RecordMigratedSlotVisitor(MarkCompactCollector* collector,
+ SlotsBuffer** evacuation_slots_buffer)
+ : collector_(collector),
+ evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+ V8_INLINE void VisitPointer(Object** p) override {
+ collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
+ evacuation_slots_buffer_);
+ }
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ while (start < end) {
+ collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
+ evacuation_slots_buffer_);
+ ++start;
+ }
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ if (collector_->compacting_) {
+ Address code_entry = Memory::Address_at(code_entry_slot);
+ collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
+ evacuation_slots_buffer_);
+ }
+ }
+
+ private:
+ MarkCompactCollector* collector_;
+ SlotsBuffer** evacuation_slots_buffer_;
+};
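
Usage mirrors the OLD_SPACE branch of MigrateObject just below: copy the raw bytes first, then let a single body walk record every tagged slot and code entry, replacing the deleted per-type MigrateObjectTagged/Mixed/Raw variants:

    heap()->MoveBlock(dst->address(), src->address(), size);
    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
    dst->IterateBody(&visitor);  // records migrated slots via the visitor
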
+
+
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
@@ -2668,37 +2691,24 @@ void MarkCompactCollector::MigrateObject(
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(dest != LO_SPACE);
if (dest == OLD_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
- switch (src->ContentType()) {
- case HeapObjectContents::kTaggedValues:
- MigrateObjectTagged(dst, src, size, evacuation_slots_buffer);
- break;
-
- case HeapObjectContents::kMixedValues:
- MigrateObjectMixed(dst, src, size, evacuation_slots_buffer);
- break;
- case HeapObjectContents::kRawValues:
- MigrateObjectRaw(dst, src, size);
- break;
- }
-
- if (compacting_ && dst->IsJSFunction()) {
- Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
- Address code_entry = Memory::Address_at(code_entry_slot);
- RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
- evacuation_slots_buffer);
- }
+ heap()->MoveBlock(dst->address(), src->address(), size);
+ RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+ dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
+ DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
DCHECK(evacuation_slots_buffer != nullptr);
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
+ DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer == nullptr);
DCHECK(dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
@@ -2708,101 +2718,16 @@ void MarkCompactCollector::MigrateObject(
}
-void MarkCompactCollector::MigrateObjectTagged(
- HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer) {
- Address src_slot = src->address();
- Address dst_slot = dst->address();
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
- Memory::Object_at(dst_slot) = value;
- RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
- }
-}
-
-
-void MarkCompactCollector::MigrateObjectMixed(
- HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer) {
- if (src->IsFixedTypedArrayBase()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
- Address base_pointer_slot =
- dst->address() + FixedTypedArrayBase::kBasePointerOffset;
- RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot,
- evacuation_slots_buffer);
- } else if (src->IsBytecodeArray()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
- Address constant_pool_slot =
- dst->address() + BytecodeArray::kConstantPoolOffset;
- RecordMigratedSlot(Memory::Object_at(constant_pool_slot),
- constant_pool_slot, evacuation_slots_buffer);
- } else if (src->IsJSArrayBuffer()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
-
- // Visit inherited JSObject properties and byte length of ArrayBuffer
- Address regular_slot =
- dst->address() + JSArrayBuffer::BodyDescriptor::kStartOffset;
- Address regular_slots_end =
- dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize;
- while (regular_slot < regular_slots_end) {
- RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot,
- evacuation_slots_buffer);
- regular_slot += kPointerSize;
- }
-
- // Skip backing store and visit just internal fields
- Address internal_field_slot = dst->address() + JSArrayBuffer::kSize;
- Address internal_fields_end =
- dst->address() + JSArrayBuffer::kSizeWithInternalFields;
- while (internal_field_slot < internal_fields_end) {
- RecordMigratedSlot(Memory::Object_at(internal_field_slot),
- internal_field_slot, evacuation_slots_buffer);
- internal_field_slot += kPointerSize;
- }
- } else if (FLAG_unbox_double_fields) {
- Address dst_addr = dst->address();
- Address src_addr = src->address();
- Address src_slot = src_addr;
- Address dst_slot = dst_addr;
-
- LayoutDescriptorHelper helper(src->map());
- DCHECK(!helper.all_fields_tagged());
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
- if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
- RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
- }
-
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
-void MarkCompactCollector::MigrateObjectRaw(HeapObject* dst, HeapObject* src,
- int size) {
- heap()->MoveBlock(dst->address(), src->address(), size);
-}
-
-
static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
SlotsBuffer::SlotType slot_type, Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CELL_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CELL, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
@@ -2812,16 +2737,17 @@ static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
}
case SlotsBuffer::RELOCATED_CODE_OBJECT: {
HeapObject* obj = HeapObject::FromAddress(addr);
- Code::cast(obj)->CodeIterateBody(v);
+ Code::BodyDescriptor::IterateBody(obj, v);
break;
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
+ NULL);
if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
@@ -2842,13 +2768,13 @@ class PointersUpdatingVisitor : public ObjectVisitor {
public:
explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p) { UpdatePointer(p); }
+ void VisitPointer(Object** p) override { UpdatePointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
- void VisitCell(RelocInfo* rinfo) {
+ void VisitCell(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Object* cell = rinfo->target_cell();
Object* old_cell = cell;
@@ -2858,7 +2784,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* target = rinfo->target_object();
Object* old_target = target;
@@ -2870,7 +2796,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitCodeTarget(RelocInfo* rinfo) {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* old_target = target;
@@ -2880,7 +2806,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitCodeAgeSequence(RelocInfo* rinfo) {
+ void VisitCodeAgeSequence(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Object* stub = rinfo->code_age_stub();
DCHECK(stub != NULL);
@@ -2890,7 +2816,7 @@ class PointersUpdatingVisitor : public ObjectVisitor {
}
}
- void VisitDebugTarget(RelocInfo* rinfo) {
+ void VisitDebugTarget(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Object* target =
@@ -2907,20 +2833,12 @@ class PointersUpdatingVisitor : public ObjectVisitor {
HeapObject* heap_obj = HeapObject::cast(obj);
-// TODO(ishell): remove, once crbug/454297 is caught.
-#if V8_TARGET_ARCH_64_BIT
-#ifndef V8_OS_AIX // no point checking on AIX as full 64 range is supported
- const uintptr_t kBoundary = V8_UINT64_C(1) << 48;
- STATIC_ASSERT(kBoundary > 0);
- if (reinterpret_cast<uintptr_t>(heap_obj->address()) >= kBoundary) {
- CheckLayoutDescriptorAndDie(heap, slot);
- }
-#endif
-#endif
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(heap->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromAddress(heap_obj->address())
+ ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
HeapObject* target = map_word.ToForwardingAddress();
base::NoBarrier_CompareAndSwap(
reinterpret_cast<base::AtomicWord*>(slot),
@@ -2934,100 +2852,10 @@ class PointersUpdatingVisitor : public ObjectVisitor {
private:
inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
- static void CheckLayoutDescriptorAndDie(Heap* heap, Object** slot);
-
Heap* heap_;
};
-#if V8_TARGET_ARCH_64_BIT
-// TODO(ishell): remove, once crbug/454297 is caught.
-void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
- Object** slot) {
- const int kDataBufferSize = 128;
- uintptr_t data[kDataBufferSize] = {0};
- int index = 0;
- data[index++] = 0x10aaaaaaaaUL; // begin marker
-
- data[index++] = reinterpret_cast<uintptr_t>(slot);
- data[index++] = 0x15aaaaaaaaUL;
-
- Address slot_address = reinterpret_cast<Address>(slot);
-
- uintptr_t space_owner_id = 0xb001;
- if (heap->new_space()->ToSpaceContains(slot_address)) {
- space_owner_id = 1;
- } else if (heap->new_space()->FromSpaceContains(slot_address)) {
- space_owner_id = 2;
- } else if (heap->old_space()->ContainsSafe(slot_address)) {
- space_owner_id = 3;
- } else if (heap->code_space()->ContainsSafe(slot_address)) {
- space_owner_id = 4;
- } else if (heap->map_space()->ContainsSafe(slot_address)) {
- space_owner_id = 5;
- } else {
- // Lo space or other.
- space_owner_id = 6;
- }
- data[index++] = space_owner_id;
- data[index++] = 0x20aaaaaaaaUL;
-
- // Find the map word lying shortly before the slot address (usually the map
- // word is at -3 words from the slot, but just in case we look up further).
- Object** map_slot = slot;
- bool found = false;
- const int kMaxDistanceToMap = 64;
- for (int i = 0; i < kMaxDistanceToMap; i++, map_slot--) {
- Address map_address = reinterpret_cast<Address>(*map_slot);
- if (heap->map_space()->ContainsSafe(map_address)) {
- found = true;
- break;
- }
- }
- data[index++] = found;
- data[index++] = 0x30aaaaaaaaUL;
- data[index++] = reinterpret_cast<uintptr_t>(map_slot);
- data[index++] = 0x35aaaaaaaaUL;
-
- if (found) {
- Address obj_address = reinterpret_cast<Address>(map_slot);
- Address end_of_page =
- reinterpret_cast<Address>(Page::FromAddress(obj_address)) +
- Page::kPageSize;
- Address end_address =
- Min(obj_address + kPointerSize * kMaxDistanceToMap, end_of_page);
- int size = static_cast<int>(end_address - obj_address);
- data[index++] = size / kPointerSize;
- data[index++] = 0x40aaaaaaaaUL;
- memcpy(&data[index], reinterpret_cast<void*>(map_slot), size);
- index += size / kPointerSize;
- data[index++] = 0x50aaaaaaaaUL;
-
- HeapObject* object = HeapObject::FromAddress(obj_address);
- data[index++] = reinterpret_cast<uintptr_t>(object);
- data[index++] = 0x60aaaaaaaaUL;
-
- Map* map = object->map();
- data[index++] = reinterpret_cast<uintptr_t>(map);
- data[index++] = 0x70aaaaaaaaUL;
-
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
- data[index++] = reinterpret_cast<uintptr_t>(layout_descriptor);
- data[index++] = 0x80aaaaaaaaUL;
-
- memcpy(&data[index], reinterpret_cast<void*>(map->address()), Map::kSize);
- index += Map::kSize / kPointerSize;
- data[index++] = 0x90aaaaaaaaUL;
- }
-
- data[index++] = 0xeeeeeeeeeeUL;
- DCHECK(index < kDataBufferSize);
- base::OS::PrintError("Data: %p\n", static_cast<void*>(data));
- base::OS::Abort();
-}
-#endif
-
-
void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
PointersUpdatingVisitor v(heap_);
size_t buffer_size = buffer->Size();
@@ -3080,30 +2908,6 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
}
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
- int object_size) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
-
- OldSpace* old_space = heap()->old_space();
-
- HeapObject* target = nullptr;
- AllocationAlignment alignment = object->RequiredAlignment();
- AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
- if (allocation.To(&target)) {
- MigrateObject(target, object, object_size, old_space->identity(),
- &migration_slots_buffer_);
- // If we end up needing more special cases, we should factor this out.
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
- }
- heap()->IncrementPromotedObjectsSize(object_size);
- return true;
- }
-
- return false;
-}
-
-
bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
HeapObject** out_object) {
Space* owner = p->owner();
@@ -3121,68 +2925,77 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
}
uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
- unsigned int start_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType index_in_cell = 1U
- << (mark_bit_index & Bitmap::kBitIndexMask);
+ unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
MarkBit::CellType* cells = p->markbits()->cells();
- Address cell_base = p->area_start();
- unsigned int cell_base_start_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(cell_base)));
+ Address base_address = p->area_start();
+ unsigned int base_address_cell_index = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
// Check if the slot points to the start of an object. This can happen e.g.
// when we left trim a fixed array. Such slots are invalid and we can remove
// them.
- if ((cells[start_index] & index_in_cell) != 0) {
- return false;
+ if (index_mask > 1) {
+ if ((cells[cell_index] & index_mask) != 0 &&
+ (cells[cell_index] & (index_mask >> 1)) == 0) {
+ return false;
+ }
+ } else {
+ // Left trimming moves the mark bits so we cannot be in the very first cell.
+ DCHECK(cell_index != base_address_cell_index);
+ if ((cells[cell_index] & index_mask) != 0 &&
+ (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
+ return false;
+ }
}
// Check if the object is in the current cell.
MarkBit::CellType slot_mask;
- if ((cells[start_index] == 0) ||
- (base::bits::CountTrailingZeros32(cells[start_index]) >
- base::bits::CountTrailingZeros32(cells[start_index] | index_in_cell))) {
+ if ((cells[cell_index] == 0) ||
+ (base::bits::CountTrailingZeros32(cells[cell_index]) >
+ base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
// If we are already in the first cell, there is no live object.
- if (start_index == cell_base_start_index) return false;
+ if (cell_index == base_address_cell_index) return false;
// If not, find a cell in a preceding cell slot that has a mark bit set.
do {
- start_index--;
- } while (start_index > cell_base_start_index && cells[start_index] == 0);
+ cell_index--;
+ } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
// The slot must be in a dead object if there are no preceding cells that
// have mark bits set.
- if (cells[start_index] == 0) {
+ if (cells[cell_index] == 0) {
return false;
}
// The object is in a preceding cell. Set the mask to find any object.
- slot_mask = 0xffffffff;
+ slot_mask = ~0u;
} else {
- // The object start is before the slot index. Hence, in this case the
- // slot index cannot be at the beginning of the cell.
- CHECK(index_in_cell > 1);
// We are interested in object mark bits right before the slot.
- slot_mask = index_in_cell - 1;
+ slot_mask = index_mask + (index_mask - 1);
}
- MarkBit::CellType current_cell = cells[start_index];
+ MarkBit::CellType current_cell = cells[cell_index];
CHECK(current_cell != 0);
// Find the last live object in the cell.
unsigned int leading_zeros =
base::bits::CountLeadingZeros32(current_cell & slot_mask);
- CHECK(leading_zeros != 32);
- unsigned int offset = Bitmap::kBitIndexMask - leading_zeros;
+ CHECK(leading_zeros != Bitmap::kBitsPerCell);
+ int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
- cell_base += (start_index - cell_base_start_index) * 32 * kPointerSize;
- Address address = cell_base + offset * kPointerSize;
+ base_address += (cell_index - base_address_cell_index) *
+ Bitmap::kBitsPerCell * kPointerSize;
+ Address address = base_address + offset * kPointerSize;
HeapObject* object = HeapObject::FromAddress(address);
CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
CHECK(object->address() < reinterpret_cast<Address>(slot));
- if (object->address() <= slot &&
+ if ((object->address() + kPointerSize) <= slot &&
(object->address() + object->Size()) > slot) {
// If the slot is within the last found object in the cell, the slot is
// in a live object.
+ // Slots pointing to the first word of an object are invalid and removed.
+ // This can happen when we move the object header while left trimming.
*out_object = object;
return true;
}
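
The bitmap arithmetic above packs one mark bit per word and groups the bits
into 32-bit cells, so a slot maps to a (cell index, bit-in-cell) pair and an
object address can be reconstructed from them. A self-contained worked
example with assumed constants (32 bits per cell, 8-byte words) and the
simplification that the area start corresponds to markbit index 0:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kBitsPerCell = 32;
constexpr uint32_t kBitsPerCellLog2 = 5;
constexpr uint32_t kBitIndexMask = kBitsPerCell - 1;
constexpr uintptr_t kPointerSize = 8;

int main() {
  // One mark bit per word: markbit index = (slot - area_start) / pointer size.
  uint32_t mark_bit_index = 77;
  uint32_t cell_index = mark_bit_index >> kBitsPerCellLog2;      // 2
  uint32_t index_mask = 1u << (mark_bit_index & kBitIndexMask);  // bit 13
  // Going back from (cell index, offset) to an address, as done above:
  uintptr_t area_start = 0x10000;
  uint32_t offset = mark_bit_index & kBitIndexMask;
  uintptr_t address =
      area_start + (cell_index * kBitsPerCell + offset) * kPointerSize;
  std::printf("cell=%u mask=0x%x address=0x%lx\n", cell_index, index_mask,
              static_cast<unsigned long>(address));
}
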
@@ -3193,32 +3006,26 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
// Large objects are handled below by a direct lookup in the large object
// space.
Space* owner = p->owner();
- if (owner == heap_->lo_space() || owner == NULL) return true;
-
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
-
- MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
-
- int offset = 0;
- while (current_cell != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
- current_cell >>= trailing_zeros;
- offset += trailing_zeros;
- Address address = cell_base + offset * kPointerSize;
-
- HeapObject* object = HeapObject::FromAddress(address);
- int size = object->Size();
+ if (owner == heap_->lo_space() || owner == NULL) {
+ Object* large_object = heap_->lo_space()->FindObject(slot);
+ // This object has to exist, otherwise we would not have recorded a slot
+ // for it.
+ CHECK(large_object->IsHeapObject());
+ HeapObject* large_heap_object = HeapObject::cast(large_object);
+ return IsMarked(large_heap_object);
+ }
- if (object->address() > slot) return false;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return true;
- }
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ int size = object->Size();
- offset++;
- current_cell >>= 1;
+ if (object->address() > slot) return false;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return true;
}
}
return false;
@@ -3235,45 +3042,8 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
}
DCHECK(object != NULL);
-
- switch (object->ContentType()) {
- case HeapObjectContents::kTaggedValues:
- return true;
-
- case HeapObjectContents::kRawValues: {
- InstanceType type = object->map()->instance_type();
- // Slots in maps and code can't be invalid because they are never
- // shrunk.
- if (type == MAP_TYPE || type == CODE_TYPE) return true;
-
- // Consider slots in objects that contain ONLY raw data as invalid.
- return false;
- }
-
- case HeapObjectContents::kMixedValues: {
- if (object->IsFixedTypedArrayBase()) {
- return static_cast<int>(slot - object->address()) ==
- FixedTypedArrayBase::kBasePointerOffset;
- } else if (object->IsBytecodeArray()) {
- return static_cast<int>(slot - object->address()) ==
- BytecodeArray::kConstantPoolOffset;
- } else if (object->IsJSArrayBuffer()) {
- int off = static_cast<int>(slot - object->address());
- return (off >= JSArrayBuffer::BodyDescriptor::kStartOffset &&
- off <= JSArrayBuffer::kByteLengthOffset) ||
- (off >= JSArrayBuffer::kSize &&
- off < JSArrayBuffer::kSizeWithInternalFields);
- } else if (FLAG_unbox_double_fields) {
- // Filter out slots that happen to point to unboxed double fields.
- LayoutDescriptorHelper helper(object->map());
- DCHECK(!helper.all_fields_tagged());
- return helper.IsTagged(static_cast<int>(slot - object->address()));
- }
- break;
- }
- }
- UNREACHABLE();
- return true;
+ int offset = static_cast<int>(slot - object->address());
+ return object->IsValidSlot(offset);
}
@@ -3289,7 +3059,7 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
}
-void MarkCompactCollector::EvacuateNewSpace() {
+void MarkCompactCollector::EvacuateNewSpacePrologue() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
@@ -3306,20 +3076,38 @@ void MarkCompactCollector::EvacuateNewSpace() {
new_space->Flip();
new_space->ResetAllocationInfo();
- int survivors_size = 0;
+ newspace_evacuation_candidates_.Clear();
+ NewSpacePageIterator it(from_bottom, from_top);
+ while (it.has_next()) {
+ newspace_evacuation_candidates_.Add(it.next());
+ }
+}
+
+HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
+ HashMap* local_pretenuring_feedback = new HashMap(
+ HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
+ EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
+ local_pretenuring_feedback);
// First pass: traverse all objects in inactive semispace, remove marks,
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
- NewSpacePageIterator it(from_bottom, from_top);
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+ for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
+ NewSpacePage* p =
+ reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
+ bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
+ USE(ok);
+ DCHECK(ok);
}
-
- heap_->IncrementYoungSurvivorsCounter(survivors_size);
- new_space->set_age_mark(new_space->top());
+ heap_->IncrementPromotedObjectsSize(
+ static_cast<int>(new_space_visitor.promoted_size()));
+ heap_->IncrementSemiSpaceCopiedObjectSize(
+ static_cast<int>(new_space_visitor.semispace_copied_size()));
+ heap_->IncrementYoungSurvivorsCounter(
+ static_cast<int>(new_space_visitor.promoted_size()) +
+ static_cast<int>(new_space_visitor.semispace_copied_size()));
+ return local_pretenuring_feedback;
}
@@ -3330,69 +3118,51 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
}
-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
- Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
- AlwaysAllocateScope always_allocate(isolate());
- DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
- int offsets[16];
-
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
-
- if (*cell == 0) continue;
-
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- for (int i = 0; i < live_objects; i++) {
- Address object_addr = cell_base + offsets[i] * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(object_addr);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- int size = object->Size();
- AllocationAlignment alignment = object->RequiredAlignment();
- HeapObject* target_object = nullptr;
- AllocationResult allocation = target_space->AllocateRaw(size, alignment);
- if (!allocation.To(&target_object)) {
- // We need to abort compaction for this page. Make sure that we reset
- // the mark bits for objects that have already been migrated.
- if (i > 0) {
- p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
- p->AddressToMarkbitIndex(object_addr));
- }
- return false;
- }
-
- MigrateObject(target_object, object, size, target_space->identity(),
- evacuation_slots_buffer);
- DCHECK(object->map_word().IsForwardingAddress());
- }
-
- // Clear marking bits for current cell.
- *cell = 0;
- }
- p->ResetLiveBytes();
- return true;
-}
-
-
int MarkCompactCollector::NumberOfParallelCompactionTasks() {
if (!FLAG_parallel_compaction) return 1;
- // We cap the number of parallel compaction tasks by
+ // Compute the number of needed tasks based on a target compaction time, the
+ // profiled compaction speed and marked live memory.
+ //
+ // The number of parallel compaction tasks is limited by:
+ // - #evacuation pages
// - (#cores - 1)
- // - a value depending on the list of evacuation candidates
// - a hard limit
- const int kPagesPerCompactionTask = 4;
+ const double kTargetCompactionTimeInMs = 1;
const int kMaxCompactionTasks = 8;
- return Min(kMaxCompactionTasks,
- Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
- Max(1, base::SysInfo::NumberOfProcessors() - 1)));
+
+ intptr_t compaction_speed =
+ heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ if (compaction_speed == 0) return 1;
+
+ intptr_t live_bytes = 0;
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ }
+
+ const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
+ const int tasks =
+ 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
+ kTargetCompactionTimeInMs);
+ const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
+ const int tasks_capped_cores = Min(cores, tasks_capped_pages);
+ const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
+ return tasks_capped_hard;
}
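
Plugging sample numbers into the heuristic above makes the capping chain
concrete. This standalone sketch assumes 8 MB of live bytes on the
candidates, a profiled speed of 1 MB/ms, three candidate pages, and four
cores; the uncapped estimate of nine tasks is cut down to three:

#include <algorithm>
#include <cstdio>

int main() {
  const double kTargetCompactionTimeInMs = 1;
  const int kMaxCompactionTasks = 8;
  const long live_bytes = 8 * 1024 * 1024;
  const long compaction_speed = 1024 * 1024;  // bytes per millisecond
  const int pages = 3;
  const int cores = 4 - 1;  // leave one core for the mutator
  int tasks =
      1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
                           kTargetCompactionTimeInMs);  // 9
  tasks = std::min(pages, tasks);                // 3: at most one task per page
  tasks = std::min(cores, tasks);                // 3
  tasks = std::min(kMaxCompactionTasks, tasks);  // 3: hard cap unchanged
  std::printf("tasks=%d\n", tasks);              // prints tasks=3
}
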
void MarkCompactCollector::EvacuatePagesInParallel() {
- if (evacuation_candidates_.length() == 0) return;
+ const int num_pages = evacuation_candidates_.length();
+ if (num_pages == 0) return;
+ // Used for trace summary.
+ intptr_t live_bytes = 0;
+ intptr_t compaction_speed = 0;
+ if (FLAG_trace_fragmentation) {
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ }
+ compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ }
const int num_tasks = NumberOfParallelCompactionTasks();
// Set up compaction spaces.
@@ -3402,40 +3172,34 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
}
- compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
- heap()->old_space());
- compaction_spaces_for_tasks[0]
- ->Get(CODE_SPACE)
- ->MoveOverFreeMemory(heap()->code_space());
+ heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+ num_tasks);
+ heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+ num_tasks);
- compaction_in_progress_ = true;
+ uint32_t* task_ids = new uint32_t[num_tasks - 1];
// Kick off parallel tasks.
- for (int i = 1; i < num_tasks; i++) {
- concurrent_compaction_tasks_active_++;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
- v8::Platform::kShortRunningTask);
- }
-
- // Contribute in main thread. Counter and signal are in principle not needed.
- concurrent_compaction_tasks_active_++;
- EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
- pending_compaction_tasks_semaphore_.Signal();
-
- WaitUntilCompactionCompleted();
+ StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
+ // Wait for unfinished and not-yet-started tasks.
+ WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
+ delete[] task_ids;
+ double compaction_duration = 0.0;
+ intptr_t compacted_memory = 0;
// Merge back memory (compacted and unused) from compaction spaces.
for (int i = 0; i < num_tasks; i++) {
heap()->old_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+ compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
+ compaction_duration += compaction_spaces_for_tasks[i]->duration();
delete compaction_spaces_for_tasks[i];
}
delete[] compaction_spaces_for_tasks;
+ heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
// Finalize sequentially.
- const int num_pages = evacuation_candidates_.length();
int abandoned_pages = 0;
for (int i = 0; i < num_pages; i++) {
Page* p = evacuation_candidates_[i];
@@ -3452,8 +3216,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// happens upon moving (which we potentially didn't do).
// - Leave the page in the list of pages of a space since we could not
// fully evacuate it.
+ // - Mark them for rescanning for store buffer entries as we otherwise
+ // might have stale store buffer entries that become "valid" again
+ // after reusing the memory. Note that all existing store buffer
+ // entries of such pages are filtered before rescanning.
DCHECK(p->IsEvacuationCandidate());
- p->SetFlag(Page::RESCAN_ON_EVACUATION);
+ p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ p->set_scan_on_scavenge(true);
abandoned_pages++;
break;
case MemoryChunk::kCompactingFinalize:
@@ -3471,25 +3240,45 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
- if (num_pages > 0) {
- if (FLAG_trace_fragmentation) {
- if (abandoned_pages != 0) {
- PrintF(
- " Abandoned (at least partially) %d out of %d page compactions due"
- " to lack of memory\n",
- abandoned_pages, num_pages);
- } else {
- PrintF(" Compacted %d pages\n", num_pages);
- }
- }
+ if (FLAG_trace_fragmentation) {
+ PrintIsolate(isolate(),
+ "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
+ "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+ "d compaction_speed=%" V8_PTR_PREFIX "d\n",
+ isolate()->time_millis_since_init(), FLAG_parallel_compaction,
+ num_pages, abandoned_pages, num_tasks,
+ base::SysInfo::NumberOfProcessors(), live_bytes,
+ compaction_speed);
}
}
-void MarkCompactCollector::WaitUntilCompactionCompleted() {
- while (concurrent_compaction_tasks_active_ > 0) {
- pending_compaction_tasks_semaphore_.Wait();
- concurrent_compaction_tasks_active_--;
+void MarkCompactCollector::StartParallelCompaction(
+ CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
+ int len) {
+ compaction_in_progress_ = true;
+ for (int i = 1; i < len; i++) {
+ CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
+ task_ids[i - 1] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+
+ // Contribute in main thread.
+ EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
+}
+
+
+void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+ int len) {
+ // Try to cancel compaction tasks that have not been run yet, as they might
+ // be stuck in a worker queue. Tasks that cannot be canceled have either
+ // already completed or are still running, so we need to wait for their
+ // semaphore signal.
+ for (int i = 0; i < len; i++) {
+ if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+ pending_compaction_tasks_semaphore_.Wait();
+ }
}
compaction_in_progress_ = false;
}
@@ -3498,6 +3287,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted() {
void MarkCompactCollector::EvacuatePages(
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer) {
+ EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+ evacuation_slots_buffer);
for (int i = 0; i < evacuation_candidates_.length(); i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
@@ -3509,11 +3300,15 @@ void MarkCompactCollector::EvacuatePages(
if (p->IsEvacuationCandidate()) {
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingInProgress);
- if (EvacuateLiveObjectsFromPage(
- p, compaction_spaces->Get(p->owner()->identity()),
- evacuation_slots_buffer)) {
+ double start = heap()->MonotonicallyIncreasingTimeInMs();
+ intptr_t live_bytes = p->LiveBytes();
+ AlwaysAllocateScope always_allocate(isolate());
+ if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
+ p->ResetLiveBytes();
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
+ compaction_spaces->ReportCompactionProgress(
+ heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
} else {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingAborted);
@@ -3559,7 +3354,6 @@ static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
DCHECK(free_list == NULL);
return space->Free(start, size);
} else {
- // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
return size - free_list->Free(start, size);
}
}
@@ -3584,7 +3378,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- int offsets[16];
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
@@ -3598,42 +3391,39 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
intptr_t max_freed_bytes = 0;
int curr_region = -1;
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- int live_index = 0;
- for (; live_objects != 0; live_objects--) {
- Address free_end = cell_base + offsets[live_index++] * kPointerSize;
- if (free_end != free_start) {
- int size = static_cast<int>(free_end - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
- }
- freed_bytes = Free<parallelism>(space, free_list, free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- }
- HeapObject* live_object = HeapObject::FromAddress(free_end);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
- Map* map = live_object->synchronized_map();
- int size = live_object->SizeFromMap(map);
- if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
- live_object->IterateBody(map->instance_type(), size, v);
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ int size = static_cast<int>(free_end - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
}
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
- int new_region_start = SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region || new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
+ freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ }
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+ object->IterateBody(map->instance_type(), size, v);
+ }
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+ int new_region_start = SkipList::RegionNumber(free_end);
+ int new_region_end =
+ SkipList::RegionNumber(free_end + size - kPointerSize);
+ if (new_region_start != curr_region || new_region_end != curr_region) {
+ skip_list->AddObject(free_end, size);
+ curr_region = new_region_end;
}
- free_start = free_end + size;
}
- // Clear marking bits for current cell.
- *cell = 0;
+ free_start = free_end + size;
}
+
+ // Clear the mark bits of that page and reset the live bytes count.
+ Bitmap::Clear(p);
+
if (free_start != p->area_end()) {
int size = static_cast<int>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
@@ -3642,7 +3432,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
- p->ResetLiveBytes();
if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
// When concurrent sweeping is active, the page will be marked after
@@ -3696,26 +3485,162 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
}
+#ifdef VERIFY_HEAP
+static void VerifyAllBlackObjects(MemoryChunk* page) {
+ LiveObjectIterator<kAllLiveObjects> it(page);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ }
+}
+#endif // VERIFY_HEAP
+
+
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+ HeapObjectVisitor* visitor,
+ IterationMode mode) {
+#ifdef VERIFY_HEAP
+ VerifyAllBlackObjects(page);
+#endif // VERIFY_HEAP
+
+ LiveObjectIterator<kBlackObjects> it(page);
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ if (!visitor->Visit(object)) {
+ if (mode == kClearMarkbits) {
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(page->area_start()),
+ page->AddressToMarkbitIndex(object->address()));
+ RecomputeLiveBytes(page);
+ }
+ return false;
+ }
+ }
+ if (mode == kClearMarkbits) {
+ Bitmap::Clear(page);
+ }
+ return true;
+}
+
+
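
The part of VisitLiveObjects worth spelling out is the failure contract: when
the visitor rejects an object, only the mark bits of objects visited so far
are cleared and the live-byte count is recomputed, which is what keeps an
aborted compaction page consistent. A simplified sketch of that roll-back
(toy Obj type, not V8's bitmap):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Obj { bool marked = true; };

bool VisitLive(std::vector<Obj>& page, bool (*visit)(Obj&)) {
  for (std::size_t i = 0; i < page.size(); i++) {
    if (!visit(page[i])) {
      // Roll back marks of already-visited objects only (ClearRange
      // analogue); the caller can then recompute live bytes.
      for (std::size_t j = 0; j < i; j++) page[j].marked = false;
      return false;
    }
  }
  for (Obj& o : page) o.marked = false;  // Bitmap::Clear(page) analogue
  return true;
}

int main() {
  std::vector<Obj> page(4);
  bool ok = VisitLive(page, [](Obj&) { return true; });
  std::printf("full visit ok=%d\n", ok ? 1 : 0);
}
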
+void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
+ LiveObjectIterator<kBlackObjects> it(page);
+ int new_live_size = 0;
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ new_live_size += object->Size();
+ }
+ page->SetLiveBytes(new_live_size);
+}
+
+
+void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
+ ObjectVisitor* visitor) {
+#ifdef VERIFY_HEAP
+ VerifyAllBlackObjects(page);
+#endif // VERIFY_HEAP
+
+ LiveObjectIterator<kBlackObjects> it(page);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, visitor);
+ }
+}
+
+
+void MarkCompactCollector::SweepAbortedPages() {
+ // Second pass on aborted pages.
+ for (int i = 0; i < evacuation_candidates_.length(); i++) {
+ Page* p = evacuation_candidates_[i];
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ switch (space->identity()) {
+ case OLD_SPACE:
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ break;
+ case CODE_SPACE:
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
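
SweepAbortedPages picks a Sweep<...> instantiation per space; the template
arguments are compile-time policies, so each instantiation compiles to a
specialized sweeper with the unused branches folded away. A minimal
standalone illustration of that dispatch style (hypothetical ReclaimRange,
not V8's Sweep):

#include <cstddef>
#include <cstdio>
#include <cstring>

enum FreeSpaceMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

template <FreeSpaceMode mode>
void ReclaimRange(unsigned char* start, std::size_t size) {
  if (mode == ZAP_FREE_SPACE) {
    std::memset(start, 0xcc, size);  // poison freed memory, as in zap mode
  }
  // ... hand [start, start + size) back to a free list here ...
}

int main() {
  unsigned char block[32] = {0};
  ReclaimRange<ZAP_FREE_SPACE>(block, sizeof(block));
  std::printf("first byte after zap: 0x%x\n", block[0]);  // prints 0xcc
}
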
+
+
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
+ HashMap* local_pretenuring_feedback = nullptr;
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_NEWSPACE);
+ GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
EvacuationScope evacuation_scope(this);
- EvacuateNewSpace();
+ EvacuateNewSpacePrologue();
+ local_pretenuring_feedback = EvacuateNewSpaceInParallel();
+ heap_->new_space()->set_age_mark(heap_->new_space()->top());
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_PAGES);
+ GCTracer::Scope::MC_EVACUATE_CANDIDATES);
EvacuationScope evacuation_scope(this);
EvacuatePagesInParallel();
}
{
+ heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
+ delete local_pretenuring_feedback;
+ }
+
+ UpdatePointersAfterEvacuation();
+
+ {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+ GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+ // After updating all pointers, we can finally sweep the aborted pages,
+ // effectively overwriting any forwarding pointers.
+ SweepAbortedPages();
+
+ // EvacuateNewSpaceAndCandidates iterates over new space objects; for
+ // ArrayBuffers it either re-registers them as live or promotes them. This
+ // is needed to properly free them.
+ heap()->array_buffer_tracker()->FreeDead(false);
+
+ // Deallocate evacuated candidate pages.
+ ReleaseEvacuationCandidates();
+ }
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap && !sweeping_in_progress_) {
+ VerifyEvacuation(heap());
+ }
+#endif
+}
+
+
+void MarkCompactCollector::UpdatePointersAfterEvacuation() {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+ {
+ GCTracer::Scope gc_scope(
+ heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
UpdateSlotsRecordedIn(migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
@@ -3739,8 +3664,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
PointersUpdatingVisitor updating_visitor(heap());
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+ GCTracer::Scope gc_scope(
+ heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
// Update pointers in to space.
SemiSpaceIterator to_it(heap()->new_space());
for (HeapObject* object = to_it.Next(); object != NULL;
@@ -3749,18 +3674,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
object->IterateBody(map->instance_type(), object->SizeFromMap(map),
&updating_visitor);
}
- }
-
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- }
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
@@ -3770,7 +3686,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
GCTracer::Scope gc_scope(
heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
@@ -3789,13 +3705,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
- }
- if (p->IsEvacuationCandidate() &&
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- // Case where we've aborted compacting a page. Clear the flag here to
- // avoid releasing the page later on.
- p->ClearEvacuationCandidate();
+ // First pass on aborted pages, fixing up all live objects.
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearEvacuationCandidate();
+ VisitLiveObjectsBody(p, &updating_visitor);
+ }
}
if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
@@ -3831,23 +3746,18 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
-
- heap_->string_table()->Iterate(&updating_visitor);
-
- // Update pointers from external string table.
- heap_->UpdateReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
-
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
+ heap_->string_table()->Iterate(&updating_visitor);
- heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+ // Update pointers from external string table.
+ heap_->UpdateReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
- // The hashing of weak_object_to_code_table is no longer valid.
- heap()->weak_object_to_code_table()->Rehash(
- heap()->isolate()->factory()->undefined_value());
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
+ }
}
@@ -3872,6 +3782,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
p->ResetLiveBytes();
+ CHECK(p->WasSwept());
space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
@@ -3881,400 +3792,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
-
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte. The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
- 0, _, _,
- _, _, // 0
- 1, 0, _,
- _, _, // 1
- 1, 1, _,
- _, _, // 2
- X, _, _,
- _, _, // 3
- 1, 2, _,
- _, _, // 4
- 2, 0, 2,
- _, _, // 5
- X, _, _,
- _, _, // 6
- X, _, _,
- _, _, // 7
- 1, 3, _,
- _, _, // 8
- 2, 0, 3,
- _, _, // 9
- 2, 1, 3,
- _, _, // 10
- X, _, _,
- _, _, // 11
- X, _, _,
- _, _, // 12
- X, _, _,
- _, _, // 13
- X, _, _,
- _, _, // 14
- X, _, _,
- _, _, // 15
- 1, 4, _,
- _, _, // 16
- 2, 0, 4,
- _, _, // 17
- 2, 1, 4,
- _, _, // 18
- X, _, _,
- _, _, // 19
- 2, 2, 4,
- _, _, // 20
- 3, 0, 2,
- 4, _, // 21
- X, _, _,
- _, _, // 22
- X, _, _,
- _, _, // 23
- X, _, _,
- _, _, // 24
- X, _, _,
- _, _, // 25
- X, _, _,
- _, _, // 26
- X, _, _,
- _, _, // 27
- X, _, _,
- _, _, // 28
- X, _, _,
- _, _, // 29
- X, _, _,
- _, _, // 30
- X, _, _,
- _, _, // 31
- 1, 5, _,
- _, _, // 32
- 2, 0, 5,
- _, _, // 33
- 2, 1, 5,
- _, _, // 34
- X, _, _,
- _, _, // 35
- 2, 2, 5,
- _, _, // 36
- 3, 0, 2,
- 5, _, // 37
- X, _, _,
- _, _, // 38
- X, _, _,
- _, _, // 39
- 2, 3, 5,
- _, _, // 40
- 3, 0, 3,
- 5, _, // 41
- 3, 1, 3,
- 5, _, // 42
- X, _, _,
- _, _, // 43
- X, _, _,
- _, _, // 44
- X, _, _,
- _, _, // 45
- X, _, _,
- _, _, // 46
- X, _, _,
- _, _, // 47
- X, _, _,
- _, _, // 48
- X, _, _,
- _, _, // 49
- X, _, _,
- _, _, // 50
- X, _, _,
- _, _, // 51
- X, _, _,
- _, _, // 52
- X, _, _,
- _, _, // 53
- X, _, _,
- _, _, // 54
- X, _, _,
- _, _, // 55
- X, _, _,
- _, _, // 56
- X, _, _,
- _, _, // 57
- X, _, _,
- _, _, // 58
- X, _, _,
- _, _, // 59
- X, _, _,
- _, _, // 60
- X, _, _,
- _, _, // 61
- X, _, _,
- _, _, // 62
- X, _, _,
- _, _, // 63
- 1, 6, _,
- _, _, // 64
- 2, 0, 6,
- _, _, // 65
- 2, 1, 6,
- _, _, // 66
- X, _, _,
- _, _, // 67
- 2, 2, 6,
- _, _, // 68
- 3, 0, 2,
- 6, _, // 69
- X, _, _,
- _, _, // 70
- X, _, _,
- _, _, // 71
- 2, 3, 6,
- _, _, // 72
- 3, 0, 3,
- 6, _, // 73
- 3, 1, 3,
- 6, _, // 74
- X, _, _,
- _, _, // 75
- X, _, _,
- _, _, // 76
- X, _, _,
- _, _, // 77
- X, _, _,
- _, _, // 78
- X, _, _,
- _, _, // 79
- 2, 4, 6,
- _, _, // 80
- 3, 0, 4,
- 6, _, // 81
- 3, 1, 4,
- 6, _, // 82
- X, _, _,
- _, _, // 83
- 3, 2, 4,
- 6, _, // 84
- 4, 0, 2,
- 4, 6, // 85
- X, _, _,
- _, _, // 86
- X, _, _,
- _, _, // 87
- X, _, _,
- _, _, // 88
- X, _, _,
- _, _, // 89
- X, _, _,
- _, _, // 90
- X, _, _,
- _, _, // 91
- X, _, _,
- _, _, // 92
- X, _, _,
- _, _, // 93
- X, _, _,
- _, _, // 94
- X, _, _,
- _, _, // 95
- X, _, _,
- _, _, // 96
- X, _, _,
- _, _, // 97
- X, _, _,
- _, _, // 98
- X, _, _,
- _, _, // 99
- X, _, _,
- _, _, // 100
- X, _, _,
- _, _, // 101
- X, _, _,
- _, _, // 102
- X, _, _,
- _, _, // 103
- X, _, _,
- _, _, // 104
- X, _, _,
- _, _, // 105
- X, _, _,
- _, _, // 106
- X, _, _,
- _, _, // 107
- X, _, _,
- _, _, // 108
- X, _, _,
- _, _, // 109
- X, _, _,
- _, _, // 110
- X, _, _,
- _, _, // 111
- X, _, _,
- _, _, // 112
- X, _, _,
- _, _, // 113
- X, _, _,
- _, _, // 114
- X, _, _,
- _, _, // 115
- X, _, _,
- _, _, // 116
- X, _, _,
- _, _, // 117
- X, _, _,
- _, _, // 118
- X, _, _,
- _, _, // 119
- X, _, _,
- _, _, // 120
- X, _, _,
- _, _, // 121
- X, _, _,
- _, _, // 122
- X, _, _,
- _, _, // 123
- X, _, _,
- _, _, // 124
- X, _, _,
- _, _, // 125
- X, _, _,
- _, _, // 126
- X, _, _,
- _, _, // 127
- 1, 7, _,
- _, _, // 128
- 2, 0, 7,
- _, _, // 129
- 2, 1, 7,
- _, _, // 130
- X, _, _,
- _, _, // 131
- 2, 2, 7,
- _, _, // 132
- 3, 0, 2,
- 7, _, // 133
- X, _, _,
- _, _, // 134
- X, _, _,
- _, _, // 135
- 2, 3, 7,
- _, _, // 136
- 3, 0, 3,
- 7, _, // 137
- 3, 1, 3,
- 7, _, // 138
- X, _, _,
- _, _, // 139
- X, _, _,
- _, _, // 140
- X, _, _,
- _, _, // 141
- X, _, _,
- _, _, // 142
- X, _, _,
- _, _, // 143
- 2, 4, 7,
- _, _, // 144
- 3, 0, 4,
- 7, _, // 145
- 3, 1, 4,
- 7, _, // 146
- X, _, _,
- _, _, // 147
- 3, 2, 4,
- 7, _, // 148
- 4, 0, 2,
- 4, 7, // 149
- X, _, _,
- _, _, // 150
- X, _, _,
- _, _, // 151
- X, _, _,
- _, _, // 152
- X, _, _,
- _, _, // 153
- X, _, _,
- _, _, // 154
- X, _, _,
- _, _, // 155
- X, _, _,
- _, _, // 156
- X, _, _,
- _, _, // 157
- X, _, _,
- _, _, // 158
- X, _, _,
- _, _, // 159
- 2, 5, 7,
- _, _, // 160
- 3, 0, 5,
- 7, _, // 161
- 3, 1, 5,
- 7, _, // 162
- X, _, _,
- _, _, // 163
- 3, 2, 5,
- 7, _, // 164
- 4, 0, 2,
- 5, 7, // 165
- X, _, _,
- _, _, // 166
- X, _, _,
- _, _, // 167
- 3, 3, 5,
- 7, _, // 168
- 4, 0, 3,
- 5, 7, // 169
- 4, 1, 3,
- 5, 7 // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits. Returns the number of objects that start in the
-// range. Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
- int objects = 0;
- int offset = 0;
-
- // No consecutive 1 bits.
- DCHECK((mark_bits & 0x180) != 0x180);
- DCHECK((mark_bits & 0x18000) != 0x18000);
- DCHECK((mark_bits & 0x1800000) != 0x1800000);
-
- while (mark_bits != 0) {
- int byte = (mark_bits & 0xff);
- mark_bits >>= 8;
- if (byte != 0) {
- DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
- char* table = kStartTable + byte * kStartTableEntriesPerLine;
- int objects_in_these_8_words = table[0];
- DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
- DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
- for (int i = 0; i < objects_in_these_8_words; i++) {
- starts[objects++] = offset + table[1 + i];
- }
- }
- offset += 8;
- }
- return objects;
-}
-
-
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int required_freed_bytes) {
int max_freed = 0;
@@ -4329,7 +3846,7 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
}
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4356,79 +3873,60 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
continue;
}
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ // We need to sweep the page to get it into an iterable state again. Note
+ // that this adds unusable memory to the free list, which is dropped again
+ // later on. Since we only use the flag for testing, this is fine.
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ continue;
+ }
+
// One unused page is kept; all further ones are released before sweeping them.
if (p->LiveBytes() == 0) {
if (unused_page_present) {
if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
- reinterpret_cast<intptr_t>(p));
+ PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
- // Adjust unswept free bytes because releasing a page expects said
- // counter to be accurate for unswept pages.
- space->IncreaseUnsweptFreeBytes(p);
space->ReleasePage(p);
continue;
}
unused_page_present = true;
}
- switch (sweeper) {
- case CONCURRENT_SWEEPING:
- if (!parallel_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
- reinterpret_cast<intptr_t>(p));
- }
- if (space->identity() == CODE_SPACE) {
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
- pages_swept++;
- parallel_sweeping_active = true;
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
- reinterpret_cast<intptr_t>(p));
- }
- p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
- space->IncreaseUnsweptFreeBytes(p);
- }
- space->set_end_of_unswept_pages(p);
- break;
- case SEQUENTIAL_SWEEPING: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
- }
- if (space->identity() == CODE_SPACE) {
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
+ if (!parallel_sweeping_active) {
+ if (FLAG_gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: %p", p);
+ }
+ if (space->identity() == CODE_SPACE) {
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
- pages_swept++;
- break;
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ }
+ pages_swept++;
+ parallel_sweeping_active = true;
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
}
- default: { UNREACHABLE(); }
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
+ int to_sweep = p->area_size() - p->LiveBytes();
+ space->accounting_stats_.ShrinkSpace(to_sweep);
}
+ space->set_end_of_unswept_pages(p);
}
if (FLAG_gc_verbose) {
- PrintF("SweepSpace: %s (%d pages swept)\n",
- AllocationSpaceName(space->identity()), pages_swept);
+ PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
+ AllocationSpaceName(space->identity()), pages_swept);
}
}
@@ -4437,7 +3935,7 @@ void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
+ start_time = heap_->MonotonicallyIncreasingTimeInMs();
}
#ifdef DEBUG
@@ -4447,23 +3945,23 @@ void MarkCompactCollector::SweepSpaces() {
MoveEvacuationCandidatesToEndOfPagesList();
{
+ sweeping_in_progress_ = true;
{
GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_OLDSPACE);
- SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING);
+ GCTracer::Scope::MC_SWEEP_OLD);
+ StartSweepSpace(heap()->old_space());
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
- SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING);
+ StartSweepSpace(heap()->code_space());
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_MAP);
- SweepSpace(heap()->map_space(), CONCURRENT_SWEEPING);
+ StartSweepSpace(heap()->map_space());
}
- sweeping_in_progress_ = true;
- if (heap()->concurrent_sweeping_enabled()) {
+ if (FLAG_concurrent_sweeping) {
StartSweeperThreads();
}
}
@@ -4475,29 +3973,10 @@ void MarkCompactCollector::SweepSpaces() {
// buffer entries are already filtered out. We can just release the memory.
heap()->FreeQueuedChunks();
- EvacuateNewSpaceAndCandidates();
-
- // EvacuateNewSpaceAndCandidates iterates over new space objects and for
- // ArrayBuffers either re-registers them as live or promotes them. This is
- // needed to properly free them.
- heap()->array_buffer_tracker()->FreeDead(false);
-
- // Clear the marking state of live large objects.
- heap_->lo_space()->ClearMarkingStateOfLiveObjects();
-
- // Deallocate evacuated candidate pages.
- ReleaseEvacuationCandidates();
-
if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+ heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
start_time);
}
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !sweeping_in_progress_) {
- VerifyEvacuation(heap());
- }
-#endif
}
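
Both timing sites in this function now read Heap::MonotonicallyIncreasingTimeInMs() instead of the wall clock. A monotonic clock cannot jump backwards, so a sweep duration can never come out negative even if the system clock is stepped mid-GC. A generic C++ illustration of the same idea, not V8's actual clock implementation:

#include <chrono>
#include <cstdio>

// steady_clock is monotonic: now() never decreases, so the computed
// duration is always >= 0 even if the wall clock is adjusted (e.g.
// by NTP) while the measured phase is running.
static double MonotonicNowMs() {
  using namespace std::chrono;
  return duration<double, std::milli>(
             steady_clock::now().time_since_epoch()).count();
}

int main() {
  double start_time = MonotonicNowMs();
  // ... do the work being timed ...
  std::printf("phase took %.3f ms\n", MonotonicNowMs() - start_time);
}
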
@@ -4522,25 +4001,6 @@ void MarkCompactCollector::ParallelSweepSpacesComplete() {
}
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (isolate()->debug()->is_active()) enable = false;
-
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(isolate());
- } else {
- if (code_flusher_ == NULL) return;
- code_flusher_->EvictAllCandidates();
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
- }
-}
-
-
// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects. We should either reenable it or change our tools.
@@ -4607,7 +4067,7 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
pc);
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordRelocSlot(&rinfo, target);
}
}
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 724650c1c4e..cfb2d9d2702 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -45,10 +45,10 @@ class Marking : public AllStatic {
return !mark_bit.Get() && mark_bit.Next().Get();
}
- // Black markbits: 10 - this is required by the sweeper.
+ // Black markbits: 11
static const char* kBlackBitPattern;
INLINE(static bool IsBlack(MarkBit mark_bit)) {
- return mark_bit.Get() && !mark_bit.Next().Get();
+ return mark_bit.Get() && mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
@@ -58,10 +58,10 @@ class Marking : public AllStatic {
return !mark_bit.Get();
}
- // Grey markbits: 11
+ // Grey markbits: 10
static const char* kGreyBitPattern;
INLINE(static bool IsGrey(MarkBit mark_bit)) {
- return mark_bit.Get() && mark_bit.Next().Get();
+ return mark_bit.Get() && !mark_bit.Next().Get();
}
// IsBlackOrGrey assumes that the first bit is set for black or grey
@@ -70,7 +70,7 @@ class Marking : public AllStatic {
INLINE(static void MarkBlack(MarkBit mark_bit)) {
mark_bit.Set();
- mark_bit.Next().Clear();
+ mark_bit.Next().Set();
}
INLINE(static void MarkWhite(MarkBit mark_bit)) {
@@ -81,6 +81,7 @@ class Marking : public AllStatic {
INLINE(static void BlackToWhite(MarkBit markbit)) {
DCHECK(IsBlack(markbit));
markbit.Clear();
+ markbit.Next().Clear();
}
INLINE(static void GreyToWhite(MarkBit markbit)) {
@@ -91,23 +92,23 @@ class Marking : public AllStatic {
INLINE(static void BlackToGrey(MarkBit markbit)) {
DCHECK(IsBlack(markbit));
- markbit.Next().Set();
+ markbit.Next().Clear();
}
INLINE(static void WhiteToGrey(MarkBit markbit)) {
DCHECK(IsWhite(markbit));
markbit.Set();
- markbit.Next().Set();
}
INLINE(static void WhiteToBlack(MarkBit markbit)) {
DCHECK(IsWhite(markbit));
markbit.Set();
+ markbit.Next().Set();
}
INLINE(static void GreyToBlack(MarkBit markbit)) {
DCHECK(IsGrey(markbit));
- markbit.Next().Clear();
+ markbit.Next().Set();
}
INLINE(static void BlackToGrey(HeapObject* obj)) {
@@ -116,7 +117,7 @@ class Marking : public AllStatic {
INLINE(static void AnyToGrey(MarkBit markbit)) {
markbit.Set();
- markbit.Next().Set();
+ markbit.Next().Clear();
}
static void TransferMark(Heap* heap, Address old_start, Address new_start);
@@ -160,16 +161,15 @@ class Marking : public AllStatic {
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
MarkBit from_mark_bit = MarkBitFrom(from);
MarkBit to_mark_bit = MarkBitFrom(to);
- bool is_black = false;
+ DCHECK(Marking::IsWhite(to_mark_bit));
if (from_mark_bit.Get()) {
to_mark_bit.Set();
- is_black = true; // Looks black so far.
+ if (from_mark_bit.Next().Get()) {
+ to_mark_bit.Next().Set();
+ return true;
+ }
}
- if (from_mark_bit.Next().Get()) {
- to_mark_bit.Next().Set();
- is_black = false; // Was actually gray.
- }
- return is_black;
+ return false;
}
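
The hunks above flip the two-bit encoding to white = 00, grey = 10, black = 11, so liveness (black or grey) becomes a single-bit test and grey-to-black only sets the second bit. A self-contained model of just the bit algebra, checking the transitions against the predicates:

#include <cassert>

// Two-bit mark model: first bit = live, second bit = black vs. grey.
struct Bits { bool first = false, second = false; };

static bool IsWhite(Bits b) { return !b.first; }              // 00
static bool IsGrey(Bits b)  { return b.first && !b.second; }  // 10
static bool IsBlack(Bits b) { return b.first && b.second; }   // 11
static bool IsBlackOrGrey(Bits b) { return b.first; }  // one-bit test

int main() {
  Bits b;                      // starts white: 00
  assert(IsWhite(b));
  b.first = true;              // WhiteToGrey: 10
  assert(IsGrey(b) && IsBlackOrGrey(b));
  b.second = true;             // GreyToBlack: 11
  assert(IsBlack(b));
  b.second = false;            // BlackToGrey: 10 again
  assert(IsGrey(b));
  b.first = false;             // GreyToWhite clears the first bit
  assert(IsWhite(b));
}
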
private:
@@ -263,10 +263,9 @@ class MarkingDeque {
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in three ways:
+// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
-// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
@@ -274,39 +273,25 @@ class CodeFlusher {
public:
explicit CodeFlusher(Isolate* isolate)
: isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL),
- optimized_code_map_holder_head_(NULL) {}
+ jsfunction_candidates_head_(nullptr),
+ shared_function_info_candidates_head_(nullptr) {}
inline void AddCandidate(SharedFunctionInfo* shared_info);
inline void AddCandidate(JSFunction* function);
- inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
- void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictCandidate(JSFunction* function);
void ProcessCandidates() {
- ProcessOptimizedCodeMaps();
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
- void EvictAllCandidates() {
- EvictOptimizedCodeMaps();
- EvictJSFunctionCandidates();
- EvictSharedFunctionInfoCandidates();
- }
-
void IteratePointersToFromSpace(ObjectVisitor* v);
private:
- void ProcessOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
- void EvictOptimizedCodeMaps();
- void EvictJSFunctionCandidates();
- void EvictSharedFunctionInfoCandidates();
static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
static inline JSFunction* GetNextCandidate(JSFunction* candidate);
@@ -321,15 +306,9 @@ class CodeFlusher {
SharedFunctionInfo* next_candidate);
static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
- static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
- static inline void SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder);
- static inline void ClearNextCodeMap(SharedFunctionInfo* holder);
-
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
- SharedFunctionInfo* optimized_code_map_holder_head_;
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
@@ -343,6 +322,11 @@ class ThreadLocalTop;
// Mark-Compact collector
class MarkCompactCollector {
public:
+ enum IterationMode {
+ kKeepMarking,
+ kClearMarkbits,
+ };
+
static void Initialize();
void SetUp();
@@ -388,12 +372,6 @@ class MarkCompactCollector {
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
- void EnableCodeFlushing(bool enable);
-
- enum SweeperType {
- CONCURRENT_SWEEPING,
- SEQUENTIAL_SWEEPING
- };
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -430,14 +408,6 @@ class MarkCompactCollector {
AllocationSpace to_old_space,
SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectRaw(HeapObject* dst, HeapObject* src, int size);
-
- bool TryPromoteObject(HeapObject* object, int object_size);
-
void InvalidateCode(Code* code);
void ClearMarkbits();
@@ -457,17 +427,24 @@ class MarkCompactCollector {
// size of the maximum continuous freed memory chunk.
int SweepInParallel(Page* page, PagedSpace* space);
+ // Ensures that sweeping is finished.
+ //
+  // Note: Can only be called safely from the main thread.
void EnsureSweepingCompleted();
void SweepOrWaitUntilSweepingCompleted(Page* page);
+ // Help out in sweeping the corresponding space and refill memory that has
+ // been regained.
+ //
+ // Note: Thread-safe.
+ void SweepAndRefill(CompactionSpace* space);
+
// If sweeper threads are not active this method will return true. If
// this is a latency issue we should be smarter here. Otherwise, it will
// return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
- void RefillFreeList(PagedSpace* space);
-
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeping_in_progress_; }
@@ -512,12 +489,31 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
+ //
+ // Free lists filled by sweeper and consumed by corresponding spaces
+ // (including compaction spaces).
+ //
+ base::SmartPointer<FreeList>& free_list_old_space() {
+ return free_list_old_space_;
+ }
+ base::SmartPointer<FreeList>& free_list_code_space() {
+ return free_list_code_space_;
+ }
+ base::SmartPointer<FreeList>& free_list_map_space() {
+ return free_list_map_space_;
+ }
+
private:
class CompactionTask;
+ class EvacuateNewSpaceVisitor;
+ class EvacuateOldSpaceVisitor;
+ class EvacuateVisitorBase;
+ class HeapObjectVisitor;
class SweeperTask;
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+
explicit MarkCompactCollector(Heap* heap);
- ~MarkCompactCollector();
bool WillBeDeoptimized(Code* code);
void EvictPopularEvacuationCandidate(Page* page);
@@ -525,6 +521,10 @@ class MarkCompactCollector {
void StartSweeperThreads();
+ void ComputeEvacuationHeuristics(int area_size,
+ int* target_fragmentation_percent,
+ int* max_evacuated_bytes);
+
#ifdef DEBUG
enum CollectorState {
IDLE,
@@ -563,11 +563,12 @@ class MarkCompactCollector {
// After: Live objects are marked and non-live objects are unmarked.
friend class CodeMarkingVisitor;
+ friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
+ friend class RecordMigratedSlotVisitor;
friend class RootMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- friend class IncrementalMarkingMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
// from being flushed.
@@ -578,8 +579,6 @@ class MarkCompactCollector {
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
- void AfterMarking();
-
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
INLINE(void PushBlack(HeapObject* obj));
@@ -621,9 +620,8 @@ class MarkCompactCollector {
// otherwise a map can die and deoptimize the code.
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
- // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
- // increase chances of reusing of map transition tree in future.
- void RetainMaps();
+  // Collects a list of dependent code from maps embedded in optimized code.
+ DependentCode* DependentCodeListFromNonLiveMaps();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
@@ -648,15 +646,20 @@ class MarkCompactCollector {
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
- // Map transitions from a live map to a dead map must be killed.
- // We replace them with a null descriptor, with the same key.
+ // Clear non-live references in weak cells, transition and descriptor arrays,
+ // and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences();
- void ClearNonLivePrototypeTransitions(Map* map);
- void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearMapTransitions(Map* map, Map* dead_transition);
- bool ClearMapBackPointer(Map* map);
- void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
- int number_of_own_descriptors);
+ void MarkDependentCodeForDeoptimization(DependentCode* list);
+ // Find non-live targets of simple transitions in the given list. Clear
+  // transitions to non-live targets and if needed trim descriptor arrays.
+ void ClearSimpleMapTransitions(Object* non_live_map_list);
+ void ClearSimpleMapTransition(Map* map, Map* dead_transition);
+ // Compact every array in the global list of transition arrays and
+ // trim the corresponding descriptor array if a transition target is non-live.
+ void ClearFullMapTransitions();
+ bool CompactTransitionArray(Map* map, TransitionArray* transitions,
+ DescriptorArray* descriptors);
+ void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
void TrimEnumCache(Map* map, DescriptorArray* descriptors);
// Mark all values associated with reachable keys in weak collections
@@ -673,10 +676,12 @@ class MarkCompactCollector {
// collections when incremental marking is aborted.
void AbortWeakCollections();
-
- void ProcessAndClearWeakCells();
+ void ClearWeakCells(Object** non_live_map_list,
+ DependentCode** dependent_code_list);
void AbortWeakCells();
+ void AbortTransitionArrays();
+
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
// a non-compacting collection.
@@ -693,13 +698,10 @@ class MarkCompactCollector {
// regions to each space's free list.
void SweepSpaces();
- int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
- NewSpacePage* p);
+ void EvacuateNewSpacePrologue();
- void EvacuateNewSpace();
-
- bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
- SlotsBuffer** evacuation_slots_buffer);
+ // Returns local pretenuring feedback.
+ HashMap* EvacuateNewSpaceInParallel();
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
@@ -712,17 +714,35 @@ class MarkCompactCollector {
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks();
- void WaitUntilCompactionCompleted();
+
+ void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
+ uint32_t* task_ids, int len);
+ void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
void EvacuateNewSpaceAndCandidates();
+ void UpdatePointersAfterEvacuation();
+
+ // Iterates through all live objects on a page using marking information.
+ // Returns whether all objects have successfully been visited.
+ bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
+ IterationMode mode);
+
+ void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
+
+ void RecomputeLiveBytes(MemoryChunk* page);
+
+ void SweepAbortedPages();
+
void ReleaseEvacuationCandidates();
// Moves the pages of the evacuation_candidates_ list to the end of their
// corresponding space pages list.
void MoveEvacuationCandidatesToEndOfPagesList();
- void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Starts sweeping of a space by contributing on the main thread and setting
+ // up other pages for sweeping.
+ void StartSweepSpace(PagedSpace* space);
// Finalizes the parallel sweeping phase. Marks all the pages that were
// swept in parallel.
@@ -759,6 +779,8 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
+ List<MemoryChunk*> newspace_evacuation_candidates_;
+
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses
  // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
@@ -787,10 +809,8 @@ class MarkCompactCollector {
// Semaphore used to synchronize compaction tasks.
base::Semaphore pending_compaction_tasks_semaphore_;
- // Number of active compaction tasks (including main thread).
- intptr_t concurrent_compaction_tasks_active_;
-
friend class Heap;
+ friend class StoreBuffer;
};
@@ -826,6 +846,14 @@ class MarkBitCellIterator BASE_EMBEDDED {
cell_base_ += 32 * kPointerSize;
}
+  // Returns the next mark bit cell, or 0 if there is no next cell.
+ inline MarkBit::CellType PeekNext() {
+ if (HasNext()) {
+ return cells_[cell_index_ + 1];
+ }
+ return 0;
+ }
+
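
PeekNext() lets a consumer look at the following 32-object cell without advancing, for example to tell whether a marked object's bits continue across a cell boundary. A sketch against the declared interface; Done(), Advance(), and CurrentCell() are assumed from the surrounding class, since only PeekNext() is visible in this hunk:

void ScanChunk(MarkBitCellIterator* it) {
  while (!it->Done()) {
    MarkBit::CellType current = *it->CurrentCell();
    // Peek at the next cell without advancing; PeekNext() returns 0
    // on the last cell, so this check is safe at the chunk boundary.
    bool next_cell_has_marks = it->PeekNext() != 0;
    if (current != 0 || next_cell_has_marks) {
      // ... handle objects in `current`, including one whose mark
      // bits straddle into the next cell ...
    }
    it->Advance();
  }
}
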
private:
MemoryChunk* chunk_;
MarkBit::CellType* cells_;
@@ -834,6 +862,26 @@ class MarkBitCellIterator BASE_EMBEDDED {
Address cell_base_;
};
+enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };
+
+template <LiveObjectIterationMode T>
+class LiveObjectIterator BASE_EMBEDDED {
+ public:
+ explicit LiveObjectIterator(MemoryChunk* chunk)
+ : chunk_(chunk),
+ it_(chunk_),
+ cell_base_(it_.CurrentCellBase()),
+ current_cell_(*it_.CurrentCell()) {}
+
+ HeapObject* Next();
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBitCellIterator it_;
+ Address cell_base_;
+ MarkBit::CellType current_cell_;
+};
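
The intended consumption pattern for the new iterator, assuming Next() returns nullptr once the chunk is exhausted (the declaration above does not spell out the sentinel):

// Walk every black (fully marked) object on a chunk.
void VisitBlackObjects(MemoryChunk* chunk) {
  LiveObjectIterator<kBlackObjects> it(chunk);
  HeapObject* object = nullptr;
  while ((object = it.Next()) != nullptr) {
    // ... e.g. evacuate `object` or recompute its live bytes ...
  }
}
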
+
class EvacuationScope BASE_EMBEDDED {
public:
@@ -850,7 +898,7 @@ class EvacuationScope BASE_EMBEDDED {
const char* AllocationSpaceName(AllocationSpace space);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_MARK_COMPACT_H_
diff --git a/chromium/v8/src/heap/memory-reducer.cc b/chromium/v8/src/heap/memory-reducer.cc
index 45d6bd3d7fb..33e624978ff 100644
--- a/chromium/v8/src/heap/memory-reducer.cc
+++ b/chromium/v8/src/heap/memory-reducer.cc
@@ -13,7 +13,7 @@
namespace v8 {
namespace internal {
-const int MemoryReducer::kLongDelayMs = 5000;
+const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
@@ -24,14 +24,27 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
void MemoryReducer::TimerTask::RunInternal() {
+ const double kJsCallsPerMsThreshold = 0.5;
Heap* heap = memory_reducer_->heap();
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
heap->OldGenerationAllocationCounter());
+ double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
+ bool low_allocation_rate = heap->HasLowAllocationRate();
+ bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
+ bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
+ js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
+ optimize_for_memory ? "background" : "foreground");
+ }
event.type = kTimer;
event.time_ms = time_ms;
- event.low_allocation_rate = heap->HasLowAllocationRate();
+  // The memory reducer will start incremental marking if
+  // 1) the mutator is likely idle: the JS call rate is low and the
+  //    allocation rate is low, or
+  // 2) the mutator is in the background: the optimize-for-memory flag is set.
+ event.should_start_incremental_gc = is_idle || optimize_for_memory;
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
heap->incremental_marking()->CanBeActivated();
@@ -39,6 +52,16 @@ void MemoryReducer::TimerTask::RunInternal() {
}
+double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) {
+ unsigned int counter = heap()->isolate()->js_calls_from_api_counter();
+ unsigned int call_delta = counter - js_calls_counter_;
+ double time_delta_ms = time_ms - js_calls_sample_time_ms_;
+ js_calls_counter_ = counter;
+ js_calls_sample_time_ms_ = time_ms;
+ return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
+}
+
+
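
SampleAndGetJsCallsPerMs is a plain difference quotient over a monotonically increasing call counter. A self-contained rendering of the same arithmetic, together with the 0.5 calls/ms idleness threshold that RunInternal applies to its result:

#include <cassert>

// Calls per millisecond between two samples of an increasing counter;
// the same arithmetic as SampleAndGetJsCallsPerMs above.
static double CallsPerMs(unsigned counter, double time_ms,
                         unsigned* last_counter, double* last_time_ms) {
  unsigned call_delta = counter - *last_counter;
  double time_delta_ms = time_ms - *last_time_ms;
  *last_counter = counter;
  *last_time_ms = time_ms;
  return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
}

int main() {
  unsigned last_counter = 1000;
  double last_time_ms = 2000.0;
  // 400 API-initiated calls over 1000 ms -> 0.4 calls/ms, below the
  // 0.5 calls/ms threshold, so the mutator counts as likely idle.
  double rate = CallsPerMs(1400, 3000.0, &last_counter, &last_time_ms);
  assert(rate == 0.4);
  assert(rate < 0.5 /* kJsCallsPerMsThreshold */);
}
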
void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK_EQ(kTimer, event.type);
DCHECK_EQ(kWait, state_.action);
@@ -51,8 +74,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
state_.started_gcs);
}
if (heap()->ShouldOptimizeForMemoryUsage()) {
- // Do full GC if memory usage has higher priority than latency. This is
- // important for background tabs that do not send idle notifications.
+ // TODO(ulan): Remove this once crbug.com/552305 is fixed.
+ // Do full GC if memory usage has higher priority than latency.
heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
"memory reducer");
} else {
@@ -76,7 +99,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
"Memory reducer: finalize incremental marking");
}
// Re-schedule the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
state_.next_gc_start_ms - event.time_ms);
@@ -91,7 +114,7 @@ void MemoryReducer::NotifyMarkCompact(const Event& event) {
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
// If we are transitioning to the WAIT state, start the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
+ ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
}
if (old_action == kRun) {
if (FLAG_trace_gc_verbose) {
@@ -109,34 +132,7 @@ void MemoryReducer::NotifyContextDisposed(const Event& event) {
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
// If we are transitioning to the WAIT state, start the timer.
- ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
- }
-}
-
-
-void MemoryReducer::NotifyBackgroundIdleNotification(const Event& event) {
- DCHECK_EQ(kBackgroundIdleNotification, event.type);
- Action old_action = state_.action;
- int old_started_gcs = state_.started_gcs;
- state_ = Step(state_, event);
- if (old_action == kWait && state_.action == kWait &&
- old_started_gcs + 1 == state_.started_gcs) {
- DCHECK(heap()->incremental_marking()->IsStopped());
- // TODO(ulan): Replace it with incremental marking GC once
- // chromium:490559 is fixed.
- if (event.time_ms > state_.last_gc_time_ms + kLongDelayMs) {
- heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
- "memory reducer background GC");
- } else {
- DCHECK(FLAG_incremental_marking);
- heap()->StartIdleIncrementalMarking();
- if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(),
- "Memory reducer: started GC #%d"
- " (background idle)\n",
- state_.started_gcs);
- }
- }
+ ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
}
}
@@ -150,12 +146,12 @@ bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
// For specification of this function see the comment for MemoryReducer class.
MemoryReducer::State MemoryReducer::Step(const State& state,
const Event& event) {
- if (!FLAG_incremental_marking) {
+ if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
return State(kDone, 0, 0, state.last_gc_time_ms);
}
switch (state.action) {
case kDone:
- if (event.type == kTimer || event.type == kBackgroundIdleNotification) {
+ if (event.type == kTimer) {
return state;
} else {
DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
@@ -171,7 +167,8 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
if (state.started_gcs >= kMaxNumberOfGCs) {
return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
} else if (event.can_start_incremental_gc &&
- (event.low_allocation_rate || WatchdogGC(state, event))) {
+ (event.should_start_incremental_gc ||
+ WatchdogGC(state, event))) {
if (state.next_gc_start_ms <= event.time_ms) {
return State(kRun, state.started_gcs + 1, 0.0,
state.last_gc_time_ms);
@@ -182,14 +179,6 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
state.last_gc_time_ms);
}
- case kBackgroundIdleNotification:
- if (event.can_start_incremental_gc &&
- state.started_gcs < kMaxNumberOfGCs) {
- return State(kWait, state.started_gcs + 1,
- event.time_ms + kLongDelayMs, state.last_gc_time_ms);
- } else {
- return state;
- }
case kMarkCompact:
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
event.time_ms);
@@ -212,8 +201,10 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
}
-void MemoryReducer::ScheduleTimer(double delay_ms) {
+void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
DCHECK(delay_ms > 0);
+ // Record the time and the js call counter.
+ SampleAndGetJsCallsPerMs(time_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
@@ -225,5 +216,5 @@ void MemoryReducer::ScheduleTimer(double delay_ms) {
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
-} // internal
-} // v8
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/memory-reducer.h b/chromium/v8/src/heap/memory-reducer.h
index f98cb045e38..9213613c076 100644
--- a/chromium/v8/src/heap/memory-reducer.h
+++ b/chromium/v8/src/heap/memory-reducer.h
@@ -96,23 +96,21 @@ class MemoryReducer {
double last_gc_time_ms;
};
- enum EventType {
- kTimer,
- kMarkCompact,
- kContextDisposed,
- kBackgroundIdleNotification
- };
+ enum EventType { kTimer, kMarkCompact, kContextDisposed };
struct Event {
EventType type;
double time_ms;
- bool low_allocation_rate;
bool next_gc_likely_to_collect_more;
+ bool should_start_incremental_gc;
bool can_start_incremental_gc;
};
explicit MemoryReducer(Heap* heap)
- : heap_(heap), state_(kDone, 0, 0.0, 0.0) {}
+ : heap_(heap),
+ state_(kDone, 0, 0.0, 0.0),
+ js_calls_counter_(0),
+ js_calls_sample_time_ms_(0.0) {}
// Callbacks.
void NotifyMarkCompact(const Event& event);
void NotifyContextDisposed(const Event& event);
@@ -121,7 +119,7 @@ class MemoryReducer {
// the incoming event.
static State Step(const State& state, const Event& event);
// Posts a timer task that will call NotifyTimer after the given delay.
- void ScheduleTimer(double delay_ms);
+ void ScheduleTimer(double time_ms, double delay_ms);
void TearDown();
static const int kLongDelayMs;
static const int kShortDelayMs;
@@ -150,8 +148,16 @@ class MemoryReducer {
static bool WatchdogGC(const State& state, const Event& event);
+ // Returns the rate of JS calls initiated from the API.
+ double SampleAndGetJsCallsPerMs(double time_ms);
+
Heap* heap_;
State state_;
+ unsigned int js_calls_counter_;
+ double js_calls_sample_time_ms_;
+
+ // Used in cctest.
+ friend class HeapTester;
DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
};
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index 195723e86d8..c1566abfc5c 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -186,12 +186,6 @@ void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
heap->object_stats_->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
fixed_array_size);
}
- if (TransitionArray::IsFullTransitionArray(map_obj->raw_transitions())) {
- int fixed_array_size =
- TransitionArray::cast(map_obj->raw_transitions())->Size();
- heap->object_stats_->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
if (map_obj->has_code_cache()) {
CodeCache* cache = CodeCache::cast(map_obj->code_cache());
heap->object_stats_->RecordFixedArraySubTypeStats(
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index 55734fd463a..a29ba4b08cc 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -9,6 +9,7 @@
#include "src/heap/objects-visiting.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
+#include "src/objects-body-descriptors-inl.h"
namespace v8 {
namespace internal {
@@ -43,8 +44,15 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
FixedArray::BodyDescriptor, int>::Visit);
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
- table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
- table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
+ table_.Register(
+ kVisitFixedTypedArray,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(
+ kVisitFixedFloat64Array,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ int>::Visit);
table_.Register(
kVisitNativeContext,
@@ -63,14 +71,14 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
- table_.Register(kVisitJSFunction, &VisitJSFunction);
+ // Don't visit code entry. We are using this visitor only during scavenges.
+ table_.Register(
+ kVisitJSFunction,
+ &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
+ int>::Visit);
table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
- table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
- table_.Register(kVisitJSDataView, &VisitJSDataView);
-
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
@@ -90,36 +98,14 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
template <typename StaticVisitor>
int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
+ typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor, int>
+ JSArrayBufferBodyVisitor;
- JSArrayBuffer::JSArrayBufferIterateBody<
- StaticNewSpaceVisitor<StaticVisitor> >(heap, object);
if (!JSArrayBuffer::cast(object)->is_external()) {
+ Heap* heap = map->GetHeap();
heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
}
- return JSArrayBuffer::kSizeWithInternalFields;
-}
-
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
- Map* map, HeapObject* object) {
- VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
- return JSTypedArray::kSizeWithInternalFields;
-}
-
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
- HeapObject* object) {
- VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
- return JSDataView::kSizeWithInternalFields;
+ return JSArrayBufferBodyVisitor::Visit(map, object);
}
@@ -156,9 +142,15 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+ table_.Register(
+ kVisitFixedTypedArray,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ void>::Visit);
- table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
+ table_.Register(
+ kVisitFixedFloat64Array,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ void>::Visit);
table_.Register(kVisitNativeContext, &VisitNativeContext);
@@ -190,10 +182,6 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
- table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
- table_.Register(kVisitJSDataView, &VisitJSDataView);
-
// Registration for kVisitJSRegExp is done by StaticVisitor.
table_.Register(
@@ -204,6 +192,8 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitWeakCell, &VisitWeakCell);
+ table_.Register(kVisitTransitionArray, &VisitTransitionArray);
+
table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
kVisitDataObjectGeneric>();
@@ -299,13 +289,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
Map* map, HeapObject* object) {
FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
void>::Visit(map, object);
-
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS;
- ++idx) {
- Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
- collector->RecordSlot(object, slot, *slot);
- }
}
@@ -374,6 +357,31 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
+ Map* map, HeapObject* object) {
+ TransitionArray* array = TransitionArray::cast(object);
+ Heap* heap = array->GetHeap();
+ // Visit strong references.
+ if (array->HasPrototypeTransitions()) {
+ StaticVisitor::VisitPointer(heap, array,
+ array->GetPrototypeTransitionsSlot());
+ }
+ int num_transitions = TransitionArray::NumberOfTransitions(array);
+ for (int i = 0; i < num_transitions; ++i) {
+ StaticVisitor::VisitPointer(heap, array, array->GetKeySlot(i));
+ }
+  // Enqueue the array in the linked list of encountered transition arrays
+  // if it is not already in the list.
+ if (array->next_link()->IsUndefined()) {
+ Heap* heap = map->GetHeap();
+ array->set_next_link(heap->encountered_transition_arrays(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ heap->set_encountered_transition_arrays(array);
+ }
+}
+
+
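
VisitTransitionArray above marks the strong parts, the prototype-transitions slot and the keys, but leaves the transition targets weak, threading each array onto a per-heap list via next_link(). A hedged sketch of draining that list after marking; the names follow this hunk, and the real post-processing (ClearFullMapTransitions, declared in the mark-compact.h hunk earlier) does considerably more per array:

void DrainEncounteredTransitionArrays(Heap* heap) {
  Object* current = heap->encountered_transition_arrays();
  while (!current->IsUndefined()) {
    TransitionArray* array = TransitionArray::cast(current);
    Object* next = array->next_link();
    // ... drop transitions whose target maps were not marked ...
    // Unlink the array so it can be re-enqueued by a later GC.
    array->set_next_link(heap->undefined_value(),
                         UPDATE_WEAK_WRITE_BARRIER);
    current = next;
  }
}
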
+template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
@@ -388,6 +396,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
Map* map, HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSWeakCollection::BodyDescriptorWeak,
+ void> JSWeakCollectionBodyVisitor;
Heap* heap = map->GetHeap();
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(object);
@@ -400,14 +411,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
  // Skip visiting the backing hash table containing the mappings and the
  // pointer to the other enqueued weak collections; both are post-processed.
- StaticVisitor::VisitPointers(
- heap, object,
- HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
- HeapObject::RawField(object, JSWeakCollection::kTableOffset));
- STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
- JSWeakCollection::kNextOffset);
- STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
- JSWeakCollection::kSize);
+ JSWeakCollectionBodyVisitor::Visit(map, object);
// Partially initialized weak collection is enqueued, but table is ignored.
if (!weak_collection->table()->IsHashTable()) return;
@@ -423,12 +427,14 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor, Code::BodyDescriptor, void>
+ CodeBodyVisitor;
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
}
- code->CodeIterateBody<StaticVisitor>(heap);
+ CodeBodyVisitor::Visit(map, object);
}
@@ -443,23 +449,14 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (FLAG_cleanup_code_caches_at_gc) {
shared->ClearTypeFeedbackInfoAtGCTime();
}
- if ((FLAG_flush_optimized_code_cache ||
- heap->isolate()->serializer_enabled()) &&
- !shared->optimized_code_map()->IsSmi()) {
- // Always flush the optimized code map if requested by flag.
- shared->ClearOptimizedCodeMap();
+ if (FLAG_flush_optimized_code_cache) {
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ // Always flush the optimized code map if requested by flag.
+ shared->ClearOptimizedCodeMap();
+ }
}
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
- if (!shared->optimized_code_map()->IsSmi()) {
- // Add the shared function info holding an optimized code map to
- // the code flusher for processing of code maps after marking.
- collector->code_flusher()->AddOptimizedCodeMap(shared);
- // Treat some references within the code map weakly by marking the
- // code map itself but not pushing it onto the marking deque.
- FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
- MarkOptimizedCodeMap(heap, code_map);
- }
if (IsFlushable(heap, shared)) {
// This function's code looks flushable. But we have to postpone
// the decision until we see all functions that point to the same
@@ -472,12 +469,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
VisitSharedFunctionInfoWeakCode(heap, object);
return;
}
- } else {
- if (!shared->optimized_code_map()->IsSmi()) {
- // Flush optimized code map on major GCs without code flushing,
- // needed because cached code doesn't contain breakpoints.
- shared->ClearOptimizedCodeMap();
- }
}
VisitSharedFunctionInfoStrongCode(heap, object);
}
@@ -498,38 +489,22 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
// non-flushable, because it is required for bailing out from
// optimized code.
collector->code_flusher()->AddCandidate(function);
- // Visit shared function info immediately to avoid double checking
- // of its flushability later. This is just an optimization because
- // the shared function info would eventually be visited.
- SharedFunctionInfo* shared = function->shared();
- if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
- StaticVisitor::MarkObject(heap, shared->map());
- VisitSharedFunctionInfoWeakCode(heap, shared);
- }
// Treat the reference to the code object weakly.
- VisitJSFunctionWeakCode(heap, object);
+ VisitJSFunctionWeakCode(map, object);
return;
} else {
// Visit all unoptimized code objects to prevent flushing them.
StaticVisitor::MarkObject(heap, function->shared()->code());
- if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- MarkInlinedFunctionsCode(heap, function->code());
- }
}
}
- VisitJSFunctionStrongCode(heap, object);
+ VisitJSFunctionStrongCode(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
HeapObject* object) {
- int last_property_offset =
- JSRegExp::kSize + kPointerSize * map->GetInObjectProperties();
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
- HeapObject::RawField(object, last_property_offset));
+ JSObjectVisitor::Visit(map, object);
}
@@ -538,7 +513,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- JSArrayBuffer::JSArrayBufferIterateBody<StaticVisitor>(heap, object);
+ typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+ void> JSArrayBufferBodyVisitor;
+
+ JSArrayBufferBodyVisitor::Visit(map, object);
+
if (!JSArrayBuffer::cast(object)->is_external() &&
!heap->InNewSpace(object)) {
heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
@@ -547,26 +526,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
- Map* map, HeapObject* object) {
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
- HeapObject* object) {
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
-}
-
-
-template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
Map* map, HeapObject* object) {
StaticVisitor::VisitPointers(
@@ -579,11 +538,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
Map* map) {
- Object* raw_transitions = map->raw_transitions();
- if (TransitionArray::IsFullTransitionArray(raw_transitions)) {
- MarkTransitionArray(heap, TransitionArray::cast(raw_transitions));
- }
-
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a non-empty
// descriptor array is marked, its header is also visited. The slot holding
@@ -616,63 +570,6 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
- Heap* heap, TransitionArray* transitions) {
- if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
-
- if (transitions->HasPrototypeTransitions()) {
- StaticVisitor::VisitPointer(heap, transitions,
- transitions->GetPrototypeTransitionsSlot());
- }
-
- int num_transitions = TransitionArray::NumberOfTransitions(transitions);
- for (int i = 0; i < num_transitions; ++i) {
- StaticVisitor::VisitPointer(heap, transitions, transitions->GetKeySlot(i));
- }
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkOptimizedCodeMap(
- Heap* heap, FixedArray* code_map) {
- if (!StaticVisitor::MarkObjectWithoutPush(heap, code_map)) return;
-
- // Mark the context-independent entry in the optimized code map. Depending on
- // the age of the code object, we treat it as a strong or a weak reference.
- Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
- if (FLAG_turbo_preserve_shared_code && shared_object->IsCode() &&
- FLAG_age_code && !Code::cast(shared_object)->IsOld()) {
- StaticVisitor::VisitPointer(
- heap, code_map,
- code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex));
- }
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
- Code* code) {
- // For optimized functions we should retain both non-optimized version
- // of its code and non-optimized version of all inlined functions.
- // This is required to support bailing out from inlined code.
- DeoptimizationInputData* const data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- FixedArray* const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- StaticVisitor::MarkObject(
- heap, SharedFunctionInfo::cast(literals->get(i))->code());
- }
-}
-
-
-inline static bool IsValidNonBuiltinContext(Object* context) {
- return context->IsContext() &&
- !Context::cast(context)->global_object()->IsJSBuiltinsObject();
-}
-
-
inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
Object* undefined = heap->undefined_value();
return (info->script() != undefined) &&
@@ -692,11 +589,6 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
return false;
}
- // The function must have a valid context and not be a builtin.
- if (!IsValidNonBuiltinContext(function->context())) {
- return false;
- }
-
// We do not (yet) flush code for optimized functions.
if (function->code() != shared_info->code()) {
return false;
@@ -754,6 +646,16 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
+ // The function must not be a builtin.
+ if (shared_info->IsBuiltin()) {
+ return false;
+ }
+
+ // Maintain debug break slots in the code.
+ if (shared_info->HasDebugCode()) {
+ return false;
+ }
+
// If this is a function initialized with %SetCode then the one-to-one
// relation between SharedFunctionInfo and Code is broken.
if (shared_info->dont_flush()) {
@@ -803,106 +705,24 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot =
- HeapObject::RawField(object, JSFunction::kPropertiesOffset);
- Object** end_slot =
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
-
- VisitCodeEntry(heap, object,
- object->address() + JSFunction::kCodeEntryOffset);
- STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
- JSFunction::kPrototypeOrInitialMapOffset);
-
- start_slot =
- HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
- end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
+ Map* map, HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSFunction::BodyDescriptorStrongCode,
+ void> JSFunctionStrongCodeBodyVisitor;
+ JSFunctionStrongCodeBodyVisitor::Visit(map, object);
}
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
- Heap* heap, HeapObject* object) {
- Object** start_slot =
- HeapObject::RawField(object, JSFunction::kPropertiesOffset);
- Object** end_slot =
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
-
- // Skip visiting kCodeEntryOffset as it is treated weakly here.
- STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
- JSFunction::kPrototypeOrInitialMapOffset);
-
- start_slot =
- HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
- end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
- StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
-}
-
-
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::kDebugBreakSlotMask;
-
- // There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
- IteratePointer(v, kRelocationInfoOffset);
- IteratePointer(v, kHandlerTableOffset);
- IteratePointer(v, kDeoptimizationDataOffset);
- IteratePointer(v, kTypeFeedbackInfoOffset);
- IterateNextCodeLink(v, kNextCodeLinkOffset);
-
- RelocIterator it(this, mode_mask);
- Isolate* isolate = this->GetIsolate();
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(isolate, v);
- }
+ Map* map, HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
+ void> JSFunctionWeakCodeBodyVisitor;
+ JSFunctionWeakCodeBodyVisitor::Visit(map, object);
}
-template <typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::kDebugBreakSlotMask;
-
- // There are two places where we iterate code bodies: here and the non-
- // templated CodeIterateBody (above). They should be kept in sync.
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
- StaticVisitor::VisitNextCodeLink(
- heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
-
-
- RelocIterator it(this, mode_mask);
- for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>(heap);
- }
-}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OBJECTS_VISITING_INL_H_
diff --git a/chromium/v8/src/heap/objects-visiting.cc b/chromium/v8/src/heap/objects-visiting.cc
index 902a96a644c..315c897bec1 100644
--- a/chromium/v8/src/heap/objects-visiting.cc
+++ b/chromium/v8/src/heap/objects-visiting.cc
@@ -79,13 +79,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case WEAK_CELL_TYPE:
return kVisitWeakCell;
- case JS_SET_TYPE:
- return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSSet::kSize, has_unboxed_fields);
-
- case JS_MAP_TYPE:
- return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSMap::kSize, has_unboxed_fields);
+ case TRANSITION_ARRAY_TYPE:
+ return kVisitTransitionArray;
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
@@ -99,31 +94,14 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_PROXY_TYPE:
return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSProxy::kSize, has_unboxed_fields);
-
- case JS_FUNCTION_PROXY_TYPE:
- return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSFunctionProxy::kSize, has_unboxed_fields);
-
- case FOREIGN_TYPE:
- return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
- Foreign::kSize, has_unboxed_fields);
+ instance_size, has_unboxed_fields);
case SYMBOL_TYPE:
return kVisitSymbol;
- case FILLER_TYPE:
- return kVisitDataObjectGeneric;
-
case JS_ARRAY_BUFFER_TYPE:
return kVisitJSArrayBuffer;
- case JS_TYPED_ARRAY_TYPE:
- return kVisitJSTypedArray;
-
- case JS_DATA_VIEW_TYPE:
- return kVisitJSDataView;
-
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -133,17 +111,26 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARRAY_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_ITERATOR_RESULT_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
instance_size, has_unboxed_fields);
case JS_FUNCTION_TYPE:
return kVisitJSFunction;
+ case FILLER_TYPE:
+ if (instance_size == kPointerSize) return kVisitDataObjectGeneric;
+ // Fall through.
+ case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case SIMD128_VALUE_TYPE:
@@ -180,140 +167,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
}
-void HeapObject::IterateBody(InstanceType type, int object_size,
- ObjectVisitor* v) {
- // Avoiding <Type>::cast(this) because it accesses the map pointer field.
- // During GC, the map pointer field is encoded.
- if (type < FIRST_NONSTRING_TYPE) {
- switch (type & kStringRepresentationMask) {
- case kSeqStringTag:
- break;
- case kConsStringTag:
- ConsString::BodyDescriptor::IterateBody(this, v);
- break;
- case kSlicedStringTag:
- SlicedString::BodyDescriptor::IterateBody(this, v);
- break;
- case kExternalStringTag:
- if ((type & kStringEncodingMask) == kOneByteStringTag) {
- reinterpret_cast<ExternalOneByteString*>(this)
- ->ExternalOneByteStringIterateBody(v);
- } else {
- reinterpret_cast<ExternalTwoByteString*>(this)
- ->ExternalTwoByteStringIterateBody(v);
- }
- break;
- }
- return;
- }
-
- switch (type) {
- case FIXED_ARRAY_TYPE:
- FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
- case JS_ITERATOR_RESULT_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case JS_REGEXP_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- JSObject::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::JSArrayBufferIterateBody(this, v);
- break;
- case JS_FUNCTION_TYPE:
- reinterpret_cast<JSFunction*>(this)
- ->JSFunctionIterateBody(object_size, v);
- break;
- case ODDBALL_TYPE:
- Oddball::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_PROXY_TYPE:
- JSProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case FOREIGN_TYPE:
- reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
- break;
- case MAP_TYPE:
- Map::BodyDescriptor::IterateBody(this, v);
- break;
- case CODE_TYPE:
- reinterpret_cast<Code*>(this)->CodeIterateBody(v);
- break;
- case CELL_TYPE:
- Cell::BodyDescriptor::IterateBody(this, v);
- break;
- case PROPERTY_CELL_TYPE:
- PropertyCell::BodyDescriptor::IterateBody(this, v);
- break;
- case WEAK_CELL_TYPE:
- WeakCell::BodyDescriptor::IterateBody(this, v);
- break;
- case SYMBOL_TYPE:
- Symbol::BodyDescriptor::IterateBody(this, v);
- break;
- case BYTECODE_ARRAY_TYPE:
- reinterpret_cast<BytecodeArray*>(this)->BytecodeArrayIterateBody(v);
- break;
-
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- case SIMD128_VALUE_TYPE:
- case FILLER_TYPE:
- case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
- break;
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- reinterpret_cast<FixedTypedArrayBase*>(this) \
- ->FixedTypedArrayBaseIterateBody(v); \
- break;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
- break;
- }
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- if (type == ALLOCATION_SITE_TYPE) {
- AllocationSite::BodyDescriptor::IterateBody(this, v);
- } else {
- StructBodyDescriptor::IterateBody(this, object_size, v);
- }
- break;
- default:
- PrintF("Unknown type: %d\n", type);
- UNREACHABLE();
- }
-}
-
-
// We don't record weak slots during marking or scavenges. Instead we do it
// once when we complete mark-compact cycle. Note that write barrier has no
// effect if we are already in the middle of compacting mark-sweep cycle and we
@@ -442,9 +295,17 @@ struct WeakListVisitor<Context> {
DoWeakList<JSFunction>(heap, context, retainer,
Context::OPTIMIZED_FUNCTIONS_LIST);
- // Code objects are always allocated in Code space, we do not have to visit
- // them during scavenges.
if (heap->gc_state() == Heap::MARK_COMPACT) {
+ // Record the slots of the weak entries in the native context.
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
+ Object** slot = Context::cast(context)->RawFieldOfElementAt(idx);
+ collector->RecordSlot(context, slot, *slot);
+ }
+      // Code objects are always allocated in Code space; we do not have to
+      // visit them during scavenges.
DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
}
diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h
index 1eba88731b3..1fe8a1749ad 100644
--- a/chromium/v8/src/heap/objects-visiting.h
+++ b/chromium/v8/src/heap/objects-visiting.h
@@ -9,6 +9,7 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
+#include "src/objects-body-descriptors.h"
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
@@ -75,12 +76,11 @@ class StaticVisitorBase : public AllStatic {
V(Cell) \
V(PropertyCell) \
V(WeakCell) \
+ V(TransitionArray) \
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakCollection) \
V(JSArrayBuffer) \
- V(JSTypedArray) \
- V(JSDataView) \
V(JSRegExp)
// For data objects, JS objects and structs along with generic visitor which
@@ -188,79 +188,34 @@ class VisitorDispatchTable {
};
-template <typename StaticVisitor>
-class BodyVisitorBase : public AllStatic {
- public:
- INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
- int start_offset, int end_offset)) {
- DCHECK(!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout());
- IterateRawPointers(heap, object, start_offset, end_offset);
- }
-
- INLINE(static void IterateBody(Heap* heap, HeapObject* object,
- int start_offset, int end_offset)) {
- if (!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout()) {
- IterateRawPointers(heap, object, start_offset, end_offset);
- } else {
- IterateBodyUsingLayoutDescriptor(heap, object, start_offset, end_offset);
- }
- }
-
- private:
- INLINE(static void IterateRawPointers(Heap* heap, HeapObject* object,
- int start_offset, int end_offset)) {
- StaticVisitor::VisitPointers(heap, object,
- HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- }
-
- static void IterateBodyUsingLayoutDescriptor(Heap* heap, HeapObject* object,
- int start_offset,
- int end_offset) {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
-
- LayoutDescriptorHelper helper(object->map());
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
- IterateRawPointers(heap, object, offset, end_of_region_offset);
- }
- offset = end_of_region_offset;
- }
- }
-};
-
-
template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+class FlexibleBodyVisitor : public AllStatic {
public:
INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
int object_size = BodyDescriptor::SizeOf(map, object);
- BodyVisitorBase<StaticVisitor>::IterateBody(
- map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+ BodyDescriptor::template IterateBody<StaticVisitor>(object, object_size);
return static_cast<ReturnType>(object_size);
}
+ // This specialization is only suitable for objects containing pointer fields.
template <int object_size>
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
DCHECK(BodyDescriptor::SizeOf(map, object) == object_size);
- BodyVisitorBase<StaticVisitor>::IteratePointers(
- map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+ DCHECK(!FLAG_unbox_double_fields || map->HasFastPointerLayout());
+ StaticVisitor::VisitPointers(
+ object->GetHeap(), object,
+ HeapObject::RawField(object, BodyDescriptor::kStartOffset),
+ HeapObject::RawField(object, object_size));
return static_cast<ReturnType>(object_size);
}
};
template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+class FixedBodyVisitor : public AllStatic {
public:
INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
- BodyVisitorBase<StaticVisitor>::IterateBody(map->GetHeap(), object,
- BodyDescriptor::kStartOffset,
- BodyDescriptor::kEndOffset);
+ BodyDescriptor::template IterateBody<StaticVisitor>(object);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
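Both visitors above now delegate to BodyDescriptor::IterateBody<StaticVisitor>, so layout knowledge lives in the descriptor instead of a shared BodyVisitorBase. A minimal sketch of that pattern with simplified stand-in types (slot indices instead of byte offsets):

#include <cstdio>

struct Object {
  void* fields[4];
};

struct FixedBodyDescriptor {
  static const int kStartOffset = 0;
  static const int kEndOffset = 4;
  // The descriptor knows the object's layout and is parameterized over a
  // static visitor, mirroring the new IterateBody<StaticVisitor> shape.
  template <typename StaticVisitor>
  static void IterateBody(Object* obj) {
    for (int i = kStartOffset; i < kEndOffset; i++) {
      StaticVisitor::VisitPointer(&obj->fields[i]);
    }
  }
};

struct PrintingVisitor {
  static void VisitPointer(void** slot) {
    std::printf("visiting slot %p\n", static_cast<void*>(slot));
  }
};

int main() {
  Object o{};
  FixedBodyDescriptor::IterateBody<PrintingVisitor>(&o);
}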
@@ -293,25 +248,20 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
+ for (Object** p = start; p < end; p++) {
+ StaticVisitor::VisitPointer(heap, object, p);
+ }
}
- private:
- INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
- Heap* heap = map->GetHeap();
- VisitPointers(heap, object,
- HeapObject::RawField(object, JSFunction::kPropertiesOffset),
- HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
- // Don't visit code entry. We are using this visitor only during scavenges.
-
- VisitPointers(
- heap, object, HeapObject::RawField(
- object, JSFunction::kCodeEntryOffset + kPointerSize),
- HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
- return JSFunction::kSize;
+ // Although we are using the JSFunction body descriptor, which does not
+ // visit the code entry, the compiler wants it to be accessible.
+ // See JSFunction::BodyDescriptorImpl.
+ INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
+ Address entry_address)) {
+ UNREACHABLE();
}
+ private:
INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
@@ -321,10 +271,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FixedDoubleArray::SizeFor(length);
}
- INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
- return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- }
-
INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
return JSObjectVisitor::Visit(map, object);
}
@@ -340,12 +286,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
}
INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
- return FreeSpace::cast(object)->Size();
+ return FreeSpace::cast(object)->size();
}
INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
- INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
- INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
class DataObjectVisitor {
@@ -402,6 +346,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
+ INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
@@ -415,10 +360,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Skip the weak next code link in a code object.
INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
- // Mark non-optimize code for functions inlined into the given optimized
- // code. This will prevent it from being flushed.
- static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
-
protected:
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
@@ -428,19 +369,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
- INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
- INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
- // Mark pointers in a Map and its TransitionArray together, possibly
- // treating transitions or back pointers weak.
+ // Mark pointers in a Map, treating some elements of the descriptor array
+ // as weak.
static void MarkMapContents(Heap* heap, Map* map);
- static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
-
- // Mark pointers in the optimized code map that should act as strong
- // references, possibly treating some entries weak.
- static void MarkOptimizedCodeMap(Heap* heap, FixedArray* code_map);
// Code flushing support.
INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
@@ -450,8 +383,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// references to code objects either strongly or weakly.
static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
- static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
- static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+ static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
+ static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
class DataObjectVisitor {
public:
@@ -491,7 +424,7 @@ class WeakObjectRetainer;
// access the next-element pointers.
template <class T>
Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OBJECTS_VISITING_H_
diff --git a/chromium/v8/src/heap/scavenge-job.cc b/chromium/v8/src/heap/scavenge-job.cc
index c3804436fbd..52ba97a9c7b 100644
--- a/chromium/v8/src/heap/scavenge-job.cc
+++ b/chromium/v8/src/heap/scavenge-job.cc
@@ -17,7 +17,7 @@ namespace internal {
const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
- Heap* heap = isolate_->heap();
+ Heap* heap = isolate()->heap();
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
@@ -112,5 +112,5 @@ void ScavengeJob::ScheduleIdleTask(Heap* heap) {
}
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/scavenge-job.h b/chromium/v8/src/heap/scavenge-job.h
index c9e508ec527..56299a154b2 100644
--- a/chromium/v8/src/heap/scavenge-job.h
+++ b/chromium/v8/src/heap/scavenge-job.h
@@ -74,7 +74,7 @@ class ScavengeJob {
bool idle_task_rescheduled_;
int bytes_allocated_since_the_last_task_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SCAVENGE_JOB_H_
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 6ac64f2eb69..cd35c7d7e33 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -28,7 +28,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- Heap::UpdateAllocationSiteFeedback(object, Heap::IGNORE_SCRATCHPAD_SLOT);
+ object->GetHeap()->UpdateAllocationSite(
+ object, object->GetHeap()->global_pretenuring_feedback_);
// AllocationMementos are unrooted and shouldn't survive a scavenge
DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
@@ -38,7 +39,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, Object** p) {
+void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
+ Object** p) {
Object* object = *p;
if (!heap->InNewSpace(object)) return;
Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index 74ed665c3fe..40aeb74aa95 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -67,23 +67,10 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitJSWeakCollection,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- table_.Register(kVisitJSTypedArray,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSDataView,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- if (marks_handling == IGNORE_MARKS) {
- table_.Register(
- kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- JSFunction::kSize>);
- } else {
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
- }
+ table_.Register(kVisitJSFunction, &EvacuateJSFunction);
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject, kVisitDataObjectGeneric>();
@@ -199,12 +186,7 @@ class ScavengingVisitor : public StaticVisitorBase {
*slot = target;
if (object_contents == POINTER_OBJECT) {
- if (map->instance_type() == JS_FUNCTION_TYPE) {
- heap->promotion_queue()->insert(target,
- JSFunction::kNonWeakFieldsEndOffset);
- } else {
- heap->promotion_queue()->insert(target, object_size);
- }
+ heap->promotion_queue()->insert(target, object_size);
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
@@ -216,7 +198,7 @@ class ScavengingVisitor : public StaticVisitorBase {
template <ObjectContents object_contents, AllocationAlignment alignment>
static inline void EvacuateObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
- SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
@@ -236,14 +218,15 @@ class ScavengingVisitor : public StaticVisitorBase {
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
- UNREACHABLE();
+ FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
}
static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- JSFunction::kSize>(map, slot, object);
+ ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
+
+ if (marks_handling == IGNORE_MARKS) return;
MapWord map_word = object->map_word();
DCHECK(map_word.IsForwardingAddress());
@@ -266,7 +249,8 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
HeapObject* object) {
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
+ int object_size = FixedArray::SizeFor(length);
EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
object_size);
}
@@ -283,28 +267,16 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- FixedTypedArrayBase* target =
- reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- if (target->base_pointer() != Smi::FromInt(0))
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ object_size);
}
static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- FixedTypedArrayBase* target =
- reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- if (target->base_pointer() != Smi::FromInt(0))
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
+ object_size);
}
diff --git a/chromium/v8/src/heap/scavenger.h b/chromium/v8/src/heap/scavenger.h
index 44da98c86c7..5d0abf49d38 100644
--- a/chromium/v8/src/heap/scavenger.h
+++ b/chromium/v8/src/heap/scavenger.h
@@ -48,8 +48,8 @@ class ScavengeVisitor : public ObjectVisitor {
public:
explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p);
- void VisitPointers(Object** start, Object** end);
+ void VisitPointer(Object** p) override;
+ void VisitPointers(Object** start, Object** end) override;
private:
inline void ScavengePointer(Object** p);
@@ -63,7 +63,7 @@ class ScavengeVisitor : public ObjectVisitor {
class StaticScavengeVisitor
: public StaticNewSpaceVisitor<StaticScavengeVisitor> {
public:
- static inline void VisitPointer(Heap* heap, Object** p);
+ static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
};
} // namespace internal
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index a12ed6f296d..3023fbf51ea 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
@@ -49,20 +50,21 @@ Page* PageIterator::next() {
// SemiSpaceIterator
HeapObject* SemiSpaceIterator::Next() {
- if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- DCHECK(!page->is_anchor());
- current_ = page->area_start();
- if (current_ == limit_) return NULL;
+ while (current_ != limit_) {
+ if (NewSpacePage::IsAtEnd(current_)) {
+ NewSpacePage* page = NewSpacePage::FromLimit(current_);
+ page = page->next_page();
+ DCHECK(!page->is_anchor());
+ current_ = page->area_start();
+ if (current_ == limit_) return nullptr;
+ }
+ HeapObject* object = HeapObject::FromAddress(current_);
+ current_ += object->Size();
+ if (!object->IsFiller()) {
+ return object;
+ }
}
-
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = object->Size();
-
- current_ += size;
- return object;
+ return nullptr;
}
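The rewritten SemiSpaceIterator::Next above loops until it finds a non-filler object rather than returning whatever sits at the cursor. A simplified sketch of the same loop shape, with an illustrative HeapObj type in place of V8's:

#include <cstddef>
#include <vector>

struct HeapObj {
  std::size_t size;
  bool is_filler;
};

HeapObj* Next(std::vector<HeapObj>& space, std::size_t& cursor) {
  while (cursor < space.size()) {
    HeapObj* object = &space[cursor];
    ++cursor;                               // advance past the current object
    if (!object->is_filler) return object;  // skip fillers, like IsFiller()
  }
  return nullptr;                           // reached the limit
}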
@@ -133,7 +135,12 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
}
if (!obj->IsFiller()) {
- DCHECK_OBJECT_SIZE(obj_size);
+ if (obj->IsCode()) {
+ DCHECK_EQ(space_, space_->heap()->code_space());
+ DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+ } else {
+ DCHECK_OBJECT_SIZE(obj_size);
+ }
return obj;
}
}
@@ -188,7 +195,7 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
page->mutex_ = new base::Mutex();
- DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+ DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
@@ -314,6 +321,24 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}
+AllocationResult LocalAllocationBuffer::AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ Address current_top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+ Address new_top = current_top + filler_size + size_in_bytes;
+ if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+
+ allocation_info_.set_top(new_top);
+ if (filler_size > 0) {
+ return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
+ return AllocationResult(HeapObject::FromAddress(current_top));
+}
+
+
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -446,7 +471,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
Address top = allocation_info_.top();
- if (allocation_info_.limit() - top < size_in_bytes) {
+ if (allocation_info_.limit() < top + size_in_bytes) {
// See if we can create room.
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
return AllocationResult::Retry();
@@ -477,6 +502,13 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
}
+MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return AllocateRaw(size_in_bytes, alignment);
+}
+
+
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
return static_cast<LargePage*>(chunk);
@@ -487,7 +519,35 @@ intptr_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
+
+LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
+ return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
+}
+
+
+LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
+ AllocationResult result,
+ intptr_t size) {
+ if (result.IsRetry()) return InvalidBuffer();
+ HeapObject* obj = nullptr;
+ bool ok = result.To(&obj);
+ USE(ok);
+ DCHECK(ok);
+ Address top = HeapObject::cast(obj)->address();
+ return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}
-} // namespace v8::internal
+
+
+bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
+ if (allocation_info_.top() == other->allocation_info_.limit()) {
+ allocation_info_.set_top(other->allocation_info_.top());
+ other->allocation_info_.Reset(nullptr, nullptr);
+ return true;
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SPACES_INL_H_
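Much of this file's additions implement LocalAllocationBuffer (LAB): a pre-reserved chunk that a thread bump-allocates from without synchronization, with adjacent buffers fused via TryMerge. A standalone sketch of the core mechanics, with simplified types that are not the real class:

#include <cstddef>
#include <cstdint>

struct Lab {
  std::uintptr_t top = 0;
  std::uintptr_t limit = 0;

  bool IsValid() const { return top != 0; }

  // Mirrors AllocateRawAligned minus the alignment-filler handling: bump
  // the top pointer, or signal retry when the buffer is exhausted.
  void* Allocate(std::size_t size) {
    std::uintptr_t new_top = top + size;
    if (new_top > limit) return nullptr;  // caller must fetch a new LAB
    void* result = reinterpret_cast<void*>(top);
    top = new_top;
    return result;
  }

  // Mirrors TryMerge: if `other` ends exactly where this buffer starts,
  // extend this buffer downward and invalidate `other`.
  bool TryMerge(Lab* other) {
    if (top == other->limit) {
      top = other->top;
      other->top = other->limit = 0;
      return true;
    }
    return false;
  }
};

A caller would request one chunk from the shared space (the FromResult path above) and then serve many small allocations from it locally, only returning to the space when the LAB runs dry.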
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index cd8a72951c6..90d252abb59 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -924,7 +924,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
- static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+ static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
}
chunk->IncrementLiveBytes(by);
}
@@ -954,13 +954,11 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
: Space(heap, space, executable),
free_list_(this),
- unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
anchor_.InitializeAsAnchor(this);
}
@@ -983,6 +981,101 @@ void PagedSpace::TearDown() {
}
+void PagedSpace::AddMemory(Address start, intptr_t size) {
+ accounting_stats_.ExpandSpace(static_cast<int>(size));
+ Free(start, static_cast<int>(size));
+}
+
+
+FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
+ FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
+ if (free_space != nullptr) {
+ accounting_stats_.DecreaseCapacity(free_space->size());
+ }
+ return free_space;
+}
+
+
+void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
+ int num, intptr_t limit) {
+ DCHECK_GT(num, 0);
+ DCHECK(other != nullptr);
+
+ if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+ EmptyAllocationInfo();
+
+ bool memory_available = true;
+ bool spaces_need_memory = true;
+ FreeSpace* node = nullptr;
+ CompactionSpace* current_space = nullptr;
+ // Iterate over spaces and memory as long as we have memory and there are
+ // spaces in need of some.
+ while (memory_available && spaces_need_memory) {
+ spaces_need_memory = false;
+ // Round-robin over all spaces.
+ for (int i = 0; i < num; i++) {
+ current_space = other[i]->Get(identity());
+ if (current_space->free_list()->Available() < limit) {
+ // Space has not reached its limit. Try to get some memory.
+ spaces_need_memory = true;
+ node = TryRemoveMemory(limit - current_space->free_list()->Available());
+ if (node != nullptr) {
+ CHECK(current_space->identity() == identity());
+ current_space->AddMemory(node->address(), node->size());
+ } else {
+ memory_available = false;
+ break;
+ }
+ }
+ }
+ }
+}
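DivideUponCompactionSpaces hands free-list memory to compaction spaces round-robin until either the donor runs dry or every space has reached its limit. A simplified standalone sketch of that loop, with illustrative types:

#include <vector>

struct Consumer {
  long available = 0;
};

// Returns how much of the donor's budget was actually handed out.
long Distribute(long donor_budget, std::vector<Consumer>& consumers,
                long limit) {
  bool memory_available = true;
  bool need_memory = true;
  long handed_out = 0;
  while (memory_available && need_memory) {
    need_memory = false;
    for (Consumer& c : consumers) {  // round-robin over all spaces
      if (c.available < limit) {
        need_memory = true;
        long want = limit - c.available;
        long give = want < donor_budget ? want : donor_budget;
        if (give == 0) {             // donor is exhausted
          memory_available = false;
          break;
        }
        c.available += give;
        donor_budget -= give;
        handed_out += give;
      }
    }
  }
  return handed_out;
}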
+
+
+void PagedSpace::RefillFreeList() {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ FreeList* free_list = nullptr;
+ if (this == heap()->old_space()) {
+ free_list = collector->free_list_old_space().get();
+ } else if (this == heap()->code_space()) {
+ free_list = collector->free_list_code_space().get();
+ } else if (this == heap()->map_space()) {
+ free_list = collector->free_list_map_space().get();
+ } else {
+ // Any PagedSpace might invoke RefillFreeList. We filter out all but our
+ // old-generation spaces.
+ return;
+ }
+ DCHECK(free_list != nullptr);
+ intptr_t added = free_list_.Concatenate(free_list);
+ accounting_stats_.IncreaseCapacity(added);
+}
+
+
+void CompactionSpace::RefillFreeList() {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ FreeList* free_list = nullptr;
+ if (identity() == OLD_SPACE) {
+ free_list = collector->free_list_old_space().get();
+ } else if (identity() == CODE_SPACE) {
+ free_list = collector->free_list_code_space().get();
+ } else {
+ // Compaction spaces only represent old or code space.
+ UNREACHABLE();
+ }
+ DCHECK(free_list != nullptr);
+ intptr_t refilled = 0;
+ while (refilled < kCompactionMemoryWanted) {
+ FreeSpace* node =
+ free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
+ if (node == nullptr) return;
+ refilled += node->size();
+ AddMemory(node->address(), node->size());
+ }
+}
+
+
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
@@ -992,29 +1085,33 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
// Move over the free list. Concatenate makes sure that the source free list
// gets properly reset after moving over all nodes.
- intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+ intptr_t added = free_list_.Concatenate(other->free_list());
// Moved memory is not recorded as allocated memory, but rather increases and
- // decreases capacity of the corresponding spaces. Used size and waste size
- // are maintained by the receiving space upon allocating and freeing blocks.
- other->accounting_stats_.DecreaseCapacity(freed_bytes);
- accounting_stats_.IncreaseCapacity(freed_bytes);
+ // decreases capacity of the corresponding spaces.
+ other->accounting_stats_.DecreaseCapacity(added);
+ accounting_stats_.IncreaseCapacity(added);
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Unmerged fields:
// area_size_
- // allocation_info_
- // end_of_unswept_pages_
- // unswept_free_bytes_
// anchor_
MoveOverFreeMemory(other);
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
- other->accounting_stats_.Reset();
+ other->accounting_stats_.Clear();
+
+ // The linear allocation area of {other} should be destroyed now.
+ DCHECK(other->top() == nullptr);
+ DCHECK(other->limit() == nullptr);
+
+ DCHECK(other->end_of_unswept_pages_ == nullptr);
+
+ AccountCommitted(other->CommittedMemory());
// Move over pages.
PageIterator it(other);
@@ -1094,6 +1191,8 @@ bool PagedSpace::Expand() {
executable());
if (p == NULL) return false;
+ AccountCommitted(static_cast<intptr_t>(p->size()));
+
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
@@ -1138,8 +1237,6 @@ void PagedSpace::ReleasePage(Page* page) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
DCHECK_EQ(AreaSize(), static_cast<int>(size));
- } else {
- DecreaseUnsweptFreeBytes(page);
}
if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
@@ -1150,8 +1247,7 @@ void PagedSpace::ReleasePage(Page* page) {
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
}
// If page is still in a list, unlink it from that list.
@@ -1160,6 +1256,7 @@ void PagedSpace::ReleasePage(Page* page) {
page->Unlink();
}
+ AccountUncommitted(static_cast<intptr_t>(page->size()));
heap()->QueueMemoryChunkForFree(page);
DCHECK(Capacity() > 0);
@@ -1291,8 +1388,8 @@ void NewSpace::TearDown() {
}
start_ = NULL;
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
+
to_space_.TearDown();
from_space_.TearDown();
@@ -1381,10 +1478,50 @@ void NewSpace::Shrink() {
}
+void LocalAllocationBuffer::Close() {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
+ AllocationInfo allocation_info)
+ : heap_(heap), allocation_info_(allocation_info) {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(
+ const LocalAllocationBuffer& other) {
+ *this = other;
+}
+
+
+LocalAllocationBuffer& LocalAllocationBuffer::operator=(
+ const LocalAllocationBuffer& other) {
+ Close();
+ heap_ = other.heap_;
+ allocation_info_ = other.allocation_info_;
+
+ // This is needed since we (a) cannot yet use move semantics, (b) want to
+ // make the class easy to use by passing it around by value, and (c)
+ // implicitly call {Close} upon copy.
+ const_cast<LocalAllocationBuffer&>(other)
+ .allocation_info_.Reset(nullptr, nullptr);
+ return *this;
+}
+
+
void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(to_space_.page_low());
- allocation_info_.set_limit(to_space_.page_high());
+ allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
UpdateInlineAllocationLimit(0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1400,7 +1537,7 @@ void NewSpace::ResetAllocationInfo() {
while (it.has_next()) {
Bitmap::Clear(it.next());
}
- InlineAllocationStep(old_top, allocation_info_.top());
+ InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
@@ -1410,14 +1547,15 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_limit_step_ == 0) {
+ } else if (inline_allocation_observers_paused_ ||
+ top_on_previous_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
// Lower limit during incremental marking.
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
- Address new_limit = new_top + inline_allocation_limit_step_;
+ Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
allocation_info_.set_limit(Min(new_limit, high));
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -1466,6 +1604,12 @@ bool NewSpace::AddFreshPage() {
}
+bool NewSpace::AddFreshPageSynchronized() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return AddFreshPage();
+}
+
+
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
@@ -1479,7 +1623,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- InlineAllocationStep(old_top, allocation_info_.top());
+ InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
old_top = allocation_info_.top();
high = to_space_.page_high();
@@ -1495,19 +1639,75 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
// or because idle scavenge job wants to get a chance to post a task.
// Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
- InlineAllocationStep(new_top, new_top);
+ Address soon_object = old_top + filler_size;
+ InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
}
return true;
}
-void NewSpace::InlineAllocationStep(Address top, Address new_top) {
+void NewSpace::StartNextInlineAllocationStep() {
+ if (!inline_allocation_observers_paused_) {
+ top_on_previous_step_ =
+ inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+ UpdateInlineAllocationLimit(0);
+ }
+}
+
+
+intptr_t NewSpace::GetNextInlineAllocationStepSize() {
+ intptr_t next_step = 0;
+ for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+ InlineAllocationObserver* o = inline_allocation_observers_[i];
+ next_step = next_step ? Min(next_step, o->bytes_to_next_step())
+ : o->bytes_to_next_step();
+ }
+ DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+ return next_step;
+}
+
+
+void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
+ inline_allocation_observers_.Add(observer);
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::RemoveInlineAllocationObserver(
+ InlineAllocationObserver* observer) {
+ bool removed = inline_allocation_observers_.RemoveElement(observer);
+ // Only used in assertion. Suppress unused variable warning.
+ static_cast<void>(removed);
+ DCHECK(removed);
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::PauseInlineAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ InlineAllocationStep(top(), top(), nullptr, 0);
+ inline_allocation_observers_paused_ = true;
+ top_on_previous_step_ = 0;
+ UpdateInlineAllocationLimit(0);
+}
+
+
+void NewSpace::ResumeInlineAllocationObservers() {
+ DCHECK(top_on_previous_step_ == 0);
+ inline_allocation_observers_paused_ = false;
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::InlineAllocationStep(Address top, Address new_top,
+ Address soon_object, size_t size) {
if (top_on_previous_step_) {
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
- heap()->ScheduleIdleScavengeIfNeeded(bytes_allocated);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
+ for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+ inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
+ soon_object, size);
+ }
top_on_previous_step_ = new_top;
}
}
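InlineAllocationStep now fans out to registered observers instead of calling the heap directly, and UpdateInlineAllocationLimit lowers the allocation limit to the smallest outstanding observer step. A minimal sketch of that observer bookkeeping, with illustrative types:

#include <algorithm>
#include <cstddef>
#include <vector>

class Observer {
 public:
  explicit Observer(std::size_t step) : step_(step) {}
  std::size_t bytes_to_next_step() const { return step_; }
  void Step(std::size_t bytes_allocated) {
    (void)bytes_allocated;  // e.g. schedule an incremental-marking step here
  }

 private:
  std::size_t step_;
};

// Equivalent of GetNextInlineAllocationStepSize: the minimum over all
// observers, or zero when none are registered.
std::size_t NextStepSize(const std::vector<Observer*>& observers) {
  std::size_t next = 0;
  for (Observer* o : observers) {
    next = next ? std::min(next, o->bytes_to_next_step())
                : o->bytes_to_next_step();
  }
  return next;
}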
@@ -1586,7 +1786,6 @@ void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
total_capacity_ = initial_capacity;
target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- maximum_committed_ = 0;
committed_ = false;
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
@@ -1609,6 +1808,7 @@ bool SemiSpace::Commit() {
start_, total_capacity_, executable())) {
return false;
}
+ AccountCommitted(total_capacity_);
NewSpacePage* current = anchor();
for (int i = 0; i < pages; i++) {
@@ -1632,6 +1832,8 @@ bool SemiSpace::Uncommit() {
total_capacity_)) {
return false;
}
+ AccountUncommitted(total_capacity_);
+
anchor()->set_next_page(anchor());
anchor()->set_prev_page(anchor());
@@ -1668,6 +1870,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
start_ + total_capacity_, delta, executable())) {
return false;
}
+ AccountCommitted(static_cast<intptr_t>(delta));
SetCapacity(new_capacity);
NewSpacePage* last_page = anchor()->prev_page();
DCHECK(last_page != anchor());
@@ -1698,6 +1901,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
+ AccountUncommitted(static_cast<intptr_t>(delta));
int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
@@ -1783,9 +1987,6 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
void SemiSpace::SetCapacity(int new_capacity) {
total_capacity_ = new_capacity;
- if (total_capacity_ > maximum_committed_) {
- maximum_committed_ = total_capacity_;
- }
}
@@ -2066,12 +2267,6 @@ size_t NewSpace::CommittedPhysicalMemory() {
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top() != NULL) {
- // This is safe (not going to deadlock) since Concatenate operations
- // are never performed on the same free lists at the same time in
- // reverse order. Furthermore, we only lock if the PagedSpace containing
- // the free list is know to be globally available, i.e., not local.
- if (!this->owner()->owner()->is_local()) mutex()->Lock();
- if (!category->owner()->owner()->is_local()) category->mutex()->Lock();
DCHECK(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
@@ -2080,40 +2275,46 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
category->end()->set_next(top());
}
set_top(category->top());
- base::NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
- if (!category->owner()->owner()->is_local()) category->mutex()->Unlock();
- if (!this->owner()->owner()->is_local()) mutex()->Unlock();
}
return free_bytes;
}
void FreeListCategory::Reset() {
- set_top(NULL);
- set_end(NULL);
- set_available(0);
+ set_top(nullptr);
+ set_end(nullptr);
+ available_ = 0;
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
- int sum = 0;
- FreeSpace* t = top();
- FreeSpace** n = &t;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = *n;
- sum += free_space->Size();
- *n = (*n)->next();
- } else {
- n = (*n)->next_address();
+ intptr_t sum = 0;
+ FreeSpace* prev_node = nullptr;
+ for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ cur_node = cur_node->next()) {
+ Page* page_for_node = Page::FromAddress(cur_node->address());
+ if (page_for_node == p) {
+ // FreeSpace node on eviction page found, unlink it.
+ int size = cur_node->size();
+ sum += size;
+ DCHECK((prev_node != nullptr) || (top() == cur_node));
+ if (cur_node == top()) {
+ set_top(cur_node->next());
+ }
+ if (cur_node == end()) {
+ set_end(prev_node);
+ }
+ if (prev_node != nullptr) {
+ prev_node->set_next(cur_node->next());
+ }
+ continue;
}
+ prev_node = cur_node;
}
- set_top(t);
- if (top() == NULL) {
- set_end(NULL);
- }
+ DCHECK_EQ(p->available_in_free_list(type_), sum);
+ p->add_available_in_free_list(type_, -sum);
available_ -= sum;
return sum;
}
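The rewritten EvictFreeListItemsInList walks the singly linked list with a trailing prev pointer and splices out every node on the evicted page, fixing up head and tail as it goes. A standalone sketch of that unlink pattern under an assumed Node type:

struct Node {
  int size;
  bool on_evicted_page;
  Node* next;
};

long EvictMatching(Node*& head, Node*& tail) {
  long evicted = 0;
  Node* prev = nullptr;
  for (Node* cur = head; cur != nullptr; cur = cur->next) {
    if (cur->on_evicted_page) {
      evicted += cur->size;
      if (cur == head) head = cur->next;  // unlink at the head
      if (cur == tail) tail = prev;       // tail falls back to prev
      if (prev != nullptr) prev->next = cur->next;
      continue;                           // prev stays where it was
    }
    prev = cur;
  }
  return evicted;
}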
@@ -2131,25 +2332,25 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
FreeSpace* node = top();
+ if (node == nullptr) return nullptr;
- if (node == NULL) return NULL;
-
- while (node != NULL &&
- Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= node->Size();
+ Page* page = Page::FromAddress(node->address());
+ while ((node != nullptr) && !page->CanAllocate()) {
+ available_ -= node->size();
+ page->add_available_in_free_list(type_, -(node->Size()));
node = node->next();
}
- if (node != NULL) {
+ if (node != nullptr) {
set_top(node->next());
*node_size = node->Size();
available_ -= *node_size;
} else {
- set_top(NULL);
+ set_top(nullptr);
}
- if (top() == NULL) {
- set_end(NULL);
+ if (top() == nullptr) {
+ set_end(nullptr);
}
return node;
@@ -2159,15 +2360,52 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
int* node_size) {
FreeSpace* node = PickNodeFromList(node_size);
- if (node != NULL && *node_size < size_in_bytes) {
+ if ((node != nullptr) && (*node_size < size_in_bytes)) {
Free(node, *node_size);
*node_size = 0;
- return NULL;
+ return nullptr;
}
return node;
}
+FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
+ int* node_size) {
+ FreeSpace* prev_non_evac_node = nullptr;
+ for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ cur_node = cur_node->next()) {
+ int size = cur_node->size();
+ Page* page_for_node = Page::FromAddress(cur_node->address());
+
+ if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
+ // The node is either large enough or contained in an evacuation
+ // candidate. In both cases we need to unlink it from the list.
+ available_ -= size;
+ if (cur_node == top()) {
+ set_top(cur_node->next());
+ }
+ if (cur_node == end()) {
+ set_end(prev_non_evac_node);
+ }
+ if (prev_non_evac_node != nullptr) {
+ prev_non_evac_node->set_next(cur_node->next());
+ }
+ // For evacuation candidates we continue.
+ if (!page_for_node->CanAllocate()) {
+ page_for_node->add_available_in_free_list(type_, -size);
+ continue;
+ }
+ // Otherwise we have a large enough node and can return.
+ *node_size = size;
+ return cur_node;
+ }
+
+ prev_non_evac_node = cur_node;
+ }
+ return nullptr;
+}
+
+
void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
free_space->set_next(top());
set_top(free_space);
@@ -2194,22 +2432,38 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
FreeList::FreeList(PagedSpace* owner)
: owner_(owner),
- heap_(owner->heap()),
- small_list_(this),
- medium_list_(this),
- large_list_(this),
- huge_list_(this) {
+ wasted_bytes_(0),
+ small_list_(this, kSmall),
+ medium_list_(this, kMedium),
+ large_list_(this, kLarge),
+ huge_list_(this, kHuge) {
Reset();
}
-intptr_t FreeList::Concatenate(FreeList* free_list) {
- intptr_t free_bytes = 0;
- free_bytes += small_list_.Concatenate(free_list->small_list());
- free_bytes += medium_list_.Concatenate(free_list->medium_list());
- free_bytes += large_list_.Concatenate(free_list->large_list());
- free_bytes += huge_list_.Concatenate(free_list->huge_list());
- return free_bytes;
+intptr_t FreeList::Concatenate(FreeList* other) {
+ intptr_t usable_bytes = 0;
+ intptr_t wasted_bytes = 0;
+
+ // This is safe (not going to deadlock) since Concatenate operations
+ // are never performed on the same free lists at the same time in
+ // reverse order. Furthermore, we only lock if the PagedSpace containing
+ // the free list is known to be globally available, i.e., not local.
+ if (!owner()->is_local()) mutex_.Lock();
+ if (!other->owner()->is_local()) other->mutex()->Lock();
+
+ wasted_bytes = other->wasted_bytes_;
+ wasted_bytes_ += wasted_bytes;
+ other->wasted_bytes_ = 0;
+
+ usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
+ usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
+ usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
+ usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+
+ if (!other->owner()->is_local()) other->mutex()->Unlock();
+ if (!owner()->is_local()) mutex_.Unlock();
+ return usable_bytes + wasted_bytes;
}
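Concatenate now does the conditional locking itself: only globally shared (non-local) free lists take their mutex, and the fixed receiver-then-donor lock order plus the invariant noted in the comment rules out deadlock. A simplified sketch with illustrative types, splicing reduced to a byte count:

#include <mutex>

struct List {
  std::mutex mutex;
  bool is_local;  // thread-local compaction-space lists skip locking
  long bytes = 0;
};

long Concatenate(List& to, List& from) {
  if (!to.is_local) to.mutex.lock();
  if (!from.is_local) from.mutex.lock();
  long moved = from.bytes;  // real code splices the nodes over
  to.bytes += moved;
  from.bytes = 0;
  if (!from.is_local) from.mutex.unlock();
  if (!to.is_local) to.mutex.unlock();
  return moved;
}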
@@ -2218,19 +2472,21 @@ void FreeList::Reset() {
medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
+ ResetStats();
}
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
- heap_->CreateFillerObjectAt(start, size_in_bytes);
+ owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
if (size_in_bytes <= kSmallListMin) {
page->add_non_available_small_blocks(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
@@ -2251,89 +2507,46 @@ int FreeList::Free(Address start, int size_in_bytes) {
page->add_available_in_huge_free_list(size_in_bytes);
}
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
return 0;
}
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+ FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+ if (node != nullptr) {
+ Page::FromAddress(node->address())
+ ->add_available_in_free_list(category, -(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ return node;
+}
+
+
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeSpace* node = NULL;
- Page* page = NULL;
+ FreeSpace* node = nullptr;
+ Page* page = nullptr;
if (size_in_bytes <= kSmallAllocationMax) {
- node = small_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_small_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kSmall, node_size);
+ if (node != nullptr) return node;
}
if (size_in_bytes <= kMediumAllocationMax) {
- node = medium_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_medium_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kMedium, node_size);
+ if (node != nullptr) return node;
}
if (size_in_bytes <= kLargeAllocationMax) {
- node = large_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_large_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kLarge, node_size);
+ if (node != nullptr) return node;
}
- int huge_list_available = huge_list_.available();
- FreeSpace* top_node = huge_list_.top();
- for (FreeSpace** cur = &top_node; *cur != NULL;
- cur = (*cur)->next_address()) {
- FreeSpace* cur_node = *cur;
- while (cur_node != NULL &&
- Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- int size = cur_node->Size();
- huge_list_available -= size;
- page = Page::FromAddress(cur_node->address());
- page->add_available_in_huge_free_list(-size);
- cur_node = cur_node->next();
- }
-
- *cur = cur_node;
- if (cur_node == NULL) {
- huge_list_.set_end(NULL);
- break;
- }
-
- int size = cur_node->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- node = *cur;
- *cur = node->next();
- *node_size = size;
- huge_list_available -= size;
- page = Page::FromAddress(node->address());
- page->add_available_in_huge_free_list(-size);
- break;
- }
- }
-
- huge_list_.set_top(top_node);
- if (huge_list_.top() == NULL) {
- huge_list_.set_end(NULL);
- }
- huge_list_.set_available(huge_list_available);
-
- if (node != NULL) {
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+ if (node != nullptr) {
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2360,7 +2573,38 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
}
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+
+FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
+ hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ FreeSpace* node = nullptr;
+ int node_size = 0;
+ // Try to find a node that fits exactly.
+ node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
+ // If no node could be found get as much memory as possible.
+ if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
+ if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
+ if (node != nullptr) {
+ // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
+ // size larger than the minimum size required for FreeSpace, and (b) to get
+ // a block that can actually be freed into some FreeList later on.
+ if (hint_size_in_bytes <= kSmallListMin) {
+ hint_size_in_bytes = kSmallListMin + kPointerSize;
+ }
+ // Give back leftovers that were not required by {hint_size_in_bytes}.
+ intptr_t left_over = node_size - hint_size_in_bytes;
+
+ // Do not bother to return anything below {kSmallListMin} as it would be
+ // immediately discarded anyway.
+ if (left_over > kSmallListMin) {
+ Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
+ node->set_size(static_cast<int>(hint_size_in_bytes));
+ }
+ }
return node;
}
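TryRemoveMemory looks for a node near the hinted size and, when the node found is larger, frees the surplus back so only the requested prefix leaves the list. A sketch of that split step, with an illustrative kMinBlock standing in for kSmallListMin + kPointerSize:

#include <cstddef>

struct Block {
  char* address;
  std::size_t size;
};

const std::size_t kMinBlock = 32;  // assumed minimum freeable block size

// `free_back` stands in for FreeList::Free; here it is just a callback.
template <typename FreeFn>
Block TakeWithHint(Block node, std::size_t hint, FreeFn free_back) {
  if (hint < kMinBlock) hint = kMinBlock;  // keep the block freeable later
  if (node.size > hint + kMinBlock) {      // only split off useful leftovers
    free_back(node.address + hint, node.size - hint);
    node.size = hint;
  }
  return node;
}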
@@ -2440,17 +2684,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
- p->set_available_in_huge_free_list(0);
-
if (sum < p->area_size()) {
sum += small_list_.EvictFreeListItemsInList(p) +
medium_list_.EvictFreeListItemsInList(p) +
large_list_.EvictFreeListItemsInList(p);
- p->set_available_in_small_free_list(0);
- p->set_available_in_medium_free_list(0);
- p->set_available_in_large_free_list(0);
}
-
return sum;
}
@@ -2484,9 +2722,6 @@ intptr_t FreeListCategory::SumFreeList() {
}
-static const int kVeryLongFreeList = 500;
-
-
int FreeListCategory::FreeListLength() {
int length = 0;
FreeSpace* cur = top();
@@ -2499,12 +2734,14 @@ int FreeListCategory::FreeListLength() {
}
+bool FreeListCategory::IsVeryLong() {
+ return FreeListLength() == kVeryLongFreeList;
+}
+
+
bool FreeList::IsVeryLong() {
- if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
- return false;
+ return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
+ large_list_.IsVeryLong() || huge_list_.IsVeryLong();
}
@@ -2529,21 +2766,15 @@ void PagedSpace::PrepareForMarkCompact() {
// on the first allocation after the sweep.
EmptyAllocationInfo();
- // This counter will be increased for pages which will be swept by the
- // sweeper threads.
- unswept_free_bytes_ = 0;
-
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
}
intptr_t PagedSpace::SizeOfObjects() {
- DCHECK(!FLAG_concurrent_sweeping ||
- heap()->mark_compact_collector()->sweeping_in_progress() ||
- (unswept_free_bytes_ == 0));
- const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
- DCHECK_GE(size, 0);
+ const intptr_t size = Size() - (limit() - top());
+ CHECK_GE(limit(), top());
+ CHECK_GE(size, 0);
USE(size);
return size;
}
@@ -2568,24 +2799,20 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top())
- ->IsEvacuationCandidate()) {
+ if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
}
}
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
- int size_in_bytes) {
+HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -2595,7 +2822,17 @@ HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
// entries.
return free_list_.Allocate(size_in_bytes);
}
- return NULL;
+ return nullptr;
+}
+
+
+HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->SweepAndRefill(this);
+ return free_list_.Allocate(size_in_bytes);
+ }
+ return nullptr;
}
@@ -2607,22 +2844,17 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
if (collector->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
- collector->RefillFreeList(this);
+ RefillFreeList();
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
- int free_chunk = collector->SweepInParallel(this, size_in_bytes);
- collector->RefillFreeList(this);
- if (free_chunk >= size_in_bytes) {
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- // We should be able to allocate an object here since we just freed that
- // much memory.
- DCHECK(object != NULL);
- if (object != NULL) return object;
- }
+ collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+ RefillFreeList();
+ object = free_list_.Allocate(size_in_bytes);
+ if (object != nullptr) return object;
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2632,21 +2864,21 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
- HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
return object;
}
// Try to expand the space and allocate in the new next page.
if (Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (size_in_bytes <= free_list_.available()));
+ (size_in_bytes <= free_list_.Available()));
return free_list_.Allocate(size_in_bytes);
}
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail there, which
// would indicate that there is not enough memory for the given allocation.
- return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ return SweepAndRetryAllocation(size_in_bytes);
}
@@ -2799,11 +3031,10 @@ void PagedSpace::ReportStatistics() {
// -----------------------------------------------------------------------------
// MapSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
+#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
+#endif
// -----------------------------------------------------------------------------
@@ -2825,7 +3056,6 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
@@ -2834,7 +3064,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
size_(0),
page_count_(0),
objects_size_(0),
- chunk_map_(ComparePointers, 1024) {}
+ chunk_map_(HashMap::PointersMatch, 1024) {}
LargeObjectSpace::~LargeObjectSpace() {}
@@ -2843,7 +3073,6 @@ LargeObjectSpace::~LargeObjectSpace() {}
bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
- maximum_committed_ = 0;
page_count_ = 0;
objects_size_ = 0;
chunk_map_.Clear();
@@ -2870,8 +3099,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!heap()->always_allocate() &&
- !heap()->CanExpandOldGeneration(object_size)) {
+ if (!heap()->CanExpandOldGeneration(object_size)) {
return AllocationResult::Retry(identity());
}
@@ -2881,15 +3109,12 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
DCHECK(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
+ AccountCommitted(static_cast<intptr_t>(page->size()));
objects_size_ += object_size;
page_count_++;
page->set_next_page(first_page_);
first_page_ = page;
- if (size_ > maximum_committed_) {
- maximum_committed_ = size_;
- }
-
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
@@ -2961,7 +3186,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
while (current != NULL) {
HeapObject* object = current->GetObject();
MarkBit mark_bit = Marking::MarkBitFrom(object);
- DCHECK(Marking::IsBlackOrGrey(mark_bit));
+ DCHECK(Marking::IsBlack(mark_bit));
Marking::BlackToWhite(mark_bit);
Page::FromAddress(object->address())->ResetProgressBar();
Page::FromAddress(object->address())->ResetLiveBytes();
@@ -2976,7 +3201,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
while (current != NULL) {
HeapObject* object = current->GetObject();
MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (Marking::IsBlackOrGrey(mark_bit)) {
+ DCHECK(!Marking::IsGrey(mark_bit));
+ if (Marking::IsBlack(mark_bit)) {
previous = current;
current = current->next_page();
} else {
@@ -2993,6 +3219,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
heap()->isolate());
size_ -= static_cast<int>(page->size());
+ AccountUncommitted(static_cast<intptr_t>(page->size()));
objects_size_ -= object->Size();
page_count_--;
@@ -3048,11 +3275,6 @@ void LargeObjectSpace::Verify() {
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map));
- // Double unboxing in LO space is not allowed. This would break the
- // lookup mechanism for store and slot buffer entries which use the
- // page header tag.
- CHECK(object->ContentType() != HeapObjectContents::kMixedValues);
-
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, byte arrays, and constant pool arrays in the
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 95e3b7c6023..a8102cabc7c 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -19,6 +19,7 @@
namespace v8 {
namespace internal {
+class CompactionSpaceCollection;
class Isolate;
// -----------------------------------------------------------------------------
@@ -86,6 +87,9 @@ class Isolate;
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
+ DCHECK((0 < size) && (size <= code_space->AreaSize()))
+
#define DCHECK_PAGE_OFFSET(offset) \
DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
@@ -264,7 +268,7 @@ class Bitmap {
// Clears all bits starting from {cell_base_index} up to and excluding
// {index}. Note that {cell_base_index} is required to be cell aligned.
void ClearRange(uint32_t cell_base_index, uint32_t index) {
- DCHECK_EQ(IndexInCell(cell_base_index), 0);
+ DCHECK_EQ(IndexInCell(cell_base_index), 0u);
DCHECK_GE(index, cell_base_index);
uint32_t start_cell_index = IndexToCell(cell_base_index);
uint32_t end_cell_index = IndexToCell(index);
@@ -319,10 +323,17 @@ class MemoryChunk {
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+ // This flag is intended to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE,
+
// The memory chunk is already logically freed, however the actual freeing
// still has to be performed.
PRE_FREED,
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -584,6 +595,7 @@ class MemoryChunk {
}
live_byte_count_ = 0;
}
+
void IncrementLiveBytes(int by) {
if (FLAG_gc_verbose) {
printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
@@ -591,13 +603,21 @@ class MemoryChunk {
live_byte_count_ + by);
}
live_byte_count_ += by;
+ DCHECK_GE(live_byte_count_, 0);
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
}
+
int LiveBytes() {
- DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
+ DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
return live_byte_count_;
}
+ void SetLiveBytes(int live_bytes) {
+ DCHECK_GE(live_bytes, 0);
+ DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+ live_byte_count_ = live_bytes;
+ }
+
int write_barrier_counter() {
return static_cast<int>(write_barrier_counter_);
}
@@ -623,13 +643,6 @@ class MemoryChunk {
}
}
- bool IsLeftOfProgressBar(Object** slot) {
- Address slot_address = reinterpret_cast<Address>(slot);
- DCHECK(slot_address > this->address());
- return (slot_address - (this->address() + kObjectStartOffset)) <
- progress_bar();
- }
-
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -681,6 +694,10 @@ class MemoryChunk {
return IsFlagSet(EVACUATION_CANDIDATE);
}
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
+
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
@@ -773,6 +790,9 @@ class MemoryChunk {
};
+enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
+
+
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
@@ -832,11 +852,16 @@ class Page : public MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- // Maximum object size that fits in a page. Objects larger than that size
- // are allocated in large object space and are never moved in memory. This
- // also applies to new space allocation, since objects are never migrated
- // from new space to large object space. Takes double alignment into account.
- static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+ // Maximum object size that gets allocated into regular pages. Objects larger
+ // than that size are allocated in large object space and are never moved in
+ // memory. This also applies to new space allocation, since objects are never
+ // migrated from new space to large object space. Takes double alignment into
+ // account.
+ // TODO(hpayer): This limit should be way smaller but we currently have
+ // short living objects >256K.
+ static const int kMaxRegularHeapObjectSize = 600 * KB;
+
+ static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -874,6 +899,42 @@ class Page : public MemoryChunk {
#undef FRAGMENTATION_STATS_ACCESSORS
+ void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
+ switch (type) {
+ case kSmall:
+ add_available_in_small_free_list(bytes);
+ break;
+ case kMedium:
+ add_available_in_medium_free_list(bytes);
+ break;
+ case kLarge:
+ add_available_in_large_free_list(bytes);
+ break;
+ case kHuge:
+ add_available_in_huge_free_list(bytes);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ intptr_t available_in_free_list(FreeListCategoryType type) {
+ switch (type) {
+ case kSmall:
+ return available_in_small_free_list();
+ case kMedium:
+ return available_in_medium_free_list();
+ case kLarge:
+ return available_in_large_free_list();
+ case kHuge:
+ return available_in_huge_free_list();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return 0;
+ }
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -904,7 +965,11 @@ class LargePage : public MemoryChunk {
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap), id_(id), executable_(executable) {}
+ : heap_(heap),
+ id_(id),
+ executable_(executable),
+ committed_(0),
+ max_committed_(0) {}
virtual ~Space() {}
@@ -916,6 +981,12 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+ // Return the total amount of committed memory for this space, i.e.,
+ // allocatable memory and page headers.
+ virtual intptr_t CommittedMemory() { return committed_; }
+
+ virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
+
// Returns allocated size.
virtual intptr_t Size() = 0;
@@ -923,9 +994,6 @@ class Space : public Malloced {
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
- // Return the total amount of memory committed for new space.
- virtual intptr_t CommittedMemory() = 0;
-
// Approximate amount of physical memory committed for this space.
virtual size_t CommittedPhysicalMemory() = 0;
@@ -944,10 +1012,29 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
+ protected:
+ void AccountCommitted(intptr_t bytes) {
+ DCHECK_GE(bytes, 0);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(intptr_t bytes) {
+ DCHECK_GE(bytes, 0);
+ committed_ -= bytes;
+ DCHECK_GE(committed_, 0);
+ }
+
private:
Heap* heap_;
AllocationSpace id_;
Executability executable_;
+
+ // Keeps track of committed memory in a space.
+ intptr_t committed_;
+ intptr_t max_committed_;
};
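
As a minimal standalone sketch of the committed-memory accounting added above (the SpaceModel name, main() driver, and assertions are hypothetical; only the AccountCommitted/AccountUncommitted logic mirrors the patch):

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the new committed_/max_committed_ fields.
class SpaceModel {
 public:
  void AccountCommitted(int64_t bytes) {
    assert(bytes >= 0);
    committed_ += bytes;
    if (committed_ > max_committed_) max_committed_ = committed_;
  }
  void AccountUncommitted(int64_t bytes) {
    assert(bytes >= 0);
    committed_ -= bytes;
    assert(committed_ >= 0);
  }
  int64_t CommittedMemory() const { return committed_; }
  int64_t MaximumCommittedMemory() const { return max_committed_; }

 private:
  int64_t committed_ = 0;
  int64_t max_committed_ = 0;
};

int main() {
  SpaceModel space;
  space.AccountCommitted(1 << 20);    // commit one 1MB page
  space.AccountUncommitted(1 << 20);  // release it again
  assert(space.CommittedMemory() == 0);
  assert(space.MaximumCommittedMemory() == 1 << 20);  // high-water mark sticks
}
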
@@ -1168,7 +1255,7 @@ class MemoryAllocator {
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
}
// Returns an indication of whether a pointer is in a space that has
@@ -1244,7 +1331,7 @@ class MemoryAllocator {
static int PageAreaSize(AllocationSpace space) {
DCHECK_NE(LO_SPACE, space);
return (space == CODE_SPACE) ? CodePageAreaSize()
- : Page::kMaxRegularHeapObjectSize;
+ : Page::kAllocatableMemory;
}
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
@@ -1344,7 +1431,7 @@ class HeapObjectIterator : public ObjectIterator {
// skipping the special garbage section of which there is one per space.
// Returns NULL when the iteration has ended.
inline HeapObject* Next();
- virtual inline HeapObject* next_object();
+ inline HeapObject* next_object() override;
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
@@ -1394,7 +1481,13 @@ class PageIterator BASE_EMBEDDED {
// space.
class AllocationInfo {
public:
- AllocationInfo() : top_(NULL), limit_(NULL) {}
+ AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+ AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+
+ void Reset(Address top, Address limit) {
+ set_top(top);
+ set_limit(limit);
+ }
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
@@ -1411,15 +1504,10 @@ class AllocationInfo {
Address* top_address() { return &top_; }
INLINE(void set_limit(Address limit)) {
- SLOW_DCHECK(limit == NULL ||
- (reinterpret_cast<intptr_t>(limit) & kHeapObjectTagMask) == 0);
limit_ = limit;
}
INLINE(Address limit()) const {
- SLOW_DCHECK(limit_ == NULL ||
- (reinterpret_cast<intptr_t>(limit_) & kHeapObjectTagMask) ==
- 0);
return limit_;
}
@@ -1441,19 +1529,11 @@ class AllocationInfo {
// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
class AllocationStats BASE_EMBEDDED {
public:
AllocationStats() { Clear(); }
@@ -1463,26 +1543,23 @@ class AllocationStats BASE_EMBEDDED {
capacity_ = 0;
max_capacity_ = 0;
size_ = 0;
- waste_ = 0;
}
- void ClearSizeWaste() {
- size_ = capacity_;
- waste_ = 0;
- }
+ void ClearSize() { size_ = capacity_; }
- // Reset the allocation statistics (i.e., available = capacity with no
- // wasted or allocated bytes).
+ // Reset the allocation statistics (i.e., available = capacity with no wasted
+ // or allocated bytes).
void Reset() {
size_ = 0;
- waste_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
intptr_t MaxCapacity() { return max_capacity_; }
- intptr_t Size() { return size_; }
- intptr_t Waste() { return waste_; }
+ intptr_t Size() {
+ CHECK_GE(size_, 0);
+ return size_;
+ }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
@@ -1493,7 +1570,7 @@ class AllocationStats BASE_EMBEDDED {
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Shrink the space by removing available bytes. Since shrinking is done
@@ -1502,161 +1579,147 @@ class AllocationStats BASE_EMBEDDED {
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
- }
-
- // Waste free bytes (available -> waste).
- void WasteBytes(int size_in_bytes) {
- DCHECK(size_in_bytes >= 0);
- waste_ += size_in_bytes;
+ CHECK_GE(size_, 0);
}
// Merge {other} into {this}.
void Merge(const AllocationStats& other) {
capacity_ += other.capacity_;
size_ += other.size_;
- waste_ += other.waste_;
if (other.max_capacity_ > max_capacity_) {
max_capacity_ = other.max_capacity_;
}
+ CHECK_GE(size_, 0);
}
void DecreaseCapacity(intptr_t size_in_bytes) {
capacity_ -= size_in_bytes;
- DCHECK_GE(capacity_, 0);
+ CHECK_GE(capacity_, 0);
+ CHECK_GE(capacity_, size_);
}
void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
private:
+ // |capacity_|: The number of object-area bytes (i.e., not including page
+ // bookkeeping structures) currently in the space.
intptr_t capacity_;
+
+ // |max_capacity_|: The maximum capacity ever observed.
intptr_t max_capacity_;
+
+ // |size_|: The number of allocated bytes.
intptr_t size_;
- intptr_t waste_;
};
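
With waste_ gone, available bytes are implicitly capacity_ minus size_. A toy model of the balanced transitions described in the comments above (standalone; the Available() helper and the size_ <= capacity_ assertion are assumptions for this sketch, not part of the class):

#include <cassert>
#include <cstdint>

// Toy version of AllocationStats after the waste_ field was removed.
struct StatsModel {
  int64_t capacity_ = 0, max_capacity_ = 0, size_ = 0;

  void ExpandSpace(int64_t bytes) {  // new bytes start out as part of size
    capacity_ += bytes;
    size_ += bytes;
    if (capacity_ > max_capacity_) max_capacity_ = capacity_;
  }
  void DeallocateBytes(int64_t bytes) {
    size_ -= bytes;
    assert(size_ >= 0);
  }
  void AllocateBytes(int64_t bytes) {
    size_ += bytes;
    assert(size_ <= capacity_);  // assumed invariant for this toy model
  }
  int64_t Available() const { return capacity_ - size_; }
};

int main() {
  StatsModel stats;
  stats.ExpandSpace(4096);      // a fresh page: counted as allocated first
  stats.DeallocateBytes(4096);  // then immediately freed onto the free list
  stats.AllocateBytes(128);
  assert(stats.Available() == 4096 - 128);  // capacity == size + available
}
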
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-
-// The free list category holds a pointer to the top element and a pointer to
-// the end element of the linked list of free memory blocks.
+// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- explicit FreeListCategory(FreeList* owner)
- : top_(0), end_(NULL), available_(0), owner_(owner) {}
-
+ explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
+ : type_(type),
+ top_(nullptr),
+ end_(nullptr),
+ available_(0),
+ owner_(owner) {}
+
+ // Concatenates {category} into {this}.
+ //
+ // Note: Thread-safe.
intptr_t Concatenate(FreeListCategory* category);
void Reset();
void Free(FreeSpace* node, int size_in_bytes);
+ // Pick a node from the list.
FreeSpace* PickNodeFromList(int* node_size);
+
+ // Pick a node from the list and compare it against {size_in_bytes}. If the
+ // node's size is greater than or equal, return the node; otherwise return
+ // null.
FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
+ // Search for a node of size {size_in_bytes}.
+ FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
+
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeSpace* top() const {
- return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
- }
-
- void set_top(FreeSpace* top) {
- base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
- }
+ bool IsEmpty() { return top() == nullptr; }
- FreeSpace* end() const { return end_; }
- void set_end(FreeSpace* end) { end_ = end; }
-
- int* GetAvailableAddress() { return &available_; }
+ FreeList* owner() { return owner_; }
int available() const { return available_; }
- void set_available(int available) { available_ = available; }
-
- base::Mutex* mutex() { return &mutex_; }
-
- bool IsEmpty() { return top() == 0; }
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
+ bool IsVeryLong();
#endif
- FreeList* owner() { return owner_; }
-
private:
- // top_ points to the top FreeSpace* in the free list category.
- base::AtomicWord top_;
+ // For debug builds we accurately compute free list lengths up to
+ // {kVeryLongFreeList} by manually walking the list.
+ static const int kVeryLongFreeList = 500;
+
+ FreeSpace* top() { return top_.Value(); }
+ void set_top(FreeSpace* top) { top_.SetValue(top); }
+
+ FreeSpace* end() const { return end_; }
+ void set_end(FreeSpace* end) { end_ = end; }
+
+ // |type_|: The type of this free list category.
+ FreeListCategoryType type_;
+
+ // |top_|: Points to the top FreeSpace* in the free list category.
+ AtomicValue<FreeSpace*> top_;
+
+ // |end_|: Points to the end FreeSpace* in the free list category.
FreeSpace* end_;
- base::Mutex mutex_;
- // Total available bytes in all blocks of this free list category.
+ // |available_|: Total available bytes in all blocks of this free list
+ // category.
int available_;
+ // |owner_|: The owning free list of this category.
FreeList* owner_;
};
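
For illustration, a hypothetical selector that maps a freed block to a FreeListCategoryType according to the size classes documented below (this helper does not exist in the patch; thresholds are in words, i.e. kPointerSize units, and blocks under 32 words are discarded as waste in the real code):

#include <cstddef>

enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };

// Hypothetical: assumes size_in_bytes is at least 32 words, since smaller
// blocks never make it onto a free list.
FreeListCategoryType SelectFreeListCategory(size_t size_in_bytes) {
  const size_t words = size_in_bytes / sizeof(void*);
  if (words <= 255) return kSmall;    // 32-255 words
  if (words <= 2047) return kMedium;  // 256-2047 words
  if (words <= 16383) return kLarge;  // 2048-16383 words
  return kHuge;                       // >= 16384 words, plus empty pages
}
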
-
-// The free list for the old space. The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
+// A free list maintaining free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which
-// is divided up into rough categories to cut down on waste. Having finer
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
-// The old space free list is organized in categories.
-// 1-31 words: Such small free areas are discarded for efficiency reasons.
-// They can be reclaimed by the compactor. However the distance between top
-// and limit may be this small.
-// 32-255 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 1-31 words in size. These
-// spaces are called small.
-// 256-2047 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 32-255 words in size. These
-// spaces are called medium.
-// 1048-16383 words: There is a list of spaces this large. It is used for top
-// and limit when the object we need to allocate is 256-2047 words in size.
-// These spaces are call large.
-// At least 16384 words. This list is for objects of 2048 words or larger.
-// Empty pages are added to this list. These spaces are called huge.
+// The free list is organized in categories as follows:
+// 1-31 words (too small): Such small free areas are discarded for efficiency
+// reasons. They can be reclaimed by the compactor. However the distance
+// between top and limit may be this small.
+// 32-255 words (small): Used for allocation requests of 1-31 words in size.
+// 256-2047 words (medium): Used for allocation requests of 32-255 words in
+// size.
+// 2048-16383 words (large): Used for allocation requests of 256-2047 words
+// in size.
+// At least 16384 words (huge): This list is for objects of 2048 words or
+// larger. Empty pages are also added to this list.
class FreeList {
public:
- explicit FreeList(PagedSpace* owner);
-
- intptr_t Concatenate(FreeList* free_list);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() {
- return small_list_.available() + medium_list_.available() +
- large_list_.available() + huge_list_.available();
- }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // i.e., its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
static inline int GuaranteedAllocatable(int maximum_freed) {
@@ -1672,40 +1735,71 @@ class FreeList {
return maximum_freed;
}
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+ explicit FreeList(PagedSpace* owner);
+
+ // The method concatenates {other} into {this} and returns the added bytes,
+ // including waste.
+ //
+ // Note: Thread-safe.
+ intptr_t Concatenate(FreeList* other);
+
+ // Adds a node to the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ int Free(Address start, int size_in_bytes);
+
+ // Allocate a block of size {size_in_bytes} from the free list. The block is
+ // uninitialized. A failure is returned if no block is available. The size
+ // should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ // Clear the free list.
+ void Reset();
+
+ void ResetStats() { wasted_bytes_ = 0; }
+
+ // Return the number of bytes available on the free list.
+ intptr_t Available() {
+ return small_list_.available() + medium_list_.available() +
+ large_list_.available() + huge_list_.available();
+ }
+
+ // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
+ // bytes in the free list category exactly matching the size. If no suitable
+ // node can be found, the method falls back to retrieving a {FreeSpace}
+ // from the large or huge free list category.
+ //
+ // Can be used concurrently.
+ MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
+
bool IsEmpty() {
return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
large_list_.IsEmpty() && huge_list_.IsEmpty();
}
-#ifdef DEBUG
- void Zap();
- intptr_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
// Used after booting the VM.
void RepairLists(Heap* heap);
intptr_t EvictFreeListItems(Page* p);
bool ContainsPageFreeListItems(Page* p);
- FreeListCategory* small_list() { return &small_list_; }
- FreeListCategory* medium_list() { return &medium_list_; }
- FreeListCategory* large_list() { return &large_list_; }
- FreeListCategory* huge_list() { return &huge_list_; }
-
PagedSpace* owner() { return owner_; }
+ intptr_t wasted_bytes() { return wasted_bytes_; }
+ base::Mutex* mutex() { return &mutex_; }
+
+#ifdef DEBUG
+ void Zap();
+ intptr_t SumFreeLists();
+ bool IsVeryLong();
+#endif
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+ static const int kMaxBlockSize = Page::kAllocatableMemory;
static const int kSmallListMin = 0x1f * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
@@ -1716,9 +1810,28 @@ class FreeList {
static const int kLargeAllocationMax = kMediumListMax;
FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+ FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+ switch (category) {
+ case kSmall:
+ return &small_list_;
+ case kMedium:
+ return &medium_list_;
+ case kLarge:
+ return &large_list_;
+ case kHuge:
+ return &huge_list_;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
PagedSpace* owner_;
- Heap* heap_;
+ base::Mutex mutex_;
+ intptr_t wasted_bytes_;
FreeListCategory small_list_;
FreeListCategory medium_list_;
FreeListCategory large_list_;
@@ -1771,12 +1884,68 @@ class AllocationResult {
STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
+// LocalAllocationBuffer represents a linear allocation area that is created
+// from a given {AllocationResult} and can be used to allocate memory without
+// synchronization.
+//
+// The buffer is properly closed upon destruction and reassignment.
+// Example:
+// {
+// AllocationResult result = ...;
+// LocalAllocationBuffer a(heap, result, size);
+// LocalAllocationBuffer b = a;
+// CHECK(!a.IsValid());
+// CHECK(b.IsValid());
+// // {a} is invalid now and cannot be used for further allocations.
+// }
+// // Since {b} went out of scope, the LAB is closed, resulting in creating a
+// // filler object for the remaining area.
+class LocalAllocationBuffer {
+ public:
+ // Returns a buffer that cannot be used for allocations anymore. Such a
+ // buffer can result from either reassigning a buffer, or trying to
+ // construct it from an invalid {AllocationResult}.
+ static inline LocalAllocationBuffer InvalidBuffer();
+
+ // Creates a new LAB from a given {AllocationResult}. Results in
+ // InvalidBuffer if the result indicates a retry.
+ static inline LocalAllocationBuffer FromResult(Heap* heap,
+ AllocationResult result,
+ intptr_t size);
+
+ ~LocalAllocationBuffer() { Close(); }
+
+ // Convert to C++11 move-semantics once allowed by the style guide.
+ LocalAllocationBuffer(const LocalAllocationBuffer& other);
+ LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
+
+ MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment);
+
+ inline bool IsValid() { return allocation_info_.top() != nullptr; }
+
+ // Try to merge LABs, which is only possible when they are adjacent in memory.
+ // Returns true if the merge was successful, false otherwise.
+ inline bool TryMerge(LocalAllocationBuffer* other);
+
+ private:
+ LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+
+ void Close();
+
+ Heap* heap_;
+ AllocationInfo allocation_info_;
+};
+
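
A usage sketch against the declarations above (assumes a live Heap* and uses AllocateRawSynchronized from the NewSpace additions below; the 4KB/32-byte sizes and the kWordAligned choice are arbitrary, and this snippet is not buildable outside the V8 tree):

// Carve a 4KB LAB out of new space, then bump-allocate from it without
// further synchronization. Error handling is elided.
AllocationResult result =
    heap->new_space()->AllocateRawSynchronized(4 * KB, kWordAligned);
LocalAllocationBuffer lab =
    LocalAllocationBuffer::FromResult(heap, result, 4 * KB);
if (lab.IsValid()) {
  AllocationResult object = lab.AllocateRawAligned(32, kWordAligned);
  // ... initialize the object. When {lab} goes out of scope it is closed
  // and the unused tail becomes a filler object.
}
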
+
class PagedSpace : public Space {
public:
+ static const intptr_t kCompactionMemoryWanted = 500 * KB;
+
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
- virtual ~PagedSpace() { TearDown(); }
+ ~PagedSpace() override { TearDown(); }
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
@@ -1811,13 +1980,6 @@ class PagedSpace : public Space {
// Current capacity without growing (Size() + Available()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
- // Total amount of memory committed for this space. For paged
- // spaces this equals the capacity.
- intptr_t CommittedMemory() override { return Capacity(); }
-
- // The maximum amount of memory ever committed for this space.
- intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
-
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -1829,7 +1991,8 @@ class PagedSpace : public Space {
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals.
void ClearStats() {
- accounting_stats_.ClearSizeWaste();
+ accounting_stats_.ClearSize();
+ free_list_.ResetStats();
ResetFreeListStatistics();
}
@@ -1842,7 +2005,7 @@ class PagedSpace : public Space {
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- intptr_t Available() override { return free_list_.available(); }
+ intptr_t Available() override { return free_list_.Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
@@ -1855,9 +2018,8 @@ class PagedSpace : public Space {
intptr_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation. They do not include the
- // free bytes that were not found at all due to lazy sweeping.
- virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+ // due to being too small to use for allocation.
+ virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@@ -1896,7 +2058,6 @@ class PagedSpace : public Space {
int Free(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes);
accounting_stats_.DeallocateBytes(size_in_bytes);
- accounting_stats_.WasteBytes(wasted);
return size_in_bytes - wasted;
}
@@ -1907,8 +2068,7 @@ class PagedSpace : public Space {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(top);
- allocation_info_.set_limit(limit);
+ allocation_info_.Reset(top, limit);
}
// Empty space allocation info, returning unused area to free list.
@@ -1959,22 +2119,6 @@ class PagedSpace : public Space {
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
}
- void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
-
- void IncreaseUnsweptFreeBytes(Page* p) {
- DCHECK(ShouldBeSweptBySweeperThreads(p));
- unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
- }
-
- void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
-
- void DecreaseUnsweptFreeBytes(Page* p) {
- DCHECK(ShouldBeSweptBySweeperThreads(p));
- unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
- }
-
- void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
-
// This function tries to steal size_in_bytes memory from the sweeper threads
// free-lists. If it does not succeed stealing enough memory, it will wait
// for the sweeper threads to finish sweeping.
@@ -1988,7 +2132,7 @@ class PagedSpace : public Space {
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void EvictEvacuationCandidatesFromFreeLists();
+ void EvictEvacuationCandidatesFromLinearAllocationArea();
bool CanExpand(size_t size);
@@ -1998,15 +2142,26 @@ class PagedSpace : public Space {
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
+ virtual bool is_local() { return false; }
+
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
- void MoveOverFreeMemory(PagedSpace* other);
+ void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
+ intptr_t limit = kCompactionMemoryWanted);
- virtual bool is_local() { return false; }
+ // Refills the free list from the corresponding free list filled by the
+ // sweeper.
+ virtual void RefillFreeList();
protected:
+ void AddMemory(Address start, intptr_t size);
+
+ FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
+
+ void MoveOverFreeMemory(PagedSpace* other);
+
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
@@ -2037,7 +2192,7 @@ class PagedSpace : public Space {
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
- MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+ MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
@@ -2057,10 +2212,6 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
- // The number of free bytes which could be reclaimed by advancing the
- // concurrent sweeper threads.
- intptr_t unswept_free_bytes_;
-
// The sweeper threads iterate over the list of pointer and data space pages
// and sweep these pages concurrently. They will stop sweeping after the
// end_of_unswept_pages_ page.
@@ -2071,6 +2222,9 @@ class PagedSpace : public Space {
friend class MarkCompactCollector;
friend class PageIterator;
+
+ // Used in cctest.
+ friend class HeapTester;
};
@@ -2124,7 +2278,7 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+ static const int kAreaSize = Page::kAllocatableMemory;
inline NewSpacePage* next_page() {
return static_cast<NewSpacePage*>(next_chunk());
@@ -2286,11 +2440,6 @@ class SemiSpace : public Space {
intptr_t SizeOfObjects() override { return Size(); }
- intptr_t CommittedMemory() override {
- UNREACHABLE();
- return 0;
- }
-
intptr_t Available() override {
UNREACHABLE();
return 0;
@@ -2335,9 +2484,6 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
- // Returns the maximum amount of memory ever committed by the semi space.
- size_t MaximumCommittedMemory() { return maximum_committed_; }
-
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -2357,8 +2503,6 @@ class SemiSpace : public Space {
int maximum_total_capacity_;
int initial_total_capacity_;
- intptr_t maximum_committed_;
-
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
@@ -2393,7 +2537,7 @@ class SemiSpaceIterator : public ObjectIterator {
inline HeapObject* Next();
// Implementation of the ObjectIterator functions.
- virtual inline HeapObject* next_object();
+ inline HeapObject* next_object() override;
private:
void Initialize(Address start, Address end);
@@ -2432,6 +2576,54 @@ class NewSpacePageIterator BASE_EMBEDDED {
NewSpacePage* last_page_;
};
+// -----------------------------------------------------------------------------
+// Allows observation of inline allocation in the new space.
+class InlineAllocationObserver {
+ public:
+ explicit InlineAllocationObserver(intptr_t step_size)
+ : step_size_(step_size), bytes_to_next_step_(step_size) {
+ DCHECK(step_size >= kPointerSize);
+ }
+ virtual ~InlineAllocationObserver() {}
+
+ private:
+ intptr_t step_size() const { return step_size_; }
+ intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
+
+ // Pure virtual method provided by the subclasses that gets called when at
+ // least step_size bytes have been allocated. soon_object is the address just
+ // allocated (but not yet initialized). size is the size of the object as
+ // requested (i.e. without the alignment fillers). Some complexities to be
+ // aware of:
+ // 1) soon_object will be nullptr in cases where we end up observing an
+ // allocation that happens to be a filler space (e.g. page boundaries).
+ // 2) size is the requested size at the time of allocation. Right-trimming
+ // may change the object size dynamically.
+ // 3) soon_object may actually be the first object in an allocation-folding
+ // group. In such a case size is the size of the group rather than the
+ // first object.
+ virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
+
+ // Called each time the new space does an inline allocation step. This may
+ // happen more frequently than the step_size we are monitoring (e.g. when
+ // there are multiple observers, or when a page or space boundary is
+ // encountered).
+ void InlineAllocationStep(int bytes_allocated, Address soon_object,
+ size_t size) {
+ bytes_to_next_step_ -= bytes_allocated;
+ if (bytes_to_next_step_ <= 0) {
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
+ size);
+ bytes_to_next_step_ = step_size_;
+ }
+ }
+
+ intptr_t step_size_;
+ intptr_t bytes_to_next_step_;
+
+ friend class NewSpace;
+
+ DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
+};
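
A minimal observer sketch under the contract spelled out above (the AllocationLogger class is invented; Step may see a nullptr soon_object and a requested rather than final size):

// Hypothetical observer that reports roughly every 64KB of new-space
// allocation.
class AllocationLogger : public InlineAllocationObserver {
 public:
  AllocationLogger() : InlineAllocationObserver(64 * KB) {}

 private:
  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    // soon_object may be nullptr (e.g. at page boundaries) and size is the
    // requested size, so treat both as best-effort.
    PrintF("observed ~%d bytes allocated in new space\n", bytes_allocated);
  }
};

Installation mirrors the NewSpace additions below: AddInlineAllocationObserver(&logger) to start observing, RemoveInlineAllocationObserver(&logger) to stop.
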
// -----------------------------------------------------------------------------
// The young generation space.
@@ -2447,8 +2639,8 @@ class NewSpace : public Space {
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- inline_allocation_limit_step_(0),
- top_on_previous_step_(0) {}
+ top_on_previous_step_(0),
+ inline_allocation_observers_paused_(false) {}
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2512,16 +2704,15 @@ class NewSpace : public Space {
return to_space_.TotalCapacity();
}
- // Return the total amount of memory committed for new space.
+ // Committed memory for NewSpace is the committed memory of both semi-spaces
+ // combined.
intptr_t CommittedMemory() override {
- if (from_space_.is_committed()) return 2 * Capacity();
- return TotalCapacity();
+ return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
- // Return the total amount of memory committed for new space.
- intptr_t MaximumCommittedMemory() {
- return to_space_.MaximumCommittedMemory() +
- from_space_.MaximumCommittedMemory();
+ intptr_t MaximumCommittedMemory() override {
+ return from_space_.MaximumCommittedMemory() +
+ to_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
@@ -2618,14 +2809,26 @@ class NewSpace : public Space {
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment));
+ MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment);
+
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
void UpdateInlineAllocationLimit(int size_in_bytes);
- void LowerInlineAllocationLimit(intptr_t step) {
- inline_allocation_limit_step_ = step;
+
+ // Allows observation of inline allocation. The observer->Step() method gets
+ // called after every step_size bytes have been allocated (approximately).
+ // This works by lowering the allocation limit to an artificial value and
+ // readjusting it after each step.
+ void AddInlineAllocationObserver(InlineAllocationObserver* observer);
+
+ // Removes a previously installed observer.
+ void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
+
+ void DisableInlineAllocationSteps() {
+ top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
- top_on_previous_step_ = step ? allocation_info_.top() : 0;
}
// Get the extent of the inactive semispace (for use as a marking stack,
@@ -2658,6 +2861,7 @@ class NewSpace : public Space {
// are no pages, or the current page is already empty), or true
// if successful.
bool AddFreshPage();
+ bool AddFreshPageSynchronized();
#ifdef VERIFY_HEAP
// Verify the active semispace.
@@ -2695,16 +2899,14 @@ class NewSpace : public Space {
bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
- inline intptr_t inline_allocation_limit_step() {
- return inline_allocation_limit_step_;
- }
-
SemiSpace* active_space() { return &to_space_; }
private:
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
+ base::Mutex mutex_;
+
Address chunk_base_;
uintptr_t chunk_size_;
@@ -2724,13 +2926,14 @@ class NewSpace : public Space {
// mark-compact collection.
AllocationInfo allocation_info_;
- // When incremental marking is active we will set allocation_info_.limit
- // to be lower than actual limit and then will gradually increase it
- // in steps to guarantee that we do incremental marking steps even
- // when all allocation is performed from inlined generated code.
- intptr_t inline_allocation_limit_step_;
-
+ // When inline allocation stepping is active, either because of incremental
+ // marking or because of idle scavenge, we 'interrupt' inline allocation every
+ // once in a while. This is done by setting allocation_info_.limit to be lower
+ // than the actual limit and increasing it in steps to guarantee that the
+ // observers are notified periodically.
+ List<InlineAllocationObserver*> inline_allocation_observers_;
Address top_on_previous_step_;
+ bool inline_allocation_observers_paused_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2738,17 +2941,37 @@ class NewSpace : public Space {
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
// If we are doing inline allocation in steps, this method performs the 'step'
- // operation. Right now incremental marking is the only consumer of inline
- // allocation steps. top is the memory address of the bump pointer at the last
+ // operation. top is the memory address of the bump pointer at the last
// inline allocation (i.e. it determines the numbers of bytes actually
// allocated since the last step.) new_top is the address of the bump pointer
// where the next byte is going to be allocated from. top and new_top may be
// different when we cross a page boundary or reset the space.
- void InlineAllocationStep(Address top, Address new_top);
-
+ void InlineAllocationStep(Address top, Address new_top, Address soon_object,
+ size_t size);
+ intptr_t GetNextInlineAllocationStepSize();
+ void StartNextInlineAllocationStep();
+ void PauseInlineAllocationObservers();
+ void ResumeInlineAllocationObservers();
+
+ friend class PauseInlineAllocationObserversScope;
friend class SemiSpaceIterator;
};
+class PauseInlineAllocationObserversScope {
+ public:
+ explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
+ : new_space_(new_space) {
+ new_space_->PauseInlineAllocationObservers();
+ }
+ ~PauseInlineAllocationObserversScope() {
+ new_space_->ResumeInlineAllocationObservers();
+ }
+
+ private:
+ NewSpace* new_space_;
+ DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+};
+
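
A hypothetical call site for the RAII pauser: allocations made inside the scope do not trigger observer steps, and stepping resumes when the scope unwinds.

{
  PauseInlineAllocationObserversScope pause(heap->new_space());
  // Allocations here (e.g. during evacuation) are invisible to observers.
}  // observers resume automatically
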
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
@@ -2763,11 +2986,16 @@ class CompactionSpace : public PagedSpace {
Free(start, size_in_bytes);
}
- virtual bool is_local() { return true; }
+ bool is_local() override { return true; }
+
+ void RefillFreeList() override;
protected:
// The space is temporary and not included in any snapshots.
- virtual bool snapshotable() { return false; }
+ bool snapshotable() override { return false; }
+
+ MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
+ int size_in_bytes) override;
};
@@ -2776,7 +3004,9 @@ class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
+ duration_(0.0),
+ bytes_compacted_(0) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@@ -2791,9 +3021,21 @@ class CompactionSpaceCollection : public Malloced {
return nullptr;
}
+ void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
+ duration_ += duration;
+ bytes_compacted_ += bytes_compacted;
+ }
+
+ double duration() const { return duration_; }
+ intptr_t bytes_compacted() const { return bytes_compacted_; }
+
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
+
+ // Book keeping.
+ double duration_;
+ intptr_t bytes_compacted_;
};
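
The new duration/bytes bookkeeping is enough to derive a throughput figure; a sketch of such a metric (the helper function is hypothetical, not part of the patch):

// Hypothetical: average compaction throughput in bytes per millisecond,
// guarding against division by zero before any progress was reported.
double CompactionSpeedInBytesPerMillisecond(
    const CompactionSpaceCollection& spaces) {
  if (spaces.duration() == 0.0) return 0.0;
  return spaces.bytes_compacted() / spaces.duration();
}
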
@@ -2831,7 +3073,7 @@ class MapSpace : public PagedSpace {
// TODO(1600): this limit is artificial just to keep code compilable
static const int kMaxMapPageIndex = 1 << 16;
- virtual int RoundSizeDownToObjectAlignment(int size) {
+ int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo32(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
@@ -2839,11 +3081,12 @@ class MapSpace : public PagedSpace {
}
}
- protected:
- virtual void VerifyObject(HeapObject* obj);
+#ifdef VERIFY_HEAP
+ void VerifyObject(HeapObject* obj) override;
+#endif
private:
- static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+ static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
@@ -2889,10 +3132,6 @@ class LargeObjectSpace : public Space {
intptr_t SizeOfObjects() override { return objects_size_; }
- intptr_t MaximumCommittedMemory() { return maximum_committed_; }
-
- intptr_t CommittedMemory() override { return Size(); }
-
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -2935,7 +3174,6 @@ class LargeObjectSpace : public Space {
bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
- intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
intptr_t size_; // allocated bytes
@@ -2994,7 +3232,7 @@ struct CommentStatistic {
static const int kMaxComments = 64;
};
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SPACES_H_
diff --git a/chromium/v8/src/heap/store-buffer-inl.h b/chromium/v8/src/heap/store-buffer-inl.h
index 1f3dda21d22..e11ad87087c 100644
--- a/chromium/v8/src/heap/store-buffer-inl.h
+++ b/chromium/v8/src/heap/store-buffer-inl.h
@@ -48,7 +48,7 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
}
}
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STORE_BUFFER_INL_H_
diff --git a/chromium/v8/src/heap/store-buffer.cc b/chromium/v8/src/heap/store-buffer.cc
index 2ed9deccff4..a8a1e5bbf1e 100644
--- a/chromium/v8/src/heap/store-buffer.cc
+++ b/chromium/v8/src/heap/store-buffer.cc
@@ -412,6 +412,26 @@ void StoreBuffer::VerifyValidStoreBufferEntries() {
}
+class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
+ public:
+ FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
+ ObjectSlotCallback callback)
+ : store_buffer_(store_buffer), callback_(callback) {}
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ store_buffer_->FindPointersToNewSpaceInRegion(
+ reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
+ callback_);
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+
+ private:
+ StoreBuffer* store_buffer_;
+ ObjectSlotCallback callback_;
+};
+
+
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
// We do not sort or remove duplicated entries from the store buffer because
// we expect that callback will rebuild the store buffer thus removing
@@ -438,6 +458,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
}
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
+ FindPointersToNewSpaceVisitor visitor(this, slot_callback);
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) {
chunk->set_scan_on_scavenge(false);
@@ -469,69 +490,22 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
}
}
} else {
- heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
- page);
- HeapObjectIterator iterator(page);
- for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
- heap_object = iterator.Next()) {
- // We iterate over objects that contain new space pointers only.
- Address obj_address = heap_object->address();
- const int start_offset = HeapObject::kHeaderSize;
- const int end_offset = heap_object->Size();
-
- switch (heap_object->ContentType()) {
- case HeapObjectContents::kTaggedValues: {
- Address start_address = obj_address + start_offset;
- Address end_address = obj_address + end_offset;
- // Object has only tagged fields.
- FindPointersToNewSpaceInRegion(start_address, end_address,
- slot_callback);
- break;
- }
-
- case HeapObjectContents::kMixedValues: {
- if (heap_object->IsFixedTypedArrayBase()) {
- FindPointersToNewSpaceInRegion(
- obj_address + FixedTypedArrayBase::kBasePointerOffset,
- obj_address + FixedTypedArrayBase::kHeaderSize,
- slot_callback);
- } else if (heap_object->IsBytecodeArray()) {
- FindPointersToNewSpaceInRegion(
- obj_address + BytecodeArray::kConstantPoolOffset,
- obj_address + BytecodeArray::kHeaderSize,
- slot_callback);
- } else if (heap_object->IsJSArrayBuffer()) {
- FindPointersToNewSpaceInRegion(
- obj_address +
- JSArrayBuffer::BodyDescriptor::kStartOffset,
- obj_address + JSArrayBuffer::kByteLengthOffset +
- kPointerSize,
- slot_callback);
- FindPointersToNewSpaceInRegion(
- obj_address + JSArrayBuffer::kSize,
- obj_address + JSArrayBuffer::kSizeWithInternalFields,
- slot_callback);
- } else if (FLAG_unbox_double_fields) {
- LayoutDescriptorHelper helper(heap_object->map());
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset,
- &end_of_region_offset)) {
- FindPointersToNewSpaceInRegion(
- obj_address + offset,
- obj_address + end_of_region_offset, slot_callback);
- }
- offset = end_of_region_offset;
- }
- } else {
- UNREACHABLE();
- }
- break;
- }
-
- case HeapObjectContents::kRawValues:
- break;
+ if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ // Aborted pages require iterating using mark bits because they
+ // don't have an iterable object layout before sweeping (which can
+ // only happen later). Note that we can never reach an
+ // aborted page through the scavenger.
+ DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
+ heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
+ &visitor);
+ } else {
+ heap_->mark_compact_collector()
+ ->SweepOrWaitUntilSweepingCompleted(page);
+ HeapObjectIterator iterator(page);
+ for (HeapObject* heap_object = iterator.Next();
+ heap_object != nullptr; heap_object = iterator.Next()) {
+ // We iterate over objects that contain new space pointers only.
+ heap_object->IterateBody(&visitor);
}
}
}
diff --git a/chromium/v8/src/heap/store-buffer.h b/chromium/v8/src/heap/store-buffer.h
index 37a78eb0752..9eeb00117b8 100644
--- a/chromium/v8/src/heap/store-buffer.h
+++ b/chromium/v8/src/heap/store-buffer.h
@@ -19,9 +19,6 @@ class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
- ObjectSlotCallback slot_callback);
-
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
@@ -147,23 +144,15 @@ class StoreBuffer {
void FindPointersToNewSpaceInRegion(Address start, Address end,
ObjectSlotCallback slot_callback);
- // For each region of pointers on a page in use from an old space call
- // visit_pointer_region callback.
- // If either visit_pointer_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- void IteratePointersOnPage(PagedSpace* space, Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
#ifdef VERIFY_HEAP
void VerifyPointers(LargeObjectSpace* space);
#endif
- friend class StoreBufferRebuildScope;
friend class DontMoveStoreBufferEntriesScope;
+ friend class FindPointersToNewSpaceVisitor;
+ friend class StoreBufferRebuildScope;
};
@@ -227,7 +216,7 @@ class DontMoveStoreBufferEntriesScope {
StoreBuffer* store_buffer_;
bool stored_state_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STORE_BUFFER_H_
diff --git a/chromium/v8/src/i18n.cc b/chromium/v8/src/i18n.cc
index 7899f2937d5..8de2d2998aa 100644
--- a/chromium/v8/src/i18n.cc
+++ b/chromium/v8/src/i18n.cc
@@ -16,6 +16,7 @@
#include "unicode/decimfmt.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
+#include "unicode/gregocal.h"
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
@@ -96,6 +97,16 @@ icu::SimpleDateFormat* CreateICUDateFormat(
icu::Calendar* calendar =
icu::Calendar::createInstance(tz, icu_locale, status);
+ if (calendar->getDynamicClassID() ==
+ icu::GregorianCalendar::getStaticClassID()) {
+ icu::GregorianCalendar* gc = static_cast<icu::GregorianCalendar*>(calendar);
+ UErrorCode status = U_ZERO_ERROR;
+ // The beginning of ECMAScript time, namely -(2**53)
+ const double start_of_time = -9007199254740992;
+ gc->setGregorianChange(start_of_time, status);
+ DCHECK(U_SUCCESS(status));
+ }
+
// Make formatter from skeleton. Calendar and numbering system are added
// to the locale as Unicode extension (if they were specified at all).
icu::SimpleDateFormat* date_format = NULL;
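
The constant above is -(2**53) ms relative to the epoch: 2**53 marks the end of the contiguous exact-integer range of an IEEE-754 double, and it lies outside the ECMAScript time range of +/-8.64e15 ms, so pushing the Gregorian changeover there makes ICU format every representable date with the proleptic Gregorian calendar. A standalone check of the arithmetic (the static_assert is illustrative only, not part of the patch):

#include <cstdint>

// 2**53 is the integer-precision limit of a double, so -(2**53) ms lies
// before any time value ECMAScript can represent.
static_assert(-(INT64_C(1) << 53) == INT64_C(-9007199254740992),
              "start_of_time is -(2**53)");
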
@@ -134,7 +145,7 @@ void SetResolvedDateSettings(Isolate* isolate,
icu::UnicodeString pattern;
date_format->toPattern(pattern);
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("pattern"),
+ resolved, factory->intl_pattern_symbol(),
factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
@@ -356,7 +367,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
icu::UnicodeString pattern;
number_format->toPattern(pattern);
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("pattern"),
+ resolved, factory->intl_pattern_symbol(),
factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
diff --git a/chromium/v8/src/i18n.h b/chromium/v8/src/i18n.h
index ea8380baa72..a8db4d18a65 100644
--- a/chromium/v8/src/i18n.h
+++ b/chromium/v8/src/i18n.h
@@ -128,6 +128,7 @@ class BreakIterator {
BreakIterator();
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_I18N_H_
diff --git a/chromium/v8/src/ia32/assembler-ia32-inl.h b/chromium/v8/src/ia32/assembler-ia32-inl.h
index 17ae01ad535..d957872cabf 100644
--- a/chromium/v8/src/ia32/assembler-ia32-inl.h
+++ b/chromium/v8/src/ia32/assembler-ia32-inl.h
@@ -104,7 +104,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
@@ -133,7 +134,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -199,7 +200,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -229,8 +230,8 @@ void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
}
@@ -244,7 +245,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
- Assembler::set_target_address_at(location, host_, target);
+ Assembler::set_target_address_at(isolate_, location, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -259,7 +260,8 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -280,7 +282,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -305,7 +307,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
@@ -453,13 +455,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, sizeof(int32_t));
+ Assembler::FlushICache(isolate, p, sizeof(int32_t));
}
}
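
The hunks above thread an explicit Isolate* through set_target_address_at so the instruction-cache flush no longer needs to look the isolate up. A minimal caller-side sketch, assuming the ia32 signature shown above (constant pools are unused on ia32, so NULL is a valid constant_pool):

// Hedged sketch; `isolate`, `pc` and `target` are assumed to come from
// the surrounding patching code.
void PatchCallSite(Isolate* isolate, Address pc, Address target) {
  // Rewrites the pc-relative displacement and flushes the icache for it.
  Assembler::set_target_address_at(isolate, pc, /* constant_pool */ NULL,
                                   target, FLUSH_ICACHE_IF_NEEDED);
}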
@@ -499,7 +501,7 @@ void Assembler::emit_near_disp(Label* L) {
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -559,6 +561,7 @@ Operand::Operand(Immediate imm) {
set_modrm(0, ebp);
set_dispr(imm.x_, imm.rmode_);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/chromium/v8/src/ia32/assembler-ia32.cc b/chromium/v8/src/ia32/assembler-ia32.cc
index 9f64a6005fd..f120a6233e2 100644
--- a/chromium/v8/src/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/ia32/assembler-ia32.cc
@@ -187,37 +187,6 @@ bool RelocInfo::IsInConstantPool() {
}
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- DCHECK_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -338,6 +307,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -1307,6 +1277,14 @@ void Assembler::bsr(Register dst, const Operand& src) {
}
+void Assembler::bsf(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBC);
+ emit_operand(dst, src);
+}
+
+
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
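
The new bsf (bit scan forward) emitter above complements the existing bsr; it yields the index of the least-significant set bit, which is what a count-trailing-zeros lowering needs. A hedged sketch of a generated-code sequence that might use it (register choices are illustrative; bsf leaves its destination undefined on zero input, hence the branch):

// Illustrative only: computes ctz(eax) into edx.
Label done, zero_input;
__ test(eax, eax);
__ j(zero, &zero_input);
__ bsf(edx, eax);  // edx = index of the lowest set bit
__ jmp(&done);
__ bind(&zero_input);
__ mov(edx, Immediate(32));  // define ctz(0) as the operand width
__ bind(&done);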
@@ -2186,6 +2164,19 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x0A);
+ emit_sse_operand(dst, src);
+ // Mask the precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
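
The roundss emitter above follows the SSE4.1 encoding 66 0F 3A 0A /r ib: the immediate's low two bits select the rounding mode and bit 3 masks the precision (inexact) exception, which is why the emitter ORs in 0x8. A sketch of the immediate layout, assuming RoundingMode values match the hardware encoding:

// Immediate byte for roundss/roundsd (SSE4.1), as emitted above:
//   bits 0-1: rounding mode (00 nearest, 01 down, 10 up, 11 truncate)
//   bit  2  : 0 = use the immediate's mode, 1 = use MXCSR.RC
//   bit  3  : 1 = precision (inexact) exception is masked
byte RoundImmediate(RoundingMode mode) {
  return static_cast<byte>(mode) | 0x8;
}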
@@ -2785,6 +2776,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2921,7 +2913,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/chromium/v8/src/ia32/assembler-ia32.h b/chromium/v8/src/ia32/assembler-ia32.h
index 57987bc7513..0b202529f9a 100644
--- a/chromium/v8/src/ia32/assembler-ia32.h
+++ b/chromium/v8/src/ia32/assembler-ia32.h
@@ -40,12 +40,49 @@
#include <deque>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/isolate.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+#define GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esp) \
+ V(ebp) \
+ V(esi) \
+ V(edi)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esi) \
+ V(edi)
+
+#define DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7)
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,151 +105,86 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- static const int kMaxNumAllocatableRegisters = 6;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
DCHECK(code >= 0);
DCHECK(code < kNumRegisters);
- Register r = { code };
+ Register r = {code};
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
+ bool is_byte_register() const { return reg_code <= 3; }
+
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
-
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
-
-
-inline int Register::ToAllocationIndex(Register reg) {
- DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
-
-struct XMMRegister {
- static const int kMaxNumAllocatableRegisters = 7;
- static const int kMaxNumRegisters = 8;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
- // TODO(turbofan): Proper support for float32.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
- static int ToAllocationIndex(XMMRegister reg) {
- DCHECK(reg.code() != 0);
- return reg.code() - 1;
- }
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static XMMRegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index + 1);
- }
+ static const int kMaxNumRegisters = Code::kAfterLast;
- static XMMRegister from_code(int code) {
- XMMRegister result = { code };
+ static DoubleRegister from_code(int code) {
+ DoubleRegister result = {code};
return result;
}
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7"
- };
- return names[index];
- }
+ const char* ToString();
- int code_;
+ int reg_code;
};
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef XMMRegister DoubleRegister;
-
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister no_xmm_reg = { -1 };
-
+typedef DoubleRegister XMMRegister;
enum Condition {
// any value < 0 is considered no_condition
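
The register rework above derives the Code enum and the named register constants from X-macro lists, giving one source of truth for names, codes, and allocatability. What the preprocessor produces from GENERAL_REGISTERS, as an abridged sketch:

enum Code {
  kCode_eax, kCode_ecx, kCode_edx, kCode_ebx,
  kCode_esp, kCode_ebp, kCode_esi, kCode_edi,
  kAfterLast,                      // kNumRegisters == 8
  kCode_no_reg = -1
};
const Register eax = {Register::kCode_eax};  // one constant per list entry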
@@ -514,19 +486,17 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) {
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target);
+ set_target_address_at(isolate, pc, constant_pool, target);
}
// Return the code target address at a call site from the return address
@@ -536,13 +506,14 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static const int kSpecialTargetSize = kPointerSize;
@@ -807,6 +778,8 @@ class Assembler : public AssemblerBase {
void bts(const Operand& dst, Register src);
void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
void bsr(Register dst, const Operand& src);
+ void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
+ void bsf(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -1015,6 +988,7 @@ class Assembler : public AssemblerBase {
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
+ void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
@@ -1426,7 +1400,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1595,6 +1569,7 @@ class EnsureSpace BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/chromium/v8/src/ia32/builtins-ia32.cc b/chromium/v8/src/ia32/builtins-ia32.cc
index ccdd01c7a34..a2aec741621 100644
--- a/chromium/v8/src/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/ia32/builtins-ia32.cc
@@ -22,12 +22,12 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
+ // -- edi : target
+ // -- edx : new.target
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
+ // -- esp[4 * argc] : first argument
// -- esp[4 * (argc + 1)] : receiver
// -----------------------------------
__ AssertFunction(edi);
@@ -36,38 +36,48 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ if (extra_args != BuiltinExtraArguments::kNone) {
+ __ PopReturnAddressTo(ecx);
+ if (extra_args & BuiltinExtraArguments::kTarget) {
+ ++num_extra_args;
+ __ Push(edi);
+ }
+ if (extra_args & BuiltinExtraArguments::kNewTarget) {
+ ++num_extra_args;
+ __ Push(edx);
+ }
+ __ PushReturnAddressFrom(ecx);
}
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
__ add(eax, Immediate(num_extra_args + 1));
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
+ // Push a copy of the target function and the new target.
__ push(edi);
+ __ push(edx);
// Function is also the parameter to the runtime call.
__ push(edi);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ pop(edx);
__ pop(edi);
}
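
Generate_Adaptor above now treats BuiltinExtraArguments as a bitmask rather than the single NEEDS_CALLED_FUNCTION flag, so the target (edi) and new.target (edx) can be pushed independently. A hedged sketch of the flag shape this code assumes; the real enum lives in builtins.h and is not part of this hunk, so the values are illustrative:

enum BuiltinExtraArguments {
  kNone = 0,
  kTarget = 1 << 0,     // push edi, the called function
  kNewTarget = 1 << 1,  // push edx, the new.target
  kTargetAndNewTarget = kTarget | kNewTarget
};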
@@ -107,12 +117,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
// -- ebx: allocation site or undefined
- // -- edx: original constructor
+ // -- edx: new target
// -----------------------------------
// Enter a construct frame.
@@ -124,173 +135,166 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
__ SmiTag(eax);
__ push(eax);
- __ push(edi);
- __ push(edx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(edx, edi);
- __ j(not_equal, &rt_call);
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (!is_api_function) {
- Label allocate;
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
- // Check if slack tracking is enabled.
- __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
- __ shr(esi, Map::Counter::kShift);
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &allocate);
- // Decrease generous allocation count.
- __ sub(FieldOperand(eax, Map::kBitField3Offset),
- Immediate(1 << Map::Counter::kShift));
-
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(not_equal, &allocate);
-
- __ push(eax);
- __ push(edx);
- __ push(edi);
-
- __ push(edi); // constructor
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(edx);
- __ pop(eax);
- __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
-
- __ bind(&allocate);
- }
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
-
- __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
- Factory* factory = masm->isolate()->factory();
-
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // esi: slack tracking counter (non-API function case)
- __ mov(edx, factory->undefined_value());
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ movzx_b(
- esi,
- FieldOperand(
- eax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ sub(esi, eax);
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
+ if (create_implicit_receiver) {
+ __ push(edi);
+ __ push(edx);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // edx: new target
+ __ mov(eax,
+ FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject (not HeapObject tagged - the actual address).
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // ebx: JSObject (tagged)
+ // ecx: First in-object property of JSObject (not tagged)
+ __ mov(edx, factory->undefined_value());
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCounter::kShift);
+ __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
+ __ push(esi); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Allocate object with a slack.
+ __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ neg(esi);
+ __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(ecx, esi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ mov(edx, factory->one_pointer_filler_map());
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+ __ pop(esi); // Restore the allocation count value read before decreasing.
+ __ cmp(esi, Map::kSlackTrackingCounterEnd);
+ __ j(not_equal, &allocated);
+
+ // Push the object to the stack, and then the initial map as
+ // an argument to the runtime call.
+ __ push(ebx);
+ __ push(eax); // initial map
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ pop(ebx);
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(ecx, esi, edx);
- __ mov(edx, factory->one_pointer_filler_map());
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
- }
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- // ebx: JSObject (untagged)
- __ or_(ebx, Immediate(kHeapObjectTag));
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+ }
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
+ // Allocate the new receiver object using the runtime call.
+ // edx: new target
+ __ bind(&rt_call);
+ int offset = kPointerSize;
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi); // constructor function
+ __ push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(ebx, eax); // store result in ebx
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+
+ // Restore the parameters.
+ __ pop(edx); // new.target
+ __ pop(edi); // Constructor function.
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
}
- // Allocate the new receiver object using the runtime call.
- // edx: original constructor
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi); // argument 2/1: constructor function
- __ push(edx); // argument 3/2: original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
-
- // Restore the parameters.
- __ pop(edx); // new.target
- __ pop(edi); // Constructor function.
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
__ SmiUntag(eax);
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ push(edx);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -313,40 +317,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The arguments
- // count is stored below the reciever and the new.target.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and leave the construct frame. The
+ // arguments count is stored below the receiver.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ } else {
+ __ mov(ebx, Operand(esp, 0));
+ }
// Leave construct frame.
}
@@ -356,91 +364,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ }
__ ret(0);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -- ebx: allocation site or undefined
- // -- edx: original constructor
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve allocation site.
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(ebx);
-
- // Preserve actual arguments count.
- __ SmiTag(eax);
- __ push(eax);
- __ SmiUntag(eax);
-
- // Push new.target.
- __ push(edx);
-
- // receiver is the hole.
- __ push(Immediate(masm->isolate()->factory()->the_hole_value()));
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(equal, &skip_step_in);
-
- __ push(eax);
- __ push(edi);
- __ push(edi);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&skip_step_in);
-
- // Invoke function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Get arguments count, skipping over new.target.
- __ mov(ebx, Operand(esp, kPointerSize));
- }
- __ pop(ecx); // Return address.
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -473,7 +422,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
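
Several call sites in this patch drop the explicit argument count from CallRuntime (kThrowStackOverflow above, kNotifyDeoptimized further down); the count presumably now defaults to the fixed arity recorded in the runtime function table. A hedged sketch of such a convenience overload:

// Sketch only; the real declaration lives in the macro-assembler
// headers, not in this hunk.
void MacroAssembler::CallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  CallRuntime(function, function->nargs, kDontSaveFPRegs);
}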
@@ -559,6 +508,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o edi: the JS function object being called
+// o edx: the new target
// o esi: our context
// o ebp: the caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -576,6 +526,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ __ push(edx); // Callee's new target.
+
+ // Push zero for bytecode array offset.
+ __ push(Immediate(0));
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
@@ -605,7 +559,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -627,21 +581,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -650,7 +590,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -658,31 +600,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ mov(kInterpreterRegisterFileRegister, ebp);
- __ sub(
- kInterpreterRegisterFileRegister,
- Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
// load directly from the roots table.
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ add(kInterpreterDispatchTableRegister,
- Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // Push context as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterContextSpillSlot);
- __ push(esi);
+ // Push dispatch table as a stack located parameter to the bytecode handler.
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ push(ebx);
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
- times_pointer_size, 0));
+ __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
+ // Restore undefined_value in accumulator (eax)
+ // TODO(rmcilroy): Remove this once we move the dispatch table back into a
+ // register.
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
- __ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(esi);
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(ebx);
+ __ nop(); // Ensure that return address still counts as interpreter entry
+ // trampoline.
}
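
The dispatch tail above indexes the handler table with the raw bytecode and converts the tagged Code object to a raw entry address by hand. The same math as a hedged C++ sketch (all names illustrative):

Address Dispatch(const uint8_t* bytecode_array, int offset,
                 Address* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];   // the movzx_b above
  Address handler = dispatch_table[bytecode];  // tagged Code object
  // Skip the Code header and the heap-object tag to reach the entry:
  return handler + Code::kHeaderSize - kHeapObjectTag;
}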
@@ -708,36 +652,191 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register array_limit) {
+ // ----------- S t a t e -------------
+ // -- ebx : Pointer to the last argument in the args array.
+ // -- array_limit : Pointer to one before the first argument in the
+ // args array.
+ // -----------------------------------
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(ebx, 0));
+ __ sub(ebx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmp(ebx, array_limit);
+ __ j(greater, &loop_header, Label::kNear);
}
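
Generate_InterpreterPushArgs walks from the last argument (ebx) down to one slot before the first (array_limit), pushing as it goes, so the arguments land on the machine stack in source order. The equivalent loop as a hedged C++ sketch (PushToStack stands in for the hardware push):

void InterpreterPushArgs(intptr_t* last_arg, intptr_t* array_limit) {
  // Mirrors: Push(Operand(ebx, 0)); sub(ebx, kPointerSize); cmp; j(greater).
  for (intptr_t* a = last_arg; a > array_limit; --a) {
    PushToStack(*a);
  }
}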
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- edi : the target to call (can be any Object).
+ // -----------------------------------
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
- __ push(edi);
- // Function is also the parameter to the runtime call.
- __ push(edi);
- // Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(edx);
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(edi);
+ // Find the address of the last argument.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+ __ shl(ecx, kPointerSizeLog2);
+ __ neg(ecx);
+ __ add(ecx, ebx);
+
+ Generate_InterpreterPushArgs(masm, ecx);
+
+ // Call the target.
+ __ Push(edx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target
+ // -- edi : the constructor
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Save number of arguments on the stack below where arguments are going
+ // to be pushed.
+ __ mov(ecx, eax);
+ __ neg(ecx);
+ __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
+ __ mov(eax, ecx);
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(ecx);
+
+ // Find the address of the last argument.
+ __ shl(eax, kPointerSizeLog2);
+ __ add(eax, ebx);
+
+ // Push padding for receiver.
+ __ Push(Immediate(0));
+
+ Generate_InterpreterPushArgs(masm, eax);
+
+ // Restore number of arguments from slot on stack.
+ __ mov(eax, Operand(esp, -kPointerSize));
+
+ // Re-push return address.
+ __ Push(ecx);
+
+ // Call the constructor with unmodified eax, edx, edi values.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Initialize register file register.
+ __ mov(kInterpreterRegisterFileRegister, ebp);
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+
+ // Get the bytecode array pointer from the frame.
+ __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ ebx);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ mov(
+ kInterpreterBytecodeOffsetRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Push dispatch table as a stack located parameter to the bytecode handler -
+ // overwrite the state slot (we don't use these for interpreter deopts).
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ mov(Operand(esp, kPointerSize), ebx);
+
+ // Dispatch to the target bytecode.
+ __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ mov(kContextRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
+ // and header removal.
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(ebx);
+}
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -833,7 +932,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -860,7 +959,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
@@ -902,7 +1001,136 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into eax and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ JumpIfSmi(eax, &receiver_not_date);
+ __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+ __ j(not_equal, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(edx, Operand::StaticVariable(
+ ExternalReference::date_cache_stamp(masm->isolate())));
+ __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+ __ j(not_equal, &stamp_mismatch, Label::kNear);
+ __ mov(eax, FieldOperand(
+ eax, JSDate::kValueOffset + field_index * kPointerSize));
+ __ ret(1 * kPointerSize);
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 0), eax);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ ret(1 * kPointerSize);
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowNotDateError);
+ }
+}
+
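
The Date fast path above is only valid while the per-isolate date cache stamp matches the stamp stored in the JSDate object; on a mismatch (or for uncached fields) it falls back to a C call. The logic as a hedged sketch; the accessor names are assumptions, not the tree's API:

double GetDateField(JSDate* date, int field_index, Isolate* isolate) {
  if (field_index < JSDate::kFirstUncachedField &&
      date->cache_stamp() == isolate->date_cache()->stamp()) {
    return date->cached_field(field_index);  // cache still valid
  }
  // Stamp mismatch or uncached field: recompute through the runtime.
  return RecomputeDateField(date, field_index);
}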
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argArray
+ // -- esp[8] : thisArg
+ // -- esp[12] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into edi, argArray into eax (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg_array, no_this_arg;
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ mov(ebx, edx);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ {
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+ __ cmp(eax, Immediate(1));
+ __ j(equal, &no_arg_array, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+ __ bind(&no_arg_array);
+ }
+ __ bind(&no_this_arg);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
+
+ // ----------- S t a t e -------------
+ // -- eax : argArray
+ // -- edi : receiver
+ // -- esp[0] : return address
+ // -- esp[4] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &receiver_not_callable, Label::kNear);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ Label::kNear);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Set(eax, 0);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// esp[0] : Return address
// esp[8] : Argument n
@@ -948,201 +1176,142 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ mov(key, Operand(ebp, indexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ mov(slot, Immediate(Smi::FromInt(slot_index)));
- __ mov(vector, Operand(ebp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register key.
- __ mov(key, Operand(ebp, indexOffset));
- __ add(key, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, indexOffset), key);
-
- __ bind(&entry);
- __ cmp(key, Operand(ebp, limitOffset));
- __ j(not_equal, &loop);
-
- // On exit, the pushed arguments count is in eax, untagged
- __ Move(eax, key);
- __ SmiUntag(eax);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
-
- // Stack at entry:
- // esp : return address
- // esp[4] : arguments
- // esp[8] : receiver ("this")
- // esp[12] : function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : function arguments
- // ebp[12] : receiver
- // ebp[16] : function
- static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argumentsList
+ // -- esp[8] : thisArgument
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ j(equal, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ Push(Immediate(0)); // index
- __ Push(Operand(ebp, kReceiverOffset)); // receiver
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : thisArgument
+ // -----------------------------------
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &target_not_callable, Label::kNear);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Leave internal frame.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
}
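At the JS level, the rewritten builtin implements Reflect.apply (ES6 26.1.1): missing arguments default to undefined, and a non-callable target throws before argumentsList is even inspected. A short TypeScript sketch of the observable behavior (illustrative, not V8 code):

  Reflect.apply(Math.max, undefined, [1, 2, 3]);           // => 3
  Reflect.apply(String.prototype.slice, "hello", [1, 3]);  // => "el"
  // Reflect.apply(42 as any, undefined, []);  // TypeError (target_not_callable)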
-// Used by ReflectConstruct
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : new.target (optional)
+ // -- esp[8] : argumentsList
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- // Stack at entry:
- // esp : return address
- // esp[4] : original constructor (new.target)
- // esp[8] : arguments
- // esp[12] : constructor
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // new.target into edx (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push undefined as
+ // the receiver instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : original constructor (new.target)
- // ebp[12] : arguments
- // ebp[16] : constructor
- static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ mov(eax, Operand(ebp, kNewTargetOffset));
- __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &validate_arguments, Label::kNear);
- __ mov(eax, Operand(ebp, kFunctionOffset));
- __ mov(Operand(ebp, kNewTargetOffset), eax);
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ push(Operand(ebp, kFunctionOffset));
- __ push(Operand(ebp, kArgumentsOffset));
- __ push(Operand(ebp, kNewTargetOffset));
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
-
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ push(Immediate(0)); // index
- // Push the constructor function as callee.
- __ push(Operand(ebp, kFunctionOffset));
-
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ mov(ecx, Operand(ebp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ mov(edx, edi);
+ __ j(equal, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
}
- // remove this, target, arguments, and newTarget
- __ ret(kStackSize * kPointerSize);
-}
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edx : new.target
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &target_not_constructor, Label::kNear);
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &new_target_not_constructor, Label::kNear);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edx);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
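The new Generate_ReflectConstruct mirrors Reflect.construct (ES6 26.1.2): new.target defaults to target when omitted, and both must be constructors. Observable behavior, sketched in TypeScript:

  class Base { tag = "base"; }
  class Derived extends Base {}
  Reflect.construct(Base, []) instanceof Base;             // => true
  // An explicit new.target determines the instance's prototype:
  Reflect.construct(Base, [], Derived) instanceof Derived; // => true
  // Reflect.construct(Math.max, []);  // TypeError (target_not_constructor)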
@@ -1206,6 +1375,113 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into eax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, ebx);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in eax).
+ __ bind(&no_arguments);
+ __ ret(1 * kPointerSize);
+}
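Generate_NumberConstructor covers the call (non-construct) path of Number: with no arguments it returns +0, otherwise it tail-calls ToNumber on the first argument and discards the rest. In TypeScript terms:

  Number();                       // => 0  (no_arguments path: argc in eax is already +0)
  Number("0x2a");                 // => 42 (ToNumberStub on the first argument)
  (Number as any)("7", "extra");  // => 7  (remaining arguments are dropped)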
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- edx : new target
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ Move(ebx, Smi::FromInt(0));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ }
+
+ // 3. Make sure ebx is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(ebx, &done_convert);
+ __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &done_convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Move(eax, ebx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(ebx, eax);
+ __ Pop(edx);
+ __ Pop(edi);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
+
+  // 6. Fall back to the runtime to create the new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ // Store the converted first argument into the new JSValue's value slot.
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ }
+ __ Ret();
+}
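The construct path instead allocates a JSValue wrapper around the converted number; when new.target differs from Number (e.g. a subclass), allocation is delegated to Runtime::kNewObject so the instance gets the correct map. Sketch:

  const boxed = new Number("1e3");
  typeof boxed;                         // => "object" (a JSValue wrapper)
  boxed.valueOf();                      // => 1000
  class MyNumber extends Number {}      // here new.target !== Number,
  new MyNumber(1) instanceof MyNumber;  // => true (the new_object runtime path)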
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
@@ -1260,7 +1536,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
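The symbol branch above is what lets String() — and only String() — accept a symbol, returning its descriptive string. For instance:

  const s = Symbol("desc");
  String(s);      // => "Symbol(desc)" (Runtime::kSymbolDescriptiveString)
  // `${s}`;      // TypeError: implicit symbol-to-string conversion throws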
@@ -1270,12 +1546,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- edx : new target
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into ebx and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -1291,60 +1571,47 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ PushReturnAddressFrom(ecx);
}
- // 2. Make sure ebx is a string.
+ // 3. Make sure ebx is a string.
{
Label convert, done_convert;
__ JumpIfSmi(ebx, &convert, Label::kNear);
- __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
__ j(below, &done_convert);
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
__ Push(edi);
+ __ Push(edx);
__ Move(eax, ebx);
__ CallStub(&stub);
__ Move(ebx, eax);
+ __ Pop(edx);
__ Pop(edi);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- ebx : the first argument
- // -- edi : constructor function
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
- Label allocate, done_allocate;
- __ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edi);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(edi);
- __ Pop(ebx);
- }
- __ jmp(&done_allocate);
+  // 6. Fall back to the runtime to create the new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ // Store the converted first argument into the new JSValue's value slot.
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
+ __ Ret();
}
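As with Number above, the String construct stub wraps the converted value in a JSValue and defers to the runtime when new.target is a subclass constructor:

  const w = new String("abc");
  typeof w;                             // => "object"
  w.valueOf();                          // => "abc"
  class Tagged extends String {}
  new Tagged("abc") instanceof Tagged;  // => true (new_object runtime path)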
@@ -1353,24 +1620,24 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
- // -- edi : function (passed through to callee)
+ // -- edx : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
__ mov(ecx, esp);
- __ sub(ecx, edx);
- // Make edx the space we need for the array when it is unrolled onto the
+ __ sub(ecx, edi);
+ // Make edi the space we need for the array when it is unrolled onto the
// stack.
- __ mov(edx, ebx);
- __ shl(edx, kPointerSizeLog2);
+ __ mov(edi, ebx);
+ __ shl(edi, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
+ __ cmp(ecx, edi);
__ j(less_equal, stack_overflow); // Signed comparison.
}
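The check computes how many bytes remain above the real stack limit and compares that against the space the expected arguments will occupy. The same arithmetic, restated as a host-side TypeScript sketch (names are illustrative, not V8 API):

  const kPointerSize = 4;  // ia32
  function wouldOverflow(esp: number, realStackLimit: number,
                         expectedArgs: number): boolean {
    const spaceLeft = esp - realStackLimit;           // may already be negative
    const spaceNeeded = expectedArgs * kPointerSize;  // shl by kPointerSizeLog2
    return spaceLeft <= spaceNeeded;                  // j(less_equal, ...)
  }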
@@ -1410,74 +1677,218 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(eax, &create_runtime);
+
+ // Load the map of argumentsList into ecx.
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Load native context into ebx.
+ __ mov(ebx, NativeContextOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+ __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+ __ j(equal, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(edx);
+ __ Pop(edi);
+ __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ mov(ebx,
+ FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+ __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ j(not_equal, &create_runtime);
+ __ SmiUntag(ebx);
+ __ mov(eax, ecx);
+ __ jmp(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(ecx);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(above, &create_runtime);
+ __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+ __ j(equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ neg(ecx);
+ __ add(ecx, esp);
+ __ sar(ecx, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, ebx);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- edi : target
+ // -- eax : args (a FixedArray built from argumentsList)
+ // -- ebx : len (number of elements to push from args)
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+    __ movd(xmm0, edx);  // preserve new.target while edx holds the return address
+ __ PopReturnAddressTo(edx);
+ __ Move(ecx, Immediate(0));
+ Label done, loop;
+ __ bind(&loop);
+ __ cmp(ecx, ebx);
+ __ j(equal, &done, Label::kNear);
+ __ Push(
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ inc(ecx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(edx);
+ __ movd(edx, xmm0);
+ __ Move(eax, ebx);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
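Generate_Apply is the shared tail for Function.prototype.apply, Reflect.apply and Reflect.construct: arguments objects and fast JSArrays are unpacked inline, while any other array-like goes through %CreateListFromArrayLike. Behaviorally:

  function sum(a: number, b: number, c: number) { return a + b + c; }
  Reflect.apply(sum, undefined, [1, 2, 3]);                       // => 6 (fast array path)
  Reflect.apply(sum, undefined, { length: 3, 0: 1, 1: 2, 2: 3 }); // => 6 (runtime path)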
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(edi);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+ SharedFunctionInfo::kClassConstructorBitsWithinByte);
+ __ j(not_zero, &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
(1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_zero, &done_convert);
{
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
-
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- ecx : the receiver
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
- __ j(above_equal, &done_convert);
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(ecx);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ j(above_equal, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy, Label::kNear);
+ __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(ecx);
+ }
+ __ jmp(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ Push(edi);
+ __ mov(eax, ecx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ecx, eax);
+ __ Pop(edi);
+ __ Pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ jmp(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ Push(eax);
- __ Push(edi);
- __ mov(eax, ecx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ecx, eax);
- __ Pop(edi);
- __ Pop(eax);
- __ SmiUntag(eax);
- }
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
}
__ bind(&done_convert);
@@ -1494,13 +1905,131 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
- actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+  // The function is a "classConstructor", so we need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
}
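Two observable consequences of the rewritten CallFunction: class constructors now throw when invoked without new, and receiver conversion still applies only to sloppy-mode, non-native callees. A sketch (Function() always yields a sloppy-mode function, so this is testable even from strict code):

  const who = Function("return this;");
  who.call(null) === globalThis;  // => true  (LoadGlobalProxy patches the receiver)
  typeof who.call("s");           // => "object" (ToObject wraps the primitive)
  class C {}
  // (C as any)();  // TypeError (the class_constructor path above)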
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into ecx and length of that into ebx.
+ Label no_bound_arguments;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ test(ebx, ebx);
+ __ j(zero, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- ebx : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+ __ sub(esp, ecx);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ // Restore the stack pointer.
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Adjust effective number of arguments to include return address.
+ __ inc(eax);
+
+ // Relocate arguments and return address down the stack.
+ {
+ Label loop;
+ __ Set(ecx, 0);
+ __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+ __ bind(&loop);
+      // Use xmm0 as scratch for a memory-to-memory move.
+      __ movd(xmm0, Operand(ebx, ecx, times_pointer_size, 0));
+ __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm0);
+ __ inc(ecx);
+ __ cmp(ecx, eax);
+ __ j(less, &loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ bind(&loop);
+ __ dec(ebx);
+ __ movd(xmm0, FieldOperand(ecx, ebx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ movd(Operand(esp, eax, times_pointer_size, 0), xmm0);
+ __ lea(eax, Operand(eax, 1));
+ __ j(greater, &loop);
+ }
+
+ // Adjust effective number of arguments (eax contains the number of
+ // arguments from the call plus return address plus the number of
+ // [[BoundArguments]]), so we need to subtract one for the return address.
+ __ dec(eax);
+ }
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Patch the receiver to [[BoundThis]].
+ __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(ExternalReference(
+ Builtins::kCall_ReceiverIsAny, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
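At the language level this is Function.prototype.bind's [[Call]]: the receiver is replaced by [[BoundThis]] and the [[BoundArguments]] are inserted below the call-site arguments, exactly the stack shuffle Generate_PushBoundArguments performs:

  function greet(this: string, a: string, b: string) {
    return this + ": " + a + ", " + b;
  }
  const bound = greet.bind("ctx", "A");  // [[BoundThis]]="ctx", [[BoundArguments]]=["A"]
  bound("B");                            // => "ctx: A, B"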
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -1510,16 +2039,24 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(edi);
- __ jmp(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ PushReturnAddressFrom(ecx);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ add(eax, Immediate(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1531,14 +2068,16 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
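The dispatch order above — JSFunction, bound function, proxy, then the call-as-function delegate — means a proxy call now lands in the runtime, which invokes its apply trap per the spec:

  const target = (x: number) => x + 1;
  const proxy = new Proxy(target, {
    apply(t, thisArg, args: [number]) {  // reached via Runtime::kJSProxyCall
      return t(...args) * 10;
    },
  });
  proxy(1);        // => 20
  // ({} as any)();  // TypeError (the non_callable path)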
@@ -1547,10 +2086,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (checked to be a JSFunction)
+ // -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(edx);
__ AssertFunction(edi);
// Calling convention for function specific ConstructStubs require
@@ -1567,17 +2105,54 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target (checked to be a constructor)
+ // -- edi : the constructor to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ cmp(edi, edx);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(
+ ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
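This is the [[Construct]] half of bound functions: the bound arguments are prepended and, when new.target was the bound function itself, it is patched to the bound target so instances get the right prototype:

  class Point { constructor(public x: number, public y: number) {} }
  const BoundPoint = Point.bind(null, 1);  // boundThis is ignored by [[Construct]]
  const p = new BoundPoint(2);
  p instanceof Point;                      // => true (new.target was patched)
  [p.x, p.y];                              // => [1, 2]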
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edi : the constructor to call (checked to be a JSProxy)
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (checked to be a JSFunctionProxy)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ // Include the pushed new_target, constructor and the receiver.
+ __ add(eax, Immediate(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
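Routing proxy construction through the runtime makes the construct trap fire as specified:

  const P = new Proxy(class {}, {
    construct(target, args) {      // reached via Runtime::kJSProxyConstruct
      return { trapped: args.length };
    },
  });
  new (P as any)(1, 2);            // => { trapped: 2 }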
@@ -1585,23 +2160,32 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(edi, &non_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
- __ j(zero, &non_constructor, Label::kNear);
// Dispatch based on instance type.
- __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(equal, masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+
+ // Check if target has a [[Construct]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(equal, masm->isolate()->builtins()->ConstructProxy(),
RelocInfo::CODE_TARGET);
@@ -1618,46 +2202,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
-}
-
-
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- edi : the target to call (can be any Object).
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
-
- // Find the address of the last argument.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ Push(Operand(ebx, 0));
- __ sub(ebx, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmp(ebx, ecx);
- __ j(greater, &loop_header, Label::kNear);
-
- // Call the target.
- __ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1665,17 +2211,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
+ // -- edx : new target (passed through to callee)
// -- edi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
- Label stack_overflow;
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
Label enough, too_few;
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1684,6 +2227,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1720,11 +2264,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Remember expected arguments in ecx.
__ mov(ecx, ebx);
@@ -1763,8 +2308,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// eax : expected number of arguments
+ // edx : new target (passed through to callee)
// edi : function (passed through to callee)
- __ call(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ call(ecx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1777,18 +2324,128 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ jmp(ecx);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
}
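The adaptor's two copy loops implement ordinary JS arity adaptation: surplus actual arguments are not passed on as formals, and missing formals are padded with undefined:

  function f(a?: number, b?: number) { return [a, b]; }
  (f as any)(1, 2, 3);  // => [1, 2]         (the "enough parameters" path)
  f(1);                 // => [1, undefined] (the too_few path pads with undefined)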
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Label* receiver_check_failed) {
+ // If there is no signature, return the holder.
+ __ CompareRoot(FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset),
+ Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // Walk the prototype chain.
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(scratch0, scratch0, scratch1);
+ __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+ Label next_prototype;
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Get the constructor's signature.
+ __ mov(scratch0,
+ FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(scratch0,
+ FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(scratch0, FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+ __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Otherwise load the parent function template and iterate.
+ __ mov(scratch0,
+ FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+ __ jmp(&function_template_loop, Label::kNear);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, receiver_check_failed);
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ test(FieldOperand(scratch0, Map::kBitField3Offset),
+ Immediate(Map::IsHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+ // Iterate.
+ __ jmp(&prototype_loop_start, Label::kNear);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments (not including the receiver)
+ // -- edi : callee
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[eax * 4] : first argument
+ // -- esp[(eax + 1) * 4] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+ __ Push(eax);
+ CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+ __ Pop(eax);
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+ __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(edx);
+
+ // Compatible receiver check failed: pop return address, arguments and
+ // receiver and throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ __ Pop(eax);
+ __ PopReturnAddressTo(ebx);
+ __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+ __ add(esp, eax);
+ __ PushReturnAddressFrom(ebx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+ }
+}
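CompatibleReceiverCheck and HandleFastApiCall back API functions created from a FunctionTemplate carrying a signature; an incompatible receiver ends in kThrowIllegalInvocation. From JS this surfaces as the familiar browser error (sketch assumes a DOM host):

  const get = Object.getOwnPropertyDescriptor(Node.prototype, "nodeType")!.get!;
  // get.call({});  // TypeError: Illegal invocation (receiver_check_failed)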
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1796,7 +2453,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
Label skip;
@@ -1835,7 +2492,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.cc b/chromium/v8/src/ia32/code-stubs-ia32.cc
index 37e1876f3de..6e597e2814c 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.cc
+++ b/chromium/v8/src/ia32/code-stubs-ia32.cc
@@ -617,7 +617,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -688,7 +688,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ push(scratch); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -787,7 +787,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -819,7 +819,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -892,8 +892,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// esp[8] = parameter count (tagged)
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(edi, NativeContextOperand());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -1055,7 +1054,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1096,10 +1095,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
- const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
- __ mov(edi, Operand(edi, offset));
+ __ mov(edi, NativeContextOperand());
+ __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -1151,7 +1148,35 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // ebx : rest parameter index (tagged)
+ // esp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ pop(eax); // Save return address.
+ __ push(ecx); // Push number of parameters.
+ __ push(edx); // Push parameters pointer.
+ __ push(ebx); // Push rest parameter index.
+ __ push(eax); // Push return address.
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
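The stub materializes the rest parameter from the actual arguments past the rest index, consulting the arguments adaptor frame when one is present:

  function tail(first: number, ...rest: number[]) { return rest; }
  tail(1, 2, 3);  // => [2, 3]
  tail(1);        // => []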
@@ -1160,7 +1185,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1438,7 +1463,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(equal, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure to match, return null.
@@ -1530,7 +1555,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1685,7 +1710,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
@@ -1753,8 +1778,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
@@ -1768,7 +1793,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1852,9 +1877,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
__ j(below, &runtime_call, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
@@ -1883,8 +1908,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and set up the arguments.
if (cc == equal) {
__ push(ecx);
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
__ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
@@ -1893,9 +1917,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -1903,16 +1926,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
- if (is_super) {
- __ pop(ecx);
- }
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1923,29 +1941,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
__ push(edi);
__ push(edx);
__ push(ebx);
- if (is_super) {
- __ push(ecx);
- }
__ CallStub(stub);
- if (is_super) {
- __ pop(ecx);
- }
__ pop(ebx);
__ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
-
- if (is_super) {
- __ push(ecx);
- }
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -1953,7 +1961,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -2019,118 +2026,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
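The feedback transitions implemented above reduce to a small state machine: an uninitialized slot goes monomorphic on the first recorded call, and a different target later degrades it to megamorphic (the Array-function/AllocationSite special case is elided). A TypeScript sketch of that logic:

  type FeedbackState = "uninitialized" | { fn: object } | "megamorphic";
  function record(state: FeedbackState, callee: object): FeedbackState {
    if (state === "uninitialized") return { fn: callee };  // first call: go monomorphic
    if (state !== "megamorphic" && state.fn !== callee) return "megamorphic";
    return state;  // same monomorphic target (or already megamorphic)
  }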
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, cont);
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(Isolate* isolate, MacroAssembler* masm, int argc) {
- __ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(edi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(edi);
- }
- __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // edi : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm->isolate(), masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
- if (IsSuperConstructorCall()) {
- __ push(ecx);
- }
-
Label non_function;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function);
@@ -2138,29 +2049,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map =
- isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(ebx);
- }
+ __ AssertUndefinedOrAllocationSite(ebx);
- if (IsSuperConstructorCall()) {
- __ pop(edx);
- } else {
- // Pass original constructor to construct stub.
- __ mov(edx, edi);
- }
+ // Pass new target to construct stub.
+ __ mov(edx, edi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2170,7 +2074,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ jmp(ecx);
__ bind(&non_function);
- if (IsSuperConstructorCall()) __ Drop(1);
__ mov(edx, edi);
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -2208,13 +2111,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2248,36 +2145,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(isolate, masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call_function);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- __ j(equal, &slow_start);
+ __ j(equal, &call);
// Check if we have an allocation site.
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -2306,10 +2183,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ mov(
FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- // We have to update statistics for runtime profiling.
- __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
- __ jmp(&slow_start);
+
+ __ bind(&call);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2326,8 +2204,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(edi, ecx);
__ j(equal, &miss);
- // Update stats.
- __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+ // Make sure the function belongs to the same native context.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
+ __ cmp(ecx, NativeContextOperand());
+ __ j(not_equal, &miss);
// Initialize the call counter.
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
@@ -2346,23 +2227,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
}
- __ jmp(&have_js_function);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
// Unreachable
__ int3();
@@ -2378,7 +2250,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2425,11 +2297,23 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// esp: stack pointer (restored after C call)
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
+ //
+ // If argv_in_register():
+ // ecx: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles());
+ if (argv_in_register()) {
+ DCHECK(!save_doubles());
+ __ EnterApiExitFrame(3);
+
+ // Move argc and argv into the correct registers.
+ __ mov(esi, ecx);
+ __ mov(edi, eax);
+ } else {
+ __ EnterExitFrame(save_doubles());
+ }
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2474,7 +2358,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles());
+ __ LeaveExitFrame(save_doubles(), !argv_in_register());
__ ret(0);
// Handling of exception.
@@ -2673,14 +2557,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ mov(shared_info,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ BooleanBitTest(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- __ j(not_zero, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ mov(function_prototype,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2706,28 +2582,48 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
- Label done, loop;
+ Label done, loop, fast_runtime_fallback;
__ mov(eax, isolate()->factory()->true_value());
__ bind(&loop);
- __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object requires an access check.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+ // Check if the current object is a Proxy.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ __ j(equal, &fast_runtime_fallback, Label::kNear);
+
+ __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ j(equal, &done, Label::kNear);
- __ cmp(object_prototype, isolate()->factory()->null_value());
- __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ cmp(object, isolate()->factory()->null_value());
__ j(not_equal, &loop);
__ mov(eax, isolate()->factory()->false_value());
+
__ bind(&done);
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(0);
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function_prototype);
+ __ PushReturnAddressFrom(scratch);
+ // Invalidate the instanceof cache.
+ __ Move(eax, Immediate(Smi::FromInt(0)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
- __ pop(scratch); // Pop return address.
- __ push(object); // Push {object}.
- __ push(function); // Push {function}.
- __ push(scratch); // Push return address.
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function);
+ __ PushReturnAddressFrom(scratch);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
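
The rewritten loop now bails out to the runtime whenever it meets a proxy or an access-checked object instead of blindly chasing map prototypes. A sketch of that control flow over a toy object model; the boolean fields stand in for the map bits the stub tests and are not V8's layout.

#include <cstdio>

struct Object {
  const Object* prototype = nullptr;  // Map::kPrototypeOffset analogue
  bool is_proxy = false;              // JS_PROXY_TYPE analogue
  bool needs_access_check = false;    // Map::kIsAccessCheckNeeded analogue
};

enum class Result { kTrue, kFalse, kRuntime };

Result HasInPrototypeChain(const Object* object, const Object* prototype) {
  for (const Object* o = object; o != nullptr; o = o->prototype) {
    // Mirrors the fast_runtime_fallback path: anything the stub cannot
    // inspect directly is handed to Runtime::kHasInPrototypeChain.
    if (o->needs_access_check || o->is_proxy) return Result::kRuntime;
    if (o->prototype == prototype) return Result::kTrue;
  }
  return Result::kFalse;
}

int main() {
  Object proto, obj;
  obj.prototype = &proto;
  std::printf("%d\n", HasInPrototypeChain(&obj, &proto) == Result::kTrue);
}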
@@ -2788,11 +2684,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -2822,7 +2718,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2868,7 +2764,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -3118,7 +3014,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// eax: string
@@ -3163,7 +3059,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3176,7 +3072,26 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in eax.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, eax);
+ __ j(greater_equal, &positive_smi, Label::kNear);
+ __ xor_(eax, eax);
+ __ bind(&positive_smi);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToLength);
}
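
Semantically the new stub is the ES2015 ToLength clamp: the inline path handles only the non-negative smi case, and everything else defers to Runtime::kToLength. A plain C++ rendering of the full operation for reference:

#include <cmath>
#include <cstdio>

double ToLength(double value) {
  if (std::isnan(value) || value <= 0) return 0;       // NaN, negatives -> 0
  const double kMaxSafeInteger = 9007199254740991.0;   // 2^53 - 1
  value = std::trunc(value);                           // ToInteger, positive
  return value > kMaxSafeInteger ? kMaxSafeInteger : value;
}

int main() {
  std::printf("%.0f %.0f %.0f\n", ToLength(-5), ToLength(3.7), ToLength(1e300));
  // prints: 0 3 9007199254740991
}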
@@ -3211,7 +3126,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3374,7 +3289,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Push(edx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3419,14 +3334,16 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
__ AssertSmi(eax);
__ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
__ AssertSmi(edx);
- __ xchg(eax, edx);
+ __ push(eax);
+ __ mov(eax, edx);
+ __ pop(edx);
}
__ sub(eax, edx);
__ Ret();
@@ -3715,9 +3632,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ push(right);
__ push(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3725,19 +3642,20 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
- DCHECK(GetCondition() == equal);
+ DCHECK_EQ(equal, GetCondition());
__ sub(eax, edx);
__ ret(0);
@@ -3746,7 +3664,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ mov(ecx, edx);
@@ -3763,14 +3681,14 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(eax, edx);
__ ret(0);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(eax);
__ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3787,7 +3705,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op())));
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
@@ -4178,11 +4096,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
__ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object, Label::kNear);
__ pop(regs_.object());
regs_.Restore(masm);
@@ -4202,91 +4119,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : element value to store
- // -- ecx : element index as smi
- // -- esp[0] : return address
- // -- esp[4] : array literal index in function
- // -- esp[8] : array literal
- // clobbers ebx, edx, edi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label slow_elements_from_double;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-
- __ CheckFastElements(edi, &double_elements);
-
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(edi); // Pop return address and remember to put back later for tail
- // call.
- __ push(ebx);
- __ push(ecx);
- __ push(eax);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(edx);
- __ push(edi); // Return return address so that tail call returns to right
- // place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- __ bind(&slow_elements_from_double);
- __ pop(edx);
- __ jmp(&slow_elements);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize), eax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ push(edx);
- __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(eax,
- edx,
- ecx,
- edi,
- xmm0,
- &slow_elements_from_double);
- __ pop(edx);
- __ ret(0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4561,13 +4393,14 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register key, Register vector,
Register slot, Register feedback,
- Label* miss) {
+ bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next, next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
+ Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
__ push(receiver);
__ push(vector);
@@ -4599,16 +4432,18 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(Operand::StaticVariable(virtual_register));
// Polymorphic, we have to loop from 2 to N
-
- // TODO(mvstanton): I think there is a bug here, we are assuming the
- // array has more than one map/handler pair, but we call this function in the
- // keyed store with a string key case, where it might be just an array of two
- // elements.
-
__ bind(&start_polymorphic);
__ push(key);
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(2)));
+
+ if (!is_polymorphic) {
+ // If is_polymorphic is false, we may have only a two-element array.
+ // Check against the length now in that case.
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(greater_equal, &pop_and_miss);
+ }
+
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4630,6 +4465,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ j(less, &next_loop);
// We exhausted our array of map handler pairs.
+ __ bind(&pop_and_miss);
__ pop(key);
__ pop(vector);
__ pop(receiver);
@@ -4648,7 +4484,7 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
// The store ic value is on the stack.
DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
// feedback initially contains the feedback array
Label compare_smi_map;
@@ -4710,7 +4546,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&try_array);
__ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
+ &miss);
__ bind(&not_array);
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
@@ -4755,13 +4592,16 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Label transition_call;
Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(masm->isolate());
__ push(receiver);
__ push(vector);
Register receiver_map = receiver;
Register cached_map = vector;
+ Register value = StoreDescriptor::ValueRegister();
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
@@ -4770,11 +4610,17 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// Polymorphic, we have to loop from 0 to N - 1
__ push(key);
- // On the stack we have:
- // key (esp)
- // vector
- // receiver
- // value
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, vector, slot in registers.
+ // - handler in virtual register.
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(0)));
__ bind(&next_loop);
@@ -4793,32 +4639,39 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ pop(receiver);
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
__ mov(Operand::StaticVariable(virtual_register), feedback);
- __ pop(feedback); // Pop "value".
+ __ pop(value);
__ jmp(Operand::StaticVariable(virtual_register));
__ bind(&transition_call);
- // Oh holy hell this will be tough.
- // The map goes in vector register.
- __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(receiver, &pop_and_miss);
- // slot goes on the stack, and holds return address.
- __ xchg(slot, Operand(esp, 4 * kPointerSize));
- // Get the handler in value.
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, map, vector in registers.
+ // - handler and slot in virtual registers.
+ __ mov(Operand::StaticVariable(virtual_slot), slot);
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), feedback);
+
+ __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(cached_map, &pop_and_miss);
+ DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+
// Pop key into place.
__ pop(key);
- // Put the return address on top of stack, vector goes in slot.
- __ xchg(slot, Operand(esp, 0));
- // put the map on the stack, receiver holds receiver.
- __ xchg(receiver, Operand(esp, 1 * kPointerSize));
- // put the vector on the stack, slot holds value.
- __ xchg(slot, Operand(esp, 2 * kPointerSize));
- // feedback (value) = value, slot = handler.
- __ xchg(feedback, slot);
- __ jmp(slot);
+ __ pop(vector);
+ __ pop(receiver);
+ __ pop(value);
+ __ jmp(Operand::StaticVariable(virtual_register));
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(3)));
@@ -4885,7 +4738,8 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// at least one map/handler pair.
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
+ &miss);
__ bind(&miss);
__ pop(value);
@@ -5146,6 +5000,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label subclassing;
+ // Enter the context of the Array function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
__ cmp(edx, edi);
__ j(not_equal, &subclassing);
@@ -5167,27 +5024,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ pop(ecx); // return address.
- __ push(edi);
- __ push(edx);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(eax, Immediate(2));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ add(eax, Immediate(3));
break;
case NONE:
- __ mov(eax, Immediate(2));
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ mov(eax, Immediate(3));
break;
case ONE:
- __ mov(eax, Immediate(3));
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ __ mov(eax, Immediate(4));
break;
}
-
- __ push(ecx);
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(ebx);
+ __ PushReturnAddressFrom(ecx);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5304,7 +5160,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Pop(result_reg); // Pop return address.
__ Push(slot_reg);
__ Push(result_reg); // Push return address.
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5427,8 +5283,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(cell_reg); // Push return address.
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5565,7 +5420,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
__ cmp(map, isolate->factory()->heap_number_map());
@@ -5599,7 +5454,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.h b/chromium/v8/src/ia32/code-stubs-ia32.h
index c09b27b773e..121d12fe74e 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.h
+++ b/chromium/v8/src/ia32/code-stubs-ia32.h
@@ -320,13 +320,15 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ Register candidate = Register::from_code(i);
+ if (candidate.IsAllocatable()) {
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
}
UNREACHABLE();
return no_reg;
@@ -385,6 +387,7 @@ class RecordWriteStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_CODE_STUBS_IA32_H_
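
The GetRegThatIsNotEcxOr rewrite above trades the old allocation-index walk for a scan over all register codes with an IsAllocatable() filter. The shape of that scan, with a toy register type (nothing here is the real V8 API):

struct Reg {
  int code;
  bool allocatable;
  bool is(const Reg& other) const { return code == other.code; }
};

// Scan every machine register, skipping non-allocatable codes and any
// excluded registers, as the rewritten loop does.
Reg FirstAllocatableExcept(const Reg* regs, int num_registers, const Reg& a,
                           const Reg& b, const Reg& c) {
  for (int i = 0; i < num_registers; i++) {
    const Reg& candidate = regs[i];
    if (!candidate.allocatable) continue;
    if (candidate.is(a) || candidate.is(b) || candidate.is(c)) continue;
    return candidate;
  }
  return Reg{-1, false};  // no_reg analogue; the real code hits UNREACHABLE()
}

int main() {
  Reg regs[] = {{0, true}, {1, false}, {2, true}};
  return FirstAllocatableExcept(regs, 3, regs[0], regs[0], regs[0]).code == 2
             ? 0
             : 1;
}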
diff --git a/chromium/v8/src/ia32/codegen-ia32.cc b/chromium/v8/src/ia32/codegen-ia32.cc
index 93f4cee6366..2f94f356650 100644
--- a/chromium/v8/src/ia32/codegen-ia32.cc
+++ b/chromium/v8/src/ia32/codegen-ia32.cc
@@ -34,15 +34,15 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
{
@@ -65,19 +65,20 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ if (buffer == nullptr) return nullptr;
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
@@ -94,9 +95,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
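
One behavioral change worth noting in the two generators above: on buffer-allocation failure they now return nullptr instead of silently handing back &std::exp or &std::sqrt, so the libm fallback becomes the caller's job. A sketch of that caller-side pattern, with a function-pointer type merely modeled on UnaryMathFunctionWithIsolate:

#include <cmath>
#include <cstdio>

struct Isolate;  // opaque here, as in the real signature
using UnaryMathFunctionWithIsolate = double (*)(double x, Isolate* isolate);

double FastSqrt(double x, Isolate* isolate,
                UnaryMathFunctionWithIsolate generated) {
  // nullptr now means "code generation failed"; fall back to libm.
  if (generated == nullptr) return std::sqrt(x);
  return generated(x, isolate);
}

int main() {
  std::printf("%.1f\n", FastSqrt(9.0, nullptr, nullptr));  // 3.0
}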
@@ -186,13 +187,14 @@ class LabelConverter {
};
-MemMoveFunction CreateMemMoveFunction() {
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return NULL;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ if (buffer == nullptr) return nullptr;
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
@@ -505,7 +507,7 @@ MemMoveFunction CreateMemMoveFunction() {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
@@ -986,9 +988,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
- CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ CodePatcher patcher(isolate, young_sequence_.start(),
+ young_sequence_.length());
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
@@ -1035,7 +1039,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length);
+ CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
diff --git a/chromium/v8/src/ia32/codegen-ia32.h b/chromium/v8/src/ia32/codegen-ia32.h
index 2382388beaf..133b1adbdf3 100644
--- a/chromium/v8/src/ia32/codegen-ia32.h
+++ b/chromium/v8/src/ia32/codegen-ia32.h
@@ -5,7 +5,7 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -42,6 +42,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/chromium/v8/src/ia32/deoptimizer-ia32.cc b/chromium/v8/src/ia32/deoptimizer-ia32.cc
index d804f630ea8..efe64762030 100644
--- a/chromium/v8/src/ia32/deoptimizer-ia32.cc
+++ b/chromium/v8/src/ia32/deoptimizer-ia32.cc
@@ -8,6 +8,7 @@
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -74,7 +75,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@@ -100,14 +101,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->int3();
}
}
@@ -136,14 +138,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(call_address, patch_size());
+ CodePatcher patcher(isolate, call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo rinfo(isolate, call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
+ reinterpret_cast<intptr_t>(deopt_entry), NULL);
reloc_info_writer.Write(&rinfo);
DCHECK_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@@ -156,18 +157,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
// Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- DCHECK(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
+ const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
+
+ // Right trim the relocation info to free up remaining space.
+ const int delta = reloc_info->length() - new_reloc_length;
+ if (delta > 0) {
+ isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ reloc_info, delta);
+ }
}
@@ -181,7 +179,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -203,7 +201,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -233,12 +231,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
__ movsd(Operand(esp, offset), xmm_reg);
}
@@ -288,9 +288,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int double_regs_offset = FrameDescription::double_registers_offset();
// Fill in the double input registers.
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize;
__ movsd(xmm0, Operand(esp, src_offset));
__ movsd(Operand(ebx, dst_offset), xmm0);
}
@@ -371,9 +372,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
- for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(ebx, src_offset));
}
diff --git a/chromium/v8/src/ia32/disasm-ia32.cc b/chromium/v8/src/ia32/disasm-ia32.cc
index 935b22d9004..5a432806592 100644
--- a/chromium/v8/src/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/ia32/disasm-ia32.cc
@@ -1231,6 +1231,8 @@ static const char* F0Mnem(byte f0byte) {
case 0xAD: return "shrd";
case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
+ case 0xBC:
+ return "bsf";
case 0xBD: return "bsr";
default: return NULL;
}
@@ -1482,6 +1484,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xBC) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
} else if (f0byte == 0xBD) {
data += 2;
int mod, regop, rm;
diff --git a/chromium/v8/src/ia32/frames-ia32.h b/chromium/v8/src/ia32/frames-ia32.h
index a5ce6a5f02f..609dfec7b63 100644
--- a/chromium/v8/src/ia32/frames-ia32.h
+++ b/chromium/v8/src/ia32/frames-ia32.h
@@ -80,6 +80,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/chromium/v8/src/ia32/interface-descriptors-ia32.cc b/chromium/v8/src/ia32/interface-descriptors-ia32.cc
index 22d85d8cc3d..ad381c7eb28 100644
--- a/chromium/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/chromium/v8/src/ia32/interface-descriptors-ia32.cc
@@ -35,12 +35,10 @@ const Register VectorStoreTransitionDescriptor::SlotRegister() {
}
-const Register VectorStoreTransitionDescriptor::VectorRegister() {
- return no_reg;
-}
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return no_reg; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
@@ -70,6 +68,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
+
+
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -85,14 +88,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
- // The other three parameters are on the stack in ia32.
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
@@ -116,6 +111,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return eax; }
@@ -137,6 +136,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi, eax, ecx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx, ecx};
@@ -197,7 +203,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
+ // ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
@@ -216,6 +222,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ // ebx : allocation site or undefined
+ Register registers[] = {edi, edx, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ Register registers[] = {edi, edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, ebx, eax};
@@ -237,6 +264,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -347,6 +381,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // JSFunction
+ edx, // the new target
eax, // actual number of arguments
ebx, // expected number of arguments
};
@@ -379,33 +414,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
+ eax, // argument count (not including receiver)
+ ebx, // address of first argument
+ edi // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
- ebx // type vector
+ eax, // argument count (not including receiver)
+ edx, // new target
+ edi, // constructor
+ ebx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- eax, // argument count (including receiver)
- ebx, // address of first argument
- edi // the target callable to be call
+ eax, // argument count (argc)
+ ecx, // address of first argument (argv)
+ ebx // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index 0ad5d778ec7..5f80b4d52f1 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -19,14 +19,14 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
- // TODO(titzer): should we just use a null handle here instead?
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -821,6 +821,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -974,7 +986,7 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore all XMM registers.
if (save_doubles) {
const int offset = -2 * kPointerSize;
@@ -984,15 +996,20 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
}
}
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
+ if (pop_arguments) {
+ // Get the return address from the stack and restore the frame pointer.
+ mov(ecx, Operand(ebp, 1 * kPointerSize));
+ mov(ebp, Operand(ebp, 0 * kPointerSize));
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
+ // Pop the arguments and the receiver from the caller stack.
+ lea(esp, Operand(esi, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(ecx);
+ // Push the return address to get ready to return.
+ push(ecx);
+ } else {
+ // Otherwise just leave the exit frame.
+ leave();
+ }
LeaveExitFrameEpilogue(true);
}
@@ -1064,10 +1081,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch1, FieldOperand(scratch1, offset));
- mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+ mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1662,6 +1676,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ mov(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
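
The field order AllocateJSValue writes, restated as a plain struct for orientation; the static_assert mirrors the helper's STATIC_ASSERT that a JSValue is exactly four pointer-sized fields.

struct JSValueLayout {
  void* map;         // HeapObject::kMapOffset: constructor's initial map
  void* properties;  // JSObject::kPropertiesOffset: empty fixed array
  void* elements;    // JSObject::kElementsOffset: empty fixed array
  void* value;       // JSValue::kValueOffset: the wrapped primitive
};

static_assert(sizeof(JSValueLayout) == 4 * sizeof(void*),
              "same invariant as JSValue::kSize == 4 * kPointerSize");

int main() { return 0; }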
+
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
@@ -1729,16 +1764,16 @@ void MacroAssembler::CopyBytes(Register source,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
+ mov(Operand(current_address, 0), filler);
+ add(current_address, Immediate(kPointerSize));
bind(&entry);
- cmp(start_offset, end_offset);
+ cmp(current_address, end_address);
j(below, &loop);
}
@@ -1886,24 +1921,27 @@ void MacroAssembler::CallExternalReference(ExternalReference ref,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Move(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : argument num_arguments - 1
+ // ...
+ // -- esp[4 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- eax : number of arguments
+ // -----------------------------------
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(eax, Immediate(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
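
The signature simplification above works because every Runtime::FunctionId already records its arity; only variadic functions (nargs < 0) still rely on the caller having loaded eax. A standalone model of that lookup, with made-up table entries rather than V8's:

#include <cassert>

struct RuntimeFunction {
  const char* name;
  int nargs;  // -1 means variadic: caller must supply the count in eax
  int result_size;
};

const RuntimeFunction kSubString = {"SubString", 3, 1};
const RuntimeFunction kVariadic = {"SomethingVariadic", -1, 1};

int ArgcForTailCall(const RuntimeFunction& f, int eax) {
  assert(f.result_size == 1);           // the DCHECK_EQ in the hunk above
  return f.nargs >= 0 ? f.nargs : eax;  // fixed arity overrides eax
}

int main() {
  assert(ArgcForTailCall(kSubString, 99) == 3);
  assert(ArgcForTailCall(kVariadic, 5) == 5);
}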
@@ -1917,8 +1955,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1969,13 +2005,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call(adaptor, RelocInfo::CODE_TARGET);
@@ -1991,20 +2020,76 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ cmpb(Operand::StaticVariable(step_in_enabled), 0);
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(edi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ mov(edx, isolate()->factory()->undefined_value());
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ Label::kNear, call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -2019,6 +2104,7 @@ void MacroAssembler::InvokeCode(const Operand& code,
void MacroAssembler::InvokeFunction(Register fun,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2026,14 +2112,13 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
}
@@ -2048,8 +2133,7 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
}
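Taken together: new.target travels in edx (per the DCHECK in InvokeFunctionCode), and passing no_reg makes the invoker clear edx to undefined before dispatch. Illustrative call sites, not from this patch:

  // new.target available, in edx per the JS calling convention:
  __ InvokeFunction(edi, edx, actual, CALL_FUNCTION, CheckDebugStepCallWrapper());
  // no new.target -- edx is loaded with undefined internally:
  __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION, call_wrapper);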
@@ -2068,35 +2152,21 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinFunction(edi, native_context_index);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, GlobalObjectOperand());
- mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ mov(target, NativeContextOperand());
mov(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, native_context_index);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -2124,8 +2194,8 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
- mov(dst, GlobalObjectOperand());
- mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+ mov(dst, NativeContextOperand());
+ mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}
@@ -2135,35 +2205,26 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
+ mov(scratch, NativeContextOperand());
+ cmp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
+ mov(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(function,
- FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the native context from the current context.
+ mov(function, NativeContextOperand());
// Load the function from the native context.
- mov(function, Operand(function, Context::SlotOffset(index)));
+ mov(function, ContextOperand(function, index));
}
@@ -2425,6 +2486,30 @@ void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
}
+void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcnt(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsf(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
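bsf sets the zero flag (and leaves the destination undefined) when the source is zero, so the fallback branches and materializes 32 to match tzcnt's defined result. A host-side analogue of the intended semantics, for illustration only:

  static inline uint32_t TzcntSemantics(uint32_t x) {
    if (x == 0) return 32;  // tzcnt of 0 is the operand width
    uint32_t n = 0;
    while ((x & 1) == 0) {  // count trailing zeros, as bsf would find them
      x >>= 1;
      ++n;
    }
    return n;
  }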
+
+
+void MacroAssembler::Popcnt(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcnt(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
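Note the asymmetry with Lzcnt/Tzcnt: Popcnt has no software fallback, so reaching this macro without hardware support is a bug. A caller would presumably have to guard along these lines (illustrative):

  if (CpuFeatures::IsSupported(POPCNT)) {
    __ Popcnt(eax, ebx);
  } else {
    // take a stub or runtime path instead -- the macro will not emit one
  }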
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2763,10 +2848,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -2776,7 +2861,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -2836,10 +2921,9 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch1,
Label* on_black,
Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
+ 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -2893,110 +2977,22 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, isolate()->factory()->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- DCHECK(SeqOneByteString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (emit_debug_code()) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, kLiveBytesCountOverflowChunkSize);
- }
-
- bind(&done);
+ j(zero, value_is_white, Label::kNear);
}
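The DCHECKs record the marking encoding this patch switches to:

  // white = 00, grey = 10, black = 11, impossible = 01
  // (previously: black = 10, grey = 11)

Both live colors now have the low mark bit set, so "is white" reduces to the single-bit test above; the string/heap-number scanning that EnsureNotWhite used to inline is dropped in favor of a bare jump to the caller's white-object handler.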
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.h b/chromium/v8/src/ia32/macro-assembler-ia32.h
index 508e2099ad9..76c48900275 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.h
@@ -14,20 +14,21 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_eax_Code};
-const Register kReturnRegister1 = {kRegister_edx_Code};
-const Register kJSFunctionRegister = {kRegister_edi_Code};
-const Register kContextRegister = {kRegister_esi_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+const Register kReturnRegister0 = {Register::kCode_eax};
+const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kJSFunctionRegister = {Register::kCode_edi};
+const Register kContextRegister = {Register::kCode_esi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterContextSpillSlot = -1;
+const int kInterpreterDispatchTableSpillSlot = -1;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
@@ -40,33 +41,20 @@ enum PointersToHereCheck {
kPointersToHereAreAlwaysInteresting
};
-
-enum RegisterValueType {
- REGISTER_VALUE_IS_SMI,
- REGISTER_VALUE_IS_INT32
-};
-
+enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
@@ -93,7 +81,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kNear) {
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
@@ -101,84 +95,64 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kNear) {
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
+ Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
+ Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, Label::Distance has_color_distance,
+ int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -186,10 +160,7 @@ class MacroAssembler: public Assembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
+ Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
@@ -199,22 +170,14 @@ class MacroAssembler: public Assembler {
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
+ Register context, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check,
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
+ remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
@@ -225,10 +188,7 @@ class MacroAssembler: public Assembler {
// filters out smis so it does not update the write barrier if the
// value is a smi.
void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
+ Register array, Register value, Register index, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -240,10 +200,7 @@ class MacroAssembler: public Assembler {
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
+ Register object, Register address, Register value, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -252,12 +209,8 @@ class MacroAssembler: public Assembler {
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
- void RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp);
+ void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
+ Register scratch2, SaveFPRegsMode save_fp);
// ---------------------------------------------------------------------------
// Debugger Support
@@ -278,8 +231,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
+ // argument in register esi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
@@ -295,12 +248,11 @@ class MacroAssembler: public Assembler {
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
+ void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -353,37 +305,29 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
- }
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function.
@@ -393,9 +337,6 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, int native_context_index);
-
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -417,31 +358,25 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
+ void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
+ void CheckFastObjectElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
+ void CheckFastSmiElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
+ void StoreNumberToDoubleElements(Register maybe_number, Register elements,
+ Register key, Register scratch1,
+ XMMRegister scratch2, Label* fail,
int offset = 0);
// Compare an object's map with the specified map.
@@ -451,9 +386,7 @@ class MacroAssembler: public Assembler {
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
+ void CheckMap(Register obj, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
@@ -468,8 +401,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
+ Condition IsObjectStringType(Register heap_object, Register map,
Register instance_type);
// Check if the object in register heap_object is a name. Afterwards the
@@ -477,8 +409,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectNameType(Register heap_object,
- Register map,
+ Condition IsObjectNameType(Register heap_object, Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
@@ -487,8 +418,7 @@ class MacroAssembler: public Assembler {
void ClampUint8(Register reg);
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister scratch_reg,
+ void ClampDoubleToUint8(XMMRegister input_reg, XMMRegister scratch_reg,
Register result_reg);
void SlowTruncateToI(Register result_reg, Register input_reg,
@@ -526,22 +456,19 @@ class MacroAssembler: public Assembler {
void LoadUint32(XMMRegister dst, const Operand& src);
// Jump if the register contains a smi.
- inline void JumpIfSmi(Register value,
- Label* smi_label,
+ inline void JumpIfSmi(Register value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value,
- Label* smi_label,
+ inline void JumpIfSmi(Operand value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value,
- Label* not_smi_label,
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
@@ -597,6 +524,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -616,22 +547,15 @@ class MacroAssembler: public Assembler {
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss);
+ void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
+ Register scratch2, Label* miss);
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
+ void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+ Register r0, Register r1, Register r2,
Register result);
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -645,48 +569,29 @@ class MacroAssembler: public Assembler {
// result is known to be the allocation top on entry (could be result_end
// from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
+
+ void Allocate(int header_size, ScaleFactor element_size,
+ Register element_count, RegisterValueType element_count_type,
+ Register result, Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags);
+
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Label* gc_required, MutableMode mode = IMMUTABLE);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -695,36 +600,34 @@ class MacroAssembler: public Assembler {
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteConsString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
+ void CopyBytes(Register source, Register destination, Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -771,35 +674,31 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -841,6 +740,7 @@ class MacroAssembler: public Assembler {
void Push(const Operand& src) { push(src); }
void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
+ void Pop(const Operand& dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
@@ -854,6 +754,12 @@ class MacroAssembler: public Assembler {
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);
+ void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
+ void Tzcnt(Register dst, const Operand& src);
+
+ void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
+ void Popcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
@@ -872,9 +778,11 @@ class MacroAssembler: public Assembler {
void Move(XMMRegister dst, uint64_t src);
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+ void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+ void Push(Smi* smi) { Push(Immediate(smi)); }
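Both changes exploit the fact that a Smi is a tagged immediate (on ia32, the 31-bit value shifted left by one) rather than a heap object, so no handle indirection is needed:

  __ Push(Smi::FromInt(42));       // pushes the tagged immediate 84 directly
  __ Move(eax, Smi::FromInt(42));  // same encoding via the new Move overload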
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -894,7 +802,6 @@ class MacroAssembler: public Assembler {
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
// ---------------------------------------------------------------------------
// Debugging
@@ -945,10 +852,8 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
+ void EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value, uint32_t encoding_mask);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
@@ -998,14 +903,10 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
+ const ParameterCount& actual, Label* done,
+ bool* definitely_mismatches, InvokeFlag flag,
Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -1013,18 +914,14 @@ class MacroAssembler: public Assembler {
void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
+ void LoadAllocationTopHelper(Register result, Register scratch,
AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end,
- Register scratch,
+ void UpdateAllocationTopHelper(Register result_end, Register scratch,
AllocationFlags flags);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
+ void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
@@ -1032,8 +929,7 @@ class MacroAssembler: public Assembler {
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
// unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
+ inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
@@ -1045,7 +941,6 @@ class MacroAssembler: public Assembler {
friend class StandardFrame;
};
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1053,19 +948,18 @@ class MacroAssembler: public Assembler {
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
+ CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
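With the isolate threaded through (the destructor now calls Assembler::FlushICache(isolate, ...), per the .cc hunk above), a patch site looks like this sketch, where pc and target are assumed:

  {
    CodePatcher patcher(isolate, pc, 5);  // a rel32 call is 5 bytes on ia32
    patcher.masm()->call(target, RelocInfo::CODE_TARGET);
  }  // the destructor flushes the i-cache and asserts exactly 5 bytes were emitted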
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1074,39 +968,30 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
-
// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
+inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-
-inline Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
+inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
-
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand NativeContextOperand() {
+ return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
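Since the native context is now a slot of the current context (Context::NATIVE_CONTEXT_INDEX), the detour through the global object disappears and every former GlobalObjectOperand site saves a load, as in GetBuiltinFunction above:

  // Before: mov(target, GlobalObjectOperand());
  //         mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
  //         mov(target, ContextOperand(target, native_context_index));
  // After:  mov(target, NativeContextOperand());
  //         mov(target, ContextOperand(target, native_context_index));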
-
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1128,7 +1013,7 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/chromium/v8/src/ia32/simulator-ia32.h b/chromium/v8/src/ia32/simulator-ia32.h
index 02a8e9c03a4..076bde83e60 100644
--- a/chromium/v8/src/ia32/simulator-ia32.h
+++ b/chromium/v8/src/ia32/simulator-ia32.h
@@ -12,7 +12,7 @@ namespace internal {
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
@@ -21,7 +21,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
@@ -36,13 +37,18 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/chromium/v8/src/ic/access-compiler.cc b/chromium/v8/src/ic/access-compiler.cc
index 951966e7deb..0f1b7b9bf1a 100644
--- a/chromium/v8/src/ic/access-compiler.cc
+++ b/chromium/v8/src/ic/access-compiler.cc
@@ -55,8 +55,7 @@ Register PropertyAccessCompiler::slot() const {
if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
return LoadDescriptor::SlotRegister();
}
- DCHECK(FLAG_vector_stores &&
- (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
return VectorStoreICDescriptor::SlotRegister();
}
@@ -65,8 +64,7 @@ Register PropertyAccessCompiler::vector() const {
if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
return LoadWithVectorDescriptor::VectorRegister();
}
- DCHECK(FLAG_vector_stores &&
- (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
return VectorStoreICDescriptor::VectorRegister();
}
} // namespace internal
diff --git a/chromium/v8/src/ic/access-compiler.h b/chromium/v8/src/ic/access-compiler.h
index a5beb714f82..50c2cc7303a 100644
--- a/chromium/v8/src/ic/access-compiler.h
+++ b/chromium/v8/src/ic/access-compiler.h
@@ -40,7 +40,7 @@ class PropertyAccessCompiler BASE_EMBEDDED {
kind_(kind),
cache_holder_(cache_holder),
isolate_(isolate),
- masm_(isolate, NULL, 256) {
+ masm_(isolate, NULL, 256, CodeObjectRequired::kYes) {
// TODO(yangguo): remove this once we can serialize IC stubs.
masm_.enable_serializer();
}
@@ -81,7 +81,7 @@ class PropertyAccessCompiler BASE_EMBEDDED {
// Ensure that MacroAssembler has a reasonable size.
STATIC_ASSERT(sizeof(MacroAssembler) < 128 * kPointerSize);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_ACCESS_COMPILER_H_
diff --git a/chromium/v8/src/ic/arm/access-compiler-arm.cc b/chromium/v8/src/ic/arm/access-compiler-arm.cc
index 62f554792f4..d360f5a62be 100644
--- a/chromium/v8/src/ic/arm/access-compiler-arm.cc
+++ b/chromium/v8/src/ic/arm/access-compiler-arm.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || r3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r3, r4, r5};
return registers;
}
diff --git a/chromium/v8/src/ic/arm/handler-compiler-arm.cc b/chromium/v8/src/ic/arm/handler-compiler-arm.cc
index e2585fe222f..e293965e6f3 100644
--- a/chromium/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/chromium/v8/src/ic/arm/handler-compiler-arm.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -143,7 +145,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmp(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, miss_label);
// Load properties array.
@@ -169,10 +171,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ldr(result, MemOperand(cp, offset));
- __ ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- __ ldr(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ ldr(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -223,8 +222,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -293,6 +294,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -307,15 +315,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -324,7 +327,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -333,8 +336,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -356,8 +358,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
@@ -707,8 +709,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -733,7 +734,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(ip, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -745,7 +746,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/arm/ic-arm.cc b/chromium/v8/src/ic/arm/ic-arm.cc
index de219ae72fc..f59ac074be2 100644
--- a/chromium/v8/src/ic/arm/ic-arm.cc
+++ b/chromium/v8/src/ic/arm/ic-arm.cc
@@ -26,8 +26,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
__ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, global_object);
}
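Note: dropping the JS_BUILTINS_OBJECT_TYPE comparison reflects an instance-type renumbering: builtins objects no longer exist as a distinct type, so a global-object check is just two equality tests. The same renumbering is behind the FIRST_SPEC_OBJECT_TYPE to FIRST_JS_RECEIVER_TYPE rename earlier in this patch, since receiver-ness is a range test over the enum. A compilable sketch of both checks (enum values invented for illustration; only the ordering matters):

```cpp
// Invented values; only the ordering matters for the range check.
enum InstanceType {
  LAST_NAME_TYPE = 0x80,
  FIRST_JS_RECEIVER_TYPE = 0x81,   // everything >= this can be a receiver
  JS_GLOBAL_OBJECT_TYPE = 0x82,
  JS_GLOBAL_PROXY_TYPE = 0x83,
  JS_OBJECT_TYPE = 0x84,
};

bool IsJSReceiver(InstanceType type) {
  return type >= FIRST_JS_RECEIVER_TYPE;  // cmp + b(lt, miss) in the stubs
}

bool IsGlobalObject(InstanceType type) {
  // Two equality tests now that JS_BUILTINS_OBJECT_TYPE is gone.
  return type == JS_GLOBAL_OBJECT_TYPE || type == JS_GLOBAL_PROXY_TYPE;
}
```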
@@ -311,8 +309,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -325,8 +322,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -341,8 +337,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -355,8 +350,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Perform tail call to the entry.
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -432,7 +426,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
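Note: FeedbackVectorICSlot disappears in favour of plain FeedbackVectorSlot: the feedback vector no longer distinguishes IC slots from other slots at the type level, so dummy-vector lookups like the one above construct a single slot type. A sketch of the unified wrapper (simplified; the real class carries more):

```cpp
// One slot type instead of the old FeedbackVectorSlot/FeedbackVectorICSlot
// pair. Simplified stand-in for illustration.
class FeedbackVectorSlot {
 public:
  explicit FeedbackVectorSlot(int id) : id_(id) {}
  int ToInt() const { return id_; }
  bool operator==(FeedbackVectorSlot other) const { return id_ == other.id_; }

 private:
  int id_;
};

// Call sites that used to pick between the two wrappers now just do:
//   int slot_index = dummy_vector->GetIndex(FeedbackVectorSlot(kDummySlot));
```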
@@ -464,23 +458,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -495,8 +483,11 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = r4;
+ Register scratch = r4;
Register address = r5;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, address));
+
if (check_map == kCheckMap) {
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
@@ -509,12 +500,10 @@ static void KeyedStoreGenerateMegamorphicHelper(
// there may be a callback on the element
Label holecheck_passed1;
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch_value,
- MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
- __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
+ __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
__ b(ne, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -524,8 +513,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, key, Operand(Smi::FromInt(1)));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
  // It's irrelevant whether the array is smi-only or not when writing a smi.

__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -534,22 +523,21 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, key, Operand(Smi::FromInt(1)));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand::PointerOffsetFromSmiKey(key));
__ str(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ __ mov(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
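Note: the rename from scratch_value to scratch is cosmetic, but the copy before RecordWrite is not: the write barrier may clobber its register inputs, and the store IC must return the stored value, so the barrier only ever sees a copy. A conceptual sketch (the barrier body is a placeholder, not how V8's barrier actually works):

```cpp
// Conceptual only: shows why `value` is copied into a scratch register
// before the write barrier.
struct Heap {
  static void RecordWrite(void** slot, void*& scratch) {
    (void)slot;
    scratch = nullptr;  // barrier is free to destroy its scratch input
  }
};

void* StoreAndReturn(void** slot, void* value) {
  *slot = value;
  void* scratch = value;           // preserve the value which is returned
  Heap::RecordWrite(slot, scratch);
  return value;                    // intact: the barrier clobbered the copy
}
```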
@@ -567,33 +555,31 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ add(address, elements,
Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
kHeapObjectTag));
- __ ldr(scratch_value,
- MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
- __ cmp(scratch_value, Operand(kHoleNanUpper32));
+ __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
__ b(ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, key, Operand(Smi::FromInt(1)));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+ __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -604,7 +590,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -616,7 +602,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
 // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -692,27 +678,24 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
- // We use register r8 when FLAG_vector_stores is enabled, because otherwise
- // probing the megamorphic stub cache would require pushing temporaries on
- // the stack.
+ // We use register r8, because otherwise probing the megamorphic stub cache
+ // would require pushing temporaries on the stack.
// TODO(mvstanton): quit using register r8 when
// FLAG_enable_embedded_constant_pool is turned on.
- DCHECK(!FLAG_vector_stores || !FLAG_enable_embedded_constant_pool);
- Register temporary2 = FLAG_vector_stores ? r8 : r4;
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
-
- DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
- }
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ Register temporary2 = r8;
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+
+ DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -790,8 +773,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -855,7 +837,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
@@ -894,7 +877,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// tst rx, #kSmiTagMask
// b ne/eq, <target>
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsCmpRegister(instr_at_patch));
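Note: PatchInlinedSmiCode growing an Isolate* parameter, and CodePatcher (and, below, PatchingAssembler) taking one in their constructors, is the same refactor seen across this patch: code-patching helpers stop fishing the isolate out of ambient state and have it threaded in explicitly. A sketch of the constructor change (hypothetical minimal class, not the real one):

```cpp
// Hypothetical minimal patcher showing the explicit-isolate style.
struct Isolate;  // opaque here

class CodePatcher {
 public:
  // After: the isolate is a constructor argument...
  CodePatcher(Isolate* isolate, unsigned char* address, int instructions)
      : isolate_(isolate), address_(address), size_(instructions) {}
  // ...instead of a hidden Isolate::Current()-style lookup inside the body.

 private:
  Isolate* isolate_;
  unsigned char* address_;
  int size_;
};

void PatchInlinedSmiCode(Isolate* isolate, unsigned char* address) {
  CodePatcher patcher(isolate, address, /*instructions=*/2);
  // ... rewrite cmp <-> tst and flip the branch condition ...
}
```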
diff --git a/chromium/v8/src/ic/arm/ic-compiler-arm.cc b/chromium/v8/src/ic/arm/ic-compiler-arm.cc
index 9b8abd3298c..318523199a2 100644
--- a/chromium/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/chromium/v8/src/ic/arm/ic-compiler-arm.cc
@@ -22,109 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(r0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ cmp(this->name(), Operand(name));
- __ b(ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
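Note: the deleted CompilePolymorphic/CompileKeyedStorePolymorphic bodies all emitted the same shape of code: load the receiver map, compare it against each cached map via a weak cell, and jump to the matching handler, falling through to miss. With vector ICs the dispatch is driven from the feedback vector rather than a stub compiled per call site, so the per-architecture emitters go away. The removed logic, restated as a C++ loop (illustrative types, weakness elided):

```cpp
// Illustrative restatement of what the deleted stubs used to emit.
#include <cstddef>

struct Map;
struct Handler { void (*entry)(); };

struct PolymorphicCache {
  const Map** maps;        // weak cells in the real thing; raw for brevity
  const Handler* handlers;
  size_t length;
};

void Dispatch(const Map* receiver_map, const PolymorphicCache& cache,
              void (*miss)()) {
  for (size_t i = 0; i < cache.length; ++i) {
    if (cache.maps[i] == receiver_map) {
      cache.handlers[i].entry();  // __ Jump(handlers->at(i), ..., eq)
      return;
    }
  }
  miss();  // TailCallBuiltin(masm(), MissBuiltin(kind()))
}
```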
diff --git a/chromium/v8/src/ic/arm64/access-compiler-arm64.cc b/chromium/v8/src/ic/arm64/access-compiler-arm64.cc
index 13b0887a823..892ce85dfbd 100644
--- a/chromium/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/chromium/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -38,7 +38,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || x3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, x3, x4, x5};
return registers;
}
diff --git a/chromium/v8/src/ic/arm64/handler-compiler-arm64.cc b/chromium/v8/src/ic/arm64/handler-compiler-arm64.cc
index 10ea1d72ff3..7cfef6a1b41 100644
--- a/chromium/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/chromium/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -59,7 +59,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(scratch0, FIRST_JS_RECEIVER_TYPE);
__ B(lt, miss_label);
// Load properties array.
@@ -78,9 +78,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- __ Ldr(result, ContextMemOperand(result, index));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ Ldr(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -132,9 +130,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -207,6 +206,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -248,7 +254,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -285,7 +292,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(x1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -300,15 +308,10 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -317,7 +320,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -327,8 +330,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -378,7 +380,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -404,8 +406,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
@@ -767,8 +769,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -796,7 +797,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/arm64/ic-arm64.cc b/chromium/v8/src/ic/arm64/ic-arm64.cc
index c4c856aab7e..eb933c78ec9 100644
--- a/chromium/v8/src/ic/arm64/ic-arm64.cc
+++ b/chromium/v8/src/ic/arm64/ic-arm64.cc
@@ -22,7 +22,6 @@ namespace internal {
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
__ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
__ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
__ B(eq, global_object);
}
@@ -294,8 +293,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadWithVectorDescriptor::NameRegister(),
LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -306,8 +304,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -325,8 +322,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadWithVectorDescriptor::VectorRegister());
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -337,8 +333,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -410,7 +405,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
@@ -471,24 +466,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateMiss");
StoreIC_PushArgs(masm);
-
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -691,19 +679,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(x10, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ Mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -779,8 +765,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -840,7 +825,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The patch information is encoded in the instruction stream using
// instructions which have no side effects, so we can safely execute them.
// The patch information is encoded directly after the call to the helper
@@ -865,7 +851,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// to
// tb(!n)z test_reg, #0, <target>
Instruction* to_patch = info.SmiCheck();
- PatchingAssembler patcher(to_patch, 1);
+ PatchingAssembler patcher(isolate, to_patch, 1);
DCHECK(to_patch->IsTestBranch());
DCHECK(to_patch->ImmTestBranchBit5() == 0);
DCHECK(to_patch->ImmTestBranchBit40() == 0);
diff --git a/chromium/v8/src/ic/arm64/ic-compiler-arm64.cc b/chromium/v8/src/ic/arm64/ic-compiler-arm64.cc
index b4a4163fed5..c99c637ab19 100644
--- a/chromium/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/chromium/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -23,115 +23,9 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(x10);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- Label try_next;
- __ B(ne, &try_next);
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ Bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ Bind(&try_next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
-
- ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
-
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; i++) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- Label skip;
- __ B(&skip, ne);
- if (!transitioned_maps->at(i).is_null()) {
- // This argument is used by the handler stub. For example, see
- // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- }
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ Bind(&skip);
- }
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/ic/call-optimization.h b/chromium/v8/src/ic/call-optimization.h
index 01947d7fed2..7963d1ce670 100644
--- a/chromium/v8/src/ic/call-optimization.h
+++ b/chromium/v8/src/ic/call-optimization.h
@@ -61,7 +61,7 @@ class CallOptimization BASE_EMBEDDED {
Handle<FunctionTemplateInfo> expected_receiver_type_;
Handle<CallHandlerInfo> api_call_info_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_CALL_OPTIMIZATION_H_
diff --git a/chromium/v8/src/ic/handler-compiler.cc b/chromium/v8/src/ic/handler-compiler.cc
index 77e0fb5e435..b353628053c 100644
--- a/chromium/v8/src/ic/handler-compiler.cc
+++ b/chromium/v8/src/ic/handler-compiler.cc
@@ -58,7 +58,7 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
cache_name = name;
JSReceiver* prototype = JSReceiver::cast(current_map->prototype());
if (!prototype->map()->is_hidden_prototype() &&
- !prototype->map()->IsGlobalObjectMap()) {
+ !prototype->map()->IsJSGlobalObjectMap()) {
break;
}
}
@@ -330,6 +330,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
PrototypeIterator iter(isolate(), last);
while (!iter.IsAtEnd()) {
lost_holder_register = true;
+ // Casting to JSObject is fine here. The LookupIterator makes sure to
+ // look behind non-masking interceptors during the original lookup, and
+ // we wouldn't try to compile a handler if there was a Proxy anywhere.
last = iter.GetCurrent<JSObject>();
iter.Advance();
}
@@ -424,7 +427,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss;
- if (FLAG_vector_stores) PushVectorAndSlot();
+ PushVectorAndSlot();
// Check that we are allowed to write this.
bool is_nonexistent = holder()->map() == transition->GetBackPointer();
@@ -456,18 +459,19 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
- Register transition_map_reg = StoreTransitionHelper::MapRegister();
- bool stack_args = StoreTransitionHelper::UsesStackArgs();
- Register map_reg = stack_args ? scratch1() : transition_map_reg;
+ bool virtual_args = StoreTransitionHelper::HasVirtualSlotArg();
+ Register map_reg = StoreTransitionHelper::MapRegister();
if (details.type() == DATA_CONSTANT) {
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
- GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
- GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
- if (stack_args) {
- // Also pushes vector and slot.
- GeneratePushMap(map_reg, scratch2());
- } else if (FLAG_vector_stores) {
+ Register tmp =
+ virtual_args ? VectorStoreICDescriptor::VectorRegister() : map_reg;
+ GenerateRestoreMap(transition, tmp, scratch2(), &miss);
+ GenerateConstantCheck(tmp, descriptor, value(), scratch2(), &miss);
+ if (virtual_args) {
+ // This will move the map from tmp into map_reg.
+ RearrangeVectorAndSlot(tmp, map_reg);
+ } else {
PopVectorAndSlot();
}
GenerateRestoreName(name);
@@ -484,11 +488,12 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
: StoreTransitionStub::StoreMapAndValue;
- GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
- if (stack_args) {
- // Also pushes vector and slot.
- GeneratePushMap(map_reg, scratch2());
- } else if (FLAG_vector_stores) {
+ Register tmp =
+ virtual_args ? VectorStoreICDescriptor::VectorRegister() : map_reg;
+ GenerateRestoreMap(transition, tmp, scratch2(), &miss);
+ if (virtual_args) {
+ RearrangeVectorAndSlot(tmp, map_reg);
+ } else {
PopVectorAndSlot();
}
GenerateRestoreName(name);
@@ -499,7 +504,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
}
GenerateRestoreName(&miss, name);
- if (FLAG_vector_stores) PopVectorAndSlot();
+ PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/handler-compiler.h b/chromium/v8/src/ic/handler-compiler.h
index f5dafe9038a..fe592103531 100644
--- a/chromium/v8/src/ic/handler-compiler.h
+++ b/chromium/v8/src/ic/handler-compiler.h
@@ -251,7 +251,10 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
virtual void FrontendFooter(Handle<Name> name, Label* miss);
void GenerateRestoreName(Label* label, Handle<Name> name);
- void GeneratePushMap(Register map_reg, Register scratch);
+
+ // Pop the vector and slot into appropriate registers, moving the map in
+ // the process. (This is an accommodation for register pressure on ia32).
+ void RearrangeVectorAndSlot(Register current_map, Register destination_map);
private:
void GenerateRestoreName(Handle<Name> name);
@@ -297,7 +300,7 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
static void GenerateStoreSlow(MacroAssembler* masm);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_HANDLER_COMPILER_H_
diff --git a/chromium/v8/src/ic/ia32/access-compiler-ia32.cc b/chromium/v8/src/ic/ia32/access-compiler-ia32.cc
index acb3526d9d8..1825202366e 100644
--- a/chromium/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/chromium/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -30,8 +30,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores ||
- ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/chromium/v8/src/ic/ia32/handler-compiler-ia32.cc b/chromium/v8/src/ic/ia32/handler-compiler-ia32.cc
index 1d019092c7b..0b380b3ee2c 100644
--- a/chromium/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/chromium/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -92,7 +92,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -114,10 +114,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(result, Operand(esi, offset));
- __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
- __ mov(result, Operand(result, Context::SlotOffset(index)));
+ __ LoadGlobalFunction(index, result);
// Load its initial map. The global functions all have initial maps.
__ mov(result,
FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -206,6 +203,12 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the code.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ mov(api_function_address, Immediate(function_address));
@@ -261,7 +264,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -294,8 +297,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -303,25 +308,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // which contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
}
@@ -330,7 +325,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -339,8 +334,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -362,18 +356,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
- // current after GeneratePushMap
- // -------------------------------------------------
- // ret addr slot
- // vector vector
- // sp -> slot map
- // sp -> ret addr
- //
- __ xchg(map_reg, Operand(esp, 0));
- __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
- __ push(map_reg);
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
+ DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
+ DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(isolate());
+ __ mov(destination_map, current_map);
+ __ pop(current_map);
+ __ mov(Operand::StaticVariable(virtual_slot), current_map);
+ __ pop(current_map); // put vector in place.
}
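Note: on ia32 there aren't enough registers to keep the map, vector, and slot live at once, so the new RearrangeVectorAndSlot parks the slot in an isolate-level "virtual register" (an ExternalReference memory cell) while the map takes over the vector's register. A sketch of the idea, with a plain memory cell standing in for the external reference (hypothetical model, including the assumed entry stack layout):

```cpp
// Hypothetical model: registers as variables, the virtual slot register as
// a memory cell owned by the isolate.
struct Isolate {
  void* virtual_slot_register;  // spill cell for the slot value
};

struct Registers {
  void* map_reg;     // StoreTransitionHelper::MapRegister()
  void* vector_reg;  // currently holds the transition map (current_map)
};

// Assumed stack on entry: [slot, vector, ...]; both are popped on exit.
void RearrangeVectorAndSlot(Isolate* iso, Registers& r, void**& sp) {
  r.map_reg = r.vector_reg;            // mov destination_map, current_map
  iso->virtual_slot_register = *sp++;  // pop slot into the spill cell
  r.vector_reg = *sp++;                // pop the real vector into place
}
```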
@@ -734,8 +726,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ push(scratch2()); // restore old return address
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -760,7 +751,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -776,7 +767,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/ia32/ic-compiler-ia32.cc b/chromium/v8/src/ic/ia32/ic-compiler-ia32.cc
index d0a2e0bd546..d93b67bffc5 100644
--- a/chromium/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/chromium/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -27,104 +27,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ push(ebx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
- __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/chromium/v8/src/ic/ia32/ic-ia32.cc b/chromium/v8/src/ic/ia32/ic-ia32.cc
index 7a6a41541cc..88947e47e78 100644
--- a/chromium/v8/src/ic/ia32/ic-ia32.cc
+++ b/chromium/v8/src/ic/ia32/ic-ia32.cc
@@ -25,8 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object);
}
@@ -341,7 +339,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(isolate);
int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
@@ -563,26 +561,22 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, key, edi, no_reg);
- if (FLAG_vector_stores) {
- __ pop(VectorStoreICDescriptor::VectorRegister());
- __ pop(VectorStoreICDescriptor::SlotRegister());
- }
+ __ pop(VectorStoreICDescriptor::VectorRegister());
+ __ pop(VectorStoreICDescriptor::SlotRegister());
// Cache miss.
__ jmp(&miss);
@@ -678,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -697,8 +690,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -709,8 +701,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -728,27 +719,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- // This shouldn't be called.
- __ int3();
- return;
- }
-
- // Return address is on the stack.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
+ // This shouldn't be called.
+ // TODO(mvstanton): remove this method.
+ __ int3();
+ return;
}
@@ -756,25 +735,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // Contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // Contains the return address.
}
@@ -783,8 +752,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -800,25 +768,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
- if (FLAG_vector_stores) {
- __ push(vector);
- __ push(slot);
- }
+ __ push(vector);
+ __ push(slot);
Register dictionary = ebx;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(FLAG_vector_stores ? 3 : 1);
+ __ Drop(3);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
- if (FLAG_vector_stores) {
- __ pop(slot);
- __ pop(vector);
- }
+ __ pop(slot);
+ __ pop(vector);
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
@@ -830,8 +794,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -869,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
diff --git a/chromium/v8/src/ic/ia32/stub-cache-ia32.cc b/chromium/v8/src/ic/ia32/stub-cache-ia32.cc
index 7366ebe15f7..fcfae4bc0ce 100644
--- a/chromium/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/chromium/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -24,7 +24,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
bool is_vector_store =
diff --git a/chromium/v8/src/ic/ic-compiler.cc b/chromium/v8/src/ic/ic-compiler.cc
index 20e4fedc23b..ae4b2a5d58c 100644
--- a/chromium/v8/src/ic/ic-compiler.cc
+++ b/chromium/v8/src/ic/ic-compiler.cc
@@ -33,60 +33,6 @@ bool PropertyICCompiler::IncludesNumberMap(MapHandleList* maps) {
}
-Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<Map> map,
- Handle<Code> handler,
- Handle<Name> name,
- IcCheckType check) {
- MapHandleList maps(1);
- CodeHandleList handlers(1);
- maps.Add(map);
- handlers.Add(handler);
- Code::StubType stub_type = handler->type();
- return CompilePolymorphic(&maps, &handlers, name, stub_type, check);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeMonomorphic(
- Code::Kind kind, Handle<Name> name, Handle<Map> map, Handle<Code> handler,
- ExtraICState extra_ic_state) {
- Isolate* isolate = name->GetIsolate();
- if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
- handler.is_identical_to(isolate->builtins()->LoadIC_Normal_Strong()) ||
- handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
- name = isolate->factory()->normal_ic_symbol();
- }
-
- CacheHolderFlag flag;
- Handle<Map> stub_holder = IC::GetICCacheHolder(map, isolate, &flag);
- if (kind == Code::KEYED_STORE_IC) {
- // Always set the "property" bit.
- extra_ic_state =
- KeyedStoreIC::IcCheckTypeField::update(extra_ic_state, PROPERTY);
- DCHECK(STANDARD_STORE ==
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
- } else if (kind == Code::KEYED_LOAD_IC) {
- extra_ic_state = KeyedLoadIC::IcCheckTypeField::update(extra_ic_state,
- PROPERTY);
- }
-
- Handle<Code> ic;
- // There are multiple string maps that all use the same prototype. That
- // prototype cannot hold multiple handlers, one for each of the string maps,
- // for a single name. Hence, turn off caching of the IC.
- bool can_be_cached = map->instance_type() >= FIRST_NONSTRING_TYPE;
- if (can_be_cached) {
- ic = Find(name, stub_holder, kind, extra_ic_state, flag);
- if (!ic.is_null()) return ic;
- }
-
- PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
- ic = ic_compiler.CompileMonomorphic(map, handler, name, PROPERTY);
-
- if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
- return ic;
-}
-
-
Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
Handle<Map> receiver_map, ExtraICState extra_ic_state) {
Isolate* isolate = receiver_map->GetIsolate();
@@ -138,35 +84,6 @@ Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
}
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode) {
- Isolate* isolate = receiver_map->GetIsolate();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
-
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
- Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
- store_mode);
- return code;
-}
-
-
Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
ExtraICState state) {
Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
@@ -239,17 +156,6 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
}
-Handle<Code> PropertyICCompiler::ComputePolymorphic(
- Code::Kind kind, MapHandleList* maps, CodeHandleList* handlers,
- int valid_maps, Handle<Name> name, ExtraICState extra_ic_state) {
- Handle<Code> handler = handlers->at(0);
- Code::StubType type = valid_maps == 1 ? handler->type() : Code::NORMAL;
- DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
- PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
- return ic_compiler.CompilePolymorphic(maps, handlers, name, type, PROPERTY);
-}
-
-
void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
@@ -267,31 +173,6 @@ void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
}
-Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- LanguageMode language_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
LoadIC::GenerateInitialize(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
@@ -394,22 +275,6 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- CompileKeyedStorePolymorphicHandlers(receiver_maps, &transitioned_maps,
- &handlers, store_mode);
-
- Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
- &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
- return code;
-}
-
-
#define __ ACCESS_MASM(masm())
diff --git a/chromium/v8/src/ic/ic-compiler.h b/chromium/v8/src/ic/ic-compiler.h
index ee6597d59d0..08444df6541 100644
--- a/chromium/v8/src/ic/ic-compiler.h
+++ b/chromium/v8/src/ic/ic-compiler.h
@@ -21,15 +21,6 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
ExtraICState extra_state);
- static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
- Handle<Map> map, Handle<Code> handler,
- ExtraICState extra_ic_state);
- static Handle<Code> ComputePolymorphic(Code::Kind kind, MapHandleList* maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- ExtraICState extra_ic_state);
-
// Keyed
static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
Handle<Map> receiver_map, ExtraICState extra_ic_state);
@@ -37,16 +28,10 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode);
static void ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
LanguageMode language_mode);
- static Handle<Code> ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- LanguageMode language_mode);
// Compare nil
static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
@@ -77,25 +62,14 @@ class PropertyICCompiler : public PropertyAccessCompiler {
Handle<Code> CompileStoreGeneric(Code::Flags flags);
Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
- Handle<Code> CompileMonomorphic(Handle<Map> map, Handle<Code> handler,
- Handle<Name> name, IcCheckType check);
- Handle<Code> CompilePolymorphic(MapHandleList* maps, CodeHandleList* handlers,
- Handle<Name> name, Code::StubType type,
- IcCheckType check);
-
Handle<Code> CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- KeyedAccessStoreMode store_mode);
void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
MapHandleList* transitioned_maps,
CodeHandleList* handlers,
KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps);
bool IncludesNumberMap(MapHandleList* maps);
@@ -125,7 +99,7 @@ class PropertyICCompiler : public PropertyAccessCompiler {
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_IC_COMPILER_H_
diff --git a/chromium/v8/src/ic/ic-inl.h b/chromium/v8/src/ic/ic-inl.h
index 646b73d6414..6dab006ad56 100644
--- a/chromium/v8/src/ic/ic-inl.h
+++ b/chromium/v8/src/ic/ic-inl.h
@@ -60,9 +60,8 @@ void IC::SetTargetAtAddress(Address address, Code* target,
DCHECK(!target->is_inline_cache_stub() ||
(target->kind() != Code::LOAD_IC &&
target->kind() != Code::KEYED_LOAD_IC &&
- target->kind() != Code::CALL_IC &&
- (!FLAG_vector_stores || (target->kind() != Code::STORE_IC &&
- target->kind() != Code::KEYED_STORE_IC))));
+ target->kind() != Code::CALL_IC && target->kind() != Code::STORE_IC &&
+ target->kind() != Code::KEYED_STORE_IC));
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
@@ -75,7 +74,7 @@ void IC::SetTargetAtAddress(Address address, Code* target,
StoreICState::GetLanguageMode(target->extra_ic_state()));
}
#endif
- Assembler::set_target_address_at(address, constant_pool,
+ Assembler::set_target_address_at(heap->isolate(), address, constant_pool,
target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
@@ -127,19 +126,6 @@ Code* IC::raw_target() const {
void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
-JSFunction* IC::GetRootConstructor(Map* receiver_map, Context* native_context) {
- DisallowHeapAllocation no_alloc;
- if (receiver_map->IsPrimitiveMap()) {
- int constructor_function_index =
- receiver_map->GetConstructorFunctionIndex();
- if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
- return JSFunction::cast(native_context->get(constructor_function_index));
- }
- }
- return nullptr;
-}
-
-
Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder, Isolate* isolate,
CacheHolderFlag* flag) {
@@ -147,9 +133,9 @@ Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
*flag = kCacheOnReceiver;
return receiver_map;
}
- Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(*receiver_map, native_context);
- if (builtin_ctor != NULL) {
+ Handle<JSFunction> builtin_ctor;
+ if (Map::GetConstructorFunction(receiver_map, isolate->native_context())
+ .ToHandle(&builtin_ctor)) {
*flag = kCacheOnPrototypeReceiverIsPrimitive;
return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map());
}
@@ -163,9 +149,9 @@ Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
Handle<Map> IC::GetICCacheHolder(Handle<Map> map, Isolate* isolate,
CacheHolderFlag* flag) {
- Context* native_context = *isolate->native_context();
- JSFunction* builtin_ctor = GetRootConstructor(*map, native_context);
- if (builtin_ctor != NULL) {
+ Handle<JSFunction> builtin_ctor;
+ if (Map::GetConstructorFunction(map, isolate->native_context())
+ .ToHandle(&builtin_ctor)) {
*flag = kCacheOnPrototype;
return handle(builtin_ctor->initial_map());
}
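[Editor's note] Both hunks above replace a raw "JSFunction* ... != NULL" check with Map::GetConstructorFunction(...).ToHandle(&builtin_ctor), the MaybeHandle pattern that makes the not-found case explicit. A minimal standalone sketch of that pattern; MaybeDemo and JSFunctionDemo are simplified stand-ins, not the real V8 types:

    #include <cassert>

    // Simplified stand-in for v8::internal::MaybeHandle<T>: a possibly-empty
    // reference that is only usable after ToHandle() confirms it is non-empty.
    template <typename T>
    class MaybeDemo {
     public:
      MaybeDemo() : ptr_(nullptr) {}
      explicit MaybeDemo(T* ptr) : ptr_(ptr) {}
      bool ToHandle(T** out) const {
        if (ptr_ == nullptr) return false;
        *out = ptr_;
        return true;
      }

     private:
      T* ptr_;
    };

    struct JSFunctionDemo {
      int initial_map;
    };

    int main() {
      JSFunctionDemo ctor = {42};
      MaybeDemo<JSFunctionDemo> found(&ctor);
      MaybeDemo<JSFunctionDemo> missing;  // map with no constructor function
      JSFunctionDemo* out = nullptr;
      assert(found.ToHandle(&out) && out->initial_map == 42);
      assert(!missing.ToHandle(&out));  // caller falls through to next case
      return 0;
    }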
@@ -193,7 +179,7 @@ bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
return (host->kind() == Code::OPTIMIZED_FUNCTION &&
host->marked_for_deoptimization());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_INL_H_
diff --git a/chromium/v8/src/ic/ic-state.cc b/chromium/v8/src/ic/ic-state.cc
index bc03d7d4870..4bdaf3ff036 100644
--- a/chromium/v8/src/ic/ic-state.cc
+++ b/chromium/v8/src/ic/ic-state.cc
@@ -16,22 +16,8 @@ void ICUtility::Clear(Isolate* isolate, Address address,
}
-CallICState::CallICState(ExtraICState extra_ic_state)
- : argc_(ArgcBits::decode(extra_ic_state)),
- call_type_(CallTypeBits::decode(extra_ic_state)) {}
-
-
-ExtraICState CallICState::GetExtraICState() const {
- ExtraICState extra_ic_state =
- ArgcBits::encode(argc_) | CallTypeBits::encode(call_type_);
- return extra_ic_state;
-}
-
-
std::ostream& operator<<(std::ostream& os, const CallICState& s) {
- return os << "(args(" << s.arg_count() << "), "
- << (s.call_type() == CallICState::METHOD ? "METHOD" : "FUNCTION")
- << ", ";
+ return os << "(args(" << s.argc() << "), " << s.convert_mode() << ", ";
}
@@ -205,17 +191,17 @@ void BinaryOpICState::GenerateAheadOfTime(
}
-Type* BinaryOpICState::GetResultType(Zone* zone) const {
+Type* BinaryOpICState::GetResultType() const {
Kind result_kind = result_kind_;
if (HasSideEffects()) {
result_kind = NONE;
} else if (result_kind == GENERIC && op_ == Token::ADD) {
- return Type::Union(Type::Number(zone), Type::String(zone), zone);
+ return Type::NumberOrString();
} else if (result_kind == NUMBER && op_ == Token::SHR) {
- return Type::Unsigned32(zone);
+ return Type::Unsigned32();
}
DCHECK_NE(GENERIC, result_kind);
- return KindToType(result_kind, zone);
+ return KindToType(result_kind);
}
@@ -334,20 +320,20 @@ const char* BinaryOpICState::KindToString(Kind kind) {
// static
-Type* BinaryOpICState::KindToType(Kind kind, Zone* zone) {
+Type* BinaryOpICState::KindToType(Kind kind) {
switch (kind) {
case NONE:
- return Type::None(zone);
+ return Type::None();
case SMI:
- return Type::SignedSmall(zone);
+ return Type::SignedSmall();
case INT32:
- return Type::Signed32(zone);
+ return Type::Signed32();
case NUMBER:
- return Type::Number(zone);
+ return Type::Number();
case STRING:
- return Type::String(zone);
+ return Type::String();
case GENERIC:
- return Type::Any(zone);
+ return Type::Any();
}
UNREACHABLE();
return NULL;
@@ -370,10 +356,10 @@ const char* CompareICState::GetStateName(State state) {
return "STRING";
case UNIQUE_NAME:
return "UNIQUE_NAME";
- case OBJECT:
- return "OBJECT";
- case KNOWN_OBJECT:
- return "KNOWN_OBJECT";
+ case RECEIVER:
+ return "RECEIVER";
+ case KNOWN_RECEIVER:
+ return "KNOWN_RECEIVER";
case GENERIC:
return "GENERIC";
}
@@ -398,9 +384,9 @@ Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
return Type::InternalizedString(zone);
case UNIQUE_NAME:
return Type::UniqueName(zone);
- case OBJECT:
+ case RECEIVER:
return Type::Receiver(zone);
- case KNOWN_OBJECT:
+ case KNOWN_RECEIVER:
return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
case GENERIC:
return Type::Any(zone);
@@ -420,7 +406,7 @@ CompareICState::State CompareICState::NewInputState(State old_state,
if (value->IsInternalizedString()) return INTERNALIZED_STRING;
if (value->IsString()) return STRING;
if (value->IsSymbol()) return UNIQUE_NAME;
- if (value->IsJSObject()) return OBJECT;
+ if (value->IsJSReceiver()) return RECEIVER;
break;
case BOOLEAN:
if (value->IsBoolean()) return BOOLEAN;
@@ -443,12 +429,12 @@ CompareICState::State CompareICState::NewInputState(State old_state,
case UNIQUE_NAME:
if (value->IsUniqueName()) return UNIQUE_NAME;
break;
- case OBJECT:
- if (value->IsJSObject()) return OBJECT;
+ case RECEIVER:
+ if (value->IsJSReceiver()) return RECEIVER;
break;
case GENERIC:
break;
- case KNOWN_OBJECT:
+ case KNOWN_RECEIVER:
UNREACHABLE();
break;
}
@@ -479,12 +465,12 @@ CompareICState::State CompareICState::TargetState(
return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING;
}
if (x->IsString() && y->IsString()) return STRING;
- if (x->IsJSObject() && y->IsJSObject()) {
- if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map()) {
- return KNOWN_OBJECT;
+ if (x->IsJSReceiver() && y->IsJSReceiver()) {
+ if (Handle<JSReceiver>::cast(x)->map() ==
+ Handle<JSReceiver>::cast(y)->map()) {
+ return KNOWN_RECEIVER;
} else {
- return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
+ return Token::IsEqualityOp(op) ? RECEIVER : GENERIC;
}
}
if (!Token::IsEqualityOp(op)) return GENERIC;
@@ -504,15 +490,15 @@ CompareICState::State CompareICState::TargetState(
if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
return GENERIC;
- case KNOWN_OBJECT:
- if (x->IsJSObject() && y->IsJSObject()) {
- return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
+ case KNOWN_RECEIVER:
+ if (x->IsJSReceiver() && y->IsJSReceiver()) {
+ return Token::IsEqualityOp(op) ? RECEIVER : GENERIC;
}
return GENERIC;
case BOOLEAN:
case STRING:
case UNIQUE_NAME:
- case OBJECT:
+ case RECEIVER:
case GENERIC:
return GENERIC;
}
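[Editor's note] The rename threaded through the hunks above (OBJECT to RECEIVER, KNOWN_OBJECT to KNOWN_RECEIVER, IsJSObject to IsJSReceiver) widens the compare IC from plain JS objects to all receivers, keeping the same state choice: same-map receivers get the faster KNOWN_RECEIVER check, differing maps stay RECEIVER only for equality ops, and everything else generalizes. A standalone sketch of just that branch of TargetState, with maps modeled as plain ints (illustrative only):

    #include <cassert>

    enum State { KNOWN_RECEIVER, RECEIVER, GENERIC };

    State TargetStateForReceivers(int map_x, int map_y, bool is_equality_op) {
      if (map_x == map_y) return KNOWN_RECEIVER;  // specific map, faster check
      return is_equality_op ? RECEIVER : GENERIC;
    }

    int main() {
      assert(TargetStateForReceivers(1, 1, true) == KNOWN_RECEIVER);
      assert(TargetStateForReceivers(1, 2, true) == RECEIVER);
      assert(TargetStateForReceivers(1, 2, false) == GENERIC);
      return 0;
    }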
diff --git a/chromium/v8/src/ic/ic-state.h b/chromium/v8/src/ic/ic-state.h
index b529b8c54de..1982fbe08b3 100644
--- a/chromium/v8/src/ic/ic-state.h
+++ b/chromium/v8/src/ic/ic-state.h
@@ -23,30 +23,29 @@ class ICUtility : public AllStatic {
class CallICState final BASE_EMBEDDED {
public:
- explicit CallICState(ExtraICState extra_ic_state);
+ explicit CallICState(ExtraICState extra_ic_state)
+ : bit_field_(extra_ic_state) {}
+ CallICState(int argc, ConvertReceiverMode convert_mode)
+ : bit_field_(ArgcBits::encode(argc) |
+ ConvertModeBits::encode(convert_mode)) {}
- enum CallType { METHOD, FUNCTION };
-
- CallICState(int argc, CallType call_type)
- : argc_(argc), call_type_(call_type) {}
-
- ExtraICState GetExtraICState() const;
+ ExtraICState GetExtraICState() const { return bit_field_; }
static void GenerateAheadOfTime(Isolate*,
void (*Generate)(Isolate*,
const CallICState&));
- int arg_count() const { return argc_; }
- CallType call_type() const { return call_type_; }
-
- bool CallAsMethod() const { return call_type_ == METHOD; }
+ int argc() const { return ArgcBits::decode(bit_field_); }
+ ConvertReceiverMode convert_mode() const {
+ return ConvertModeBits::decode(bit_field_);
+ }
private:
- class ArgcBits : public BitField<int, 0, Code::kArgumentsBits> {};
- class CallTypeBits : public BitField<CallType, Code::kArgumentsBits, 1> {};
+ typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
+ typedef BitField<ConvertReceiverMode, Code::kArgumentsBits, 2>
+ ConvertModeBits;
- const int argc_;
- const CallType call_type_;
+ int const bit_field_;
};
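[Editor's note] The rewritten CallICState above packs argc and the receiver-conversion mode into a single integer via BitField, so GetExtraICState() becomes a trivial read of bit_field_. A compilable sketch of that packing; BitFieldDemo is a simplified stand-in for V8's BitField template, and the 16-bit argc width is illustrative (the real code sizes ArgcBits by Code::kArgumentsBits):

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitFieldDemo {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> kShift);
      }
    };

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    // Mirrors the layout in the hunk above: argc in the low bits, the
    // conversion mode in the two bits after it.
    using ArgcBits = BitFieldDemo<int, 0, 16>;
    using ConvertModeBits = BitFieldDemo<ConvertReceiverMode, 16, 2>;

    int main() {
      uint32_t state = ArgcBits::encode(3) |
                       ConvertModeBits::encode(ConvertReceiverMode::kAny);
      assert(ArgcBits::decode(state) == 3);
      assert(ConvertModeBits::decode(state) == ConvertReceiverMode::kAny);
      return 0;
    }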
@@ -121,9 +120,9 @@ class BinaryOpICState final BASE_EMBEDDED {
Token::Value op() const { return op_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
- Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); }
- Type* GetRightType(Zone* zone) const { return KindToType(right_kind_, zone); }
- Type* GetResultType(Zone* zone) const;
+ Type* GetLeftType() const { return KindToType(left_kind_); }
+ Type* GetRightType() const { return KindToType(right_kind_); }
+ Type* GetResultType() const;
void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
@@ -137,7 +136,7 @@ class BinaryOpICState final BASE_EMBEDDED {
Kind UpdateKind(Handle<Object> object, Kind kind) const;
static const char* KindToString(Kind kind);
- static Type* KindToType(Kind kind, Zone* zone);
+ static Type* KindToType(Kind kind);
static bool KindMaybeSmi(Kind kind) {
return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
}
@@ -175,7 +174,7 @@ class CompareICState {
// SMI < NUMBER
// INTERNALIZED_STRING < STRING
// INTERNALIZED_STRING < UNIQUE_NAME
- // KNOWN_OBJECT < OBJECT
+ // KNOWN_RECEIVER < RECEIVER
enum State {
UNINITIALIZED,
BOOLEAN,
@@ -183,9 +182,9 @@ class CompareICState {
NUMBER,
STRING,
INTERNALIZED_STRING,
- UNIQUE_NAME, // Symbol or InternalizedString
- OBJECT, // JSObject
- KNOWN_OBJECT, // JSObject with specific map (faster check)
+ UNIQUE_NAME, // Symbol or InternalizedString
+ RECEIVER, // JSReceiver
+ KNOWN_RECEIVER, // JSReceiver with specific map (faster check)
GENERIC
};
@@ -268,7 +267,8 @@ class StoreICState final BASE_EMBEDDED {
private:
const ExtraICState state_;
};
-}
-}
+
+} // namespace internal
+} // namespace v8
#endif // V8_IC_STATE_H_
diff --git a/chromium/v8/src/ic/ic.cc b/chromium/v8/src/ic/ic.cc
index f0d571bed64..73ac666a41c 100644
--- a/chromium/v8/src/ic/ic.cc
+++ b/chromium/v8/src/ic/ic.cc
@@ -47,9 +47,6 @@ char IC::TransitionMarkFromState(IC::State state) {
// these cases fall through to the unreachable code below.
case DEBUG_STUB:
break;
- // Type-vector-based ICs resolve state to one of the above.
- case DEFAULT:
- break;
}
UNREACHABLE();
return 0;
@@ -120,13 +117,10 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
stdout, true);
}
- ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier = "";
if (new_target->kind() == Code::KEYED_STORE_IC) {
KeyedAccessStoreMode mode =
- FLAG_vector_stores
- ? casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode()
- : KeyedStoreIC::GetKeyedAccessStoreMode(extra_state);
+ casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
modifier = GetTransitionMarkModifier(mode);
}
PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
@@ -200,6 +194,11 @@ SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// corresponding to the frame.
StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
+ if (FLAG_ignition && it.frame()->type() == StackFrame::STUB) {
+ // Advance over bytecode handler frame.
+ // TODO(rmcilroy): Remove this once bytecode handlers don't need a frame.
+ it.Advance();
+ }
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
@@ -293,8 +292,8 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
}
}
- if (receiver->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+ if (receiver->IsJSGlobalObject()) {
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::ACCESS_CHECK) return false;
if (!it.IsFound()) return false;
@@ -332,14 +331,6 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
MarkPrototypeFailure(name);
return;
}
-
- // The builtins object is special. It only changes when JavaScript
- // builtins are loaded lazily. It is important to keep inline
- // caches for the builtins object monomorphic. Therefore, if we get
- // an inline cache miss for the builtins object after lazily loading
- // JavaScript builtins, we return uninitialized as the state to
- // force the inline cache back to monomorphic state.
- if (receiver->IsJSBuiltinsObject()) state_ = PREMONOMORPHIC;
}
@@ -388,7 +379,6 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
break;
case PROTOTYPE_FAILURE:
case DEBUG_STUB:
- case DEFAULT:
UNREACHABLE();
}
}
@@ -425,19 +415,9 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
// static
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host,
- TypeFeedbackVector* vector, State old_state,
- State new_state) {
+void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
if (host->kind() != Code::FUNCTION) return;
- if (FLAG_type_info_threshold > 0) {
- int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
- int generic_delta = 0; // "Generic" here includes megamorphic.
- ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
- &generic_delta);
- vector->change_ic_with_type_info_count(polymorphic_delta);
- vector->change_ic_generic_count(generic_delta);
- }
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
info->change_own_type_change_checksum();
host->set_profiler_ticks(0);
@@ -477,13 +457,9 @@ void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
switch (target->kind()) {
case Code::LOAD_IC:
case Code::KEYED_LOAD_IC:
- return;
case Code::STORE_IC:
- if (FLAG_vector_stores) return;
- return StoreIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_STORE_IC:
- if (FLAG_vector_stores) return;
- return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+ return;
case Code::COMPARE_IC:
return CompareIC::Clear(isolate, address, target, constant_pool);
case Code::COMPARE_NIL_IC:
@@ -505,9 +481,8 @@ void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -519,16 +494,15 @@ void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
if (state != UNINITIALIZED && !feedback->IsAllocationSite()) {
nexus->ConfigureUninitialized();
// The change in state must be processed.
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, UNINITIALIZED);
+ OnTypeFeedbackChanged(isolate, host);
}
}
void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
if (IsCleared(nexus)) return;
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -543,9 +517,8 @@ void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
if (IsCleared(nexus)) return;
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -561,9 +534,8 @@ void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
KeyedStoreICNexus* nexus) {
if (IsCleared(nexus)) return;
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -572,11 +544,11 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
CompareICStub stub(target->stub_key(), isolate);
// Only clear CompareICs that can retain objects.
- if (stub.state() != CompareICState::KNOWN_OBJECT) return;
+ if (stub.state() != CompareICState::KNOWN_RECEIVER) return;
SetTargetAtAddress(address,
GetRawUninitialized(isolate, stub.op(), stub.strength()),
constant_pool);
- PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
}
@@ -613,8 +585,7 @@ void IC::ConfigureVectorState(IC::State new_state) {
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- new_state);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -637,8 +608,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- MONOMORPHIC);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -661,8 +631,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- POLYMORPHIC);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -675,8 +644,7 @@ void IC::ConfigureVectorState(MapHandleList* maps,
nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- POLYMORPHIC);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -707,10 +675,10 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
- if (object->IsGlobalObject() && name->IsString()) {
+ if (object->IsJSGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -817,12 +785,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
if (number_of_valid_maps > 1 && target()->is_keyed_stub()) return false;
Handle<Code> ic;
if (number_of_valid_maps == 1) {
- if (UseVector()) {
- ConfigureVectorState(name, receiver_map(), code);
- } else {
- ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, map, code,
- extra_ic_state());
- }
+ ConfigureVectorState(name, receiver_map(), code);
} else {
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
@@ -834,13 +797,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
handlers.Add(code);
}
- if (UseVector()) {
- ConfigureVectorState(name, &maps, &handlers);
- } else {
- ic = PropertyICCompiler::ComputePolymorphic(kind(), &maps, &handlers,
- number_of_valid_maps, name,
- extra_ic_state());
- }
+ ConfigureVectorState(name, &maps, &handlers);
}
if (!UseVector()) set_target(*ic);
@@ -850,13 +807,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
DCHECK(handler->is_handler());
- if (UseVector()) {
- ConfigureVectorState(name, receiver_map(), handler);
- } else {
- Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
- kind(), name, receiver_map(), handler, extra_ic_state());
- set_target(*ic);
- }
+ ConfigureVectorState(name, receiver_map(), handler);
}
@@ -918,7 +869,6 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
break;
case DEBUG_STUB:
break;
- case DEFAULT:
case GENERIC:
UNREACHABLE();
break;
@@ -981,7 +931,7 @@ static Handle<Code> KeyedStoreICInitializeStubHelper(
Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state) {
- if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ if (initialization_state != MEGAMORPHIC) {
VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -993,7 +943,7 @@ Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
Isolate* isolate, LanguageMode language_mode, State initialization_state) {
- if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ if (initialization_state != MEGAMORPHIC) {
VectorKeyedStoreICStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -1022,6 +972,39 @@ Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
}
+bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
+ DCHECK(lookup->state() == LookupIterator::ACCESSOR);
+ Isolate* isolate = lookup->isolate();
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (info->getter() != NULL &&
+ !ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate, info,
+ receiver_map)) {
+ return false;
+ }
+ } else if (accessors->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+ isolate);
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ Handle<Object> receiver = lookup->GetReceiver();
+ if (getter->IsJSFunction() && holder->HasFastProperties()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ if (receiver->IsJSObject() || function->shared()->IsBuiltin() ||
+ !is_sloppy(function->shared()->language_mode())) {
+ CallOptimization call_optimization(function);
+ if (call_optimization.is_simple_api_call() &&
+ !call_optimization.IsCompatibleReceiverMap(receiver_map, holder)) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+
void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
@@ -1046,35 +1029,20 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
}
} else {
if (lookup->state() == LookupIterator::ACCESSOR) {
- Handle<Object> accessors = lookup->GetAccessors();
- Handle<Map> map = receiver_map();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- if ((v8::ToCData<Address>(info->getter()) != 0) &&
- !ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
- map)) {
- TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
- code = slow_stub();
- }
- } else if (accessors->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
- isolate());
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- Handle<Object> receiver = lookup->GetReceiver();
- if (getter->IsJSFunction() && holder->HasFastProperties()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (receiver->IsJSObject() || function->IsBuiltin() ||
- !is_sloppy(function->shared()->language_mode())) {
- CallOptimization call_optimization(function);
- if (call_optimization.is_simple_api_call() &&
- !call_optimization.IsCompatibleReceiver(receiver, holder)) {
- TRACE_GENERIC_IC(isolate(), "LoadIC",
- "incompatible receiver type");
- code = slow_stub();
- }
- }
- }
+ if (!IsCompatibleReceiver(lookup, receiver_map())) {
+ TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+ code = slow_stub();
+ }
+ } else if (lookup->state() == LookupIterator::INTERCEPTOR) {
+ // Perform a lookup behind the interceptor. Copy the LookupIterator since
+ // the original iterator will be used to fetch the value.
+ LookupIterator it = *lookup;
+ it.Next();
+ LookupForRead(&it);
+ if (it.state() == LookupIterator::ACCESSOR &&
+ !IsCompatibleReceiver(&it, receiver_map())) {
+ TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+ code = slow_stub();
}
}
if (code.is_null()) code = ComputeHandler(lookup);
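[Editor's note] The interceptor branch added above copies the LookupIterator before stepping past the interceptor, so the compatibility check runs on the property behind it while the original cursor stays put for the actual load. A standalone sketch of that copy-and-advance idiom; the iterator and state names are illustrative stand-ins:

    #include <cassert>
    #include <vector>

    enum State { INTERCEPTOR, ACCESSOR, DATA };

    struct LookupIteratorDemo {
      const std::vector<State>* chain;
      size_t pos = 0;
      State state() const { return (*chain)[pos]; }
      void Next() { ++pos; }
    };

    int main() {
      std::vector<State> chain = {INTERCEPTOR, ACCESSOR, DATA};
      LookupIteratorDemo lookup{&chain};
      LookupIteratorDemo it = lookup;  // copy, as in the hunk above
      it.Next();                       // peek behind the interceptor
      assert(it.state() == ACCESSOR);  // compatibility check runs here
      assert(lookup.state() == INTERCEPTOR);  // original cursor unchanged
      return 0;
    }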
@@ -1223,7 +1191,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
// When debugging we need to go the slow path to flood the accessor.
if (GetSharedFunctionInfo()->HasDebugInfo()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (!receiver->IsJSObject() && !function->IsBuiltin() &&
+ if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
is_sloppy(function->shared()->language_mode())) {
// Calling sloppy non-builtins with a value as the receiver
// requires boxing.
@@ -1251,7 +1219,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
if (kind() != Code::LOAD_IC) break;
- if (holder->IsGlobalObject()) {
+ if (holder->IsJSGlobalObject()) {
NamedLoadHandlerCompiler compiler(isolate(), map, holder,
cache_holder);
Handle<PropertyCell> cell = lookup->GetPropertyCell();
@@ -1489,6 +1457,8 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
PrototypeIterator::GetCurrent(iter));
}
+ if (it->HolderIsReceiverOrHiddenPrototype()) return false;
+
it->PrepareTransitionToDataProperty(value, NONE, store_mode);
return it->IsCacheableTransition();
}
@@ -1524,10 +1494,10 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
return result;
}
- if (object->IsGlobalObject() && name->IsString()) {
+ if (object->IsJSGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -1589,26 +1559,23 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
LookupIterator it(object, name);
if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
- // Set the property.
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(&it, value, language_mode(), store_mode), Object);
- return result;
+ MAYBE_RETURN_NULL(
+ Object::SetProperty(&it, value, language_mode(), store_mode));
+ return value;
}
Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
- CallICState::CallType call_type) {
- CallICTrampolineStub stub(isolate, CallICState(argc, call_type));
+ ConvertReceiverMode mode) {
+ CallICTrampolineStub stub(isolate, CallICState(argc, mode));
Handle<Code> code = stub.GetCode();
return code;
}
Handle<Code> CallIC::initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, CallICState::CallType call_type) {
- CallICStub stub(isolate, CallICState(argc, call_type));
+ Isolate* isolate, int argc, ConvertReceiverMode mode) {
+ CallICStub stub(isolate, CallICState(argc, mode));
Handle<Code> code = stub.GetCode();
return code;
}
@@ -1629,13 +1596,8 @@ Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
DCHECK(initialization_state == UNINITIALIZED ||
initialization_state == PREMONOMORPHIC ||
initialization_state == MEGAMORPHIC);
- if (FLAG_vector_stores) {
- VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
- return stub.GetCode();
- }
-
- return StoreICInitializeStubHelper(
- isolate, ComputeExtraICState(language_mode), initialization_state);
+ VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+ return stub.GetCode();
}
@@ -1644,7 +1606,7 @@ Handle<Code> StoreIC::initialize_stub_in_optimized_code(
DCHECK(initialization_state == UNINITIALIZED ||
initialization_state == PREMONOMORPHIC ||
initialization_state == MEGAMORPHIC);
- if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ if (initialization_state != MEGAMORPHIC) {
VectorStoreICStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -1691,11 +1653,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
- if (FLAG_vector_stores) {
- ConfigureVectorState(PREMONOMORPHIC);
- } else {
- set_target(*pre_monomorphic_stub());
- }
+ ConfigureVectorState(PREMONOMORPHIC);
TRACE_IC("StoreIC", lookup->name());
return;
}
@@ -1712,7 +1670,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
static Handle<Code> PropertyCellStoreHandler(
- Isolate* isolate, Handle<JSObject> receiver, Handle<GlobalObject> holder,
+ Isolate* isolate, Handle<JSObject> receiver, Handle<JSGlobalObject> holder,
Handle<Name> name, Handle<PropertyCell> cell, PropertyCellType type) {
auto constant_type = Nothing<PropertyCellConstantType>();
if (type == PropertyCellType::kConstantType) {
@@ -1741,12 +1699,12 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
auto store_target = lookup->GetStoreTarget();
- if (store_target->IsGlobalObject()) {
+ if (store_target->IsJSGlobalObject()) {
// TODO(dcarney): this currently just deopts. Use the transition cell.
auto cell = isolate()->factory()->NewPropertyCell();
cell->set_value(*value);
auto code = PropertyCellStoreHandler(
- isolate(), store_target, Handle<GlobalObject>::cast(store_target),
+ isolate(), store_target, Handle<JSGlobalObject>::cast(store_target),
lookup->name(), cell, PropertyCellType::kConstant);
cell->set_value(isolate()->heap()->the_hole_value());
return code;
@@ -1802,8 +1760,6 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
break;
}
- // When debugging we need to go the slow path to flood the accessor.
- if (GetSharedFunctionInfo()->HasDebugInfo()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
@@ -1824,14 +1780,14 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
- if (holder->IsGlobalObject()) {
+ if (holder->IsJSGlobalObject()) {
DCHECK(holder.is_identical_to(receiver) ||
receiver->map()->prototype() == *holder);
auto cell = lookup->GetPropertyCell();
auto updated_type = PropertyCell::UpdatedType(
cell, value, lookup->property_details());
auto code = PropertyCellStoreHandler(
- isolate(), receiver, Handle<GlobalObject>::cast(holder),
+ isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
lookup->name(), cell, updated_type);
return code;
}
@@ -1891,25 +1847,18 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
- if (FLAG_vector_stores) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- monomorphic_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- monomorphic_map, language_mode(), store_mode);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ monomorphic_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
+ return null_handle;
}
// There are several special cases where an IC that is MONOMORPHIC can still
// transition to a different GetNonTransitioningStoreMode IC that handles a
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
- KeyedAccessStoreMode old_store_mode =
- FLAG_vector_stores
- ? GetKeyedAccessStoreMode()
- : KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ KeyedAccessStoreMode old_store_mode = GetKeyedAccessStoreMode();
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1925,16 +1874,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
- if (FLAG_vector_stores) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- transitioned_receiver_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
- handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- transitioned_receiver_map, language_mode(), store_mode);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ transitioned_receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
+ handler);
+ return null_handle;
} else if (receiver_map.is_identical_to(previous_receiver_map) &&
old_store_mode == STANDARD_STORE &&
(store_mode == STORE_AND_GROW_NO_TRANSITION ||
@@ -1943,15 +1888,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
- if (FLAG_vector_stores) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- receiver_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- receiver_map, language_mode(), store_mode);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
+ return null_handle;
}
}
@@ -2010,18 +1951,13 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
}
}
- if (FLAG_vector_stores) {
- MapHandleList transitioned_maps(target_receiver_maps.length());
- CodeHandleList handlers(target_receiver_maps.length());
- PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
- &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
- language_mode());
- ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
- return null_handle;
- }
-
- return PropertyICCompiler::ComputeKeyedStorePolymorphic(
- &target_receiver_maps, store_mode, language_mode());
+ MapHandleList transitioned_maps(target_receiver_maps.length());
+ CodeHandleList handlers(target_receiver_maps.length());
+ PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
+ &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
+ language_mode());
+ ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
+ return null_handle;
}
@@ -2115,44 +2051,6 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
}
-void KeyedStoreIC::ValidateStoreMode(Handle<Code> stub) {
-#ifdef DEBUG
- DCHECK(!FLAG_vector_stores);
- if (stub.is_null() || *stub == *megamorphic_stub() || *stub == *slow_stub()) {
- return;
- }
-
- // Query the keyed store mode.
- ExtraICState state = stub->extra_ic_state();
- KeyedAccessStoreMode stub_mode = GetKeyedAccessStoreMode(state);
-
- MapHandleList map_list;
- stub->FindAllMaps(&map_list);
- CodeHandleList list;
- stub->FindHandlers(&list, map_list.length());
- for (int i = 0; i < list.length(); i++) {
- Handle<Code> handler = list.at(i);
- CHECK(handler->is_handler());
- CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
- uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
- // Ensure that we only see handlers we know have the store mode embedded.
- CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
- major_key == CodeStub::StoreFastElement ||
- major_key == CodeStub::StoreElement ||
- major_key == CodeStub::ElementsTransitionAndStore ||
- *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
- // Ensure that the store mode matches that of the IC.
- CHECK(major_key == CodeStub::NoCache ||
- stub_mode == CommonStoreModeBits::decode(minor_key));
- // The one exception is the keyed store slow builtin, which doesn't include
- // store mode.
- CHECK(major_key != CodeStub::NoCache ||
- *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
- }
-#endif // DEBUG
-}
-
-
MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -2183,20 +2081,11 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
StoreIC::Store(object, Handle<Name>::cast(key), value,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
- if (FLAG_vector_stores) {
- if (!is_vector_set()) {
- ConfigureVectorState(MEGAMORPHIC);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unhandled internalized string key");
- TRACE_IC("StoreIC", key);
- }
- } else {
- if (!is_target_set()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unhandled internalized string key");
- TRACE_IC("StoreIC", key);
- set_target(*stub);
- }
+ if (!is_vector_set()) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "unhandled internalized string key");
+ TRACE_IC("StoreIC", key);
}
return store_handle;
}
@@ -2253,12 +2142,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// from fast path keyed stores.
if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
stub = StoreElementStub(old_receiver_map, store_mode);
-
- // Validate that the store_mode in the stub can also be derived
- // from peeking in the code bits of the handlers.
- if (!FLAG_vector_stores) ValidateStoreMode(stub);
} else {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "dictionary or proxy prototype");
}
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
@@ -2268,27 +2154,12 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
- if (FLAG_vector_stores) {
- if (!is_vector_set() || stub.is_null()) {
- Code* megamorphic = *megamorphic_stub();
- if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
- ConfigureVectorState(MEGAMORPHIC);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- *stub == megamorphic ? "set generic" : "slow stub");
- }
- }
- } else {
- DCHECK(!is_target_set());
+ if (!is_vector_set() || stub.is_null()) {
Code* megamorphic = *megamorphic_stub();
- if (*stub == megamorphic) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
- } else if (*stub == *slow_stub()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
- }
-
- DCHECK(!stub.is_null());
- if (!AddressIsDeoptimizedCode()) {
- set_target(*stub);
+ if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ *stub == megamorphic ? "set generic" : "slow stub");
}
}
TRACE_IC("StoreIC", key);
@@ -2318,6 +2189,12 @@ void CallIC::HandleMiss(Handle<Object> function) {
if (array_function.is_identical_to(js_function)) {
// Alter the slot.
nexus->ConfigureMonomorphicArray();
+ } else if (js_function->context()->native_context() !=
+ *isolate()->native_context()) {
+ // Don't collect cross-native context feedback for the CallIC.
+ // TODO(bmeurer): We should collect the SharedFunctionInfo as
+ // feedback in this case instead.
+ nexus->ConfigureMegamorphic();
} else {
nexus->ConfigureMonomorphic(js_function);
}
@@ -2328,8 +2205,7 @@ void CallIC::HandleMiss(Handle<Object> function) {
name = handle(js_function->shared()->name(), isolate());
}
- IC::State new_state = nexus->StateFromFeedback();
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), state(), new_state);
+ OnTypeFeedbackChanged(isolate(), get_host());
TRACE_IC("CallIC", name);
}
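[Editor's note] The new branch in HandleMiss above deliberately degrades feedback for functions from a different native context: instead of recording a monomorphic target that would leak an object across contexts, the slot is forced megamorphic. A standalone decision-table sketch, with native contexts modeled as ints (names illustrative):

    #include <cassert>

    enum Feedback { kMonomorphicArray, kMegamorphic, kMonomorphic };

    Feedback ChooseFeedback(bool is_array_function, int fn_context,
                            int ic_context) {
      if (is_array_function) return kMonomorphicArray;  // special Array slot
      if (fn_context != ic_context) return kMegamorphic;  // cross-context call
      return kMonomorphic;
    }

    int main() {
      assert(ChooseFeedback(true, 1, 1) == kMonomorphicArray);
      assert(ChooseFeedback(false, 2, 1) == kMegamorphic);
      assert(ChooseFeedback(false, 1, 1) == kMonomorphic);
      return 0;
    }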
@@ -2349,7 +2225,7 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
Handle<Object> function = args.at<Object>(0);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
Handle<Smi> slot = args.at<Smi>(2);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
CallICNexus nexus(vector, vector_slot);
CallIC ic(isolate, &nexus);
ic.HandleMiss(function);
@@ -2368,7 +2244,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
@@ -2400,7 +2276,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2419,7 +2295,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2438,29 +2314,21 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5 || args.length() == 6);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- }
+ DCHECK(args.length() == 5 || args.length() == 6);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
} else {
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
@@ -2477,29 +2345,41 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5 || args.length() == 6);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
+ int length = args.length();
+ DCHECK(length == 5 || length == 6);
+ // We might have a slot and vector for a normal miss (slot(3), vector(4)),
+ // or a map and vector for a transitioning store miss (map(3), vector(4));
+ // in that case the slot must be recovered from a virtual register.
+ // If length == 6, a map is also included (map(3), slot(4), vector(5)).
+ Handle<Smi> slot;
+ Handle<TypeFeedbackVector> vector;
+ if (length == 5) {
+ if (args.at<Object>(3)->IsMap()) {
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = handle(
+ *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
+ isolate);
} else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = args.at<Smi>(3);
}
} else {
- DCHECK(args.length() == 3 || args.length() == 4);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ vector = args.at<TypeFeedbackVector>(5);
+ slot = args.at<Smi>(4);
+ }
+
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
+ } else {
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
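[Editor's note: the comment in this hunk describes three possible argument layouts, which the code distinguishes by arity and by whether argument 3 is a map. A self-contained sketch of that classification; the type and function names here are hypothetical stand-ins, not V8's:

// Illustrative sketch, not V8 source.
struct Args { int length; bool third_arg_is_map; };

enum class Layout {
  kSlotVector,    // (receiver, key, value, slot, vector)
  kMapVector,     // (receiver, key, value, map, vector); slot in virtual reg
  kMapSlotVector  // (receiver, key, value, map, slot, vector)
};

Layout Classify(const Args& args) {
  if (args.length == 6) return Layout::kMapSlotVector;
  return args.third_arg_is_map ? Layout::kMapVector : Layout::kSlotVector;
}
]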
@@ -2517,23 +2397,15 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- }
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
return *result;
}
@@ -2546,42 +2418,29 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
- }
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
return *result;
}
RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
+ DCHECK(args.length() == 5);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
LanguageMode language_mode;
- if (FLAG_vector_stores) {
- StoreICNexus nexus(isolate);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- } else {
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- language_mode = ic.language_mode();
- }
+ StoreICNexus nexus(isolate);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2592,19 +2451,14 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
+ DCHECK(args.length() == 5);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
LanguageMode language_mode;
- if (FLAG_vector_stores) {
- KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- } else {
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- language_mode = ic.language_mode();
- }
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2616,20 +2470,17 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == (FLAG_vector_stores ? 6 : 4));
+ // Length == 5 or 6, depending on whether the vector slot
+ // is passed in a virtual register or not.
+ DCHECK(args.length() == 5 || args.length() == 6);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Map> map = args.at<Map>(FLAG_vector_stores ? 5 : 3);
+ Handle<Map> map = args.at<Map>(3);
LanguageMode language_mode;
- if (FLAG_vector_stores) {
- KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- } else {
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- language_mode = ic.language_mode();
- }
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
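[Editor's note: the miss handler first transitions the receiver's elements kind to the target map's kind; the store itself happens in the remainder of the function, which this hunk does not show. A rough model, assuming stand-in types:

// Illustrative sketch, not V8 source.
enum class ElementsKind { kFastSmi, kFastDouble, kFastObject };

struct Receiver {
  bool is_js_object;
  ElementsKind elements;
};

void TransitionElementsThenStore(Receiver& receiver, ElementsKind target) {
  if (receiver.is_js_object) {
    receiver.elements = target;  // JSObject::TransitionElementsKind
  }
  // ...the actual property store follows in code outside this hunk.
}
]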
@@ -2762,9 +2613,9 @@ MaybeHandle<Object> BinaryOpIC::Transition(
// Patch the inlined smi code as necessary.
if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK);
} else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate(), address(), DISABLE_INLINED_SMI_CHECK);
}
return result;
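[Editor's note: the patch direction is determined purely by the edge between the old and new state's UseInlinedSmiCode() bits; nothing is patched when the bit is unchanged. A sketch of that decision:

// Illustrative sketch, not V8 source.
enum class SmiCheckPatch { kEnable, kDisable, kLeaveAlone };

SmiCheckPatch DecidePatch(bool old_uses_smi_code, bool new_uses_smi_code) {
  if (!old_uses_smi_code && new_uses_smi_code) return SmiCheckPatch::kEnable;
  if (old_uses_smi_code && !new_uses_smi_code) return SmiCheckPatch::kDisable;
  return SmiCheckPatch::kLeaveAlone;
}
]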
@@ -2835,9 +2686,9 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HasInlinedSmiCode(address()), x, y);
CompareICStub stub(isolate(), op_, old_stub.strength(), new_left, new_right,
state);
- if (state == CompareICState::KNOWN_OBJECT) {
+ if (state == CompareICState::KNOWN_RECEIVER) {
stub.set_known_map(
- Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
+ Handle<Map>(Handle<JSReceiver>::cast(x)->map(), isolate()));
}
Handle<Code> new_target = stub.GetCode();
set_target(*new_target);
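[Editor's note: KNOWN_OBJECT becomes KNOWN_RECEIVER, so the known-map cache key may now come from any JSReceiver rather than only a JSObject. A toy model of the widened cast; the types are stand-ins:

// Illustrative sketch, not V8 source.
struct Map {};
struct JSReceiver { Map* map; };   // base: covers JSObject and JSProxy
struct JSObject : JSReceiver {};

Map* KnownMapFor(JSReceiver* x) {
  return x->map;  // previously only taken from a JSObject
}
]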
@@ -2857,7 +2708,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (old_stub.state() == CompareICState::UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK);
}
return *new_target;
@@ -2892,9 +2743,9 @@ void CompareNilIC::Clear(Address address, Code* target, Address constant_pool) {
Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
Handle<Object> object) {
if (object->IsNull() || object->IsUndefined()) {
- return handle(Smi::FromInt(true), isolate);
+ return isolate->factory()->true_value();
}
- return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
+ return isolate->factory()->ToBoolean(object->IsUndetectableObject());
}
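[Editor's note: the slow path now returns canonical booleans from the factory instead of Smi 0/1. Its logic reduces to two predicates, sketched here with plain bools standing in for heap values:

// Illustrative sketch, not V8 source.
bool CompareNilSlow(bool is_null_or_undefined, bool is_undetectable) {
  if (is_null_or_undefined) return true;  // null/undefined compare true
  return is_undetectable;                 // undetectable objects also match
}
]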
@@ -2947,7 +2798,7 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode();
set_target(*code);
- return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
+ return isolate()->factory()->ToBoolean(to_boolean_value);
}
@@ -3109,7 +2960,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
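[Editor's note: because a string-keyed KeyedLoadIC can fall into this LoadIC miss handler, the handler has to dispatch on the kind recorded in the slot rather than assume LOAD_IC. A minimal sketch of that check:

// Illustrative sketch, not V8 source.
enum class SlotKind { kLoadIC, kKeyedLoadIC };

bool HandleAsKeyedLoad(SlotKind kind_in_vector) {
  return kind_in_vector == SlotKind::kKeyedLoadIC;
}
]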
diff --git a/chromium/v8/src/ic/ic.h b/chromium/v8/src/ic/ic.h
index d65d7a8c1ba..a3265d70b9a 100644
--- a/chromium/v8/src/ic/ic.h
+++ b/chromium/v8/src/ic/ic.h
@@ -57,8 +57,6 @@ class IC {
bool IsCallStub() const { return target()->is_call_stub(); }
#endif
- static inline JSFunction* GetRootConstructor(Map* receiver_map,
- Context* native_context);
static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder,
Isolate* isolate,
@@ -79,9 +77,8 @@ class IC {
static bool ICUseVector(Code::Kind kind) {
return kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
- kind == Code::CALL_IC ||
- (FLAG_vector_stores &&
- (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC));
+ kind == Code::CALL_IC || kind == Code::STORE_IC ||
+ kind == Code::KEYED_STORE_IC;
}
protected:
@@ -146,9 +143,7 @@ class IC {
State old_state, State new_state,
bool target_remains_ic_stub);
// As a vector-based IC, type feedback must be updated differently.
- static void OnTypeFeedbackChanged(Isolate* isolate, Code* host,
- TypeFeedbackVector* vector, State old_state,
- State new_state);
+ static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
@@ -210,7 +205,7 @@ class IC {
inline void UpdateTarget();
Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
- FeedbackVectorICSlot slot() const { return nexus()->slot(); }
+ FeedbackVectorSlot slot() const { return nexus()->slot(); }
State saved_state() const {
return state() == PROTOTYPE_FAILURE ? old_state_ : state();
}
@@ -289,9 +284,9 @@ class CallIC : public IC {
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
- CallICState::CallType call_type);
+ ConvertReceiverMode mode);
static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, CallICState::CallType call_type);
+ Isolate* isolate, int argc, ConvertReceiverMode mode);
static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
@@ -319,7 +314,7 @@ class LoadIC : public IC {
}
bool ShouldThrowReferenceError(Handle<Object> receiver) {
- return receiver->IsGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
+ return receiver->IsJSGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
}
// Code generator routines.
@@ -362,9 +357,8 @@ class LoadIC : public IC {
// lookup result.
void UpdateCaches(LookupIterator* lookup);
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder) override;
+ Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
+ CacheHolderFlag cache_holder) override;
private:
Handle<Code> SimpleFieldLoad(FieldIndex index);
@@ -498,9 +492,8 @@ class StoreIC : public IC {
// lookup result.
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) override;
+ Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
+ CacheHolderFlag cache_holder) override;
private:
inline void set_target(Code* code);
@@ -536,22 +529,10 @@ class KeyedStoreIC : public StoreIC {
IcCheckTypeField::encode(ELEMENT);
}
- static KeyedAccessStoreMode GetKeyedAccessStoreMode(
- ExtraICState extra_state) {
- DCHECK(!FLAG_vector_stores);
- return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
- }
-
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
- DCHECK(FLAG_vector_stores);
return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
}
- static IcCheckType GetKeyType(ExtraICState extra_state) {
- DCHECK(!FLAG_vector_stores);
- return IcCheckTypeField::decode(extra_state);
- }
-
KeyedStoreIC(FrameDepth depth, Isolate* isolate,
KeyedStoreICNexus* nexus = NULL)
: StoreIC(depth, isolate, nexus) {
@@ -608,8 +589,6 @@ class KeyedStoreIC : public StoreIC {
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
- void ValidateStoreMode(Handle<Code> stub);
-
friend class IC;
};
@@ -683,9 +662,10 @@ class ToBooleanIC : public IC {
// Helper for BinaryOpIC and CompareIC.
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IC_H_
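[Editor's note: PatchInlinedSmiCode now takes the isolate explicitly; the MIPS implementation further down passes it straight into CodePatcher, which takes it at construction. A skeletal sketch of the threading, with stand-in types, illustrative only:

// Illustrative sketch, not V8 source.
struct Isolate {};
using Address = unsigned char*;
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };

struct CodePatcher {
  CodePatcher(Isolate* isolate, Address address, int instructions) {
    (void)isolate; (void)address; (void)instructions;
  }
};

void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  CodePatcher patcher(isolate, address, 2);  // isolate threaded through
  (void)check;
}
]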
diff --git a/chromium/v8/src/ic/mips/OWNERS b/chromium/v8/src/ic/mips/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/ic/mips/OWNERS
+++ b/chromium/v8/src/ic/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/ic/mips/access-compiler-mips.cc b/chromium/v8/src/ic/mips/access-compiler-mips.cc
index f2f6c62c716..b122946577f 100644
--- a/chromium/v8/src/ic/mips/access-compiler-mips.cc
+++ b/chromium/v8/src/ic/mips/access-compiler-mips.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, t0, t1};
return registers;
}
diff --git a/chromium/v8/src/ic/mips/handler-compiler-mips.cc b/chromium/v8/src/ic/mips/handler-compiler-mips.cc
index 8c135e40888..554d0c56ff8 100644
--- a/chromium/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/chromium/v8/src/ic/mips/handler-compiler-mips.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
@@ -165,10 +167,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ lw(result, MemOperand(cp, offset));
- __ lw(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- __ lw(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ lw(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -216,8 +215,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -283,6 +284,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
__ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
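[Editor's note: the new fast-handler branch means an API accessor call can bypass the C++ callback entirely when compiled code is available. A sketch of the dispatch, with plain function pointers standing in for Code objects:

// Illustrative sketch, not V8 source.
struct CallInfo {
  void (*fast_handler)();   // compiled fast path, may be null
  void (*slow_callback)();  // the api_function_address path
};

void DispatchApiAccessor(const CallInfo& info) {
  if (info.fast_handler != nullptr) {
    info.fast_handler();  // tail call into generated code
    return;
  }
  info.slow_callback();
}
]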
@@ -297,15 +305,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
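[Editor's note: with vector stores unconditional, every store miss and slow path pushes the same five values; the old three-argument form is gone. A trivial sketch of the now-uniform argument list, names only, for illustration:

// Illustrative sketch, not V8 source.
#include <array>
#include <string>

std::array<std::string, 5> StoreICArgs() {
  return {"receiver", "name", "value", "slot", "vector"};
}
]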
@@ -314,7 +317,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -323,8 +326,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -346,8 +348,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
@@ -698,8 +700,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -722,7 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(at, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -734,7 +735,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/mips/ic-compiler-mips.cc b/chromium/v8/src/ic/mips/ic-compiler-mips.cc
index 64f1662880b..86a602b3ec9 100644
--- a/chromium/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/chromium/v8/src/ic/mips/ic-compiler-mips.cc
@@ -10,114 +10,6 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Branch(&miss, ne, this->name(), Operand(name));
- }
- }
-
- Label number_case;
- Register match = scratch2();
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- // Separate compare from branch, to provide path for above JumpIfSmi().
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ GetWeakValue(match, cell);
- __ Subu(match, match, Operand(map_reg));
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
- Operand(zero_reg));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- Register match = scratch2();
- __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ GetWeakValue(match, cell);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
- Operand(map_reg));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, match, Operand(map_reg));
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
#define __ ACCESS_MASM(masm)
@@ -130,7 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(a0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
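[Editor's note: dropping the explicit argument counts from TailCallRuntime works because the runtime function table already records each function's arity; the DCHECK added to CompileCallLoadPropertyWithInterceptor above cross-checks exactly that. A sketch of the idea; the nargs value of 4 comes from the removed kSetProperty call site, everything else is a stand-in:

// Illustrative sketch, not V8 source.
#include <cassert>

struct RuntimeFunction { const char* name; int nargs; };

const RuntimeFunction kSetProperty{"SetProperty", 4};

int ArityFor(const RuntimeFunction& f) {
  assert(f.nargs >= 0);  // mirrors the DCHECK against FunctionForId(id)->nargs
  return f.nargs;
}
]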
diff --git a/chromium/v8/src/ic/mips/ic-mips.cc b/chromium/v8/src/ic/mips/ic-mips.cc
index a1a118135bd..a27d6b56f7e 100644
--- a/chromium/v8/src/ic/mips/ic-mips.cc
+++ b/chromium/v8/src/ic/mips/ic-mips.cc
@@ -25,7 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// Register usage:
// type: holds the receiver instance type on entry.
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
@@ -317,8 +316,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -331,8 +329,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -347,8 +344,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -360,8 +356,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -436,7 +431,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
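[Editor's note: megamorphic paths still have to populate the vector and slot registers for downstream handlers, even though no feedback will be recorded, so a shared dummy vector and a fixed slot index are loaded. A small model of what gets materialized; the indices are placeholders:

// Illustrative sketch, not V8 source.
struct DummyFeedback {
  int vector_root_index;  // stand-in for Heap::kDummyVectorRootIndex
  int slot_index;         // stand-in for the dummy IC slot's index
};

DummyFeedback MegamorphicKeyedLoadFeedback(int dummy_keyed_load_slot) {
  return {0, dummy_keyed_load_slot};
}
]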
@@ -478,8 +473,13 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = t0;
+ Register scratch = t0;
+ Register scratch2 = t4;
+ Register scratch3 = t5;
Register address = t1;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, scratch2, scratch3, address));
+
if (check_map == kCheckMap) {
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(fast_double, ne, elements_map,
@@ -493,11 +493,10 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
__ addu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
- __ Branch(&holecheck_passed1, ne, scratch_value,
+ __ lw(scratch, MemOperand(address));
+ __ Branch(&holecheck_passed1, ne, scratch,
Operand(masm->isolate()->factory()->the_hole_value()));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -507,35 +506,34 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether the array is smi-only when writing a smi.
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch);
__ sw(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch);
__ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+ __ mov(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -554,34 +552,31 @@ static void KeyedStoreGenerateMegamorphicHelper(
kHoleNanUpper32Offset - kHeapObjectTag));
__ sll(at, key, kPointerSizeLog2);
__ addu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
- __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ __ lw(scratch, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch,
Operand(kHoleNanUpper32));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key,
- elements, // Overwritten.
- a3, // Scratch regs...
- t0, t1, &transition_double_elements);
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
+ scratch3, &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, t0, Operand(at));
+ __ Branch(&non_double_value, ne, scratch, Operand(at));
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -592,7 +587,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, t0, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -604,7 +599,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, t0, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -676,19 +671,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(t0, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -742,23 +735,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -784,8 +771,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -850,7 +836,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address andi_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -880,8 +867,6 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
// andi at, rx, 0
@@ -890,7 +875,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// andi at, rx, #kSmiTagMask
// Branch <target>, ne, at, Operand(zero_reg)
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
@@ -901,13 +886,44 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
DCHECK(Assembler::IsBranch(branch_instr));
- if (Assembler::IsBeq(branch_instr)) {
- patcher.ChangeBranchCondition(ne);
- } else {
- DCHECK(Assembler::IsBne(branch_instr));
- patcher.ChangeBranchCondition(eq);
+
+ uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
+ // Currently only the 'eq' and 'ne' condition values are supported, and only
+ // the simple branch instructions and their r6 variants (the opcode encodes
+ // the branch type). There are special cases (see Assembler::IsBranch()), so
+ // extending this would be tricky.
+ DCHECK(opcode == BEQ || // BEQ
+ opcode == BNE || // BNE
+ opcode == POP10 || // BEQC
+ opcode == POP30 || // BNEC
+ opcode == POP66 || // BEQZC
+ opcode == POP76); // BNEZC
+ switch (opcode) {
+ case BEQ:
+ opcode = BNE; // change BEQ to BNE.
+ break;
+ case POP10:
+ opcode = POP30; // change BEQC to BNEC.
+ break;
+ case POP66:
+ opcode = POP76; // change BEQZC to BNEZC.
+ break;
+ case BNE:
+ opcode = BEQ; // change BNE to BEQ.
+ break;
+ case POP30:
+ opcode = POP10; // change BNEC to BEQC.
+ break;
+ case POP76:
+ opcode = POP66; // change BNEZC to BEQZC.
+ break;
+ default:
+ UNIMPLEMENTED();
}
+ patcher.ChangeBranchCondition(branch_instr, opcode);
}
} // namespace internal
} // namespace v8
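[Editor's note: the rewritten patcher flips a MIPS branch between its equal and not-equal form across both the pre-r6 and r6 encodings. A self-contained sketch of the mapping; the enumerator values here are placeholders, not real opcode encodings:

// Illustrative sketch, not V8 source.
#include <cstdint>
#include <cstdlib>

enum Opcode : uint32_t { BEQ, BNE, POP10 /*BEQC*/, POP30 /*BNEC*/,
                         POP66 /*BEQZC*/, POP76 /*BNEZC*/ };

Opcode FlipBranchCondition(Opcode op) {
  switch (op) {
    case BEQ:   return BNE;    // and vice-versa below
    case BNE:   return BEQ;
    case POP10: return POP30;  // BEQC  <-> BNEC
    case POP30: return POP10;
    case POP66: return POP76;  // BEQZC <-> BNEZC
    case POP76: return POP66;
  }
  abort();  // unsupported branch type (see Assembler::IsBranch())
}
]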
diff --git a/chromium/v8/src/ic/mips64/OWNERS b/chromium/v8/src/ic/mips64/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/ic/mips64/OWNERS
+++ b/chromium/v8/src/ic/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/ic/mips64/access-compiler-mips64.cc b/chromium/v8/src/ic/mips64/access-compiler-mips64.cc
index 500a6d65c74..96e921c7c61 100644
--- a/chromium/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/chromium/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, a4, a5};
return registers;
}
diff --git a/chromium/v8/src/ic/mips64/handler-compiler-mips64.cc b/chromium/v8/src/ic/mips64/handler-compiler-mips64.cc
index 9c3a5b3e70b..d94a2922287 100644
--- a/chromium/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/chromium/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
@@ -165,11 +167,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- // Check we're still in the same context.
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ld(result, MemOperand(cp, offset));
- __ ld(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- __ ld(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ ld(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -217,8 +215,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -284,6 +284,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
__ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -298,15 +305,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -315,7 +317,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -324,8 +326,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -347,8 +348,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
@@ -699,8 +700,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -723,7 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(at, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -735,7 +735,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/mips64/ic-compiler-mips64.cc b/chromium/v8/src/ic/mips64/ic-compiler-mips64.cc
index 8cdd8f03bc8..276f3afd389 100644
--- a/chromium/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/chromium/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -10,114 +10,6 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Branch(&miss, ne, this->name(), Operand(name));
- }
- }
-
- Label number_case;
- Register match = scratch2();
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- // Separate compare from branch, to provide path for above JumpIfSmi().
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ GetWeakValue(match, cell);
- __ Dsubu(match, match, Operand(map_reg));
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
- Operand(zero_reg));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- Register match = scratch2();
- __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ GetWeakValue(match, cell);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
- Operand(map_reg));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, match, Operand(map_reg));
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
#define __ ACCESS_MASM(masm)
@@ -130,7 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(a0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/chromium/v8/src/ic/mips64/ic-mips64.cc b/chromium/v8/src/ic/mips64/ic-mips64.cc
index 0d7af56071f..c5da5fbb424 100644
--- a/chromium/v8/src/ic/mips64/ic-mips64.cc
+++ b/chromium/v8/src/ic/mips64/ic-mips64.cc
@@ -25,7 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// Register usage:
// type: holds the receiver instance type on entry.
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
__ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}
@@ -314,8 +313,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -328,8 +326,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -344,8 +341,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -357,8 +353,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -433,7 +428,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
@@ -475,8 +470,12 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = a4;
+ Register scratch = a4;
+ Register scratch2 = t0;
Register address = a5;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, scratch2, address));
+
if (check_map == kCheckMap) {
__ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(fast_double, ne, elements_map,
@@ -490,12 +489,11 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
- __ ld(scratch_value, MemOperand(address));
- __ Branch(&holecheck_passed1, ne, scratch_value,
+ __ ld(scratch, MemOperand(address));
+ __ Branch(&holecheck_passed1, ne, scratch,
Operand(masm->isolate()->factory()->the_hole_value()));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -505,37 +503,36 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether the array is smi-only when writing a smi.
__ Daddu(address, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiScale(scratch_value, key, kPointerSizeLog2);
- __ Daddu(address, address, scratch_value);
+ __ SmiScale(scratch, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch);
__ sd(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Daddu(address, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiScale(scratch_value, key, kPointerSizeLog2);
- __ Daddu(address, address, scratch_value);
+ __ SmiScale(scratch, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch);
__ sd(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+ __ mov(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -555,34 +552,31 @@ static void KeyedStoreGenerateMegamorphicHelper(
kHeapObjectTag));
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
- __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ __ lw(scratch, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch,
Operand(static_cast<int32_t>(kHoleNanUpper32)));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key,
- elements, // Overwritten.
- a3, // Scratch regs...
- a4, &transition_double_elements);
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
+ &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, a4, Operand(at));
+ __ Branch(&non_double_value, ne, scratch, Operand(at));
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -593,7 +587,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, a4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -605,7 +599,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, a4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -674,20 +668,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(a4, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
-
- DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+
+ DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -741,23 +733,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
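
With FLAG_vector_stores gone, store misses always pass five values. A small standalone model of the fixed argument layout the runtime entries now rely on (field names are illustrative, not the real calling convention):

// What Runtime::kStoreIC_Miss / kKeyedStoreIC_Miss always receive now; the
// old "FLAG_vector_stores ? 5 : 3" arity selection disappears.
struct StoreMissArgs {
  void* receiver;  // object being stored into
  void* name;      // property name or element key
  void* value;     // value to store
  void* slot;      // Smi-tagged index into the feedback vector
  void* vector;    // TypeFeedbackVector used to update feedback on miss
};
static_assert(sizeof(StoreMissArgs) == 5 * sizeof(void*),
              "five arguments, unconditionally");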
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -783,8 +769,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -847,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address andi_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -877,8 +863,6 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
// andi at, rx, 0
@@ -887,7 +871,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// andi at, rx, #kSmiTagMask
// Branch <target>, ne, at, Operand(zero_reg)
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
@@ -898,13 +882,44 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
DCHECK(Assembler::IsBranch(branch_instr));
- if (Assembler::IsBeq(branch_instr)) {
- patcher.ChangeBranchCondition(ne);
- } else {
- DCHECK(Assembler::IsBne(branch_instr));
- patcher.ChangeBranchCondition(eq);
+
+ uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
+ // Currently only the 'eq' and 'ne' cond values are supported, via the simple
+ // branch instructions and their r6 variants (the opcode field encodes the
+ // branch type). There are some special cases (see Assembler::IsBranch()), so
+ // extending this would be tricky.
+ DCHECK(opcode == BEQ || // BEQ
+ opcode == BNE || // BNE
+ opcode == POP10 || // BEQC
+ opcode == POP30 || // BNEC
+ opcode == POP66 || // BEQZC
+ opcode == POP76); // BNEZC
+ switch (opcode) {
+ case BEQ:
+ opcode = BNE; // change BEQ to BNE.
+ break;
+ case POP10:
+ opcode = POP30; // change BEQC to BNEC.
+ break;
+ case POP66:
+ opcode = POP76; // change BEQZC to BNEZC.
+ break;
+ case BNE:
+ opcode = BEQ; // change BNE to BEQ.
+ break;
+ case POP30:
+ opcode = POP10; // change BNEC to BEQC.
+ break;
+ case POP76:
+ opcode = POP66; // change BNEZC to BEQZC.
+ break;
+ default:
+ UNIMPLEMENTED();
}
+ patcher.ChangeBranchCondition(branch_instr, opcode);
}
} // namespace internal
} // namespace v8
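
Two related changes land in this MIPS patching code: CodePatcher now takes the Isolate explicitly (threaded in through PatchInlinedSmiCode rather than fetched globally), and the branch flip rewrites the opcode field so the r6 compact branches are handled too. A self-contained sketch of the flip table; the POPxx names follow the labels in the diff's own comments:

#include <cstdint>
#include <cstdlib>

enum Opcode : uint32_t { BEQ, BNE, POP10 /*BEQC*/, POP30 /*BNEC*/,
                         POP66 /*BEQZC*/, POP76 /*BNEZC*/ };

// Invert a smi-check branch: jump-if-smi becomes jump-if-not-smi and back,
// for both the classic and the r6 compact encodings.
inline Opcode InvertSmiCheckBranch(Opcode op) {
  switch (op) {
    case BEQ:   return BNE;
    case BNE:   return BEQ;
    case POP10: return POP30;  // BEQC  <-> BNEC
    case POP30: return POP10;
    case POP66: return POP76;  // BEQZC <-> BNEZC
    case POP76: return POP66;
  }
  abort();  // unsupported branch kind (see Assembler::IsBranch())
}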
diff --git a/chromium/v8/src/ic/ppc/access-compiler-ppc.cc b/chromium/v8/src/ic/ppc/access-compiler-ppc.cc
index fcbbc661218..b1e06e16e14 100644
--- a/chromium/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/chromium/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || r6.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r6, r7, r8};
return registers;
}
diff --git a/chromium/v8/src/ic/ppc/handler-compiler-ppc.cc b/chromium/v8/src/ic/ppc/handler-compiler-ppc.cc
index 52efcf91a4c..8b48755bbff 100644
--- a/chromium/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/chromium/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
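
The NullCallWrapper-to-CheckDebugStepCallWrapper swap appears for every accessor invocation in this diff; the wrapper hooks the generated call so the debugger's step-in handling can fire when stepping into a getter or setter. A sketch of the interface shape, which is assumed here rather than taken from the headers:

// Hypothetical shape of the wrapper protocol: the masm invokes these around
// the emitted call instruction.
class CallWrapper {
 public:
  virtual ~CallWrapper() = default;
  virtual void BeforeCall(int call_size) const {}
  virtual void AfterCall() const {}
};

class NullCallWrapper : public CallWrapper {};  // emits nothing (old behavior)

class CheckDebugStepCallWrapper : public CallWrapper {
 public:
  // Assumed behavior: consult the isolate's stepping state so the debugger
  // can flood the callee with one-shot breakpoints when stepping in.
  void AfterCall() const override { /* if (stepping) PrepareStepIn(); */ }
};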
@@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmpi(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
__ blt(miss_label);
// Load properties array.
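
FIRST_SPEC_OBJECT_TYPE to FIRST_JS_RECEIVER_TYPE here reads as a rename of the same instance-type boundary rather than a renumbering; the negative-lookup guard still means "bail unless the receiver is a JSReceiver". Expressed as a predicate:

// Equivalent of the cmpi/blt pair above; instance types at or above the
// boundary are JSReceivers (JS objects and proxies).
inline bool IsJSReceiverInstanceType(int instance_type,
                                     int first_js_receiver_type) {
  return instance_type >= first_js_receiver_type;
}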
@@ -167,10 +169,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ LoadP(result, MemOperand(cp, offset));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- __ LoadP(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ LoadP(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
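
The three dependent loads through the global object collapse into LoadNativeContextSlot, which reaches the native context straight from the context register. A standalone model of the shortened path (treating contexts as arrays of tagged values, and assuming slot 0 links to the native context, which is a simplification):

using Tagged = void*;

// Two dependent loads instead of the previous three, with no detour
// through the global object.
inline Tagged LoadNativeContextSlot(Tagged* context, int index) {
  Tagged* native = static_cast<Tagged*>(context[0]);  // cp -> native context
  return native[index];                               // one indexed load
}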
@@ -221,8 +220,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
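
Since CallRuntime(id) now derives its argument count from the function table, the new DCHECK pins the compile-time constant kInterceptorArgsLength to the table's declared arity, so any drift between the two fails loudly in debug builds. A tiny standalone equivalent:

#include <cassert>

constexpr int kInterceptorArgsLength = 3;  // assumed: name, receiver, holder

struct RuntimeFunctionDesc { int nargs; };

inline void CallRuntimeChecked(const RuntimeFunctionDesc& fn) {
  // Mirrors the new DCHECK: what was pushed must match the declared arity.
  assert(kInterceptorArgsLength == fn.nargs);
  // ... call with fn.nargs stack arguments ...
}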
@@ -292,6 +293,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
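
The added fast-handler branch gives API accessors a two-tier dispatch: jump straight into a precompiled Code handler when one exists, otherwise fall through to materializing the C callback address. A standalone model of that decision, with hypothetical types:

struct Code;                   // precompiled fast handler (opaque here)
using CCallback = void (*)();  // raw C entry point

struct CallHandlerInfo {
  Code* fast_handler;          // may be null
  CCallback callback;
};

void JumpToCode(Code* code);   // tail call into generated code (assumed)

inline void DispatchApiAccessor(const CallHandlerInfo& info) {
  if (info.fast_handler != nullptr) {
    JumpToCode(info.fast_handler);  // skips the ExternalReference setup
    return;
  }
  info.callback();                  // previous, unconditional slow path
}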
@@ -306,15 +314,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -323,7 +326,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -332,8 +335,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -355,8 +357,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
@@ -704,8 +706,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -729,7 +730,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(ip, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -741,7 +742,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/ppc/ic-compiler-ppc.cc b/chromium/v8/src/ic/ppc/ic-compiler-ppc.cc
index 578b73d40e9..c6b36f29f48 100644
--- a/chromium/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/chromium/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -20,112 +20,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
StoreDescriptor::ValueRegister(), r0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ lbz(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Cmpi(this->name(), Operand(name), r0);
- __ bne(&miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- Label next;
- __ bne(&next);
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ bind(&next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ bne(&next_map);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/chromium/v8/src/ic/ppc/ic-ppc.cc b/chromium/v8/src/ic/ppc/ic-ppc.cc
index 09117179ea2..78daac2657c 100644
--- a/chromium/v8/src/ic/ppc/ic-ppc.cc
+++ b/chromium/v8/src/ic/ppc/ic-ppc.cc
@@ -26,8 +26,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ beq(global_object);
- __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ beq(global_object);
__ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ beq(global_object);
}
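
The dropped comparison reflects JS_BUILTINS_OBJECT_TYPE being retired; only two instance types still count as "global" for this check. As a predicate:

enum InstanceType { JS_GLOBAL_OBJECT_TYPE, JS_GLOBAL_PROXY_TYPE, OTHER_TYPE };

// The builtins-object case is gone; only these two remain "global".
inline bool IsGlobalObjectInstanceType(InstanceType t) {
  return t == JS_GLOBAL_OBJECT_TYPE || t == JS_GLOBAL_PROXY_TYPE;
}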
@@ -321,8 +319,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -335,8 +332,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -351,8 +347,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -364,8 +359,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -442,7 +436,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
@@ -474,23 +468,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -505,13 +493,15 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = r7;
+ Register scratch = r7;
Register address = r8;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, address));
+
if (check_map == kCheckMap) {
__ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ mov(scratch_value,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ cmp(elements_map, scratch_value);
+ __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ cmp(elements_map, scratch);
__ bne(fast_double);
}
@@ -520,13 +510,11 @@ static void KeyedStoreGenerateMegamorphicHelper(
// there may be a callback on the element
Label holecheck_passed1;
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch_value, key);
- __ LoadPX(scratch_value, MemOperand(address, scratch_value));
- __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()),
- r0);
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ LoadPX(scratch, MemOperand(address, scratch));
+ __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
__ bne(&holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -536,35 +524,32 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
- __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
- r0);
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
}
// It's irrelevant whether the array is smi-only or not when writing a smi.
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch_value, key);
- __ StorePX(value, MemOperand(address, scratch_value));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ StorePX(value, MemOperand(address, scratch));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
- __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
- r0);
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
}
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch_value, key);
- __ StorePUX(value, MemOperand(address, scratch_value));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ StorePUX(value, MemOperand(address, scratch));
// Update write barrier for the elements array address.
- __ mr(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ __ mr(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -582,34 +567,32 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ addi(address, elements,
Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
kHeapObjectTag)));
- __ SmiToDoubleArrayOffset(scratch_value, key);
- __ lwzx(scratch_value, MemOperand(address, scratch_value));
- __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0);
+ __ SmiToDoubleArrayOffset(scratch, key);
+ __ lwzx(scratch, MemOperand(address, scratch));
+ __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
__ bne(&fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r6, d0,
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
- __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
- r0);
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex);
+ __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ bne(&non_double_value);
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -620,7 +603,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r7, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -632,7 +615,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r7, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -706,19 +689,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r7, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -796,8 +777,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -864,7 +844,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
//
// This code is paired with the JumpPatchSite class in full-codegen-ppc.cc
//
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
@@ -902,7 +883,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// rlwinm(r0, value, 0, 31, 31, SetRC);
// bc(label, BT/BF, 2)
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Assembler::GetRA(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsCmpRegister(instr_at_patch));
diff --git a/chromium/v8/src/ic/stub-cache.h b/chromium/v8/src/ic/stub-cache.h
index cb1b62848ed..4b27e6e396d 100644
--- a/chromium/v8/src/ic/stub-cache.h
+++ b/chromium/v8/src/ic/stub-cache.h
@@ -165,7 +165,7 @@ class StubCache {
DISALLOW_COPY_AND_ASSIGN(StubCache);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STUB_CACHE_H_
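
The stub-cache.h hunk is part of the repo-wide namespace-comment cleanup: each closing brace now names its own namespace. The convention being enforced:

namespace v8 {
namespace internal {

// ... declarations ...

}  // namespace internal
}  // namespace v8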
diff --git a/chromium/v8/src/ic/x64/access-compiler-x64.cc b/chromium/v8/src/ic/x64/access-compiler-x64.cc
index 85b44ef4757..b8d50b3d2cd 100644
--- a/chromium/v8/src/ic/x64/access-compiler-x64.cc
+++ b/chromium/v8/src/ic/x64/access-compiler-x64.cc
@@ -31,8 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores ||
- rbx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, rbx, rdi, r8};
return registers;
}
diff --git a/chromium/v8/src/ic/x64/handler-compiler-x64.cc b/chromium/v8/src/ic/x64/handler-compiler-x64.cc
index 1490c921fc3..c09eca68dd6 100644
--- a/chromium/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/chromium/v8/src/ic/x64/handler-compiler-x64.cc
@@ -56,7 +56,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -78,10 +78,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ movp(result, Operand(rsi, offset));
- __ movp(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
- __ movp(result, Operand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ movp(result,
FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -115,8 +112,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -188,6 +187,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ Move(api_function_address, function_address,
@@ -241,8 +247,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -284,8 +290,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -304,26 +310,16 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ PopReturnAddressTo(r11);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ Push(slot);
- __ Push(vector);
- __ PushReturnAddressFrom(r11);
- } else {
- DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ PushReturnAddressFrom(rbx);
- }
+ __ PopReturnAddressTo(r11);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ Push(slot);
+ __ Push(vector);
+ __ PushReturnAddressFrom(r11);
}
@@ -332,7 +328,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -341,8 +337,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -364,8 +359,8 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
@@ -722,8 +717,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ PushReturnAddressFrom(scratch2());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -748,7 +742,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -764,7 +758,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/x64/ic-compiler-x64.cc b/chromium/v8/src/ic/x64/ic-compiler-x64.cc
index fd92cca5702..9d734338bb9 100644
--- a/chromium/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/chromium/v8/src/ic/x64/ic-compiler-x64.cc
@@ -28,111 +28,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- Register map_reg = scratch1();
- __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- // Check map and tail call if there's a match
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
-
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
- __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Cmp(this->name(), name);
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- // Check map and tail call if there's a match
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
- }
- }
- DCHECK(number_of_handled_maps > 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/chromium/v8/src/ic/x64/ic-x64.cc b/chromium/v8/src/ic/x64/ic-x64.cc
index ff74a965e43..bf4ad96f69c 100644
--- a/chromium/v8/src/ic/x64/ic-x64.cc
+++ b/chromium/v8/src/ic/x64/ic-x64.cc
@@ -25,8 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
__ j(equal, global_object);
- __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, global_object);
__ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
__ j(equal, global_object);
}
@@ -346,7 +344,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
@@ -566,18 +564,16 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
- if (FLAG_vector_stores) {
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ Move(vector, dummy_vector);
- __ Move(slot, Smi::FromInt(slot_index));
- }
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ Move(vector, dummy_vector);
+ __ Move(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -676,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -696,8 +691,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -709,8 +703,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -729,28 +722,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- // This shouldn't be called.
- __ int3();
- return;
- }
-
- // The return address is on the stack.
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), rbx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
+ // This shouldn't be called.
+ __ int3();
}
@@ -765,13 +743,11 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(receiver);
__ Push(name);
__ Push(value);
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
- DCHECK(!temp.is(slot) && !temp.is(vector));
- __ Push(slot);
- __ Push(vector);
- }
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(!temp.is(slot) && !temp.is(vector));
+ __ Push(slot);
+ __ Push(vector);
__ PushReturnAddressFrom(temp);
}
@@ -781,8 +757,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -791,8 +766,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Register dictionary = r11;
- DCHECK(!FLAG_vector_stores ||
- !AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
+ DCHECK(!AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
VectorStoreICDescriptor::SlotRegister()));
Label miss;
@@ -814,8 +788,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -853,7 +826,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
diff --git a/chromium/v8/src/ic/x87/access-compiler-x87.cc b/chromium/v8/src/ic/x87/access-compiler-x87.cc
index a80c649e451..2c1b9427562 100644
--- a/chromium/v8/src/ic/x87/access-compiler-x87.cc
+++ b/chromium/v8/src/ic/x87/access-compiler-x87.cc
@@ -30,8 +30,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores ||
- ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/chromium/v8/src/ic/x87/handler-compiler-x87.cc b/chromium/v8/src/ic/x87/handler-compiler-x87.cc
index d9f7e8012d9..cc43ed298d1 100644
--- a/chromium/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/chromium/v8/src/ic/x87/handler-compiler-x87.cc
@@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -92,7 +92,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -114,10 +114,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(result, Operand(esi, offset));
- __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
- __ mov(result, Operand(result, Context::SlotOffset(index)));
+ __ LoadGlobalFunction(index, result);
// Load its initial map. The global functions all have initial maps.
__ mov(result,
FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -206,6 +203,12 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the code.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ mov(api_function_address, Immediate(function_address));
@@ -261,7 +264,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -294,8 +297,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -303,25 +308,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // which contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
}
@@ -330,7 +325,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -339,8 +334,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -362,18 +356,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
}
-void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
- Register scratch) {
- //                  current        after GeneratePushMap
- // -------------------------------------------------
- //       ret addr   slot
- //       vector     vector
- // sp -> slot       map
- //             sp -> ret addr
- //
- __ xchg(map_reg, Operand(esp, 0));
- __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
- __ push(map_reg);
+void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
+ Register current_map, Register destination_map) {
+ DCHECK(destination_map.is(StoreTransitionHelper::MapRegister()));
+ DCHECK(current_map.is(StoreTransitionHelper::VectorRegister()));
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(isolate());
+ __ mov(destination_map, current_map);
+ __ pop(current_map);
+ __ mov(Operand::StaticVariable(virtual_slot), current_map);
+ __ pop(current_map); // put vector in place.
}
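
RearrangeVectorAndSlot on x87 works around the port's register scarcity: the map takes over the vector's register, and the slot is spilled to a per-isolate "virtual slot register" rather than occupying a real one. A standalone model of the shuffle under those assumptions:

#include <stack>

struct Isolate { int virtual_slot_register = 0; };  // isolate-side spill slot

// Entry state (inferred from the DCHECKs and pops above): vector_reg holds
// the map value, and the stack is [slot, vector, ...] top-first.
inline void RearrangeVectorAndSlot(std::stack<int>& stk, int& map_reg,
                                   int& vector_reg, Isolate& isolate) {
  map_reg = vector_reg;                       // map into the map register
  isolate.virtual_slot_register = stk.top();  // spill the slot...
  stk.pop();
  vector_reg = stk.top();                     // ...and restore the vector
  stk.pop();
}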
@@ -734,8 +726,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ push(scratch2()); // restore old return address
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -760,7 +751,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -776,7 +767,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/chromium/v8/src/ic/x87/ic-compiler-x87.cc b/chromium/v8/src/ic/x87/ic-compiler-x87.cc
index d29e32108b6..9edf63b7224 100644
--- a/chromium/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/chromium/v8/src/ic/x87/ic-compiler-x87.cc
@@ -27,104 +27,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ push(ebx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
- __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
- Register map_reg = scratch1();
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
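
A pattern repeated across these hunks drops the explicit argument count and result size from TailCallRuntime call sites, leaving the callee to recover the count from a static per-function table. A minimal sketch of that idea, assuming a simplified runtime-function table (the names and counts below are illustrative stand-ins, not V8's actual entries):

#include <cassert>
#include <cstdio>

// Hypothetical, simplified stand-in for the runtime-function table: each
// entry records its own argument count, so call sites need not repeat it.
struct RuntimeFunction {
  const char* name;
  int nargs;  // the real table uses -1 for "variable arity"
};

enum RuntimeId { kSetProperty, kStoreIC_Miss, kNumRuntimeIds };

static const RuntimeFunction kRuntimeTable[kNumRuntimeIds] = {
    {"SetProperty", 4}, {"StoreIC_Miss", 5}};

// The helper looks the count up itself instead of taking it as a parameter.
void TailCallRuntime(RuntimeId id) {
  const RuntimeFunction& f = kRuntimeTable[id];
  assert(f.nargs >= 0);
  std::printf("tail call %s with %d stack arguments\n", f.name, f.nargs);
}

int main() {
  TailCallRuntime(kSetProperty);  // argument count comes from the table
  TailCallRuntime(kStoreIC_Miss);
  return 0;
}

This is why each call site above shrinks to a single argument: duplicating the count at every caller invited the kind of 5-vs-3 mismatches the FLAG_vector_stores branches had to guard.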
diff --git a/chromium/v8/src/ic/x87/ic-x87.cc b/chromium/v8/src/ic/x87/ic-x87.cc
index 53e7a5ca0c7..d4cc3ce80aa 100644
--- a/chromium/v8/src/ic/x87/ic-x87.cc
+++ b/chromium/v8/src/ic/x87/ic-x87.cc
@@ -25,8 +25,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
__ j(equal, global_object);
}
@@ -341,7 +339,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(isolate);
int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
@@ -563,26 +561,22 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot = dummy_vector->GetIndex(
- FeedbackVectorICSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, key, edi, no_reg);
- if (FLAG_vector_stores) {
- __ pop(VectorStoreICDescriptor::VectorRegister());
- __ pop(VectorStoreICDescriptor::SlotRegister());
- }
+ __ pop(VectorStoreICDescriptor::VectorRegister());
+ __ pop(VectorStoreICDescriptor::SlotRegister());
// Cache miss.
__ jmp(&miss);
@@ -678,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -697,8 +690,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -709,8 +701,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -728,27 +719,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- // This shouldn't be called.
- __ int3();
- return;
- }
-
- // Return address is on the stack.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
+ // This shouldn't be called.
+ // TODO(mvstanton): remove this method.
+ __ int3();
+ return;
}
@@ -756,25 +735,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // Contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // Contains the return address.
}
@@ -783,8 +752,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -800,25 +768,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
- if (FLAG_vector_stores) {
- __ push(vector);
- __ push(slot);
- }
+ __ push(vector);
+ __ push(slot);
Register dictionary = ebx;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(FLAG_vector_stores ? 3 : 1);
+ __ Drop(3);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
- if (FLAG_vector_stores) {
- __ pop(slot);
- __ pop(vector);
- }
+ __ pop(slot);
+ __ pop(vector);
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
@@ -830,8 +794,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -869,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
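
The StoreIC_PushArgs rewrite above relies on a single xchg with the stack top to splice five arguments underneath the return address without a scratch register. A small simulation of the resulting stack layout (plain C++, purely illustrative):

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// The return address sits on top of the stack; xchg(receiver, [esp])
// swaps it into the receiver register so the five arguments can be
// pushed before the return address is pushed back on top.
int main() {
  std::vector<std::string> stack = {"ret_addr"};  // back() is the stack top
  std::string receiver = "receiver", name = "name", value = "value",
              slot = "slot", vector = "vector";

  std::swap(receiver, stack.back());  // xchg receiver, [esp]
  stack.push_back(name);
  stack.push_back(value);
  stack.push_back(slot);
  stack.push_back(vector);
  stack.push_back(receiver);  // receiver now holds the return address

  // Top-down layout: ret_addr, vector, slot, value, name, receiver.
  for (auto it = stack.rbegin(); it != stack.rend(); ++it)
    std::printf("%s\n", it->c_str());
  return 0;
}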
diff --git a/chromium/v8/src/ic/x87/stub-cache-x87.cc b/chromium/v8/src/ic/x87/stub-cache-x87.cc
index 2522223eade..dfc0ef6c665 100644
--- a/chromium/v8/src/ic/x87/stub-cache-x87.cc
+++ b/chromium/v8/src/ic/x87/stub-cache-x87.cc
@@ -24,7 +24,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
bool is_vector_store =
diff --git a/chromium/v8/src/icu_util.h b/chromium/v8/src/icu_util.h
index cd98ff0dfc5..c308decfe5f 100644
--- a/chromium/v8/src/icu_util.h
+++ b/chromium/v8/src/icu_util.h
@@ -14,6 +14,7 @@ namespace internal {
// function should be called before ICU is used.
bool InitializeICU(const char* icu_data_file);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ICU_UTIL_H_
diff --git a/chromium/v8/src/identity-map.cc b/chromium/v8/src/identity-map.cc
index 1d23af95e8c..723cdfa2a64 100644
--- a/chromium/v8/src/identity-map.cc
+++ b/chromium/v8/src/identity-map.cc
@@ -5,6 +5,7 @@
#include "src/identity-map.h"
#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -14,31 +15,26 @@ static const int kInitialIdentityMapSize = 4;
static const int kResizeFactor = 4;
IdentityMapBase::~IdentityMapBase() {
- if (keys_) {
- Heap::OptionalRelocationLock lock(heap_, concurrent_);
- heap_->UnregisterStrongRoots(keys_);
- }
+ if (keys_) heap_->UnregisterStrongRoots(keys_);
}
-IdentityMapBase::RawEntry IdentityMapBase::Lookup(Handle<Object> key) {
- AllowHandleDereference for_lookup;
- int index = LookupIndex(*key);
+IdentityMapBase::RawEntry IdentityMapBase::Lookup(Object* key) {
+ int index = LookupIndex(key);
return index >= 0 ? &values_[index] : nullptr;
}
-IdentityMapBase::RawEntry IdentityMapBase::Insert(Handle<Object> key) {
- AllowHandleDereference for_lookup;
- int index = InsertIndex(*key);
+IdentityMapBase::RawEntry IdentityMapBase::Insert(Object* key) {
+ int index = InsertIndex(key);
DCHECK_GE(index, 0);
return &values_[index];
}
int IdentityMapBase::Hash(Object* address) {
+ CHECK_NE(address, heap_->not_mapped_symbol());
uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
- CHECK_NE(0U, raw_address); // Cannot store Smi 0 as a key here, sorry.
// Xor some of the upper bits, since the lower 2 or 3 are usually aligned.
return static_cast<int>((raw_address >> 11) ^ raw_address);
}
@@ -46,26 +42,28 @@ int IdentityMapBase::Hash(Object* address) {
int IdentityMapBase::LookupIndex(Object* address) {
int start = Hash(address) & mask_;
+ Object* not_mapped = heap_->not_mapped_symbol();
for (int index = start; index < size_; index++) {
if (keys_[index] == address) return index; // Found.
- if (keys_[index] == nullptr) return -1; // Not found.
+ if (keys_[index] == not_mapped) return -1; // Not found.
}
for (int index = 0; index < start; index++) {
if (keys_[index] == address) return index; // Found.
- if (keys_[index] == nullptr) return -1; // Not found.
+ if (keys_[index] == not_mapped) return -1; // Not found.
}
return -1;
}
int IdentityMapBase::InsertIndex(Object* address) {
+ Object* not_mapped = heap_->not_mapped_symbol();
while (true) {
int start = Hash(address) & mask_;
int limit = size_ / 2;
// Search up to {limit} entries.
for (int index = start; --limit > 0; index = (index + 1) & mask_) {
if (keys_[index] == address) return index; // Found.
- if (keys_[index] == nullptr) { // Free entry.
+ if (keys_[index] == not_mapped) { // Free entry.
keys_[index] = address;
return index;
}
@@ -81,8 +79,7 @@ int IdentityMapBase::InsertIndex(Object* address) {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
-IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Handle<Object> key) {
- Heap::OptionalRelocationLock lock(heap_, concurrent_);
+IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
RawEntry result;
if (size_ == 0) {
// Allocate the initial storage for keys and values.
@@ -91,7 +88,8 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Handle<Object> key) {
gc_counter_ = heap_->gc_count();
keys_ = zone_->NewArray<Object*>(size_);
- memset(keys_, 0, sizeof(Object*) * size_);
+ Object* not_mapped = heap_->not_mapped_symbol();
+ for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
values_ = zone_->NewArray<void*>(size_);
memset(values_, 0, sizeof(void*) * size_);
@@ -114,10 +112,9 @@ IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Handle<Object> key) {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Handle<Object> key) {
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) {
if (size_ == 0) return nullptr;
- Heap::OptionalRelocationLock lock(heap_, concurrent_);
RawEntry result = Lookup(key);
if (result == nullptr && gc_counter_ != heap_->gc_count()) {
Rehash(); // Rehash is expensive, so only do it in case of a miss.
@@ -135,15 +132,16 @@ void IdentityMapBase::Rehash() {
// Search the table looking for keys that wouldn't be found with their
// current hashcode and evacuate them.
int last_empty = -1;
+ Object* not_mapped = heap_->not_mapped_symbol();
for (int i = 0; i < size_; i++) {
- if (keys_[i] == nullptr) {
+ if (keys_[i] == not_mapped) {
last_empty = i;
} else {
int pos = Hash(keys_[i]) & mask_;
if (pos <= last_empty || pos > i) {
// Evacuate an entry that is in the wrong place.
reinsert.push_back(std::pair<Object*, void*>(keys_[i], values_[i]));
- keys_[i] = nullptr;
+ keys_[i] = not_mapped;
values_[i] = nullptr;
last_empty = i;
}
@@ -153,7 +151,7 @@ void IdentityMapBase::Rehash() {
for (auto pair : reinsert) {
int index = InsertIndex(pair.first);
DCHECK_GE(index, 0);
- DCHECK_NULL(values_[index]);
+ DCHECK_NE(heap_->not_mapped_symbol(), values_[index]);
values_[index] = pair.second;
}
}
@@ -172,12 +170,13 @@ void IdentityMapBase::Resize() {
CHECK_LE(size_, (1024 * 1024 * 16)); // that would be extreme...
keys_ = zone_->NewArray<Object*>(size_);
- memset(keys_, 0, sizeof(Object*) * size_);
+ Object* not_mapped = heap_->not_mapped_symbol();
+ for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
values_ = zone_->NewArray<void*>(size_);
memset(values_, 0, sizeof(void*) * size_);
for (int i = 0; i < old_size; i++) {
- if (old_keys[i] == nullptr) continue;
+ if (old_keys[i] == not_mapped) continue;
int index = InsertIndex(old_keys[i]);
DCHECK_GE(index, 0);
values_[index] = old_values[i];
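
The identity-map hunks above swap the nullptr free-slot marker for a dedicated not_mapped sentinel, so a key whose raw address is zero-like can still be stored. A self-contained sketch of the same open-addressing scheme, with a local sentinel standing in for heap_->not_mapped_symbol() (fixed-size table, no rehash or resize):

#include <cstdint>
#include <cstdio>

// A distinct sentinel object marks free slots instead of nullptr.
static int kSentinelStorage;
static void* const kNotMapped = &kSentinelStorage;

struct IdentityMap {
  static const int kSize = 8;  // power of two, so kSize - 1 is the mask
  void* keys[kSize];
  int values[kSize];

  IdentityMap() {
    for (int i = 0; i < kSize; i++) keys[i] = kNotMapped;
  }

  static int Hash(void* key) {
    uintptr_t raw = reinterpret_cast<uintptr_t>(key);
    // Fold in upper bits; the low 2 or 3 are usually alignment zeros.
    return static_cast<int>((raw >> 11) ^ raw);
  }

  int* Get(void* key) {  // find-or-insert, linear probing
    int mask = kSize - 1;
    for (int i = Hash(key) & mask, n = 0; n < kSize; i = (i + 1) & mask, n++) {
      if (keys[i] == key) return &values[i];  // found
      if (keys[i] == kNotMapped) {            // free slot: insert here
        keys[i] = key;
        return &values[i];
      }
    }
    return nullptr;  // table full; the real map resizes instead
  }
};

int main() {
  IdentityMap map;
  int a, b;
  *map.Get(&a) = 1;
  *map.Get(&b) = 2;
  std::printf("%d %d\n", *map.Get(&a), *map.Get(&b));  // prints: 1 2
}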
diff --git a/chromium/v8/src/identity-map.h b/chromium/v8/src/identity-map.h
index 2143e24e37b..2c4a0f33991 100644
--- a/chromium/v8/src/identity-map.h
+++ b/chromium/v8/src/identity-map.h
@@ -17,11 +17,6 @@ class Zone;
// Base class of identity maps contains shared code for all template
// instantiations.
class IdentityMapBase {
- public:
- // Enable or disable concurrent mode for this map. Concurrent mode implies
- // taking the heap's relocation lock during most operations.
- void SetConcurrent(bool concurrent) { concurrent_ = concurrent; }
-
protected:
// Allow Tester to access internals, including changing the address of objects
// within the {keys_} array in order to simulate a moving GC.
@@ -32,7 +27,6 @@ class IdentityMapBase {
IdentityMapBase(Heap* heap, Zone* zone)
: heap_(heap),
zone_(zone),
- concurrent_(false),
gc_counter_(-1),
size_(0),
mask_(0),
@@ -40,8 +34,8 @@ class IdentityMapBase {
values_(nullptr) {}
~IdentityMapBase();
- RawEntry GetEntry(Handle<Object> key);
- RawEntry FindEntry(Handle<Object> key);
+ RawEntry GetEntry(Object* key);
+ RawEntry FindEntry(Object* key);
private:
// Internal implementation should not be called directly by subclasses.
@@ -49,13 +43,12 @@ class IdentityMapBase {
int InsertIndex(Object* address);
void Rehash();
void Resize();
- RawEntry Lookup(Handle<Object> key);
- RawEntry Insert(Handle<Object> key);
+ RawEntry Lookup(Object* key);
+ RawEntry Insert(Object* key);
int Hash(Object* address);
Heap* heap_;
Zone* zone_;
- bool concurrent_;
int gc_counter_;
int size_;
int mask_;
@@ -79,20 +72,21 @@ class IdentityMap : public IdentityMapBase {
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => a pointer to a new storage location for the value
- V* Get(Handle<Object> key) { return reinterpret_cast<V*>(GetEntry(key)); }
+ V* Get(Handle<Object> key) { return Get(*key); }
+ V* Get(Object* key) { return reinterpret_cast<V*>(GetEntry(key)); }
// Searches this map for the given key using the object's address
// as the identity, returning:
// found => a pointer to the storage location for the value
// not found => {nullptr}
- V* Find(Handle<Object> key) { return reinterpret_cast<V*>(FindEntry(key)); }
+ V* Find(Handle<Object> key) { return Find(*key); }
+ V* Find(Object* key) { return reinterpret_cast<V*>(FindEntry(key)); }
// Set the value for the given key.
- void Set(Handle<Object> key, V value) {
- *(reinterpret_cast<V*>(GetEntry(key))) = value;
- }
+ void Set(Handle<Object> key, V v) { Set(*key, v); }
+ void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_IDENTITY_MAP_H_
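
The header diff above adds raw Object* overloads next to the Handle-based ones, each Handle version now a one-line forwarder that dereferences and delegates. The same thin-forwarding pattern in isolation (the Object and Handle types here are trivial stand-ins, not V8's):

#include <cstdio>

// Illustrative stand-ins for V8's Object* / Handle<Object>.
struct Object { int id; };
template <typename T>
struct Handle {
  T* location;
  T* operator*() const { return location; }
};

template <typename V>
class IdentityMap {
 public:
  // Handle overloads forward onto the raw versions, so callers that
  // already hold a raw pointer skip the handle dereference.
  V* Get(Handle<Object> key) { return Get(*key); }
  V* Get(Object* key) { /* find-or-insert */ return &slot_; }
  void Set(Handle<Object> key, V v) { Set(*key, v); }
  void Set(Object* key, V v) { *Get(key) = v; }

 private:
  V slot_{};  // stub storage; the real map hashes into key/value arrays
};

int main() {
  Object o{7};
  Handle<Object> h{&o};
  IdentityMap<int> map;
  map.Set(h, 42);                      // Handle overload forwards to raw
  std::printf("%d\n", *map.Get(&o));   // raw overload directly: 42
}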
diff --git a/chromium/v8/src/interface-descriptors.cc b/chromium/v8/src/interface-descriptors.cc
index b71f9731204..94ed7020c3f 100644
--- a/chromium/v8/src/interface-descriptors.cc
+++ b/chromium/v8/src/interface-descriptors.cc
@@ -14,8 +14,8 @@ Type* SmiType(Zone* zone) {
}
-Type* UntaggedSigned32(Zone* zone) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone);
+Type* UntaggedIntegral32(Zone* zone) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone);
}
@@ -29,7 +29,7 @@ Type* AnyTagged(Zone* zone) {
Type* ExternalPointer(Zone* zone) {
return Type::Intersect(Type::Internal(), Type::UntaggedPointer(), zone);
}
-}
+} // namespace
Type::FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
@@ -75,6 +75,18 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
}
+void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr, nullptr);
+}
+
+
+void VoidDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
+
Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
@@ -86,6 +98,7 @@ Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
+
void LoadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
@@ -109,6 +122,21 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
}
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ if (SlotRegister().is(no_reg)) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister(), VectorRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+ } else {
+ Register registers[] = {ReceiverRegister(), NameRegister(),
+ ValueRegister(), MapRegister(),
+ SlotRegister(), VectorRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+ }
+}
+
+
Type::FunctionType*
StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
@@ -129,7 +157,7 @@ LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
- function->InitParameter(0, UntaggedSigned32(zone));
+ function->InitParameter(0, UntaggedIntegral32(zone));
return function;
}
@@ -147,7 +175,7 @@ StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
- function->InitParameter(0, UntaggedSigned32(zone));
+ function->InitParameter(0, UntaggedIntegral32(zone));
function->InitParameter(1, AnyTagged(zone));
return function;
}
@@ -174,6 +202,13 @@ void StringCompareDescriptor::InitializePlatformSpecific(
}
+void ToLengthDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister()};
@@ -228,14 +263,19 @@ Type::FunctionType*
VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 6, zone);
- function->InitParameter(0, AnyTagged(zone)); // receiver
- function->InitParameter(1, AnyTagged(zone)); // name
- function->InitParameter(2, AnyTagged(zone)); // value
- function->InitParameter(3, SmiType(zone)); // slot
- function->InitParameter(4, AnyTagged(zone)); // vector
- function->InitParameter(5, AnyTagged(zone)); // map
+ bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
+ int arg_count = has_slot ? 6 : 5;
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(zone), Type::Undefined(), arg_count, zone);
+ int index = 0;
+ function->InitParameter(index++, AnyTagged(zone)); // receiver
+ function->InitParameter(index++, AnyTagged(zone)); // name
+ function->InitParameter(index++, AnyTagged(zone)); // value
+ function->InitParameter(index++, AnyTagged(zone)); // map
+ if (has_slot) {
+ function->InitParameter(index++, SmiType(zone)); // slot
+ }
+ function->InitParameter(index++, AnyTagged(zone)); // vector
return function;
}
@@ -330,6 +370,27 @@ void ArgumentsAccessNewDescriptor::InitializePlatformSpecific(
}
+Type::FunctionType*
+RestParamAccessDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, SmiType(zone));
+ function->InitParameter(1, ExternalPointer(zone));
+ function->InitParameter(2, SmiType(zone));
+ return function;
+}
+
+
+void RestParamAccessDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {parameter_count(), parameter_pointer(),
+ rest_parameter_index()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ContextOnlyDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);
@@ -344,6 +405,20 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
Type::FunctionType*
+FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone)); // closure
+ function->InitParameter(1, SmiType(zone)); // literal_index
+ function->InitParameter(2, AnyTagged(zone)); // pattern
+ function->InitParameter(3, AnyTagged(zone)); // flags
+ return function;
+}
+
+
+Type::FunctionType*
FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
@@ -387,9 +462,35 @@ CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
- function->InitParameter(0, AnyTagged(zone)); // target
- function->InitParameter(
- 1, UntaggedSigned32(zone)); // actual number of arguments
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(1, UntaggedIntegral32(zone)); // actual #arguments
+ return function;
+}
+
+
+Type::FunctionType*
+ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(1, AnyTagged(zone)); // new.target
+ function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
+ function->InitParameter(3, AnyTagged(zone)); // opt. allocation site
+ return function;
+}
+
+
+Type::FunctionType*
+ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(1, AnyTagged(zone)); // new.target
+ function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
return function;
}
@@ -427,7 +528,7 @@ ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, UntaggedSigned32(zone));
+ function->InitParameter(2, UntaggedIntegral32(zone));
return function;
}
@@ -439,7 +540,7 @@ InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, UntaggedSigned32(zone));
+ function->InitParameter(1, UntaggedIntegral32(zone));
return function;
}
@@ -449,13 +550,11 @@ ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(
- 1, UntaggedSigned32(zone)); // actual number of arguments
- function->InitParameter(
- 2,
- UntaggedSigned32(zone)); // expected number of arguments
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, AnyTagged(zone)); // the new target
+ function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
+ function->InitParameter(3, UntaggedIntegral32(zone)); // expected #arguments
return function;
}
@@ -466,12 +565,11 @@ ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
- function->InitParameter(0, AnyTagged(zone)); // callee
- function->InitParameter(1, AnyTagged(zone)); // call_data
- function->InitParameter(2, AnyTagged(zone)); // holder
- function->InitParameter(3, ExternalPointer(zone)); // api_function_address
- function->InitParameter(
- 4, UntaggedSigned32(zone)); // actual number of arguments
+ function->InitParameter(0, AnyTagged(zone)); // callee
+ function->InitParameter(1, AnyTagged(zone)); // call_data
+ function->InitParameter(2, AnyTagged(zone)); // holder
+ function->InitParameter(3, ExternalPointer(zone)); // api_function_address
+ function->InitParameter(4, UntaggedIntegral32(zone)); // actual #arguments
return function;
}
@@ -490,32 +588,5 @@ ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
}
-Type::FunctionType* MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
- function->InitParameter(0, Type::Receiver());
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
- function->InitParameter(3, AnyTagged(zone));
- return function;
-}
-
-
-Type::FunctionType* MathRoundVariantCallFromOptimizedCodeDescriptor::
- BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
- function->InitParameter(0, Type::Receiver());
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
- function->InitParameter(3, AnyTagged(zone));
- function->InitParameter(4, AnyTagged(zone));
- return function;
-}
} // namespace internal
} // namespace v8
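
VectorStoreTransitionDescriptor above now builds a 5- or 6-parameter signature depending on whether the platform has a slot register (ia32 does not and spills to a virtual slot instead). A simplified model of that conditional layout, with string-typed stand-ins for V8's Type system:

#include <cstdio>
#include <string>
#include <vector>

// Simplified model: the parameter list depends on whether a slot
// register is available, as in the descriptor code above.
struct FunctionType {
  std::vector<std::string> params;
  void InitParameter(int index, const std::string& type) {
    if (static_cast<int>(params.size()) <= index) params.resize(index + 1);
    params[index] = type;
  }
};

FunctionType BuildVectorStoreTransitionType(bool has_slot) {
  FunctionType function;
  int index = 0;
  function.InitParameter(index++, "AnyTagged");           // receiver
  function.InitParameter(index++, "AnyTagged");           // name
  function.InitParameter(index++, "AnyTagged");           // value
  function.InitParameter(index++, "AnyTagged");           // map
  if (has_slot) function.InitParameter(index++, "Smi");   // slot
  function.InitParameter(index++, "AnyTagged");           // vector
  return function;
}

int main() {
  for (bool has_slot : {true, false}) {
    FunctionType t = BuildVectorStoreTransitionType(has_slot);
    std::printf("%zu params:", t.params.size());
    for (auto& p : t.params) std::printf(" %s", p.c_str());
    std::printf("\n");
  }
}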
diff --git a/chromium/v8/src/interface-descriptors.h b/chromium/v8/src/interface-descriptors.h
index 534313f7d30..2814daeded3 100644
--- a/chromium/v8/src/interface-descriptors.h
+++ b/chromium/v8/src/interface-descriptors.h
@@ -14,6 +14,7 @@ namespace internal {
class PlatformInterfaceDescriptor;
#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Void) \
V(Load) \
V(Store) \
V(StoreTransition) \
@@ -25,10 +26,12 @@ class PlatformInterfaceDescriptor;
V(FastNewClosure) \
V(FastNewContext) \
V(ToNumber) \
+ V(ToLength) \
V(ToString) \
V(ToObject) \
V(NumberToString) \
V(Typeof) \
+ V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
@@ -38,10 +41,13 @@ class PlatformInterfaceDescriptor;
V(CallFunctionWithFeedbackAndVector) \
V(CallConstruct) \
V(CallTrampoline) \
- V(PushArgsAndCall) \
+ V(ConstructStub) \
+ V(ConstructTrampoline) \
V(RegExpConstructResult) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
+ V(AllocateMutableHeapNumber) \
+ V(AllocateInNewSpace) \
V(ArrayConstructorConstantArgCount) \
V(ArrayConstructor) \
V(InternalArrayConstructorConstantArgCount) \
@@ -62,6 +68,7 @@ class PlatformInterfaceDescriptor;
V(ApiGetter) \
V(ArgumentsAccessRead) \
V(ArgumentsAccessNew) \
+ V(RestParamAccess) \
V(StoreArrayLiteralElement) \
V(LoadGlobalViaContext) \
V(StoreGlobalViaContext) \
@@ -69,8 +76,9 @@ class PlatformInterfaceDescriptor;
V(MathPowInteger) \
V(ContextOnly) \
V(GrowArrayElements) \
- V(MathRoundVariantCallFromUnoptimizedCode) \
- V(MathRoundVariantCallFromOptimizedCode)
+ V(InterpreterPushArgsAndCall) \
+ V(InterpreterPushArgsAndConstruct) \
+ V(InterpreterCEntry)
class CallInterfaceDescriptorData {
@@ -219,13 +227,21 @@ class CallInterfaceDescriptor {
static inline CallDescriptors::Key key();
-#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
- DECLARE_DESCRIPTOR(name, base) \
- protected: \
- virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType( \
- Isolate* isolate, int register_param_count) override; \
- \
+#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
+ DECLARE_DESCRIPTOR(name, base) \
+ protected: \
+ Type::FunctionType* BuildCallInterfaceDescriptorFunctionType( \
+ Isolate* isolate, int register_param_count) override; \
+ \
public:
+
+
+class VoidDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
+};
+
+
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
public:
@@ -280,19 +296,21 @@ class VectorStoreTransitionDescriptor : public StoreDescriptor {
// Extends StoreDescriptor with Map parameter.
enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kSlotIndex,
- kVectorIndex,
- kMapIndex,
- kParameterCount
+ kReceiverIndex = 0,
+ kNameIndex = 1,
+ kValueIndex = 2,
+
+ kMapIndex = 3,
+
+ kSlotIndex = 4, // not present on ia32.
+ kVirtualSlotVectorIndex = 4,
+
+ kVectorIndex = 5
};
- // These registers are no_reg for ia32, using the stack instead.
+ static const Register MapRegister();
static const Register SlotRegister();
static const Register VectorRegister();
- static const Register MapRegister();
};
@@ -368,6 +386,16 @@ class ToNumberDescriptor : public CallInterfaceDescriptor {
};
+class ToLengthDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices { kReceiverIndex };
+
+ DECLARE_DESCRIPTOR(ToLengthDescriptor, CallInterfaceDescriptor)
+
+ static const Register ReceiverRegister();
+};
+
+
class ToStringDescriptor : public CallInterfaceDescriptor {
public:
enum ParameterIndices { kReceiverIndex };
@@ -400,6 +428,13 @@ class TypeofDescriptor : public CallInterfaceDescriptor {
};
+class FastCloneRegExpDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneRegExpDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneShallowArrayDescriptor,
@@ -441,6 +476,20 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
};
+class ConstructStubDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructStubDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class ConstructTrampolineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructTrampolineDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class CallFunctionDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
@@ -505,6 +554,19 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
};
+class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(AllocateMutableHeapNumberDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class AllocateInNewSpaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(AllocateInNewSpaceDescriptor, CallInterfaceDescriptor)
+};
+
+
class ArrayConstructorConstantArgCountDescriptor
: public CallInterfaceDescriptor {
public:
@@ -650,6 +712,16 @@ class ArgumentsAccessNewDescriptor : public CallInterfaceDescriptor {
};
+class RestParamAccessDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RestParamAccessDescriptor,
+ CallInterfaceDescriptor)
+ static const Register parameter_count();
+ static const Register parameter_pointer();
+ static const Register rest_parameter_index();
+};
+
+
class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
@@ -673,23 +745,6 @@ class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
};
-class MathRoundVariantCallFromOptimizedCodeDescriptor
- : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- MathRoundVariantCallFromOptimizedCodeDescriptor, CallInterfaceDescriptor)
-};
-
-
-class MathRoundVariantCallFromUnoptimizedCodeDescriptor
- : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- MathRoundVariantCallFromUnoptimizedCodeDescriptor,
- CallInterfaceDescriptor)
-};
-
-
class ContextOnlyDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
@@ -706,11 +761,27 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
};
-class PushArgsAndCallDescriptor : public CallInterfaceDescriptor {
+class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InterpreterPushArgsAndCallDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class InterpreterPushArgsAndConstructDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InterpreterPushArgsAndConstructDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(PushArgsAndCallDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
};
+
#undef DECLARE_DESCRIPTOR
@@ -720,8 +791,8 @@ class PushArgsAndCallDescriptor : public CallInterfaceDescriptor {
CallDescriptors::Key name##Descriptor::key() { return CallDescriptors::name; }
INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
#undef DEF_KEY
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#if V8_TARGET_ARCH_ARM64
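
INTERFACE_DESCRIPTOR_LIST above is an X-macro: one list of names expanded several times, here to declare the descriptor classes and again (via DEF_KEY at the bottom of the header) to define a key function per descriptor. The technique in miniature:

#include <cstdio>

// One list, several expansions.
#define DESCRIPTOR_LIST(V) \
  V(Void)                  \
  V(Load)                  \
  V(Store)

// Expansion 1: an enum of keys, one per descriptor.
#define DEF_ENUM(name) k##name,
enum Key { DESCRIPTOR_LIST(DEF_ENUM) kCount };
#undef DEF_ENUM

// Expansion 2: a per-descriptor key accessor.
#define DEF_KEY(name) \
  Key name##Key() { return k##name; }
DESCRIPTOR_LIST(DEF_KEY)
#undef DEF_KEY

int main() {
  std::printf("%d %d %d (total %d)\n", VoidKey(), LoadKey(), StoreKey(),
              static_cast<int>(kCount));
}

Adding a descriptor (as this patch does for ConstructStub, ToLength, etc.) then means touching the list once rather than every expansion site.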
diff --git a/chromium/v8/src/interpreter/OWNERS b/chromium/v8/src/interpreter/OWNERS
index 906a5ce6418..5ad730c8a43 100644
--- a/chromium/v8/src/interpreter/OWNERS
+++ b/chromium/v8/src/interpreter/OWNERS
@@ -1 +1,6 @@
+set noparent
+
+bmeurer@chromium.org
+mstarzinger@chromium.org
+oth@chromium.org
rmcilroy@chromium.org
diff --git a/chromium/v8/src/interpreter/bytecode-array-builder.cc b/chromium/v8/src/interpreter/bytecode-array-builder.cc
index 9c6b5905ccf..1b15fc66686 100644
--- a/chromium/v8/src/interpreter/bytecode-array-builder.cc
+++ b/chromium/v8/src/interpreter/bytecode-array-builder.cc
@@ -8,120 +8,262 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class BytecodeArrayBuilder::PreviousBytecodeHelper {
+ public:
+ explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
+ : array_builder_(array_builder),
+ previous_bytecode_start_(array_builder_.last_bytecode_start_) {
+ // This helper is expected to be instantiated only when the last bytecode is
+ // in the same basic block.
+ DCHECK(array_builder_.LastBytecodeInSameBlock());
+ }
+
+ // Returns the previous bytecode in the same basic block.
+ MUST_USE_RESULT Bytecode GetBytecode() const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ return Bytecodes::FromByte(
+ array_builder_.bytecodes()->at(previous_bytecode_start_));
+ }
+
+ // Returns the operand at operand_index for the previous bytecode in the
+ // same basic block.
+ MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ Bytecode bytecode = GetBytecode();
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
+ size_t operand_offset =
+ previous_bytecode_start_ +
+ Bytecodes::GetOperandOffset(bytecode, operand_index);
+ OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
+ switch (size) {
+ default:
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ return static_cast<uint32_t>(
+ array_builder_.bytecodes()->at(operand_offset));
+ case OperandSize::kShort: {
+ uint16_t operand =
+ (array_builder_.bytecodes()->at(operand_offset) << 8) +
+ array_builder_.bytecodes()->at(operand_offset + 1);
+ return static_cast<uint32_t>(operand);
+ }
+ }
+ }
+
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const {
+ return array_builder_.constant_array_builder()->At(
+ GetOperand(operand_index));
+ }
+
+ private:
+ const BytecodeArrayBuilder& array_builder_;
+ size_t previous_bytecode_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
+};
+
+
BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
+ zone_(zone),
bytecodes_(zone),
bytecode_generated_(false),
+ constant_array_builder_(isolate, zone),
last_block_end_(0),
last_bytecode_start_(~0),
- return_seen_in_block_(false),
- constants_map_(isolate->heap(), zone),
- constants_(zone),
+ exit_seen_in_block_(false),
+ unbound_jumps_(0),
parameter_count_(-1),
local_register_count_(-1),
+ context_register_count_(-1),
temporary_register_count_(0),
- temporary_register_next_(0) {}
+ free_temporaries_(zone) {}
+
+
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
local_register_count_ = number_of_locals;
- temporary_register_next_ = local_register_count_;
+ DCHECK_LE(context_register_count_, 0);
}
-int BytecodeArrayBuilder::locals_count() const { return local_register_count_; }
-
-
void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
parameter_count_ = number_of_parameters;
}
-int BytecodeArrayBuilder::parameter_count() const { return parameter_count_; }
+void BytecodeArrayBuilder::set_context_count(int number_of_contexts) {
+ context_register_count_ = number_of_contexts;
+ DCHECK_GE(local_register_count_, 0);
+}
+
+
+Register BytecodeArrayBuilder::first_context_register() const {
+ DCHECK_GT(context_register_count_, 0);
+ return Register(local_register_count_);
+}
+
+
+Register BytecodeArrayBuilder::last_context_register() const {
+ DCHECK_GT(context_register_count_, 0);
+ return Register(local_register_count_ + context_register_count_ - 1);
+}
+
+
+Register BytecodeArrayBuilder::first_temporary_register() const {
+ DCHECK_GT(temporary_register_count_, 0);
+ return Register(fixed_register_count());
+}
+
+
+Register BytecodeArrayBuilder::last_temporary_register() const {
+ DCHECK_GT(temporary_register_count_, 0);
+ return Register(fixed_register_count() + temporary_register_count_ - 1);
+}
-Register BytecodeArrayBuilder::Parameter(int parameter_index) {
+Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
- DCHECK_LT(parameter_index, parameter_count_);
- return Register::FromParameterIndex(parameter_index, parameter_count_);
+ return Register::FromParameterIndex(parameter_index, parameter_count());
+}
+
+
+bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
+ return reg.is_parameter() || reg.index() < locals_count();
+}
+
+
+bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
+ return temporary_register_count_ > 0 && first_temporary_register() <= reg &&
+ reg <= last_temporary_register();
}
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK_EQ(bytecode_generated_, false);
- DCHECK_GE(parameter_count_, 0);
- DCHECK_GE(local_register_count_, 0);
-
EnsureReturn();
int bytecode_size = static_cast<int>(bytecodes_.size());
- int register_count = local_register_count_ + temporary_register_count_;
+ int register_count = fixed_register_count() + temporary_register_count_;
int frame_size = register_count * kPointerSize;
-
Factory* factory = isolate_->factory();
- int constants_count = static_cast<int>(constants_.size());
Handle<FixedArray> constant_pool =
- factory->NewFixedArray(constants_count, TENURED);
- for (int i = 0; i < constants_count; i++) {
- constant_pool->set(i, *constants_[i]);
- }
-
+ constant_array_builder()->ToFixedArray(factory);
Handle<BytecodeArray> output =
factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
- parameter_count_, constant_pool);
+ parameter_count(), constant_pool);
bytecode_generated_ = true;
return output;
}
template <size_t N>
-void BytecodeArrayBuilder::Output(uint8_t(&bytes)[N]) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(Bytecodes::FromByte(bytes[0])),
- static_cast<int>(N) - 1);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), static_cast<int>(N));
last_bytecode_start_ = bytecodes()->size();
- for (int i = 1; i < static_cast<int>(N); i++) {
- DCHECK(OperandIsValid(Bytecodes::FromByte(bytes[0]), i - 1, bytes[i]));
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+ for (int i = 0; i < static_cast<int>(N); i++) {
+ DCHECK(OperandIsValid(bytecode, i, operands[i]));
+ switch (Bytecodes::GetOperandSize(bytecode, i)) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
+ break;
+ case OperandSize::kShort: {
+ uint8_t operand_bytes[2];
+ WriteUnalignedUInt16(operand_bytes, operands[i]);
+ bytecodes()->insert(bytecodes()->end(), operand_bytes,
+ operand_bytes + 2);
+ break;
+ }
+ }
}
- bytecodes()->insert(bytecodes()->end(), bytes, bytes + N);
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
- uint8_t operand1, uint8_t operand2) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0, operand1, operand2};
- Output(bytes);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3) {
+ uint32_t operands[] = {operand0, operand1, operand2, operand3};
+ Output(bytecode, operands);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2) {
+ uint32_t operands[] = {operand0, operand1, operand2};
+ Output(bytecode, operands);
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
- uint8_t operand1) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0, operand1};
- Output(bytes);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1) {
+ uint32_t operands[] = {operand0, operand1};
+ Output(bytecode, operands);
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0};
- Output(bytes);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+ uint32_t operands[] = {operand0};
+ Output(bytecode, operands);
}
void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- uint8_t bytes[] = {Bytecodes::ToByte(bytecode)};
- Output(bytes);
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ last_bytecode_start_ = bytecodes()->size();
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
- Register reg) {
+ Register reg,
+ Strength strength) {
+ if (is_strong(strength)) {
+ UNIMPLEMENTED();
+ }
+
Output(BytecodeForBinaryOperation(op), reg.ToOperand());
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
+ Strength strength) {
+ if (is_strong(strength)) {
+ UNIMPLEMENTED();
+ }
+
+ Output(BytecodeForCountOperation(op));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
+ Output(Bytecode::kLogicalNot);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
+ Output(Bytecode::kTypeOf);
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
- Token::Value op, Register reg, LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+ Token::Value op, Register reg, Strength strength) {
+ if (is_strong(strength)) {
UNIMPLEMENTED();
}
@@ -146,8 +288,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
- if (FitsInIdxOperand(entry)) {
+ if (FitsInIdx8Operand(entry)) {
Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(Bytecode::kLdaConstantWide, static_cast<uint16_t>(entry));
} else {
UNIMPLEMENTED();
}
@@ -185,39 +329,145 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
+ if (value) {
+ LoadTrue();
+ } else {
+ LoadFalse();
+ }
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- Output(Bytecode::kLdar, reg.ToOperand());
+ if (!IsRegisterInAccumulator(reg)) {
+ Output(Bytecode::kLdar, reg.ToOperand());
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
+ // TODO(oth): Avoid storing the accumulator in the register if the
+ // previous bytecode loaded the accumulator with the same register.
+ //
+ // TODO(oth): If the previous bytecode is a MOV into this register,
+ // the previous instruction can be removed. The logic for determining
+ // these redundant MOVs appears complex.
- Output(Bytecode::kStar, reg.ToOperand());
+ if (!IsRegisterInAccumulator(reg)) {
+ Output(Bytecode::kStar, reg.ToOperand());
+ }
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int slot_index) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
+ Register to) {
+ DCHECK(from != to);
+ Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
+ Register reg1) {
+ DCHECK(reg0 != reg1);
+ if (FitsInReg8Operand(reg0)) {
+ Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
+ } else if (FitsInReg8Operand(reg1)) {
+ Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+ } else {
+ Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
+ const Handle<String> name, int feedback_slot, LanguageMode language_mode,
+ TypeofMode typeof_mode) {
+ // TODO(rmcilroy): Potentially store language and typeof information in an
+ // operand rather than having extra bytecodes.
+ Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, static_cast<uint8_t>(name_index),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
+ const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, static_cast<uint8_t>(name_index),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
+ int slot_index) {
DCHECK(slot_index >= 0);
- if (FitsInIdxOperand(slot_index)) {
- Output(Bytecode::kLdaGlobal, static_cast<uint8_t>(slot_index));
+ if (FitsInIdx8Operand(slot_index)) {
+ Output(Bytecode::kLdaContextSlot, context.ToOperand(),
+ static_cast<uint8_t>(slot_index));
+ } else if (FitsInIdx16Operand(slot_index)) {
+ Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+ static_cast<uint16_t>(slot_index));
} else {
UNIMPLEMENTED();
}
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
- Register object, int feedback_slot, LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
+ int slot_index) {
+ DCHECK(slot_index >= 0);
+ if (FitsInIdx8Operand(slot_index)) {
+ Output(Bytecode::kStaContextSlot, context.ToOperand(),
+ static_cast<uint8_t>(slot_index));
+ } else if (FitsInIdx16Operand(slot_index)) {
+ Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+ static_cast<uint16_t>(slot_index));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kLoadIC, object.ToOperand(),
- static_cast<uint8_t>(feedback_slot));
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
+ const Handle<String> name, TypeofMode typeof_mode) {
+ Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+ ? Bytecode::kLdaLookupSlotInsideTypeof
+ : Bytecode::kLdaLookupSlot;
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index)) {
+ Output(bytecode, static_cast<uint8_t>(name_index));
+ } else if (FitsInIdx16Operand(name_index)) {
+ Output(BytecodeForWideOperands(bytecode),
+ static_cast<uint16_t>(name_index));
} else {
UNIMPLEMENTED();
}
@@ -225,15 +475,35 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
- Register object, int feedback_slot, LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
+ const Handle<String> name, LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index)) {
+ Output(bytecode, static_cast<uint8_t>(name_index));
+ } else if (FitsInIdx16Operand(name_index)) {
+ Output(BytecodeForWideOperands(bytecode),
+ static_cast<uint16_t>(name_index));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
+
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kKeyedLoadIC, object.ToOperand(),
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
+ Register object, const Handle<String> name, int feedback_slot,
+ LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForLoadIC(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -241,16 +511,34 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
- Register object, Register name, int feedback_slot,
- LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
+ Register object, int feedback_slot, LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForKeyedLoadIC(language_mode);
+ if (FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ static_cast<uint16_t>(feedback_slot));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kStoreIC, object.ToOperand(), name.ToOperand(),
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+ Register object, const Handle<String> name, int feedback_slot,
+ LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreIC(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(name_index) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ static_cast<uint16_t>(name_index),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -261,13 +549,30 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
- if (!is_sloppy(language_mode)) {
+ Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
+ if (FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, object.ToOperand(), key.ToOperand(),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(feedback_slot)) {
+ Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ key.ToOperand(), static_cast<uint16_t>(feedback_slot));
+ } else {
UNIMPLEMENTED();
}
+ return *this;
+}
- if (FitsInIdxOperand(feedback_slot)) {
- Output(Bytecode::kKeyedStoreIC, object.ToOperand(), key.ToOperand(),
- static_cast<uint8_t>(feedback_slot));
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
+ Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
+ size_t entry = GetConstantPoolEntry(shared_info);
+ DCHECK(FitsInImm8Operand(tenured));
+ if (FitsInIdx8Operand(entry)) {
+ Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
+ static_cast<uint8_t>(tenured));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
+ static_cast<uint8_t>(tenured));
} else {
UNIMPLEMENTED();
}
@@ -275,30 +580,150 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToBoolean() {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
+ CreateArgumentsType type) {
+ // TODO(rmcilroy): Consider passing the type as a bytecode operand rather
+ // than having two different bytecodes once we have better support for
+ // branches in the InterpreterAssembler.
+ Bytecode bytecode = BytecodeForCreateArguments(type);
+ Output(bytecode);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
+ Handle<String> pattern, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t pattern_entry = GetConstantPoolEntry(pattern);
+ if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
+ Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(pattern_entry)) {
+ Output(Bytecode::kCreateRegExpLiteralWide,
+ static_cast<uint16_t>(pattern_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
+ Handle<FixedArray> constant_elements, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+ if (FitsInIdx8Operand(literal_index) &&
+ FitsInIdx8Operand(constant_elements_entry)) {
+ Output(Bytecode::kCreateArrayLiteral,
+ static_cast<uint8_t>(constant_elements_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(constant_elements_entry)) {
+ Output(Bytecode::kCreateArrayLiteralWide,
+ static_cast<uint16_t>(constant_elements_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
+ Handle<FixedArray> constant_properties, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
+ if (FitsInIdx8Operand(literal_index) &&
+ FitsInIdx8Operand(constant_properties_entry)) {
+ Output(Bytecode::kCreateObjectLiteral,
+ static_cast<uint8_t>(constant_properties_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(constant_properties_entry)) {
+ Output(Bytecode::kCreateObjectLiteralWide,
+ static_cast<uint16_t>(constant_properties_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
+ Output(Bytecode::kPushContext, context.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
+ Output(Bytecode::kPopContext, context.ToOperand());
+ return *this;
+}
+
+
+bool BytecodeArrayBuilder::NeedToBooleanCast() {
+ if (!LastBytecodeInSameBlock()) {
+ return true;
+ }
+ PreviousBytecodeHelper previous_bytecode(*this);
+ switch (previous_bytecode.GetBytecode()) {
+ // If the previous bytecode leaves a boolean in the accumulator, no
+ // cast is needed, so return false.
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kLogicalNot:
+ case Bytecode::kTestEqual:
+ case Bytecode::kTestNotEqual:
+ case Bytecode::kTestEqualStrict:
+ case Bytecode::kTestNotEqualStrict:
+ case Bytecode::kTestLessThan:
+ case Bytecode::kTestLessThanOrEqual:
+ case Bytecode::kTestGreaterThan:
+ case Bytecode::kTestGreaterThanOrEqual:
+ case Bytecode::kTestInstanceOf:
+ case Bytecode::kTestIn:
+ case Bytecode::kForInDone:
+ return false;
+ default:
+ return true;
+ }
+}
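+
+// (NeedToBooleanCast drives a small peephole in OutputJump: when a Test*
+// bytecode or LdaTrue/LdaFalse has just left a boolean in the accumulator,
+// the plain JumpIfTrue/JumpIfFalse is kept instead of being rewritten to the
+// JumpIfToBoolean variant.)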
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
+ Output(Bytecode::kToObject);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
if (LastBytecodeInSameBlock()) {
- // If the previous bytecode puts a boolean in the accumulator
- // there is no need to emit an instruction.
- switch (Bytecodes::FromByte(bytecodes()->at(last_bytecode_start_))) {
- case Bytecode::kToBoolean:
- UNREACHABLE();
- case Bytecode::kLdaTrue:
- case Bytecode::kLdaFalse:
- case Bytecode::kTestEqual:
- case Bytecode::kTestNotEqual:
- case Bytecode::kTestEqualStrict:
- case Bytecode::kTestNotEqualStrict:
- case Bytecode::kTestLessThan:
- case Bytecode::kTestLessThanOrEqual:
- case Bytecode::kTestGreaterThan:
- case Bytecode::kTestGreaterThanOrEqual:
- case Bytecode::kTestInstanceOf:
- case Bytecode::kTestIn:
+ PreviousBytecodeHelper previous_bytecode(*this);
+ switch (previous_bytecode.GetBytecode()) {
+ case Bytecode::kToName:
+ case Bytecode::kTypeOf:
+ return *this;
+ case Bytecode::kLdaConstantWide:
+ case Bytecode::kLdaConstant: {
+ Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
+ if (object->IsName()) return *this;
break;
+ }
default:
- Output(Bytecode::kToBoolean);
+ break;
}
}
+ Output(Bytecode::kToName);
+ return *this;
+}
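+
+// (Example of the peephole above: a preceding LdaConstant of a string or
+// symbol already satisfies ToName, so no kToName bytecode is emitted.)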
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
+ // TODO(rmcilroy): consider omitting if the preceding bytecode always
+ // returns a number.
+ Output(Bytecode::kToNumber);
return *this;
}
@@ -310,15 +735,20 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
// Now treat as if the label will only be back referred to.
}
label->bind_to(bytecodes()->size());
+ LeaveBasicBlock();
return *this;
}
-// static
-bool BytecodeArrayBuilder::IsJumpWithImm8Operand(Bytecode jump_bytecode) {
- return jump_bytecode == Bytecode::kJump ||
- jump_bytecode == Bytecode::kJumpIfTrue ||
- jump_bytecode == Bytecode::kJumpIfFalse;
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
+ BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
+ DCHECK(target.is_bound());
+ PatchJump(bytecodes()->begin() + target.offset(),
+ bytecodes()->begin() + label->offset());
+ label->bind_to(target.offset());
+ LeaveBasicBlock();
+ return *this;
}
@@ -332,75 +762,180 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
return Bytecode::kJumpIfTrueConstant;
case Bytecode::kJumpIfFalse:
return Bytecode::kJumpIfFalseConstant;
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfToBooleanTrueConstant;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfToBooleanFalseConstant;
+ case Bytecode::kJumpIfNull:
+ return Bytecode::kJumpIfNullConstant;
+ case Bytecode::kJumpIfUndefined:
+ return Bytecode::kJumpIfUndefinedConstant;
default:
UNREACHABLE();
- return Bytecode::kJumpConstant;
+ return static_cast<Bytecode>(-1);
}
}
-void BytecodeArrayBuilder::PatchJump(
- const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
- int delta = static_cast<int>(jump_target - jump_location);
+// static
+Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
+ Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ return Bytecode::kJumpConstantWide;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfTrueConstantWide;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfFalseConstantWide;
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfToBooleanTrueConstantWide;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfToBooleanFalseConstantWide;
+ case Bytecode::kJumpIfNull:
+ return Bytecode::kJumpIfNullConstantWide;
+ case Bytecode::kJumpIfUndefined:
+ return Bytecode::kJumpIfUndefinedConstantWide;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ case Bytecode::kJumpIfNull:
+ case Bytecode::kJumpIfUndefined:
+ return jump_bytecode;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfToBooleanTrue;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfToBooleanFalse;
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
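+
+// (kJump, kJumpIfNull and kJumpIfUndefined pass through GetJumpWithToBoolean
+// unchanged because they do not consume a boolean from the accumulator.)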
- DCHECK(IsJumpWithImm8Operand(jump_bytecode));
- DCHECK_EQ(Bytecodes::Size(jump_bytecode), 2);
- DCHECK_GE(delta, 0);
+void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ DCHECK_EQ(*operand_location, 0);
if (FitsInImm8Operand(delta)) {
- // Just update the operand
- jump_location++;
- *jump_location = static_cast<uint8_t>(delta);
+ // The jump fits within the range of an Imm8 operand, so cancel
+ // the reservation and jump directly.
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+ *operand_location = static_cast<uint8_t>(delta);
} else {
- // Update the jump type and operand
- size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdxOperand(entry)) {
- *jump_location++ =
- Bytecodes::ToByte(GetJumpWithConstantOperand(jump_bytecode));
- *jump_location = static_cast<uint8_t>(entry);
- } else {
- // TODO(oth): OutputJump should reserve a constant pool entry
- // when jump is written. The reservation should be used here if
- // needed, or cancelled if not. This is due to the patch needing
- // to match the size of the code it's replacing. In future,
- // there will probably be a jump with 32-bit operand for cases
- // when constant pool is full, but that needs to be emitted in
- // OutputJump too.
- UNIMPLEMENTED();
- }
+ // The jump does not fit within the range of an Imm8 operand, so
+ // commit the reservation by putting the offset into the constant
+ // pool, then update the jump instruction and operand.
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+ DCHECK(FitsInIdx8Operand(entry));
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ *jump_location = Bytecodes::ToByte(jump_bytecode);
+ *operand_location = static_cast<uint8_t>(entry);
+ }
+}
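+
+// Worked example (offsets assumed): a forward jump is emitted as
+// [JumpIfTrue, 0x00] with a byte-sized constant pool reservation. If the
+// eventual delta fits in Imm8 the reservation is discarded and the operand
+// byte becomes the delta; otherwise Smi(delta) is committed to the reserved
+// entry and the bytes become [JumpIfTrueConstant, entry].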
+
+
+void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+ DCHECK(FitsInIdx16Operand(entry));
+ uint8_t operand_bytes[2];
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+ DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
+ *operand_location++ = operand_bytes[0];
+ *operand_location = operand_bytes[1];
+}
+
+
+void BytecodeArrayBuilder::PatchJump(
+ const ZoneVector<uint8_t>::iterator& jump_target,
+ const ZoneVector<uint8_t>::iterator& jump_location) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ int delta = static_cast<int>(jump_target - jump_location);
+ DCHECK(Bytecodes::IsJump(jump_bytecode));
+ switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
+ case OperandSize::kByte:
+ PatchIndirectJumpWith8BitOperand(jump_location, delta);
+ break;
+ case OperandSize::kShort:
+ PatchIndirectJumpWith16BitOperand(jump_location, delta);
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
}
+ unbound_jumps_--;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
- int delta;
+ // Don't emit dead code.
+ if (exit_seen_in_block_) return *this;
+
+ // Check if the value in the accumulator is boolean; if not, choose an
+ // appropriate JumpIfToBoolean bytecode.
+ if (NeedToBooleanCast()) {
+ jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
+ }
+
if (label->is_bound()) {
// Label has been bound already so this is a backwards jump.
CHECK_GE(bytecodes()->size(), label->offset());
CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
size_t abs_delta = bytecodes()->size() - label->offset();
- delta = -static_cast<int>(abs_delta);
- } else {
- // Label has not yet been bound so this is a forward reference
- // that will be patched when the label is bound.
- label->set_referrer(bytecodes()->size());
- delta = 0;
- }
+ int delta = -static_cast<int>(abs_delta);
- if (FitsInImm8Operand(delta)) {
- Output(jump_bytecode, static_cast<uint8_t>(delta));
- } else {
- size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdxOperand(entry)) {
- Output(GetJumpWithConstantOperand(jump_bytecode),
- static_cast<uint8_t>(entry));
+ if (FitsInImm8Operand(delta)) {
+ Output(jump_bytecode, static_cast<uint8_t>(delta));
} else {
- UNIMPLEMENTED();
+ size_t entry =
+ GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
+ if (FitsInIdx8Operand(entry)) {
+ Output(GetJumpWithConstantOperand(jump_bytecode),
+ static_cast<uint8_t>(entry));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(GetJumpWithConstantWideOperand(jump_bytecode),
+ static_cast<uint16_t>(entry));
+ } else {
+ UNREACHABLE();
+ }
+ }
+ } else {
+ // The label has not yet been bound, so this is a forward reference
+ // that will be patched when the label is bound. Reserving a constant
+ // pool entry now fixes the maximum size of the operand, so the jump
+ // can be emitted into the bytecode stream with space reserved for the
+ // operand.
+ label->set_referrer(bytecodes()->size());
+ unbound_jumps_++;
+ OperandSize reserved_operand_size =
+ constant_array_builder()->CreateReservedEntry();
+ switch (reserved_operand_size) {
+ case OperandSize::kByte:
+ Output(jump_bytecode, 0);
+ break;
+ case OperandSize::kShort:
+ Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
}
}
+ LeaveBasicBlock();
return *this;
}
@@ -420,36 +955,89 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfNull, label);
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
+ BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfUndefined, label);
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
+ Output(Bytecode::kThrow);
+ exit_seen_in_block_ = true;
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
Output(Bytecode::kReturn);
- return_seen_in_block_ = true;
+ exit_seen_in_block_ = true;
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::EnterBlock() { return *this; }
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
+ Register cache_type, Register cache_array, Register cache_length) {
+ Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
+ cache_array.ToOperand(), cache_length.ToOperand());
+ return *this;
+}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LeaveBlock() {
- last_block_end_ = bytecodes()->size();
- return_seen_in_block_ = false;
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
+ Register cache_length) {
+ Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
+ Register cache_type,
+ Register cache_array,
+ Register index) {
+ Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
+ cache_array.ToOperand(), index.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
+ Output(Bytecode::kForInStep, index.ToOperand());
+ return *this;
+}
+
+
+void BytecodeArrayBuilder::LeaveBasicBlock() {
+ last_block_end_ = bytecodes()->size();
+ exit_seen_in_block_ = false;
+}
+
+
void BytecodeArrayBuilder::EnsureReturn() {
- if (!return_seen_in_block_) {
+ if (!exit_seen_in_block_) {
LoadUndefined();
Return();
}
}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
Register receiver,
- size_t arg_count) {
- if (FitsInIdxOperand(arg_count)) {
+ size_t arg_count,
+ int feedback_slot) {
+ if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
- static_cast<uint8_t>(arg_count));
+ static_cast<uint8_t>(arg_count),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(arg_count) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
+ static_cast<uint16_t>(arg_count),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -457,73 +1045,249 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
}
-size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
- // These constants shouldn't be added to the constant pool; they should use
- // specialized bytecodes instead.
- DCHECK(!object.is_identical_to(isolate_->factory()->undefined_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->null_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->the_hole_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->true_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->false_value()));
+BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
+ Register first_arg,
+ size_t arg_count) {
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
+ }
+ DCHECK(FitsInIdx8Operand(arg_count));
+ Output(Bytecode::kNew, constructor.ToOperand(), first_arg.ToOperand(),
+ static_cast<uint8_t>(arg_count));
+ return *this;
+}
+
- size_t* entry = constants_map_.Find(object);
- if (!entry) {
- entry = constants_map_.Get(object);
- *entry = constants_.size();
- constants_.push_back(object);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
+ Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+ DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
+ DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
}
- DCHECK(constants_[*entry].is_identical_to(object));
- return *entry;
+ Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
+ first_arg.ToOperand(), static_cast<uint8_t>(arg_count));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
+ Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
+ Register first_return) {
+ DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
+ DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
+ }
+ Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+ first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
+ first_return.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
+ Register receiver,
+ size_t arg_count) {
+ DCHECK(FitsInIdx16Operand(context_index));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
+ receiver.ToOperand(), static_cast<uint8_t>(arg_count));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
+ LanguageMode language_mode) {
+ Output(BytecodeForDelete(language_mode), object.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
+ Output(Bytecode::kDeleteLookupSlot);
+ return *this;
+}
+
+
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
+ return constant_array_builder()->Insert(object);
}
int BytecodeArrayBuilder::BorrowTemporaryRegister() {
- DCHECK_GE(local_register_count_, 0);
- int temporary_reg_index = temporary_register_next_++;
- int count = temporary_register_next_ - local_register_count_;
- if (count > temporary_register_count_) {
- temporary_register_count_ = count;
+ if (free_temporaries_.empty()) {
+ temporary_register_count_ += 1;
+ return last_temporary_register().index();
+ } else {
+ auto pos = free_temporaries_.begin();
+ int retval = *pos;
+ free_temporaries_.erase(pos);
+ return retval;
}
- return temporary_reg_index;
+}
+
+
+int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
+ int end_index) {
+ auto index = free_temporaries_.lower_bound(start_index);
+ if (index == free_temporaries_.begin()) {
+ // No free register is available below start_index, so look for one
+ // above end_index.
+ index = free_temporaries_.upper_bound(end_index);
+ if (index == free_temporaries_.end()) {
+ temporary_register_count_ += 1;
+ return last_temporary_register().index();
+ }
+ } else {
+ // There is a free register below start_index; use the largest one.
+ index--;
+ }
+
+ int retval = *index;
+ free_temporaries_.erase(index);
+ return retval;
+}
+
+
+void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
+ DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
+ free_temporaries_.erase(reg_index);
}
void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
- DCHECK_EQ(reg_index, temporary_register_next_ - 1);
- temporary_register_next_ = reg_index;
+ DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
+ free_temporaries_.insert(reg_index);
+}
+
+
+int BytecodeArrayBuilder::PrepareForConsecutiveTemporaryRegisters(
+ size_t count) {
+ if (count == 0) {
+ return -1;
+ }
+
+ // Search within existing temporaries for a run.
+ auto start = free_temporaries_.begin();
+ size_t run_length = 0;
+ for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
+ if (*run_end != *start + static_cast<int>(run_length)) {
+ start = run_end;
+ run_length = 0;
+ }
+ if (++run_length == count) {
+ return *start;
+ }
+ }
+
+ // If possible, continue the run through the existing last temporary.
+ if (temporary_register_count_ > 0 &&
+ (start == free_temporaries_.end() ||
+ *start + static_cast<int>(run_length) !=
+ last_temporary_register().index() + 1)) {
+ run_length = 0;
+ }
+
+ // Ensure enough registers for run.
+ while (run_length++ < count) {
+ temporary_register_count_++;
+ free_temporaries_.insert(last_temporary_register().index());
+ }
+ return last_temporary_register().index() - static_cast<int>(count) + 1;
+}
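+
+// Example: with free temporaries {4, 5, 7}, a request for 2 consecutive
+// registers returns 4 (the run {4, 5}); a request for 3 grows
+// temporary_register_count_ so that the run ends at the new last temporary.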
+
+
+bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
+ if (temporary_register_count_ > 0) {
+ DCHECK(reg.index() >= first_temporary_register().index() &&
+ reg.index() <= last_temporary_register().index());
+ return free_temporaries_.find(reg.index()) == free_temporaries_.end();
+ } else {
+ return false;
+ }
+}
+
+
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
+ if (reg.is_function_context() || reg.is_function_closure() ||
+ reg.is_new_target()) {
+ return true;
+ } else if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count_);
+ return parameter_index >= 0 && parameter_index < parameter_count_;
+ } else if (reg.index() < fixed_register_count()) {
+ return true;
+ } else {
+ return TemporaryRegisterIsLive(reg);
+ }
}
bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
- uint8_t operand_value) const {
+ uint32_t operand_value) const {
OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
switch (operand_type) {
case OperandType::kNone:
return false;
- case OperandType::kCount:
+ case OperandType::kCount16:
+ case OperandType::kIdx16:
+ return static_cast<uint16_t>(operand_value) == operand_value;
+ case OperandType::kCount8:
case OperandType::kImm8:
- case OperandType::kIdx:
- return true;
- case OperandType::kReg: {
- Register reg = Register::FromOperand(operand_value);
- if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count_);
- return parameter_index >= 0 && parameter_index < parameter_count_;
- } else {
- return (reg.index() >= 0 && reg.index() < temporary_register_next_);
+ case OperandType::kIdx8:
+ return static_cast<uint8_t>(operand_value) == operand_value;
+ case OperandType::kMaybeReg8:
+ if (operand_value == 0) {
+ return true;
}
+ // Fall-through to kReg8 case.
+ case OperandType::kReg8:
+ return RegisterIsValid(
+ Register::FromOperand(static_cast<uint8_t>(operand_value)));
+ case OperandType::kRegPair8: {
+ Register reg0 =
+ Register::FromOperand(static_cast<uint8_t>(operand_value));
+ Register reg1 = Register(reg0.index() + 1);
+ return RegisterIsValid(reg0) && RegisterIsValid(reg1);
}
+ case OperandType::kReg16:
+ if (bytecode != Bytecode::kExchange &&
+ bytecode != Bytecode::kExchangeWide) {
+ return false;
+ }
+ return RegisterIsValid(
+ Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
}
UNREACHABLE();
return false;
}
+
bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
return last_bytecode_start_ < bytecodes()->size() &&
last_bytecode_start_ >= last_block_end_;
}
+bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
+ if (LastBytecodeInSameBlock()) {
+ PreviousBytecodeHelper previous_bytecode(*this);
+ Bytecode bytecode = previous_bytecode.GetBytecode();
+ if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
+ (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
+ return true;
+ }
+ }
+ return false;
+}
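+
+// (True when the previous Ldar/Star leaves |reg| and the accumulator holding
+// the same value, letting callers skip a redundant load or store.)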
+
+
// static
Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
switch (op) {
@@ -537,6 +1301,32 @@ Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
return Bytecode::kDiv;
case Token::Value::MOD:
return Bytecode::kMod;
+ case Token::Value::BIT_OR:
+ return Bytecode::kBitwiseOr;
+ case Token::Value::BIT_XOR:
+ return Bytecode::kBitwiseXor;
+ case Token::Value::BIT_AND:
+ return Bytecode::kBitwiseAnd;
+ case Token::Value::SHL:
+ return Bytecode::kShiftLeft;
+ case Token::Value::SAR:
+ return Bytecode::kShiftRight;
+ case Token::Value::SHR:
+ return Bytecode::kShiftRightLogical;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
+ switch (op) {
+ case Token::Value::ADD:
+ return Bytecode::kInc;
+ case Token::Value::SUB:
+ return Bytecode::kDec;
default:
UNREACHABLE();
return static_cast<Bytecode>(-1);
@@ -575,38 +1365,242 @@ Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
// static
-bool BytecodeArrayBuilder::FitsInIdxOperand(int value) {
+Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLoadICSloppy:
+ return Bytecode::kLoadICSloppyWide;
+ case Bytecode::kLoadICStrict:
+ return Bytecode::kLoadICStrictWide;
+ case Bytecode::kKeyedLoadICSloppy:
+ return Bytecode::kKeyedLoadICSloppyWide;
+ case Bytecode::kKeyedLoadICStrict:
+ return Bytecode::kKeyedLoadICStrictWide;
+ case Bytecode::kStoreICSloppy:
+ return Bytecode::kStoreICSloppyWide;
+ case Bytecode::kStoreICStrict:
+ return Bytecode::kStoreICStrictWide;
+ case Bytecode::kKeyedStoreICSloppy:
+ return Bytecode::kKeyedStoreICSloppyWide;
+ case Bytecode::kKeyedStoreICStrict:
+ return Bytecode::kKeyedStoreICStrictWide;
+ case Bytecode::kLdaGlobalSloppy:
+ return Bytecode::kLdaGlobalSloppyWide;
+ case Bytecode::kLdaGlobalStrict:
+ return Bytecode::kLdaGlobalStrictWide;
+ case Bytecode::kLdaGlobalInsideTypeofSloppy:
+ return Bytecode::kLdaGlobalInsideTypeofSloppyWide;
+ case Bytecode::kLdaGlobalInsideTypeofStrict:
+ return Bytecode::kLdaGlobalInsideTypeofStrictWide;
+ case Bytecode::kStaGlobalSloppy:
+ return Bytecode::kStaGlobalSloppyWide;
+ case Bytecode::kStaGlobalStrict:
+ return Bytecode::kStaGlobalStrictWide;
+ case Bytecode::kLdaLookupSlot:
+ return Bytecode::kLdaLookupSlotWide;
+ case Bytecode::kLdaLookupSlotInsideTypeof:
+ return Bytecode::kLdaLookupSlotInsideTypeofWide;
+ case Bytecode::kStaLookupSlotStrict:
+ return Bytecode::kStaLookupSlotStrictWide;
+ case Bytecode::kStaLookupSlotSloppy:
+ return Bytecode::kStaLookupSlotSloppyWide;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForLoadIC(LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kLoadICSloppy;
+ case STRICT:
+ return Bytecode::kLoadICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForKeyedLoadIC(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kKeyedLoadICSloppy;
+ case STRICT:
+ return Bytecode::kKeyedLoadICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStoreICSloppy;
+ case STRICT:
+ return Bytecode::kStoreICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kKeyedStoreICSloppy;
+ case STRICT:
+ return Bytecode::kKeyedStoreICStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(LanguageMode language_mode,
+ TypeofMode typeof_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return typeof_mode == INSIDE_TYPEOF
+ ? Bytecode::kLdaGlobalInsideTypeofSloppy
+ : Bytecode::kLdaGlobalSloppy;
+ case STRICT:
+ return typeof_mode == INSIDE_TYPEOF
+ ? Bytecode::kLdaGlobalInsideTypeofStrict
+ : Bytecode::kLdaGlobalStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStaGlobalSloppy;
+ case STRICT:
+ return Bytecode::kStaGlobalStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStaLookupSlotSloppy;
+ case STRICT:
+ return Bytecode::kStaLookupSlotStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
+ CreateArgumentsType type) {
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments:
+ return Bytecode::kCreateMappedArguments;
+ case CreateArgumentsType::kUnmappedArguments:
+ return Bytecode::kCreateUnmappedArguments;
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kDeletePropertySloppy;
+ case STRICT:
+ return Bytecode::kDeletePropertyStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
+bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
return kMinUInt8 <= value && value <= kMaxUInt8;
}
// static
-bool BytecodeArrayBuilder::FitsInIdxOperand(size_t value) {
+bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
return value <= static_cast<size_t>(kMaxUInt8);
}
// static
bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
- return kMinInt8 <= value && value < kMaxInt8;
+ return kMinInt8 <= value && value <= kMaxInt8;
+}
+
+
+// static
+bool BytecodeArrayBuilder::FitsInIdx16Operand(int value) {
+ return kMinUInt16 <= value && value <= kMaxUInt16;
}
-TemporaryRegisterScope::TemporaryRegisterScope(BytecodeArrayBuilder* builder)
- : builder_(builder), count_(0), last_register_index_(-1) {}
+// static
+bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
+ return value <= static_cast<size_t>(kMaxUInt16);
+}
-TemporaryRegisterScope::~TemporaryRegisterScope() {
- while (count_-- != 0) {
- builder_->ReturnTemporaryRegister(last_register_index_--);
- }
+// static
+bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
+ return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
}
-Register TemporaryRegisterScope::NewRegister() {
- count_++;
- last_register_index_ = builder_->BorrowTemporaryRegister();
- return Register(last_register_index_);
+// static
+bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
+ return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
}
} // namespace interpreter
diff --git a/chromium/v8/src/interpreter/bytecode-array-builder.h b/chromium/v8/src/interpreter/bytecode-array-builder.h
index d68d5e7ffbf..7c23dc3f22c 100644
--- a/chromium/v8/src/interpreter/bytecode-array-builder.h
+++ b/chromium/v8/src/interpreter/bytecode-array-builder.h
@@ -5,12 +5,9 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
-#include <vector>
-
-#include "src/ast.h"
-#include "src/identity-map.h"
+#include "src/ast/ast.h"
#include "src/interpreter/bytecodes.h"
-#include "src/zone.h"
+#include "src/interpreter/constant-array-builder.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -21,22 +18,55 @@ class Isolate;
namespace interpreter {
class BytecodeLabel;
+class ConstantArrayBuilder;
class Register;
-class BytecodeArrayBuilder {
+// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
+// when the rest parameters implementation has settled down.
+enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
+
+class BytecodeArrayBuilder final {
public:
BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+ ~BytecodeArrayBuilder();
+
Handle<BytecodeArray> ToBytecodeArray();
- // Set number of parameters expected by function.
+ // Set the number of parameters expected by the function.
void set_parameter_count(int number_of_params);
- int parameter_count() const;
+ int parameter_count() const {
+ DCHECK_GE(parameter_count_, 0);
+ return parameter_count_;
+ }
- // Set number of locals required for bytecode array.
+ // Set the number of locals required for the bytecode array.
void set_locals_count(int number_of_locals);
- int locals_count() const;
+ int locals_count() const {
+ DCHECK_GE(local_register_count_, 0);
+ return local_register_count_;
+ }
+
+ // Set the number of contexts required for the bytecode array.
+ void set_context_count(int number_of_contexts);
+ int context_count() const {
+ DCHECK_GE(context_register_count_, 0);
+ return context_register_count_;
+ }
- Register Parameter(int parameter_index);
+ Register first_context_register() const;
+ Register last_context_register() const;
+
+ // Returns the number of fixed (non-temporary) registers.
+ int fixed_register_count() const { return context_count() + locals_count(); }
+
+ Register Parameter(int parameter_index) const;
+
+ // Return true if the register |reg| represents a parameter or a
+ // local.
+ bool RegisterIsParameterOrLocal(Register reg) const;
+
+ // Return true if the register |reg| represents a temporary register.
+ bool RegisterIsTemporary(Register reg) const;
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -46,109 +76,267 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& LoadTheHole();
BytecodeArrayBuilder& LoadTrue();
BytecodeArrayBuilder& LoadFalse();
+ BytecodeArrayBuilder& LoadBooleanConstant(bool value);
+
+ // Global loads to the accumulator and stores from the accumulator.
+ BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
+ LanguageMode language_mode,
+ TypeofMode typeof_mode);
+ BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
+ int feedback_slot,
+ LanguageMode language_mode);
+
+ // Load the object at |slot_index| in |context| into the accumulator.
+ BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index);
- // Global loads to accumulator.
- BytecodeArrayBuilder& LoadGlobal(int slot_index);
+ // Stores the object in the accumulator into |slot_index| of |context|.
+ BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index);
// Register-accumulator transfers.
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
- // Load properties. The property name should be in the accumulator.
- BytecodeArrayBuilder& LoadNamedProperty(Register object, int feedback_slot,
+ // Register-register transfer.
+ BytecodeArrayBuilder& MoveRegister(Register from, Register to);
+ BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
+
+ // Named property load.
+ BytecodeArrayBuilder& LoadNamedProperty(Register object,
+ const Handle<String> name,
+ int feedback_slot,
LanguageMode language_mode);
+ // Keyed property load. The key should be in the accumulator.
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
LanguageMode language_mode);
// Store properties. The value to be stored should be in the accumulator.
- BytecodeArrayBuilder& StoreNamedProperty(Register object, Register name,
+ BytecodeArrayBuilder& StoreNamedProperty(Register object,
+ const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
int feedback_slot,
LanguageMode language_mode);
+ // Look up the variable with |name|.
+ BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
+ TypeofMode typeof_mode);
+
+ // Store the value in the accumulator into the variable with |name|.
+ BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
+ LanguageMode language_mode);
+
+ // Create a new closure for the SharedFunctionInfo.
+ BytecodeArrayBuilder& CreateClosure(Handle<SharedFunctionInfo> shared_info,
+ PretenureFlag tenured);
+
+ // Create a new arguments object in the accumulator.
+ BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
+
+ // Literal creation. The resulting literal object is placed in the
+ // accumulator.
+ BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
+ int literal_index, int flags);
+ BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+ int literal_index, int flags);
+ BytecodeArrayBuilder& CreateObjectLiteral(
+ Handle<FixedArray> constant_properties, int literal_index, int flags);
+
+ // Push the context held in the accumulator as the new context, and store it
+ // in register |context|.
+ BytecodeArrayBuilder& PushContext(Register context);
+
+ // Pop the current context and replace it with |context|.
+ BytecodeArrayBuilder& PopContext(Register context);
+
// Call a JS function. The JSFunction or Callable to be called should be in
// |callable|, the receiver should be in |receiver| and all subsequent
// arguments should be in registers <receiver + 1> to
 // <receiver + arg_count>.
BytecodeArrayBuilder& Call(Register callable, Register receiver,
- size_t arg_count);
-
- // Operators (register == lhs, accumulator = rhs).
- BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
+ size_t arg_count, int feedback_slot);
+
+ // Call the new operator. |constructor| holds the function to construct,
+ // and |arg_count| consecutive registers starting at |first_arg| hold the
+ // arguments to be applied to the constructor.
+ BytecodeArrayBuilder& New(Register constructor, Register first_arg,
+ size_t arg_count);
+
+ // Call the runtime function with |function_id|. The first argument should be
+ // in |first_arg| and all subsequent arguments should be in registers
+ // <first_arg + 1> to <first_arg + arg_count - 1>.
+ BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
+ Register first_arg, size_t arg_count);
+
+ // Call the runtime function with |function_id| that returns a pair of values.
+ // The first argument should be in |first_arg| and all subsequent arguments
+ // should be in registers <first_arg + 1> to <first_arg + arg_count - 1>. The
+ // return values will be returned in <first_return> and <first_return + 1>.
+ BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
+ Register first_arg, size_t arg_count,
+ Register first_return);
+
+ // Call the JS runtime function with |context_index|. The receiver should
+ // be in |receiver| and all subsequent arguments should be in registers
+ // <receiver + 1> to <receiver + arg_count>.
+ BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver,
+ size_t arg_count);
+
+ // Operators (register holds the lhs value, accumulator holds the rhs value).
+ BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
+ Strength strength);
+
+ // Count Operators (value stored in accumulator).
+ BytecodeArrayBuilder& CountOperation(Token::Value op, Strength strength);
+
+ // Unary Operators.
+ BytecodeArrayBuilder& LogicalNot();
+ BytecodeArrayBuilder& TypeOf();
+
+ // Deletes a property from an object. Expects the accumulator to contain the
+ // key to be deleted and the register to contain a reference to the object.
+ BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
+ BytecodeArrayBuilder& DeleteLookupSlot();
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
- LanguageMode language_mode);
+ Strength strength);
- // Casts
+ // Casts.
BytecodeArrayBuilder& CastAccumulatorToBoolean();
+ BytecodeArrayBuilder& CastAccumulatorToJSObject();
+ BytecodeArrayBuilder& CastAccumulatorToName();
+ BytecodeArrayBuilder& CastAccumulatorToNumber();
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
+ BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label);
+
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+
+ BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& Return();
- BytecodeArrayBuilder& EnterBlock();
- BytecodeArrayBuilder& LeaveBlock();
+ // Complex flow control.
+ BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
+ Register cache_length);
+ BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
+ BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
+ Register cache_array, Register index);
+ BytecodeArrayBuilder& ForInStep(Register index);
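+
+ // (A for-in loop is typically lowered as ForInPrepare once, then per
+ // iteration ForInDone to test for completion, ForInNext to fetch the next
+ // key, and ForInStep to advance the index.)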
+
+ // Accessors
+ Zone* zone() const { return zone_; }
private:
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
Isolate* isolate() const { return isolate_; }
+ ConstantArrayBuilder* constant_array_builder() {
+ return &constant_array_builder_;
+ }
+ const ConstantArrayBuilder* constant_array_builder() const {
+ return &constant_array_builder_;
+ }
static Bytecode BytecodeForBinaryOperation(Token::Value op);
+ static Bytecode BytecodeForCountOperation(Token::Value op);
static Bytecode BytecodeForCompareOperation(Token::Value op);
- static bool FitsInIdxOperand(int value);
- static bool FitsInIdxOperand(size_t value);
+ static Bytecode BytecodeForWideOperands(Bytecode bytecode);
+ static Bytecode BytecodeForLoadIC(LanguageMode language_mode);
+ static Bytecode BytecodeForKeyedLoadIC(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
+ static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
+ static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
+ TypeofMode typeof_mode);
+ static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
+ static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
+ static Bytecode BytecodeForDelete(LanguageMode language_mode);
+
+ static bool FitsInIdx8Operand(int value);
+ static bool FitsInIdx8Operand(size_t value);
static bool FitsInImm8Operand(int value);
- static bool IsJumpWithImm8Operand(Bytecode jump_bytecode);
- static Bytecode GetJumpWithConstantOperand(Bytecode jump_with_smi8_operand);
+ static bool FitsInIdx16Operand(int value);
+ static bool FitsInIdx16Operand(size_t value);
+ static bool FitsInReg8Operand(Register value);
+ static bool FitsInReg16Operand(Register value);
+
+ static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
+ static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
+ static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
+
+ Register MapRegister(Register reg);
+ Register MapRegisters(Register reg, Register args_base, int args_length = 1);
template <size_t N>
- INLINE(void Output(uint8_t(&bytes)[N]));
- void Output(Bytecode bytecode, uint8_t operand0, uint8_t operand1,
- uint8_t operand2);
- void Output(Bytecode bytecode, uint8_t operand0, uint8_t operand1);
- void Output(Bytecode bytecode, uint8_t operand0);
+ INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3);
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2);
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+ void Output(Bytecode bytecode, uint32_t operand0);
void Output(Bytecode bytecode);
- void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location);
+
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
+ void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
+ const ZoneVector<uint8_t>::iterator& jump_location);
+ void PatchIndirectJumpWith8BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+ void PatchIndirectJumpWith16BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+ void LeaveBasicBlock();
void EnsureReturn();
bool OperandIsValid(Bytecode bytecode, int operand_index,
- uint8_t operand_value) const;
+ uint32_t operand_value) const;
bool LastBytecodeInSameBlock() const;
- size_t GetConstantPoolEntry(Handle<Object> object);
+ bool NeedToBooleanCast();
+ bool IsRegisterInAccumulator(Register reg);
- // Scope helpers used by TemporaryRegisterScope
+ bool RegisterIsValid(Register reg) const;
+
+ // Temporary register management.
int BorrowTemporaryRegister();
+ int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
void ReturnTemporaryRegister(int reg_index);
+ int PrepareForConsecutiveTemporaryRegisters(size_t count);
+ void BorrowConsecutiveTemporaryRegister(int reg_index);
+ bool TemporaryRegisterIsLive(Register reg) const;
+
+ Register first_temporary_register() const;
+ Register last_temporary_register() const;
+
+ // Gets a constant pool entry for the |object|.
+ size_t GetConstantPoolEntry(Handle<Object> object);
Isolate* isolate_;
+ Zone* zone_;
ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
+ ConstantArrayBuilder constant_array_builder_;
size_t last_block_end_;
size_t last_bytecode_start_;
- bool return_seen_in_block_;
-
- IdentityMap<size_t> constants_map_;
- ZoneVector<Handle<Object>> constants_;
+ bool exit_seen_in_block_;
+ int unbound_jumps_;
int parameter_count_;
int local_register_count_;
+ int context_register_count_;
int temporary_register_count_;
- int temporary_register_next_;
+ ZoneSet<int> free_temporaries_;
+
+ class PreviousBytecodeHelper;
+ friend class BytecodeRegisterAllocator;
- friend class TemporaryRegisterScope;
- DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArrayBuilder);
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
@@ -159,23 +347,25 @@ class BytecodeArrayBuilder {
class BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
- ~BytecodeLabel() { DCHECK(bound_ && offset_ != kInvalidOffset); }
+
+ bool is_bound() const { return bound_; }
+ size_t offset() const { return offset_; }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
- INLINE(void bind_to(size_t offset)) {
+ void bind_to(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset);
offset_ = offset;
bound_ = true;
}
- INLINE(void set_referrer(size_t offset)) {
- DCHECK(!bound_ && offset != kInvalidOffset);
+
+ void set_referrer(size_t offset) {
+ DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
offset_ = offset;
}
- INLINE(size_t offset() const) { return offset_; }
- INLINE(bool is_bound() const) { return bound_; }
- INLINE(bool is_forward_target() const) {
+
+ bool is_forward_target() const {
return offset() != kInvalidOffset && !is_bound();
}
@@ -188,30 +378,8 @@ class BytecodeLabel final {
size_t offset_;
friend class BytecodeArrayBuilder;
- DISALLOW_COPY_AND_ASSIGN(BytecodeLabel);
};
-
-// A stack-allocated class that allows the instantiator to allocate
-// temporary registers that are cleaned up when scope is closed.
-class TemporaryRegisterScope {
- public:
- explicit TemporaryRegisterScope(BytecodeArrayBuilder* builder);
- ~TemporaryRegisterScope();
- Register NewRegister();
-
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
-
- BytecodeArrayBuilder* builder_;
- int count_;
- int last_register_index_;
-
- DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterScope);
-};
-
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.cc b/chromium/v8/src/interpreter/bytecode-array-iterator.cc
index dc49308fbe8..d09d72f01a2 100644
--- a/chromium/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/chromium/v8/src/interpreter/bytecode-array-iterator.cc
@@ -32,31 +32,66 @@ Bytecode BytecodeArrayIterator::current_bytecode() const {
}
-uint8_t BytecodeArrayIterator::GetRawOperand(int operand_index,
- OperandType operand_type) const {
+int BytecodeArrayIterator::current_bytecode_size() const {
+ return Bytecodes::Size(current_bytecode());
+}
+
+
+uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
+ OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
DCHECK_EQ(operand_type,
Bytecodes::GetOperandType(current_bytecode(), operand_index));
- int operands_start = bytecode_offset_ + 1;
- return bytecode_array()->get(operands_start + operand_index);
+ uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
+ switch (Bytecodes::SizeOfOperand(operand_type)) {
+ default:
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ return static_cast<uint32_t>(*operand_start);
+ case OperandSize::kShort:
+ return ReadUnalignedUInt16(operand_start);
+ }
}
-int8_t BytecodeArrayIterator::GetSmi8Operand(int operand_index) const {
- uint8_t operand = GetRawOperand(operand_index, OperandType::kImm8);
+int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
+ uint32_t operand = GetRawOperand(operand_index, OperandType::kImm8);
return static_cast<int8_t>(operand);
}
+int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
+ OperandSize size =
+ Bytecodes::GetOperandSize(current_bytecode(), operand_index);
+ OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
+ : OperandType::kCount16;
+ uint32_t operand = GetRawOperand(operand_index, type);
+ return static_cast<int>(operand);
+}
+
+
int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
- uint8_t operand = GetRawOperand(operand_index, OperandType::kIdx);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kIdx8 ||
+ operand_type == OperandType::kIdx16);
+ uint32_t operand = GetRawOperand(operand_index, operand_type);
return static_cast<int>(operand);
}
Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
- uint8_t operand = GetRawOperand(operand_index, OperandType::kReg);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kReg8 ||
+ operand_type == OperandType::kRegPair8 ||
+ operand_type == OperandType::kMaybeReg8 ||
+ operand_type == OperandType::kReg16);
+ uint32_t operand = GetRawOperand(operand_index, operand_type);
return Register::FromOperand(operand);
}
@@ -67,6 +102,22 @@ Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
return FixedArray::get(constants, GetIndexOperand(operand_index));
}
+
+int BytecodeArrayIterator::GetJumpTargetOffset() const {
+ Bytecode bytecode = current_bytecode();
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+ int relative_offset = GetImmediateOperand(0);
+ return current_offset() + relative_offset;
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
+ interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+ Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+ return current_offset() + smi->value();
+ } else {
+ UNREACHABLE();
+ return kMinInt;
+ }
+}
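
GetJumpTargetOffset converts a relative branch into an absolute bytecode offset: short jumps carry the signed delta as an 8-bit immediate, wide jumps park it in the constant pool as a Smi, but the arithmetic is the same either way. A minimal model of that arithmetic (function name hypothetical):

    // The delta is measured from the offset of the jump bytecode itself.
    int ResolveJumpTarget(int current_offset, int relative_offset) {
      return current_offset + relative_offset;
    }
    // E.g. a jump at offset 10 with delta -4 targets offset 6; the immediate
    // and constant-pool variants differ only in where the delta is stored.
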
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.h b/chromium/v8/src/interpreter/bytecode-array-iterator.h
index 0d9011f242e..e67fa974bd0 100644
--- a/chromium/v8/src/interpreter/bytecode-array-iterator.h
+++ b/chromium/v8/src/interpreter/bytecode-array-iterator.h
@@ -20,19 +20,26 @@ class BytecodeArrayIterator {
void Advance();
bool done() const;
Bytecode current_bytecode() const;
+ int current_bytecode_size() const;
int current_offset() const { return bytecode_offset_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
- int8_t GetSmi8Operand(int operand_index) const;
+ int8_t GetImmediateOperand(int operand_index) const;
int GetIndexOperand(int operand_index) const;
+ int GetCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
// Get the raw byte for the given operand. Note: you should prefer using the
// typed versions above which cast the return to an appropriate type.
- uint8_t GetRawOperand(int operand_index, OperandType operand_type) const;
+ uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
+
+ // Returns the absolute offset of the branch target at the current
+ // bytecode. It is an error to call this method if the bytecode is
+ // not for a jump or conditional jump.
+ int GetJumpTargetOffset() const;
private:
Handle<BytecodeArray> bytecode_array_;
diff --git a/chromium/v8/src/interpreter/bytecode-generator.cc b/chromium/v8/src/interpreter/bytecode-generator.cc
index 7257fd41342..959e1551497 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.cc
+++ b/chromium/v8/src/interpreter/bytecode-generator.cc
@@ -4,83 +4,482 @@
#include "src/interpreter/bytecode-generator.h"
-#include <stack>
-
+#include "src/ast/scopes.h"
#include "src/compiler.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
-#include "src/scopes.h"
-#include "src/token.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
namespace interpreter {
-BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
- : builder_(isolate, zone) {
- InitializeAstVisitor(isolate, zone);
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body, allowing pushing and
+// popping of the current {context_register} during visitation.
+class BytecodeGenerator::ContextScope BASE_EMBEDDED {
+ public:
+ ContextScope(BytecodeGenerator* generator, Scope* scope,
+ bool should_pop_context = true)
+ : generator_(generator),
+ scope_(scope),
+ outer_(generator_->execution_context()),
+ register_(generator_->NextContextRegister()),
+ depth_(0),
+ should_pop_context_(should_pop_context) {
+ if (outer_) {
+ depth_ = outer_->depth_ + 1;
+ generator_->builder()->PushContext(register_);
+ }
+ generator_->set_execution_context(this);
+ }
+
+ ~ContextScope() {
+ if (outer_ && should_pop_context_) {
+ generator_->builder()->PopContext(outer_->reg());
+ }
+ generator_->set_execution_context(outer_);
+ }
+
+ // Returns the depth of the given |scope| for the current execution context.
+ int ContextChainDepth(Scope* scope) {
+ return scope_->ContextChainLength(scope);
+ }
+
+ // Returns the execution context at |depth| in the current context chain if it
+ // is a function local execution context, otherwise returns nullptr.
+ ContextScope* Previous(int depth) {
+ if (depth > depth_) {
+ return nullptr;
+ }
+
+ ContextScope* previous = this;
+ for (int i = depth; i > 0; --i) {
+ previous = previous->outer_;
+ }
+ return previous;
+ }
+
+ Scope* scope() const { return scope_; }
+ Register reg() const { return register_; }
+
+ private:
+ BytecodeGenerator* generator_;
+ Scope* scope_;
+ ContextScope* outer_;
+ Register register_;
+ int depth_;
+ bool should_pop_context_;
+};
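
ContextScope is a plain RAII stack: the constructor links the new scope over the current one and emits PushContext, the destructor emits PopContext and relinks the outer scope, and Previous(depth) simply walks the outer_ links. A generic sketch of the same pattern, stripped of the builder (all names hypothetical):

    // RAII scope chain in the style of BytecodeGenerator::ContextScope.
    class ScopeStack {
     public:
      class Scope {
       public:
        explicit Scope(ScopeStack* stack)
            : stack_(stack),
              outer_(stack->top_),
              depth_(outer_ ? outer_->depth_ + 1 : 0) {
          stack_->top_ = this;  // Corresponds to PushContext on entry.
        }
        ~Scope() { stack_->top_ = outer_; }  // PopContext on exit.

        // Walk |depth| links outward, mirroring ContextScope::Previous().
        Scope* Previous(int depth) {
          if (depth > depth_) return nullptr;
          Scope* current = this;
          for (int i = depth; i > 0; --i) current = current->outer_;
          return current;
        }

       private:
        ScopeStack* stack_;
        Scope* outer_;
        int depth_;
      };

     private:
      Scope* top_ = nullptr;
    };
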
+
+
+// Scoped class for tracking control statements entered by the
+// visitor. The pattern derives from AstGraphBuilder::ControlScope.
+class BytecodeGenerator::ControlScope BASE_EMBEDDED {
+ public:
+ explicit ControlScope(BytecodeGenerator* generator)
+ : generator_(generator), outer_(generator->execution_control()) {
+ generator_->set_execution_control(this);
+ }
+ virtual ~ControlScope() { generator_->set_execution_control(outer()); }
+
+ void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
+ void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
+
+ protected:
+ enum Command { CMD_BREAK, CMD_CONTINUE };
+ void PerformCommand(Command command, Statement* statement);
+ virtual bool Execute(Command command, Statement* statement) = 0;
+
+ BytecodeGenerator* generator() const { return generator_; }
+ ControlScope* outer() const { return outer_; }
+
+ private:
+ BytecodeGenerator* generator_;
+ ControlScope* outer_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlScope);
+};
+
+
+// Scoped class for enabling break inside blocks and switch blocks.
+class BytecodeGenerator::ControlScopeForBreakable final
+ : public BytecodeGenerator::ControlScope {
+ public:
+ ControlScopeForBreakable(BytecodeGenerator* generator,
+ BreakableStatement* statement,
+ BreakableControlFlowBuilder* control_builder)
+ : ControlScope(generator),
+ statement_(statement),
+ control_builder_(control_builder) {}
+
+ protected:
+ virtual bool Execute(Command command, Statement* statement) {
+ if (statement != statement_) return false;
+ switch (command) {
+ case CMD_BREAK:
+ control_builder_->Break();
+ return true;
+ case CMD_CONTINUE:
+ break;
+ }
+ return false;
+ }
+
+ private:
+ Statement* statement_;
+ BreakableControlFlowBuilder* control_builder_;
+};
+
+
+// Scoped class for enabling 'break' and 'continue' in iteration
+// constructs, e.g. do...while, while..., for...
+class BytecodeGenerator::ControlScopeForIteration final
+ : public BytecodeGenerator::ControlScope {
+ public:
+ ControlScopeForIteration(BytecodeGenerator* generator,
+ IterationStatement* statement,
+ LoopBuilder* loop_builder)
+ : ControlScope(generator),
+ statement_(statement),
+ loop_builder_(loop_builder) {}
+
+ protected:
+ virtual bool Execute(Command command, Statement* statement) {
+ if (statement != statement_) return false;
+ switch (command) {
+ case CMD_BREAK:
+ loop_builder_->Break();
+ return true;
+ case CMD_CONTINUE:
+ loop_builder_->Continue();
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ Statement* statement_;
+ LoopBuilder* loop_builder_;
+};
+
+
+void BytecodeGenerator::ControlScope::PerformCommand(Command command,
+ Statement* statement) {
+ ControlScope* current = this;
+ do {
+ if (current->Execute(command, statement)) return;
+ current = current->outer();
+ } while (current != nullptr);
+ UNREACHABLE();
}
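
PerformCommand is a chain-of-responsibility walk: each enclosing control scope gets a chance to consume the break or continue, and the first one whose statement matches wins. The delegation in miniature (handler shape hypothetical):

    #include <cassert>

    struct Handler {
      Handler* outer;
      bool (*execute)(Handler* self, int command);
    };

    // Walk outward until some scope claims the command, as PerformCommand
    // does over the ControlScope chain.
    void Perform(Handler* innermost, int command) {
      for (Handler* current = innermost; current != nullptr;
           current = current->outer) {
        if (current->execute(current, command)) return;
      }
      assert(false);  // An unmatched break/continue is a bug (UNREACHABLE()).
    }
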
-BytecodeGenerator::~BytecodeGenerator() {}
+class BytecodeGenerator::RegisterAllocationScope {
+ public:
+ explicit RegisterAllocationScope(BytecodeGenerator* generator)
+ : generator_(generator),
+ outer_(generator->register_allocator()),
+ allocator_(builder()) {
+ generator_->set_register_allocator(this);
+ }
+
+ virtual ~RegisterAllocationScope() {
+ generator_->set_register_allocator(outer_);
+ }
+
+ Register NewRegister() {
+ RegisterAllocationScope* current_scope = generator()->register_allocator();
+ if ((current_scope == this) ||
+ (current_scope->outer() == this &&
+ !current_scope->allocator_.HasConsecutiveAllocations())) {
+ // Regular case - allocating registers in the current or outer scope.
+ // VisitForRegisterValue allocates registers in the outer scope.
+ return allocator_.NewRegister();
+ } else {
+ // If a register has to be allocated in a scope other than the current
+ // or outer one, allocate a new temporary register. It might be
+ // expensive to walk the full scope chain and compute the list of
+ // consecutive reservations in the inner scopes.
+ UNIMPLEMENTED();
+ return Register(-1);
+ }
+ }
+
+ void PrepareForConsecutiveAllocations(size_t count) {
+ allocator_.PrepareForConsecutiveAllocations(count);
+ }
+
+ Register NextConsecutiveRegister() {
+ return allocator_.NextConsecutiveRegister();
+ }
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const {
+ return allocator_.RegisterIsAllocatedInThisScope(reg);
+ }
+
+ RegisterAllocationScope* outer() const { return outer_; }
+
+ private:
+ BytecodeGenerator* generator() const { return generator_; }
+ BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+ BytecodeGenerator* generator_;
+ RegisterAllocationScope* outer_;
+ BytecodeRegisterAllocator allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
+};
+
+
+// Scoped base class for determining where the result of an expression
+// is stored.
+class BytecodeGenerator::ExpressionResultScope {
+ public:
+ ExpressionResultScope(BytecodeGenerator* generator, Expression::Context kind)
+ : generator_(generator),
+ kind_(kind),
+ outer_(generator->execution_result()),
+ allocator_(generator),
+ result_identified_(false) {
+ generator_->set_execution_result(this);
+ }
+
+ virtual ~ExpressionResultScope() {
+ generator_->set_execution_result(outer_);
+ DCHECK(result_identified());
+ }
+
+ bool IsEffect() const { return kind_ == Expression::kEffect; }
+ bool IsValue() const { return kind_ == Expression::kValue; }
+
+ virtual void SetResultInAccumulator() = 0;
+ virtual void SetResultInRegister(Register reg) = 0;
+
+ protected:
+ ExpressionResultScope* outer() const { return outer_; }
+ BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+ const RegisterAllocationScope* allocator() const { return &allocator_; }
+
+ void set_result_identified() {
+ DCHECK(!result_identified());
+ result_identified_ = true;
+ }
+
+ bool result_identified() const { return result_identified_; }
+
+ private:
+ BytecodeGenerator* generator_;
+ Expression::Context kind_;
+ ExpressionResultScope* outer_;
+ RegisterAllocationScope allocator_;
+ bool result_identified_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
+};
+
+
+// Scoped class used when the current expression is not expected to
+// produce a result.
+class BytecodeGenerator::EffectResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit EffectResultScope(BytecodeGenerator* generator)
+ : ExpressionResultScope(generator, Expression::kEffect) {
+ set_result_identified();
+ }
+
+ virtual void SetResultInAccumulator() {}
+ virtual void SetResultInRegister(Register reg) {}
+};
+
+
+// Scoped class used when the result of the expression being evaluated
+// should go into the interpreter's accumulator register.
+class BytecodeGenerator::AccumulatorResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit AccumulatorResultScope(BytecodeGenerator* generator)
+ : ExpressionResultScope(generator, Expression::kValue) {}
+
+ virtual void SetResultInAccumulator() { set_result_identified(); }
+
+ virtual void SetResultInRegister(Register reg) {
+ builder()->LoadAccumulatorWithRegister(reg);
+ set_result_identified();
+ }
+};
+
+
+// Scoped class used when the result of the expression being evaluated
+// should go into an interpreter register.
+class BytecodeGenerator::RegisterResultScope final
+ : public ExpressionResultScope {
+ public:
+ explicit RegisterResultScope(BytecodeGenerator* generator)
+ : ExpressionResultScope(generator, Expression::kValue) {}
+
+ virtual void SetResultInAccumulator() {
+ result_register_ = allocator()->outer()->NewRegister();
+ builder()->StoreAccumulatorInRegister(result_register_);
+ set_result_identified();
+ }
+
+ virtual void SetResultInRegister(Register reg) {
+ DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
+ (builder()->RegisterIsTemporary(reg) &&
+ !allocator()->RegisterIsAllocatedInThisScope(reg)));
+ result_register_ = reg;
+ set_result_identified();
+ }
+
+ Register ResultRegister() const { return result_register_; }
+
+ private:
+ Register result_register_;
+};
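
The three result scopes decouple every Visit method from the question of where its value must land: an effect scope discards it, an accumulator scope keeps it in the implicit accumulator, and a register scope spills it. A reduced model of the virtual dispatch (class and method names hypothetical; "Ldar" stands in for the load-accumulator bytecode):

    #include <cstdio>

    struct ResultSink {
      virtual ~ResultSink() = default;
      virtual void SetResultInAccumulator() = 0;
      virtual void SetResultInRegister(int reg) = 0;
    };

    struct EffectSink final : ResultSink {
      void SetResultInAccumulator() override {}  // Value is simply dropped.
      void SetResultInRegister(int) override {}
    };

    struct AccumulatorSink final : ResultSink {
      void SetResultInAccumulator() override {}  // Already where it belongs.
      void SetResultInRegister(int reg) override {
        std::printf("Ldar r%d\n", reg);  // Load the register into the acc.
      }
    };
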
+
+
+BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ zone_(zone),
+ builder_(isolate, zone),
+ info_(nullptr),
+ scope_(nullptr),
+ globals_(0, zone),
+ execution_control_(nullptr),
+ execution_context_(nullptr),
+ execution_result_(nullptr),
+ register_allocator_(nullptr) {
+ InitializeAstVisitor(isolate);
+}
Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
set_info(info);
set_scope(info->scope());
- // This is a temporary guard (oth).
- DCHECK(scope()->is_function_scope());
+ // Initialize the incoming context.
+ ContextScope incoming_context(this, scope(), false);
+
+ builder()->set_parameter_count(info->num_parameters_including_this());
+ builder()->set_locals_count(scope()->num_stack_slots());
+ builder()->set_context_count(scope()->MaxNestedContextChainLength());
+
+ // Build function context only if there are context allocated variables.
+ if (scope()->NeedsContext()) {
+ // Push a new inner context scope for the function.
+ VisitNewLocalFunctionContext();
+ ContextScope local_function_context(this, scope(), false);
+ VisitBuildLocalActivationContext();
+ MakeBytecodeBody();
+ } else {
+ MakeBytecodeBody();
+ }
+
+ set_scope(nullptr);
+ set_info(nullptr);
+ return builder_.ToBytecodeArray();
+}
+
+
+void BytecodeGenerator::MakeBytecodeBody() {
+ // Build the arguments object if it is used.
+ VisitArgumentsObject(scope()->arguments());
+
+ // TODO(mythria): Build rest arguments array if it is used.
+ int rest_index;
+ if (scope()->rest_parameter(&rest_index)) {
+ UNIMPLEMENTED();
+ }
+
+ // Build assignment to {.this_function} variable if it is used.
+ VisitThisFunctionVariable(scope()->this_function_var());
- builder().set_parameter_count(info->num_parameters_including_this());
- builder().set_locals_count(scope()->num_stack_slots());
+ // Build assignment to {new.target} variable if it is used.
+ VisitNewTargetVariable(scope()->new_target_var());
+
+ // TODO(rmcilroy): Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ UNIMPLEMENTED();
+ }
- // Visit implicit declaration of the function name.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VisitVariableDeclaration(scope()->function());
+ // Visit illegal re-declaration and bail out if it exists.
+ if (scope()->HasIllegalRedeclaration()) {
+ Visit(scope()->GetIllegalRedeclaration());
+ return;
}
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
// Visit statements in the function body.
- VisitStatements(info->literal()->body());
-
- set_scope(nullptr);
- set_info(nullptr);
- return builder_.ToBytecodeArray();
+ VisitStatements(info()->literal()->body());
}
-void BytecodeGenerator::VisitBlock(Block* node) {
- builder().EnterBlock();
- if (node->scope() == NULL) {
+void BytecodeGenerator::VisitBlock(Block* stmt) {
+ BlockBuilder block_builder(this->builder());
+ ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+
+ if (stmt->scope() == NULL) {
// Visit statements in the same scope, no declarations.
- VisitStatements(node->statements());
+ VisitStatements(stmt->statements());
} else {
// Visit declarations and statements in a block scope.
- if (node->scope()->ContextLocalCount() > 0) {
- UNIMPLEMENTED();
+ if (stmt->scope()->NeedsContext()) {
+ VisitNewLocalBlockContext(stmt->scope());
+ ContextScope scope(this, stmt->scope());
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
} else {
- VisitDeclarations(node->scope()->declarations());
- VisitStatements(node->statements());
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
}
}
- builder().LeaveBlock();
+ if (stmt->labels() != nullptr) block_builder.EndBlock();
}
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
+ VariableMode mode = decl->mode();
+ // Const and let variables are initialized with the hole so that we can
+ // check that they are only assigned once.
+ bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- UNIMPLEMENTED();
+ case VariableLocation::UNALLOCATED: {
+ Handle<Oddball> value = variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value();
+ globals()->push_back(variable->name());
+ globals()->push_back(value);
break;
- case VariableLocation::PARAMETER:
+ }
case VariableLocation::LOCAL:
- // Details stored in scope, i.e. variable index.
+ if (hole_init) {
+ Register destination(variable->index());
+ builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
+ }
+ break;
+ case VariableLocation::PARAMETER:
+ if (hole_init) {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register destination(builder()->Parameter(variable->index() + 1));
+ builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
+ }
break;
case VariableLocation::CONTEXT:
+ if (hole_init) {
+ builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
+ variable->index());
+ }
+ break;
case VariableLocation::LOOKUP:
UNIMPLEMENTED();
break;
@@ -89,7 +488,34 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
- UNIMPLEMENTED();
+ Variable* variable = decl->proxy()->var();
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
+ decl->fun(), info()->script(), info());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals()->push_back(variable->name());
+ globals()->push_back(function);
+ break;
+ }
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
+ VisitForAccumulatorValue(decl->fun());
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
+ VisitForAccumulatorValue(decl->fun());
+ builder()->StoreContextSlot(execution_context()->reg(),
+ variable->index());
+ break;
+ }
+ case VariableLocation::LOOKUP:
+ UNIMPLEMENTED();
+ }
}
@@ -103,31 +529,80 @@ void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
}
+void BytecodeGenerator::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ RegisterAllocationScope register_scope(this);
+ DCHECK(globals()->empty());
+ AstVisitor::VisitDeclarations(declarations);
+ if (globals()->empty()) return;
+ int array_index = 0;
+ Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
+ static_cast<int>(globals()->size()), TENURED);
+ for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
+ int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(language_mode());
+
+ Register pairs = register_allocator()->NewRegister();
+ builder()->LoadLiteral(data);
+ builder()->StoreAccumulatorInRegister(pairs);
+
+ Register flags = register_allocator()->NewRegister();
+ builder()->LoadLiteral(Smi::FromInt(encoded_flags));
+ builder()->StoreAccumulatorInRegister(flags);
+ DCHECK(flags.index() == pairs.index() + 1);
+
+ builder()->CallRuntime(Runtime::kDeclareGlobals, pairs, 2);
+ globals()->clear();
+}
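
VisitDeclarations batches all global declarations into one runtime call: names and initial values are interleaved into a single FixedArray, the eval/native/language-mode bits are packed into one Smi, and kDeclareGlobals receives the pair of consecutive registers. The interleaving itself is trivial; a sketch with std::vector standing in for the FixedArray (function name hypothetical):

    #include <string>
    #include <utility>
    #include <vector>

    // Names at even indices, initial values (hole/undefined or a function)
    // at odd indices, matching the FixedArray layout built above.
    std::vector<std::string> FlattenGlobals(
        const std::vector<std::pair<std::string, std::string>>& globals) {
      std::vector<std::string> data;
      data.reserve(globals.size() * 2);
      for (const auto& global : globals) {
        data.push_back(global.first);
        data.push_back(global.second);
      }
      return data;
    }
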
+
+
+void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ // Allocate an outer register allocation scope for the statement.
+ RegisterAllocationScope allocation_scope(this);
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
+ }
+}
+
+
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
+ VisitForEffect(stmt->expression());
}
void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
- // TODO(oth): For control-flow it could be useful to signal empty paths here.
}
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
- BytecodeLabel else_start, else_end;
- // TODO(oth): Spot easy cases where the code would not need to
- // emit the then block or the else block, e.g. condition is
- // obviously true/1/false/0.
- Visit(stmt->condition());
- builder().CastAccumulatorToBoolean();
- builder().JumpIfFalse(&else_start);
-
- Visit(stmt->then_statement());
- builder().Jump(&else_end);
- builder().Bind(&else_start);
-
- Visit(stmt->else_statement());
- builder().Bind(&else_end);
+ BytecodeLabel else_label, end_label;
+ if (stmt->condition()->ToBooleanIsTrue()) {
+ // Generate the then-block unconditionally, as the condition is always true.
+ Visit(stmt->then_statement());
+ } else if (stmt->condition()->ToBooleanIsFalse()) {
+ // Generate the else-block unconditionally, if it exists.
+ if (stmt->HasElseStatement()) {
+ Visit(stmt->else_statement());
+ }
+ } else {
+ // TODO(oth): If the then-statement is a BreakStatement or
+ // ContinueStatement we can reduce the number of generated
+ // jump/jump_ifs here. See the BasicLoops test.
+ VisitForAccumulatorValue(stmt->condition());
+ builder()->JumpIfFalse(&else_label);
+ Visit(stmt->then_statement());
+ if (stmt->HasElseStatement()) {
+ builder()->Jump(&end_label);
+ builder()->Bind(&else_label);
+ Visit(stmt->else_statement());
+ } else {
+ builder()->Bind(&else_label);
+ }
+ builder()->Bind(&end_label);
+ }
}
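
For a condition that cannot be folded, the visitor emits the classic two-label diamond. The shape it produces, rendered as a runnable printout (mnemonics illustrative, not v8's exact spellings):

    #include <cstdio>

    void PrintIfElseShape() {
      std::printf("  <condition> -> accumulator\n");
      std::printf("  JumpIfFalse @else\n");
      std::printf("  <then statements>\n");
      std::printf("  Jump @end\n");
      std::printf("@else:\n");
      std::printf("  <else statements>\n");
      std::printf("@end:\n");
    }
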
@@ -138,18 +613,18 @@ void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- UNIMPLEMENTED();
+ execution_control()->Continue(stmt->target());
}
void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- UNIMPLEMENTED();
+ execution_control()->Break(stmt->target());
}
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
- builder().Return();
+ VisitForAccumulatorValue(stmt->expression());
+ builder()->Return();
}
@@ -159,30 +634,220 @@ void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- UNIMPLEMENTED();
+ // We need this scope because we visit for register values. We have to
+ // maintain an execution result scope where registers can be allocated.
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ SwitchBuilder switch_builder(builder(), clauses->length());
+ ControlScopeForBreakable scope(this, stmt, &switch_builder);
+ int default_index = -1;
+
+ // Keep the switch value in a register until a case matches.
+ Register tag = VisitForRegisterValue(stmt->tag());
+
+ // Iterate over all cases and create nodes for label comparison.
+ BytecodeLabel done_label;
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+
+ // The default clause is not a test; remember its index.
+ if (clause->is_default()) {
+ default_index = i;
+ continue;
+ }
+
+ // Perform label comparison as if via '===' with tag.
+ VisitForAccumulatorValue(clause->label());
+ builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
+ language_mode_strength());
+ switch_builder.Case(i);
+ }
+
+ if (default_index >= 0) {
+ // Emit default jump if there is a default case.
+ switch_builder.DefaultAt(default_index);
+ } else {
+ // Otherwise none of the cases matched, so jump to done.
+ builder()->Jump(&done_label);
+ }
+
+ // Iterate over all cases and create the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ switch_builder.SetCaseTarget(i);
+ VisitStatements(clause->statements());
+ }
+ builder()->Bind(&done_label);
+
+ switch_builder.SetBreakTarget(done_label);
}
-void BytecodeGenerator::VisitCaseClause(CaseClause* clause) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
+ // Handled entirely in VisitSwitchStatement.
+ UNREACHABLE();
+}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNIMPLEMENTED();
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+ loop_builder.LoopHeader();
+ if (stmt->cond()->ToBooleanIsFalse()) {
+ Visit(stmt->body());
+ loop_builder.Condition();
+ } else if (stmt->cond()->ToBooleanIsTrue()) {
+ loop_builder.Condition();
+ Visit(stmt->body());
+ loop_builder.JumpToHeader();
+ } else {
+ Visit(stmt->body());
+ loop_builder.Condition();
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.JumpToHeaderIfTrue();
+ }
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- UNIMPLEMENTED();
+ if (stmt->cond()->ToBooleanIsFalse()) {
+ // If the condition is false there is no need to generate the loop.
+ return;
+ }
+
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
+ if (!stmt->cond()->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.BreakIfFalse();
+ }
+ Visit(stmt->body());
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNIMPLEMENTED();
+ if (stmt->init() != nullptr) {
+ Visit(stmt->init());
+ }
+ if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
+ // If the condition is known to be false there is no need to generate the
+ // body, next, or condition blocks. The init block above is still emitted.
+ return;
+ }
+
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
+ if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.BreakIfFalse();
+ }
+ Visit(stmt->body());
+ if (stmt->next() != nullptr) {
+ loop_builder.Next();
+ Visit(stmt->next());
+ }
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
+}
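
Do-while, while, and for all share the LoopBuilder skeleton: bind a header label, test the condition and break on false, run the body (plus the for-loop's next clause), then jump back to the header; the constant-condition branches above just drop the parts that can never run. The while shape as a runnable printout (mnemonics illustrative):

    #include <cstdio>

    void PrintWhileShape() {
      std::printf("@header:\n");
      std::printf("  <condition> -> accumulator\n");
      std::printf("  JumpIfFalse @break   ; loop_builder.BreakIfFalse()\n");
      std::printf("  <body>\n");
      std::printf("  Jump @header         ; loop_builder.JumpToHeader()\n");
      std::printf("@break:\n");
    }
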
+
+
+void BytecodeGenerator::VisitForInAssignment(Expression* expr,
+ FeedbackVectorSlot slot) {
+ DCHECK(expr->IsValidReferenceExpression());
+
+ // Evaluate the assignment; the value to be stored arrives in the
+ // accumulator.
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->AsVariableProxy()->var();
+ VisitVariableAssignment(variable, slot);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ Register object = VisitForRegisterValue(property->obj());
+ Handle<String> name = property->key()->AsLiteral()->AsPropertyName();
+ builder()->LoadAccumulatorWithRegister(value);
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ Register object = VisitForRegisterValue(property->obj());
+ Register key = VisitForRegisterValue(property->key());
+ builder()->LoadAccumulatorWithRegister(value);
+ builder()->StoreKeyedProperty(object, key, feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
}
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- UNIMPLEMENTED();
+ if (stmt->subject()->IsNullLiteral() ||
+ stmt->subject()->IsUndefinedLiteral(isolate())) {
+ // ForIn generates lots of code; skip it if it wouldn't produce effects.
+ return;
+ }
+
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+ BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
+
+ // Prepare the state for executing ForIn.
+ VisitForAccumulatorValue(stmt->subject());
+ builder()->JumpIfUndefined(&subject_undefined_label);
+ builder()->JumpIfNull(&subject_null_label);
+ Register receiver = register_allocator()->NewRegister();
+ builder()->CastAccumulatorToJSObject();
+ builder()->JumpIfNull(&not_object_label);
+ builder()->StoreAccumulatorInRegister(receiver);
+ Register cache_type = register_allocator()->NewRegister();
+ Register cache_array = register_allocator()->NewRegister();
+ Register cache_length = register_allocator()->NewRegister();
+ builder()->ForInPrepare(cache_type, cache_array, cache_length);
+
+ // Set up loop counter
+ Register index = register_allocator()->NewRegister();
+ builder()->LoadLiteral(Smi::FromInt(0));
+ builder()->StoreAccumulatorInRegister(index);
+
+ // The loop
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
+ builder()->ForInDone(index, cache_length);
+ loop_builder.BreakIfTrue();
+ builder()->ForInNext(receiver, cache_type, cache_array, index);
+ loop_builder.ContinueIfUndefined();
+ VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
+ Visit(stmt->body());
+ loop_builder.Next();
+ builder()->ForInStep(index);
+ builder()->StoreAccumulatorInRegister(index);
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
+ builder()->Bind(&not_object_label);
+ builder()->Bind(&subject_null_label);
+ builder()->Bind(&subject_undefined_label);
}
@@ -192,11 +857,20 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ if (FLAG_ignition_fake_try_catch) {
+ Visit(stmt->try_block());
+ return;
+ }
UNIMPLEMENTED();
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ if (FLAG_ignition_fake_try_catch) {
+ Visit(stmt->try_block());
+ Visit(stmt->finally_block());
+ return;
+ }
UNIMPLEMENTED();
}
@@ -207,7 +881,13 @@ void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNIMPLEMENTED();
+ // Find or build a shared function info.
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+ CHECK(!shared_info.is_null()); // TODO(rmcilroy): Set stack overflow?
+ builder()->CreateClosure(shared_info,
+ expr->pretenure() ? TENURED : NOT_TENURED);
+ execution_result()->SetResultInAccumulator();
}
@@ -222,84 +902,434 @@ void BytecodeGenerator::VisitNativeFunctionLiteral(
}
-void BytecodeGenerator::VisitConditional(Conditional* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitConditional(Conditional* expr) {
+ // TODO(rmcilroy): Spot easy cases where the code would not need to
+ // emit the then block or the else block, e.g. the condition is
+ // obviously true/1/false/0.
+
+ BytecodeLabel else_label, end_label;
+
+ VisitForAccumulatorValue(expr->condition());
+ builder()->JumpIfFalse(&else_label);
+
+ VisitForAccumulatorValue(expr->then_expression());
+ builder()->Jump(&end_label);
+
+ builder()->Bind(&else_label);
+ VisitForAccumulatorValue(expr->else_expression());
+ builder()->Bind(&end_label);
+
+ execution_result()->SetResultInAccumulator();
+}
void BytecodeGenerator::VisitLiteral(Literal* expr) {
- Handle<Object> value = expr->value();
- if (value->IsSmi()) {
- builder().LoadLiteral(Smi::cast(*value));
- } else if (value->IsUndefined()) {
- builder().LoadUndefined();
- } else if (value->IsTrue()) {
- builder().LoadTrue();
- } else if (value->IsFalse()) {
- builder().LoadFalse();
- } else if (value->IsNull()) {
- builder().LoadNull();
- } else if (value->IsTheHole()) {
- builder().LoadTheHole();
- } else {
- builder().LoadLiteral(value);
+ if (!execution_result()->IsEffect()) {
+ Handle<Object> value = expr->value();
+ if (value->IsSmi()) {
+ builder()->LoadLiteral(Smi::cast(*value));
+ } else if (value->IsUndefined()) {
+ builder()->LoadUndefined();
+ } else if (value->IsTrue()) {
+ builder()->LoadTrue();
+ } else if (value->IsFalse()) {
+ builder()->LoadFalse();
+ } else if (value->IsNull()) {
+ builder()->LoadNull();
+ } else if (value->IsTheHole()) {
+ builder()->LoadTheHole();
+ } else {
+ builder()->LoadLiteral(value);
+ }
+ execution_result()->SetResultInAccumulator();
}
}
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNIMPLEMENTED();
+ // Materialize a regular expression literal.
+ builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
+ expr->flags());
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNIMPLEMENTED();
+ // Deep-copy the literal boilerplate.
+ builder()->CreateObjectLiteral(expr->constant_properties(),
+ expr->literal_index(),
+ expr->ComputeFlags(true));
+ Register literal;
+
+ // Store computed values into the literal.
+ bool literal_in_accumulator = true;
+ int property_index = 0;
+ AccessorTable accessor_table(zone());
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
+ if (property->IsCompileTimeValue()) continue;
+
+ if (literal_in_accumulator) {
+ literal = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
+ literal_in_accumulator = false;
+ }
+
+ RegisterAllocationScope inner_register_scope(this);
+ Literal* literal_key = property->key()->AsLiteral();
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED: {
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (literal_key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreNamedProperty(
+ literal, literal_key->AsPropertyName(),
+ feedback_index(property->GetSlot(0)), language_mode());
+ } else {
+ VisitForEffect(property->value());
+ }
+ } else {
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register language = register_allocator()->NextConsecutiveRegister();
+ // TODO(oth): This is problematic - we can't assume contiguity here:
+ // literal is allocated in the outer register scope, whereas key, value,
+ // and language are in another.
+ DCHECK(Register::AreContiguous(literal, key, value, language));
+ VisitForAccumulatorValue(property->key());
+ builder()->StoreAccumulatorInRegister(key);
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value);
+ if (property->emit_store()) {
+ builder()
+ ->LoadLiteral(Smi::FromInt(SLOPPY))
+ .StoreAccumulatorInRegister(language)
+ .CallRuntime(Runtime::kSetProperty, literal, 4);
+ VisitSetHomeObject(value, literal, property);
+ }
+ }
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ register_allocator()->PrepareForConsecutiveAllocations(1);
+ DCHECK(property->emit_store());
+ Register value = register_allocator()->NextConsecutiveRegister();
+ DCHECK(Register::AreContiguous(literal, value));
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value).CallRuntime(
+ Runtime::kInternalSetPrototype, literal, 2);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(literal_key)->second->getter = property;
+ }
+ break;
+ case ObjectLiteral::Property::SETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(literal_key)->second->setter = property;
+ }
+ break;
+ }
+ }
+
+ // Define accessors, using only a single call to the runtime for each pair of
+ // corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register getter = register_allocator()->NextConsecutiveRegister();
+ Register setter = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
+ DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
+ VisitForAccumulatorValue(it->first);
+ builder()->StoreAccumulatorInRegister(name);
+ VisitObjectLiteralAccessor(literal, it->second->getter, getter);
+ VisitObjectLiteralAccessor(literal, it->second->setter, setter);
+ builder()
+ ->LoadLiteral(Smi::FromInt(NONE))
+ .StoreAccumulatorInRegister(attr)
+ .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, literal, 5);
+ }
+
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
+ // with the first computed property name and continues with all properties to
+ // its right. All the code from above initializes the static component of the
+ // object literal, and arranges for the map of the result to reflect the
+ // static order in which the keys appear. For the dynamic properties, we
+ // compile them into a series of "SetOwnProperty" runtime calls. This will
+ // preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ if (literal_in_accumulator) {
+ literal = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
+ literal_in_accumulator = false;
+ }
+
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ RegisterAllocationScope inner_register_scope(this);
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(property->emit_store());
+ Register value = register_allocator()->NewRegister();
+ DCHECK(Register::AreContiguous(literal, value));
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value).CallRuntime(
+ Runtime::kInternalSetPrototype, literal, 2);
+ continue;
+ }
+
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
+ DCHECK(Register::AreContiguous(literal, key, value, attr));
+
+ VisitForAccumulatorValue(property->key());
+ builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value);
+ VisitSetHomeObject(value, literal, property);
+ builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
+ Runtime::FunctionId function_id = static_cast<Runtime::FunctionId>(-1);
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::COMPUTED:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ function_id = Runtime::kDefineDataPropertyUnchecked;
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE(); // Handled specially above.
+ break;
+ case ObjectLiteral::Property::GETTER:
+ function_id = Runtime::kDefineGetterPropertyUnchecked;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ function_id = Runtime::kDefineSetterPropertyUnchecked;
+ break;
+ }
+ builder()->CallRuntime(function_id, literal, 4);
+ }
+
+ // Transform literals that contain functions to fast properties.
+ if (expr->has_function()) {
+ DCHECK(!literal_in_accumulator);
+ builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
+ }
+
+ if (!literal_in_accumulator) {
+ // Restore literal array into accumulator.
+ builder()->LoadAccumulatorWithRegister(literal);
+ }
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNIMPLEMENTED();
+ // Deep-copy the literal boilerplate.
+ builder()->CreateArrayLiteral(expr->constant_elements(),
+ expr->literal_index(),
+ expr->ComputeFlags(true));
+ Register index, literal;
+
+ // Evaluate all the non-constant subexpressions and store them into the
+ // newly cloned array.
+ bool literal_in_accumulator = true;
+ for (int array_index = 0; array_index < expr->values()->length();
+ array_index++) {
+ Expression* subexpr = expr->values()->at(array_index);
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+ if (subexpr->IsSpread()) {
+ // TODO(rmcilroy): Deal with spread expressions.
+ UNIMPLEMENTED();
+ }
+
+ if (literal_in_accumulator) {
+ index = register_allocator()->NewRegister();
+ literal = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
+ literal_in_accumulator = false;
+ }
+
+ FeedbackVectorSlot slot = expr->LiteralFeedbackSlot();
+ builder()
+ ->LoadLiteral(Smi::FromInt(array_index))
+ .StoreAccumulatorInRegister(index);
+ VisitForAccumulatorValue(subexpr);
+ builder()->StoreKeyedProperty(literal, index, feedback_index(slot),
+ language_mode());
+ }
+
+ if (!literal_in_accumulator) {
+ // Restore literal array into accumulator.
+ builder()->LoadAccumulatorWithRegister(literal);
+ }
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
- VisitVariableLoad(proxy->var());
+ VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
}
-void BytecodeGenerator::VisitVariableLoad(Variable* variable) {
+void BytecodeGenerator::VisitVariableLoad(Variable* variable,
+ FeedbackVectorSlot slot,
+ TypeofMode typeof_mode) {
switch (variable->location()) {
case VariableLocation::LOCAL: {
- Register source(variable->index());
- builder().LoadAccumulatorWithRegister(source);
+ Register source(variable->index());
+ builder()->LoadAccumulatorWithRegister(source);
+ execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::PARAMETER: {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
- Register source(builder().Parameter(variable->index() + 1));
- builder().LoadAccumulatorWithRegister(source);
+ Register source = builder()->Parameter(variable->index() + 1);
+ builder()->LoadAccumulatorWithRegister(source);
+ execution_result()->SetResultInAccumulator();
break;
}
- case VariableLocation::GLOBAL: {
- // Global var, const, or let variable.
- // TODO(rmcilroy): If context chain depth is short enough, do this using
- // a generic version of LoadGlobalViaContextStub rather than calling the
- // runtime.
- DCHECK(variable->IsStaticGlobalObjectProperty());
- builder().LoadGlobal(variable->index());
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ builder()->LoadGlobal(variable->name(), feedback_index(slot),
+ language_mode(), typeof_mode);
+ execution_result()->SetResultInAccumulator();
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ ContextScope* context = execution_context()->Previous(depth);
+ Register context_reg;
+ if (context) {
+ context_reg = context->reg();
+ } else {
+ context_reg = register_allocator()->NewRegister();
+ // Walk the context chain to find the context at the given depth.
+ // TODO(rmcilroy): Perform this work in a bytecode handler once we have
+ // a generic mechanism for performing jumps in interpreter.cc.
+ // TODO(mythria): Also update bytecode graph builder with correct depth
+ // when this changes.
+ builder()
+ ->LoadAccumulatorWithRegister(execution_context()->reg())
+ .StoreAccumulatorInRegister(context_reg);
+ for (int i = 0; i < depth; ++i) {
+ builder()
+ ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
+ .StoreAccumulatorInRegister(context_reg);
+ }
+ }
+ builder()->LoadContextSlot(context_reg, variable->index());
+ execution_result()->SetResultInAccumulator();
+ // TODO(rmcilroy): Perform check for uninitialized legacy const, const and
+ // let variables.
+ break;
+ }
+ case VariableLocation::LOOKUP: {
+ builder()->LoadLookupSlot(variable->name(), typeof_mode);
+ execution_result()->SetResultInAccumulator();
+ break;
+ }
+ }
+}
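
When no enclosing ContextScope covers the required depth, the generator materializes the walk in bytecode: copy the current context into a scratch register and dereference PREVIOUS_INDEX once per level. The same walk over an ordinary linked chain (struct shape hypothetical):

    // Follow the "previous" link |depth| times, then read the slot, as the
    // emitted LoadContextSlot(reg, Context::PREVIOUS_INDEX) sequence does.
    struct Ctx {
      Ctx* previous;
      int slots[8];
    };

    int LoadSlotAtDepth(Ctx* current, int depth, int slot_index) {
      for (int i = 0; i < depth; ++i) {
        current = current->previous;
      }
      return current->slots[slot_index];
    }
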
+
+
+void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
+ Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+ AccumulatorResultScope accumulator_result(this);
+ VisitVariableLoad(variable, slot, typeof_mode);
+}
+
+
+Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
+ Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+ RegisterResultScope register_scope(this);
+ VisitVariableLoad(variable, slot, typeof_mode);
+ return register_scope.ResultRegister();
+}
+
+
+void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+ FeedbackVectorSlot slot) {
+ switch (variable->location()) {
+ case VariableLocation::LOCAL: {
+ // TODO(rmcilroy): support const mode initialization.
+ Register destination(variable->index());
+ builder()->StoreAccumulatorInRegister(destination);
+ break;
+ }
+ case VariableLocation::PARAMETER: {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register destination(builder()->Parameter(variable->index() + 1));
+ builder()->StoreAccumulatorInRegister(destination);
+ break;
+ }
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ builder()->StoreGlobal(variable->name(), feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case VariableLocation::CONTEXT: {
+ // TODO(rmcilroy): support const mode initialization.
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ ContextScope* context = execution_context()->Previous(depth);
+ Register context_reg;
+ if (context) {
+ context_reg = context->reg();
+ } else {
+ Register value_temp = register_allocator()->NewRegister();
+ context_reg = register_allocator()->NewRegister();
+ // Walk the context chain to find the context at the given depth.
+ // TODO(rmcilroy): Perform this work in a bytecode handler once we have
+ // a generic mechanism for performing jumps in interpreter.cc.
+ // TODO(mythria): Also update bytecode graph builder with correct depth
+ // when this changes.
+ builder()
+ ->StoreAccumulatorInRegister(value_temp)
+ .LoadAccumulatorWithRegister(execution_context()->reg())
+ .StoreAccumulatorInRegister(context_reg);
+ for (int i = 0; i < depth; ++i) {
+ builder()
+ ->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
+ .StoreAccumulatorInRegister(context_reg);
+ }
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+ builder()->StoreContextSlot(context_reg, variable->index());
+ break;
+ }
+ case VariableLocation::LOOKUP: {
+ builder()->StoreLookupSlot(variable->name(), language_mode());
break;
}
- case VariableLocation::UNALLOCATED:
- case VariableLocation::CONTEXT:
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
}
}
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
- TemporaryRegisterScope temporary_register_scope(&builder_);
Register object, key;
+ Handle<String> name;
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
@@ -310,22 +1340,24 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
// Nothing to do to evaluate variable assignment LHS.
break;
- case NAMED_PROPERTY:
- object = temporary_register_scope.NewRegister();
- key = temporary_register_scope.NewRegister();
- Visit(property->obj());
- builder().StoreAccumulatorInRegister(object);
- builder().LoadLiteral(property->key()->AsLiteral()->AsPropertyName());
- builder().StoreAccumulatorInRegister(key);
+ case NAMED_PROPERTY: {
+ object = VisitForRegisterValue(property->obj());
+ name = property->key()->AsLiteral()->AsPropertyName();
break;
- case KEYED_PROPERTY:
- object = temporary_register_scope.NewRegister();
- key = temporary_register_scope.NewRegister();
- Visit(property->obj());
- builder().StoreAccumulatorInRegister(object);
- Visit(property->key());
- builder().StoreAccumulatorInRegister(key);
+ }
+ case KEYED_PROPERTY: {
+ object = VisitForRegisterValue(property->obj());
+ if (expr->is_compound()) {
+ // Use VisitForAccumulatorValue and store to a register so that the
+ // key is still in the accumulator for loading the old value below.
+ key = register_allocator()->NewRegister();
+ VisitForAccumulatorValue(property->key());
+ builder()->StoreAccumulatorInRegister(key);
+ } else {
+ key = VisitForRegisterValue(property->key());
+ }
break;
+ }
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
@@ -334,151 +1366,510 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
- UNIMPLEMENTED();
+ Register old_value;
+ switch (assign_type) {
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ old_value = VisitVariableLoadForRegisterValue(
+ proxy->var(), proxy->VariableFeedbackSlot());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ old_value = register_allocator()->NewRegister();
+ builder()
+ ->LoadNamedProperty(object, name, feedback_index(slot),
+ language_mode())
+ .StoreAccumulatorInRegister(old_value);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ // Key is already in accumulator at this point due to evaluating the
+ // LHS above.
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ old_value = register_allocator()->NewRegister();
+ builder()
+ ->LoadKeyedProperty(object, feedback_index(slot), language_mode())
+ .StoreAccumulatorInRegister(old_value);
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ break;
+ }
+ VisitForAccumulatorValue(expr->value());
+ builder()->BinaryOperation(expr->binary_op(), old_value,
+ language_mode_strength());
} else {
- Visit(expr->value());
+ VisitForAccumulatorValue(expr->value());
}
// Store the value.
- FeedbackVectorICSlot slot = expr->AssignmentSlot();
+ FeedbackVectorSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
+ // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
+ // Is the value in the accumulator safe? Yes, but scary.
Variable* variable = expr->target()->AsVariableProxy()->var();
- DCHECK(variable->location() == VariableLocation::LOCAL);
- Register destination(variable->index());
- builder().StoreAccumulatorInRegister(destination);
+ VisitVariableAssignment(variable, slot);
break;
}
case NAMED_PROPERTY:
- builder().StoreNamedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
+ language_mode());
break;
case KEYED_PROPERTY:
- builder().StoreKeyedProperty(object, key, feedback_index(slot),
- language_mode());
+ builder()->StoreKeyedProperty(object, key, feedback_index(slot),
+ language_mode());
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
}
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
-void BytecodeGenerator::VisitThrow(Throw* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitThrow(Throw* expr) {
+ VisitForAccumulatorValue(expr->exception());
+ builder()->Throw();
+}
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
LhsKind property_kind = Property::GetAssignType(expr);
- FeedbackVectorICSlot slot = expr->PropertyFeedbackSlot();
+ FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
switch (property_kind) {
case VARIABLE:
UNREACHABLE();
case NAMED_PROPERTY: {
- builder().LoadLiteral(expr->key()->AsLiteral()->AsPropertyName());
- builder().LoadNamedProperty(obj, feedback_index(slot), language_mode());
+ builder()->LoadNamedProperty(obj,
+ expr->key()->AsLiteral()->AsPropertyName(),
+ feedback_index(slot), language_mode());
break;
}
case KEYED_PROPERTY: {
- Visit(expr->key());
- builder().LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+ VisitForAccumulatorValue(expr->key());
+ builder()->LoadKeyedProperty(obj, feedback_index(slot), language_mode());
break;
}
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
}
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
+ Property* expr) {
+ AccumulatorResultScope result_scope(this);
+ VisitPropertyLoad(obj, expr);
}
void BytecodeGenerator::VisitProperty(Property* expr) {
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register obj = temporary_register_scope.NewRegister();
- Visit(expr->obj());
- builder().StoreAccumulatorInRegister(obj);
+ Register obj = VisitForRegisterValue(expr->obj());
VisitPropertyLoad(obj, expr);
}
+Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
+ if (args->length() == 0) {
+ return Register();
+ }
+
+ // Visit the arguments and place them in a contiguous block of temporary
+ // registers. Return the first temporary register, which corresponds to
+ // the first argument.
+ //
+ // NB: the caller may have already called
+ // PrepareForConsecutiveAllocations() with args->length() + N. The
+ // second call here will be a no-op provided there have been N or
+ // fewer calls to NextConsecutiveRegister(). Otherwise, the arguments
+ // here will be consecutive, but they will not be consecutive with
+ // earlier consecutive allocations made by the caller.
+ register_allocator()->PrepareForConsecutiveAllocations(args->length());
+
+ // Visit the first argument, which goes into the returned register.
+ Register first_arg = register_allocator()->NextConsecutiveRegister();
+ VisitForAccumulatorValue(args->at(0));
+ builder()->StoreAccumulatorInRegister(first_arg);
+
+ // Visit the remaining arguments.
+ for (int i = 1; i < static_cast<int>(args->length()); i++) {
+ Register ith_arg = register_allocator()->NextConsecutiveRegister();
+ VisitForAccumulatorValue(args->at(i));
+ builder()->StoreAccumulatorInRegister(ith_arg);
+ DCHECK(ith_arg.index() - i == first_arg.index());
+ }
+ return first_arg;
+}
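
The DCHECK in the loop above is the whole contract: argument i must land in register first_arg + i, so a call bytecode can address the block as a (first register, count) pair. The invariant, modeled with plain ints for registers (function name hypothetical):

    #include <cassert>
    #include <vector>

    // Reserve |count| consecutive register indices starting at
    // |next_free_register|, mirroring VisitArguments' contiguity check.
    std::vector<int> AllocateArgumentBlock(int next_free_register, int count) {
      std::vector<int> registers;
      for (int i = 0; i < count; ++i) {
        registers.push_back(next_free_register + i);
        assert(registers[i] - i == registers[0]);  // Mirrors the DCHECK.
      }
      return registers;
    }
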
+
+
void BytecodeGenerator::VisitCall(Call* expr) {
Expression* callee_expr = expr->expression();
Call::CallType call_type = expr->GetCallType(isolate());
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register callee = temporary_register_scope.NewRegister();
- Register receiver = temporary_register_scope.NewRegister();
+
+ // The receiver and arguments need to be allocated consecutively for
+ // Call(). We allocate the callee and receiver consecutively for calls to
+ // kLoadLookupSlot. Future optimizations could avoid this when there are
+ // no arguments or the receiver and arguments are already consecutive.
+ ZoneList<Expression*>* args = expr->arguments();
+ register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
+ Register callee = register_allocator()->NextConsecutiveRegister();
+ Register receiver = register_allocator()->NextConsecutiveRegister();
switch (call_type) {
- case Call::PROPERTY_CALL: {
+ case Call::NAMED_PROPERTY_CALL:
+ case Call::KEYED_PROPERTY_CALL: {
Property* property = callee_expr->AsProperty();
- if (property->IsSuperAccess()) {
- UNIMPLEMENTED();
- }
- Visit(property->obj());
- builder().StoreAccumulatorInRegister(receiver);
- // Perform a property load of the callee.
- VisitPropertyLoad(receiver, property);
- builder().StoreAccumulatorInRegister(callee);
+ VisitForAccumulatorValue(property->obj());
+ builder()->StoreAccumulatorInRegister(receiver);
+ VisitPropertyLoadForAccumulator(receiver, property);
+ builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
- builder().LoadUndefined().StoreAccumulatorInRegister(receiver);
+ builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
// Load callee as a global variable.
VariableProxy* proxy = callee_expr->AsVariableProxy();
- VisitVariableLoad(proxy->var());
- builder().StoreAccumulatorInRegister(callee);
+ VisitVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot());
+ builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::LOOKUP_SLOT_CALL:
+ case Call::POSSIBLY_EVAL_CALL: {
+ if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register context = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NextConsecutiveRegister();
+
+ // Call LoadLookupSlot to get the callee and receiver.
+ DCHECK(Register::AreContiguous(callee, receiver));
+ Variable* variable = callee_expr->AsVariableProxy()->var();
+ builder()
+ ->MoveRegister(Register::function_context(), context)
+ .LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+ break;
+ }
+ // Fall through.
+ DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+ }
+ case Call::OTHER_CALL: {
+ builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ VisitForAccumulatorValue(callee_expr);
+ builder()->StoreAccumulatorInRegister(callee);
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ case Call::KEYED_SUPER_PROPERTY_CALL:
case Call::SUPER_CALL:
- case Call::POSSIBLY_EVAL_CALL:
- case Call::OTHER_CALL:
UNIMPLEMENTED();
}
// Evaluate all arguments to the function call and store in sequential
// registers.
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Visit(args->at(i));
- Register arg = temporary_register_scope.NewRegister();
- DCHECK(arg.index() - i == receiver.index() + 1);
- builder().StoreAccumulatorInRegister(arg);
+ Register arg = VisitArguments(args);
+ CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
+
+ // Resolve callee for a potential direct eval call. This block will mutate the
+ // callee value.
+ if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(5);
+ Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
+ Register source = register_allocator()->NextConsecutiveRegister();
+ Register function = register_allocator()->NextConsecutiveRegister();
+ Register language = register_allocator()->NextConsecutiveRegister();
+ Register position = register_allocator()->NextConsecutiveRegister();
+
+ // Set up arguments for ResolvePossiblyDirectEval by copying the callee,
+ // the source string and the function closure, and loading the language
+ // mode and source position.
+ builder()
+ ->MoveRegister(callee, callee_for_eval)
+ .MoveRegister(arg, source)
+ .MoveRegister(Register::function_closure(), function)
+ .LoadLiteral(Smi::FromInt(language_mode()))
+ .StoreAccumulatorInRegister(language)
+ .LoadLiteral(
+ Smi::FromInt(execution_context()->scope()->start_position()))
+ .StoreAccumulatorInRegister(position);
+
+ // Call ResolvePossiblyDirectEval and modify the callee.
+ builder()
+ ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+ .StoreAccumulatorInRegister(callee);
}
- // TODO(rmcilroy): Deal with possible direct eval here?
// TODO(rmcilroy): Use CallIC to allow call type feedback.
- builder().Call(callee, receiver, args->length());
+ builder()->Call(callee, receiver, args->length(),
+ feedback_index(expr->CallFeedbackICSlot()));
+ execution_result()->SetResultInAccumulator();
+}
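+
+// A sketch of the operand layout VisitCall() builds up, assuming all the
+// consecutive allocations succeed (indices illustrative):
+//   callee    -> r(n)
+//   receiver  -> r(n+1)
+//   arg[0]    -> r(n+2)   (the register returned by VisitArguments())
+//   ...
+//   arg[k-1]  -> r(n+k+1)
+// The Call bytecode then consumes the callee and receiver registers, the
+// argument count, and a type feedback slot index.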
+
+
+void BytecodeGenerator::VisitCallNew(CallNew* expr) {
+ Register constructor = register_allocator()->NewRegister();
+ VisitForAccumulatorValue(expr->expression());
+ builder()->StoreAccumulatorInRegister(constructor);
+
+ ZoneList<Expression*>* args = expr->arguments();
+ Register first_arg = VisitArguments(args);
+ builder()->New(constructor, first_arg, args->length());
+ execution_result()->SetResultInAccumulator();
}
-void BytecodeGenerator::VisitCallNew(CallNew* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ Register receiver;
+ if (expr->is_jsruntime()) {
+ // Allocate a register for the receiver and load it with undefined.
+ register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
+ receiver = register_allocator()->NextConsecutiveRegister();
+ builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ }
+ // Evaluate all arguments to the runtime call.
+ Register first_arg = VisitArguments(args);
+
+ if (expr->is_jsruntime()) {
+ DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
+ builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
+ } else {
+ Runtime::FunctionId function_id = expr->function()->function_id;
+ builder()->CallRuntime(function_id, first_arg, args->length());
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
+ VisitForEffect(expr->expression());
+ builder()->LoadUndefined();
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
+ if (expr->expression()->IsVariableProxy()) {
+ // typeof does not throw a ReferenceError on unresolvable global
+ // variables, so we perform a non-contextual load when the operand is a
+ // variable proxy.
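+ // For example, `typeof someUndeclaredName` evaluates to "undefined"
+ // rather than throwing a ReferenceError.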
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ VisitVariableLoadForAccumulatorValue(
+ proxy->var(), proxy->VariableFeedbackSlot(), INSIDE_TYPEOF);
+ } else {
+ VisitForAccumulatorValue(expr->expression());
+ }
+ builder()->TypeOf();
+ execution_result()->SetResultInAccumulator();
+}
-void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
+ VisitForAccumulatorValue(expr->expression());
+ builder()->LogicalNot();
+ execution_result()->SetResultInAccumulator();
+}
void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNIMPLEMENTED();
+ switch (expr->op()) {
+ case Token::Value::NOT:
+ VisitNot(expr);
+ break;
+ case Token::Value::TYPEOF:
+ VisitTypeOf(expr);
+ break;
+ case Token::Value::VOID:
+ VisitVoid(expr);
+ break;
+ case Token::Value::DELETE:
+ VisitDelete(expr);
+ break;
+ case Token::Value::BIT_NOT:
+ case Token::Value::ADD:
+ case Token::Value::SUB:
+ // These operators are converted to equivalent binary operators in the
+ // parser and are not expected to be visited here.
+ UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
+ if (expr->expression()->IsProperty()) {
+ // Delete of an object property is allowed both in sloppy
+ // and strict modes.
+ Property* property = expr->expression()->AsProperty();
+ Register object = VisitForRegisterValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ builder()->Delete(object, language_mode());
+ } else if (expr->expression()->IsVariableProxy()) {
+ // Delete of an unqualified identifier is allowed in sloppy mode but is
+ // not allowed in strict mode. Deleting 'this' is allowed in both modes.
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ Variable* variable = proxy->var();
+ DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ // Global var, let, const or variables not explicitly declared.
+ Register native_context = register_allocator()->NewRegister();
+ Register global_object = register_allocator()->NewRegister();
+ builder()
+ ->LoadContextSlot(execution_context()->reg(),
+ Context::NATIVE_CONTEXT_INDEX)
+ .StoreAccumulatorInRegister(native_context)
+ .LoadContextSlot(native_context, Context::EXTENSION_INDEX)
+ .StoreAccumulatorInRegister(global_object)
+ .LoadLiteral(variable->name())
+ .Delete(global_object, language_mode());
+ break;
+ }
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ // Deleting local var/let/const, context variables, and arguments
+ // does not have any effect.
+ if (variable->HasThisName(isolate())) {
+ builder()->LoadTrue();
+ } else {
+ builder()->LoadFalse();
+ }
+ break;
+ }
+ case VariableLocation::LOOKUP: {
+ builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Delete of an unresolvable reference returns true.
+ VisitForEffect(expr->expression());
+ builder()->LoadTrue();
+ }
+ execution_result()->SetResultInAccumulator();
}
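+
+// Examples of the three paths above (JavaScript semantics): `delete o.p`
+// takes the Property path; `delete x` on an unqualified name takes the
+// VariableProxy path and is only legal in sloppy mode; `delete 1` is an
+// unresolvable reference and simply yields true.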
void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNIMPLEMENTED();
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->expression()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+
+ // TODO(rmcilroy): Set is_postfix to false if visiting for effect.
+ bool is_postfix = expr->is_postfix();
+
+ // Evaluate LHS expression and get old value.
+ Register obj, key, old_value;
+ Handle<String> name;
+ switch (assign_type) {
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ VisitVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot());
+ break;
+ }
+ case NAMED_PROPERTY: {
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ obj = VisitForRegisterValue(property->obj());
+ name = property->key()->AsLiteral()->AsPropertyName();
+ builder()->LoadNamedProperty(obj, name, feedback_index(slot),
+ language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+ obj = VisitForRegisterValue(property->obj());
+ // Use VisitForAccumulatorValue for the key, since LoadKeyedProperty
+ // takes the key from the accumulator.
+ key = register_allocator()->NewRegister();
+ VisitForAccumulatorValue(property->key());
+ builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
+ obj, feedback_index(slot), language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+
+ // Convert old value into a number.
+ if (!is_strong(language_mode())) {
+ builder()->CastAccumulatorToNumber();
+ }
+
+ // Save result for postfix expressions.
+ if (is_postfix) {
+ old_value = register_allocator()->outer()->NewRegister();
+ builder()->StoreAccumulatorInRegister(old_value);
+ }
+
+ // Perform +1/-1 operation.
+ builder()->CountOperation(expr->binary_op(), language_mode_strength());
+
+ // Store the value.
+ FeedbackVectorSlot feedback_slot = expr->CountSlot();
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ VisitVariableAssignment(variable, feedback_slot);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+ language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ builder()->StoreKeyedProperty(obj, key, feedback_index(feedback_slot),
+ language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+
+ // Restore old value for postfix expressions.
+ if (is_postfix) {
+ execution_result()->SetResultInRegister(old_value);
+ } else {
+ execution_result()->SetResultInAccumulator();
+ }
}
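+
+// Example, postfix `x++` with x == 5 (assuming a non-strong language mode):
+// after the load and ToNumber cast the accumulator holds 5; 5 is parked in
+// old_value, which lives in the outer allocation scope so it survives this
+// expression; CountOperation leaves 6 in the accumulator, which is stored
+// back into x; the parked 5 becomes the expression result.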
void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
switch (binop->op()) {
case Token::COMMA:
+ VisitCommaExpression(binop);
+ break;
case Token::OR:
+ VisitLogicalOrExpression(binop);
+ break;
case Token::AND:
- UNIMPLEMENTED();
+ VisitLogicalAndExpression(binop);
break;
default:
VisitArithmeticExpression(binop);
@@ -488,17 +1879,18 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
+ Register lhs = VisitForRegisterValue(expr->left());
+ VisitForAccumulatorValue(expr->right());
+ builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
+ execution_result()->SetResultInAccumulator();
+}
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register temporary = temporary_register_scope.NewRegister();
- Visit(left);
- builder().StoreAccumulatorInRegister(temporary);
- Visit(right);
- builder().CompareOperation(op, temporary, language_mode());
+void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+ Register lhs = VisitForRegisterValue(expr->left());
+ VisitForAccumulatorValue(expr->right());
+ builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
+ execution_result()->SetResultInAccumulator();
}
@@ -511,7 +1903,7 @@ void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNIMPLEMENTED();
+ execution_result()->SetResultInRegister(Register::function_closure());
}
@@ -526,18 +1918,248 @@ void BytecodeGenerator::VisitSuperPropertyReference(
}
-void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* binop) {
- Token::Value op = binop->op();
+void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
+ VisitForEffect(binop->left());
+ Visit(binop->right());
+}
+
+
+void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
+ Expression* left = binop->left();
+ Expression* right = binop->right();
+
+ // Short-circuit evaluation: if the left operand is statically known to
+ // be true, there is no need to visit the right operand.
+ if (left->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(left);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfTrue(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
- TemporaryRegisterScope temporary_register_scope(&builder_);
- Register temporary = temporary_register_scope.NewRegister();
+ // Short-circuit evaluation: if the left operand is statically known to
+ // be false, there is no need to visit the right operand.
+ if (left->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(left);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfFalse(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
+}
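+
+// A minimal sketch of the sequences emitted above (operands illustrative;
+// the builder may lower these jumps to their ToBoolean variants):
+//   a || b:   <a in accumulator>; JumpIfTrue end;  <b in accumulator>; end:
+//   a && b:   <a in accumulator>; JumpIfFalse end; <b in accumulator>; end: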
+
+
+void BytecodeGenerator::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
+void BytecodeGenerator::VisitNewLocalFunctionContext() {
+ AccumulatorResultScope accumulator_execution_result(this);
+ Scope* scope = this->scope();
+
+ // Allocate a new local context.
+ if (scope->is_script_scope()) {
+ RegisterAllocationScope register_scope(this);
+ Register closure = register_allocator()->NewRegister();
+ Register scope_info = register_allocator()->NewRegister();
+ DCHECK(Register::AreContiguous(closure, scope_info));
+ builder()
+ ->LoadAccumulatorWithRegister(Register::function_closure())
+ .StoreAccumulatorInRegister(closure)
+ .LoadLiteral(scope->GetScopeInfo(isolate()))
+ .StoreAccumulatorInRegister(scope_info)
+ .CallRuntime(Runtime::kNewScriptContext, closure, 2);
+ } else {
+ builder()->CallRuntime(Runtime::kNewFunctionContext,
+ Register::function_closure(), 1);
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitBuildLocalActivationContext() {
+ Scope* scope = this->scope();
+
+ if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
+ Variable* variable = scope->receiver();
+ Register receiver(builder()->Parameter(0));
+ // Context variable (at bottom of the context chain).
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ builder()->LoadAccumulatorWithRegister(receiver).StoreContextSlot(
+ execution_context()->reg(), variable->index());
+ }
+
+ // Copy parameters into context if necessary.
+ int num_parameters = scope->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Variable* variable = scope->parameter(i);
+ if (!variable->IsContextSlot()) continue;
+
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register parameter(builder()->Parameter(i + 1));
+ // Context variable (at bottom of the context chain).
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ builder()->LoadAccumulatorWithRegister(parameter)
+ .StoreContextSlot(execution_context()->reg(), variable->index());
+ }
+}
+
+
+void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
+ AccumulatorResultScope accumulator_execution_result(this);
+ DCHECK(scope->is_block_scope());
+
+ // Allocate a new local block context.
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register scope_info = register_allocator()->NextConsecutiveRegister();
+ Register closure = register_allocator()->NextConsecutiveRegister();
+
+ builder()
+ ->LoadLiteral(scope->GetScopeInfo(isolate()))
+ .StoreAccumulatorInRegister(scope_info);
+ VisitFunctionClosureForContext();
+ builder()
+ ->StoreAccumulatorInRegister(closure)
+ .CallRuntime(Runtime::kPushBlockContext, scope_info, 2);
+ execution_result()->SetResultInAccumulator();
+}
+
+
+void BytecodeGenerator::VisitObjectLiteralAccessor(
+ Register home_object, ObjectLiteralProperty* property, Register value_out) {
+ // TODO(rmcilroy): Replace value_out with VisitForRegister();
+ if (property == nullptr) {
+ builder()->LoadNull().StoreAccumulatorInRegister(value_out);
+ } else {
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value_out);
+ VisitSetHomeObject(value_out, home_object, property);
+ }
+}
+
+
+void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
+ ObjectLiteralProperty* property,
+ int slot_number) {
+ Expression* expr = property->value();
+ if (!FunctionLiteral::NeedsHomeObject(expr)) return;
+
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
+ if (variable == nullptr) return;
+
+ DCHECK(variable->IsContextSlot() || variable->IsStackAllocated());
+
+ // Allocate and initialize a new arguments object and assign to the
+ // {arguments} variable.
+ CreateArgumentsType type =
+ is_strict(language_mode()) || !info()->has_simple_parameters()
+ ? CreateArgumentsType::kUnmappedArguments
+ : CreateArgumentsType::kMappedArguments;
+ builder()->CreateArguments(type);
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
- Visit(left);
- builder().StoreAccumulatorInRegister(temporary);
- Visit(right);
- builder().BinaryOperation(op, temporary);
+void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
+ if (variable == nullptr) return;
+
+ // TODO(rmcilroy): Remove once we have tests which exercise this code path.
+ UNIMPLEMENTED();
+
+ // Store the closure we were called with in the given variable.
+ builder()->LoadAccumulatorWithRegister(Register::function_closure());
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
+ if (variable == nullptr) return;
+
+ // Store the new target we were called with in the given variable.
+ builder()->LoadAccumulatorWithRegister(Register::new_target());
+ VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+}
+
+
+void BytecodeGenerator::VisitFunctionClosureForContext() {
+ AccumulatorResultScope accumulator_execution_result(this);
+ Scope* closure_scope = execution_context()->scope()->ClosureScope();
+ if (closure_scope->is_script_scope() ||
+ closure_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function as
+ // their closure, not the anonymous closure containing the global code.
+ Register native_context = register_allocator()->NewRegister();
+ builder()
+ ->LoadContextSlot(execution_context()->reg(),
+ Context::NATIVE_CONTEXT_INDEX)
+ .StoreAccumulatorInRegister(native_context)
+ .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+ } else {
+ DCHECK(closure_scope->is_function_scope());
+ builder()->LoadAccumulatorWithRegister(Register::function_closure());
+ }
+ execution_result()->SetResultInAccumulator();
+}
+
+
+// Visits the expression |expr| and places the result in the accumulator.
+void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
+ AccumulatorResultScope accumulator_scope(this);
+ Visit(expr);
+}
+
+
+// Visits the expression |expr| and discards the result.
+void BytecodeGenerator::VisitForEffect(Expression* expr) {
+ EffectResultScope effect_scope(this);
+ Visit(expr);
+}
+
+
+// Visits the expression |expr| and returns the register containing
+// the expression result.
+Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
+ RegisterResultScope register_scope(this);
+ Visit(expr);
+ return register_scope.ResultRegister();
+}
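+
+// Hypothetical call sites for the three helpers above:
+//   VisitForEffect(expr);                      // result discarded
+//   VisitForAccumulatorValue(expr);            // result in the accumulator
+//   Register r = VisitForRegisterValue(expr);  // result in register r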
+
+
+Register BytecodeGenerator::NextContextRegister() const {
+ if (execution_context() == nullptr) {
+ // Return the incoming function context for the outermost execution context.
+ return Register::function_context();
+ }
+ Register previous = execution_context()->reg();
+ if (previous == Register::function_context()) {
+ // If the previous context was the incoming function context, then the next
+ // context register is the first local context register.
+ return builder_.first_context_register();
+ } else {
+ // Otherwise use the next local context register.
+ DCHECK_LT(previous.index(), builder_.last_context_register().index());
+ return Register(previous.index() + 1);
+ }
}
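+
+// Read this way, context registers form a chain with one entry per nested
+// ContextScope: the incoming function context first, then
+// first_context_register(), then each successive index up to
+// last_context_register().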
@@ -546,7 +2168,12 @@ LanguageMode BytecodeGenerator::language_mode() const {
}
-int BytecodeGenerator::feedback_index(FeedbackVectorICSlot slot) const {
+Strength BytecodeGenerator::language_mode_strength() const {
+ return strength(language_mode());
+}
+
+
+int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
return info()->feedback_vector()->GetIndex(slot);
}
diff --git a/chromium/v8/src/interpreter/bytecode-generator.h b/chromium/v8/src/interpreter/bytecode-generator.h
index 99536c33fb5..8bda7be3016 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.h
+++ b/chromium/v8/src/interpreter/bytecode-generator.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_GENERATOR_H_
#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecodes.h"
@@ -13,10 +13,9 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeGenerator : public AstVisitor {
+class BytecodeGenerator final : public AstVisitor {
public:
BytecodeGenerator(Isolate* isolate, Zone* zone);
- virtual ~BytecodeGenerator();
Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
@@ -24,25 +23,126 @@ class BytecodeGenerator : public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ // Visiting functions for the declarations list and statements are
+ // overridden.
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statements) override;
+
private:
+ class ContextScope;
+ class ControlScope;
+ class ControlScopeForBreakable;
+ class ControlScopeForIteration;
+ class ExpressionResultScope;
+ class EffectResultScope;
+ class AccumulatorResultScope;
+ class RegisterResultScope;
+ class RegisterAllocationScope;
+
+ void MakeBytecodeBody();
+ Register NextContextRegister() const;
+
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+ // Dispatched from VisitBinaryOperation.
void VisitArithmeticExpression(BinaryOperation* binop);
+ void VisitCommaExpression(BinaryOperation* binop);
+ void VisitLogicalOrExpression(BinaryOperation* binop);
+ void VisitLogicalAndExpression(BinaryOperation* binop);
+
+ // Dispatched from VisitUnaryOperation.
+ void VisitVoid(UnaryOperation* expr);
+ void VisitTypeOf(UnaryOperation* expr);
+ void VisitNot(UnaryOperation* expr);
+ void VisitDelete(UnaryOperation* expr);
+
+ // Used by flow control routines to evaluate the loop condition.
+ void VisitCondition(Expression* expr);
+
+ // Helper visitors which perform common operations.
+ Register VisitArguments(ZoneList<Expression*>* arguments);
+
void VisitPropertyLoad(Register obj, Property* expr);
- void VisitVariableLoad(Variable* variable);
+ void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
+
+ void VisitVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ void VisitVariableLoadForAccumulatorValue(
+ Variable* variable, FeedbackVectorSlot slot,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ MUST_USE_RESULT Register
+ VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
+ TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ void VisitVariableAssignment(Variable* variable, FeedbackVectorSlot slot);
+
+ void VisitArgumentsObject(Variable* variable);
+ void VisitThisFunctionVariable(Variable* variable);
+ void VisitNewTargetVariable(Variable* variable);
+ void VisitNewLocalFunctionContext();
+ void VisitBuildLocalActivationContext();
+ void VisitNewLocalBlockContext(Scope* scope);
+ void VisitFunctionClosureForContext();
+ void VisitSetHomeObject(Register value, Register home_object,
+ ObjectLiteralProperty* property, int slot_number = 0);
+ void VisitObjectLiteralAccessor(Register home_object,
+ ObjectLiteralProperty* property,
+ Register value_out);
+ void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+
+ // Visitors for obtaining the expression result in the accumulator, in a
+ // register, or just for the effect.
+ void VisitForAccumulatorValue(Expression* expression);
+ MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
+ void VisitForEffect(Expression* node);
+
+ // Methods for tracking and remapping registers.
+ void RecordStoreToRegister(Register reg);
+ Register LoadFromAliasedRegister(Register reg);
+
+ inline BytecodeArrayBuilder* builder() { return &builder_; }
+
+ inline Isolate* isolate() const { return isolate_; }
+ inline Zone* zone() const { return zone_; }
- inline BytecodeArrayBuilder& builder() { return builder_; }
inline Scope* scope() const { return scope_; }
inline void set_scope(Scope* scope) { scope_ = scope; }
inline CompilationInfo* info() const { return info_; }
inline void set_info(CompilationInfo* info) { info_ = info; }
- LanguageMode language_mode() const;
- int feedback_index(FeedbackVectorICSlot slot) const;
+ inline ControlScope* execution_control() const { return execution_control_; }
+ inline void set_execution_control(ControlScope* scope) {
+ execution_control_ = scope;
+ }
+ inline ContextScope* execution_context() const { return execution_context_; }
+ inline void set_execution_context(ContextScope* context) {
+ execution_context_ = context;
+ }
+ inline void set_execution_result(ExpressionResultScope* execution_result) {
+ execution_result_ = execution_result;
+ }
+ ExpressionResultScope* execution_result() const { return execution_result_; }
+ inline void set_register_allocator(
+ RegisterAllocationScope* register_allocator) {
+ register_allocator_ = register_allocator;
+ }
+ RegisterAllocationScope* register_allocator() const {
+ return register_allocator_;
+ }
+
+ ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ inline LanguageMode language_mode() const;
+ Strength language_mode_strength() const;
+ int feedback_index(FeedbackVectorSlot slot) const;
+ Isolate* isolate_;
+ Zone* zone_;
BytecodeArrayBuilder builder_;
CompilationInfo* info_;
Scope* scope_;
+ ZoneVector<Handle<Object>> globals_;
+ ControlScope* execution_control_;
+ ContextScope* execution_context_;
+ ExpressionResultScope* execution_result_;
+ RegisterAllocationScope* register_allocator_;
};
} // namespace interpreter
diff --git a/chromium/v8/src/interpreter/bytecode-register-allocator.cc b/chromium/v8/src/interpreter/bytecode-register-allocator.cc
new file mode 100644
index 00000000000..4efb612db52
--- /dev/null
+++ b/chromium/v8/src/interpreter/bytecode-register-allocator.cc
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-allocator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeRegisterAllocator::BytecodeRegisterAllocator(
+ BytecodeArrayBuilder* builder)
+ : builder_(builder),
+ allocated_(builder->zone()),
+ next_consecutive_register_(-1),
+ next_consecutive_count_(-1) {}
+
+
+BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
+ for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
+ builder_->ReturnTemporaryRegister(*i);
+ }
+ allocated_.clear();
+}
+
+
+Register BytecodeRegisterAllocator::NewRegister() {
+ int allocated = -1;
+ if (next_consecutive_count_ <= 0) {
+ allocated = builder_->BorrowTemporaryRegister();
+ } else {
+ allocated = builder_->BorrowTemporaryRegisterNotInRange(
+ next_consecutive_register_,
+ next_consecutive_register_ + next_consecutive_count_ - 1);
+ }
+ allocated_.push_back(allocated);
+ return Register(allocated);
+}
+
+
+bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
+ Register reg) const {
+ for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
+ if (*i == reg.index()) return true;
+ }
+ return false;
+}
+
+
+void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
+ if (static_cast<int>(count) > next_consecutive_count_) {
+ next_consecutive_register_ =
+ builder_->PrepareForConsecutiveTemporaryRegisters(count);
+ next_consecutive_count_ = static_cast<int>(count);
+ }
+}
+
+
+Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
+ DCHECK_GE(next_consecutive_register_, 0);
+ DCHECK_GT(next_consecutive_count_, 0);
+ builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+ allocated_.push_back(next_consecutive_register_);
+ next_consecutive_count_--;
+ return Register(next_consecutive_register_++);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
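
For intuition, here is a self-contained toy model of the allocator's contract
(PrepareForConsecutiveAllocations reserves a run, NextConsecutiveRegister
hands it out, NewRegister stays outside the reservation). The class and its
grow-only pool are hypothetical, not V8's implementation:

#include <cassert>

class ToyAllocator {
 public:
  // Reserve a run of `count` consecutive registers, but only when the
  // current reservation cannot satisfy it (mirrors the no-op behaviour of
  // the method above).
  void PrepareForConsecutiveAllocations(int count) {
    if (count > next_consecutive_count_) {
      next_consecutive_register_ = next_free_;
      next_free_ += count;
      next_consecutive_count_ = count;
    }
  }

  // Hand out the next register of the reserved run.
  int NextConsecutiveRegister() {
    assert(next_consecutive_count_ > 0);
    next_consecutive_count_--;
    return next_consecutive_register_++;
  }

  // Hand out a register outside the reserved run. (The real allocator also
  // reuses returned registers; this toy just grows the pool.)
  int NewRegister() { return next_free_++; }

 private:
  int next_free_ = 0;
  int next_consecutive_register_ = -1;
  int next_consecutive_count_ = 0;
};

int main() {
  ToyAllocator alloc;
  alloc.PrepareForConsecutiveAllocations(4);  // caller: 2 args + 2
  int callee = alloc.NextConsecutiveRegister();
  int receiver = alloc.NextConsecutiveRegister();
  alloc.PrepareForConsecutiveAllocations(2);  // VisitArguments: a no-op here
  int arg0 = alloc.NextConsecutiveRegister();
  int arg1 = alloc.NextConsecutiveRegister();
  assert(receiver == callee + 1 && arg0 == receiver + 1 && arg1 == arg0 + 1);
  return 0;
}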
diff --git a/chromium/v8/src/interpreter/bytecode-register-allocator.h b/chromium/v8/src/interpreter/bytecode-register-allocator.h
new file mode 100644
index 00000000000..74ab3a42727
--- /dev/null
+++ b/chromium/v8/src/interpreter/bytecode-register-allocator.h
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilder;
+class Register;
+
+// A class that allows the instantiator to allocate temporary registers that
+// are cleaned up when the scope is closed.
+class BytecodeRegisterAllocator {
+ public:
+ explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+ ~BytecodeRegisterAllocator();
+ Register NewRegister();
+
+ void PrepareForConsecutiveAllocations(size_t count);
+ Register NextConsecutiveRegister();
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const;
+
+ bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
+
+ private:
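+ // new/delete are hidden so instances can only live on the stack; scope
+ // exit then reliably returns the borrowed registers in the destructor.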
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ BytecodeArrayBuilder* builder_;
+ ZoneVector<int> allocated_;
+ int next_consecutive_register_;
+ int next_consecutive_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+
+#endif // V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
diff --git a/chromium/v8/src/interpreter/bytecode-traits.h b/chromium/v8/src/interpreter/bytecode-traits.h
new file mode 100644
index 00000000000..fd778d7c927
--- /dev/null
+++ b/chromium/v8/src/interpreter/bytecode-traits.h
@@ -0,0 +1,180 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_TRAITS_H_
+#define V8_INTERPRETER_BYTECODE_TRAITS_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// TODO(rmcilroy): consider simplifying this to avoid the template magic.
+
+// Template helpers to deduce the number of operands each bytecode has.
+#define OPERAND_TERM OperandType::kNone, OperandType::kNone, OperandType::kNone
+
+template <OperandType>
+struct OperandTraits {};
+
+#define DECLARE_OPERAND_SIZE(Name, Size) \
+ template <> \
+ struct OperandTraits<OperandType::k##Name> { \
+ static const OperandSize kSizeType = Size; \
+ static const int kSize = static_cast<int>(Size); \
+ };
+OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
+#undef DECLARE_OPERAND_SIZE
+
+
+template <OperandType... Args>
+struct BytecodeTraits {};
+
+template <OperandType operand_0, OperandType operand_1, OperandType operand_2,
+ OperandType operand_3>
+struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
+ OPERAND_TERM> {
+ static OperandType GetOperandType(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandType kOperands[] = {operand_0, operand_1, operand_2,
+ operand_3};
+ return kOperands[i];
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandSize kOperandSizes[] =
+ {OperandTraits<operand_0>::kSizeType,
+ OperandTraits<operand_1>::kSizeType,
+ OperandTraits<operand_2>::kSizeType,
+ OperandTraits<operand_3>::kSizeType};
+ return kOperandSizes[i];
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const int kOffset0 = 1;
+ const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+ const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
+ const int kOffset3 = kOffset2 + OperandTraits<operand_2>::kSize;
+ const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2, kOffset3};
+ return kOperandOffsets[i];
+ }
+
+ static const int kOperandCount = 4;
+ static const int kSize =
+ 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
+ OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
+};
+
+
+template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
+struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ DCHECK(0 <= i && i <= 2);
+ const OperandType kOperands[] = {operand_0, operand_1, operand_2};
+ return kOperands[i];
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandSize kOperandSizes[] =
+ {OperandTraits<operand_0>::kSizeType,
+ OperandTraits<operand_1>::kSizeType,
+ OperandTraits<operand_2>::kSizeType};
+ return kOperandSizes[i];
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const int kOffset0 = 1;
+ const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+ const int kOffset2 = kOffset1 + OperandTraits<operand_1>::kSize;
+ const int kOperandOffsets[] = {kOffset0, kOffset1, kOffset2};
+ return kOperandOffsets[i];
+ }
+
+ static const int kOperandCount = 3;
+ static const int kSize =
+ 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
+ OperandTraits<operand_2>::kSize;
+};
+
+template <OperandType operand_0, OperandType operand_1>
+struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandType kOperands[] = {operand_0, operand_1};
+ return kOperands[i];
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const OperandSize kOperandSizes[] =
+ {OperandTraits<operand_0>::kSizeType,
+ OperandTraits<operand_1>::kSizeType};
+ return kOperandSizes[i];
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(0 <= i && i < kOperandCount);
+ const int kOffset0 = 1;
+ const int kOffset1 = kOffset0 + OperandTraits<operand_0>::kSize;
+ const int kOperandOffsets[] = {kOffset0, kOffset1};
+ return kOperandOffsets[i];
+ }
+
+ static const int kOperandCount = 2;
+ static const int kSize =
+ 1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
+};
+
+template <OperandType operand_0>
+struct BytecodeTraits<operand_0, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ DCHECK(i == 0);
+ return operand_0;
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ DCHECK(i == 0);
+ return OperandTraits<operand_0>::kSizeType;
+ }
+
+ static inline int GetOperandOffset(int i) {
+ DCHECK(i == 0);
+ return 1;
+ }
+
+ static const int kOperandCount = 1;
+ static const int kSize = 1 + OperandTraits<operand_0>::kSize;
+};
+
+template <>
+struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
+ static inline OperandType GetOperandType(int i) {
+ UNREACHABLE();
+ return OperandType::kNone;
+ }
+
+ static inline OperandSize GetOperandSize(int i) {
+ UNREACHABLE();
+ return OperandSize::kNone;
+ }
+
+ static inline int GetOperandOffset(int i) {
+ UNREACHABLE();
+ return 1;
+ }
+
+ static const int kOperandCount = 0;
+ static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_TRAITS_H_
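
As a sanity check of the traits machinery, one could write the following
sketch (assumes this header is included and that OperandSize::kByte counts
as one byte; the Call operand list matches BYTECODE_LIST):

#include "src/interpreter/bytecode-traits.h"

namespace v8 {
namespace internal {
namespace interpreter {

// Call: Reg8 callee, Reg8 receiver, Count8 argc, Idx8 feedback slot.
using CallTraits = BytecodeTraits<OperandType::kReg8, OperandType::kReg8,
                                  OperandType::kCount8, OperandType::kIdx8,
                                  OPERAND_TERM>;
static_assert(CallTraits::kOperandCount == 4, "Call takes four operands");
static_assert(CallTraits::kSize == 5, "opcode byte plus four byte operands");

}  // namespace interpreter
}  // namespace internal
}  // namespace v8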
diff --git a/chromium/v8/src/interpreter/bytecodes.cc b/chromium/v8/src/interpreter/bytecodes.cc
index e5b9ab73a9c..2d4406cc1bc 100644
--- a/chromium/v8/src/interpreter/bytecodes.cc
+++ b/chromium/v8/src/interpreter/bytecodes.cc
@@ -5,25 +5,12 @@
#include "src/interpreter/bytecodes.h"
#include "src/frames.h"
+#include "src/interpreter/bytecode-traits.h"
namespace v8 {
namespace internal {
namespace interpreter {
-// Maximum number of operands a bytecode may have.
-static const int kMaxOperands = 3;
-
-// kBytecodeTable relies on kNone being the same as zero to detect length.
-STATIC_ASSERT(static_cast<int>(OperandType::kNone) == 0);
-
-static const OperandType kBytecodeTable[][kMaxOperands] = {
-#define DECLARE_OPERAND(_, ...) \
- { __VA_ARGS__ } \
- ,
- BYTECODE_LIST(DECLARE_OPERAND)
-#undef DECLARE_OPERAND
-};
-
// static
const char* Bytecodes::ToString(Bytecode bytecode) {
@@ -42,7 +29,7 @@ const char* Bytecodes::ToString(Bytecode bytecode) {
// static
const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
switch (operand_type) {
-#define CASE(Name) \
+#define CASE(Name, _) \
case OperandType::k##Name: \
return #Name;
OPERAND_TYPE_LIST(CASE)
@@ -54,6 +41,21 @@ const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
// static
+const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kNone:
+ return "None";
+ case OperandSize::kByte:
+ return "Byte";
+ case OperandSize::kShort:
+ return "Short";
+ }
+ UNREACHABLE();
+ return "";
+}
+
+
+// static
uint8_t Bytecodes::ToByte(Bytecode bytecode) {
return static_cast<uint8_t>(bytecode);
}
@@ -68,38 +70,166 @@ Bytecode Bytecodes::FromByte(uint8_t value) {
// static
+int Bytecodes::Size(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+// static
int Bytecodes::NumberOfOperands(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
- int count;
- uint8_t row = ToByte(bytecode);
- for (count = 0; count < kMaxOperands; count++) {
- if (kBytecodeTable[row][count] == OperandType::kNone) {
- break;
- }
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kOperandCount;
+ BYTECODE_LIST(CASE)
+#undef CASE
}
- return count;
+ UNREACHABLE();
+ return 0;
}
// static
OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
- DCHECK(bytecode <= Bytecode::kLast && i < NumberOfOperands(bytecode));
- return kBytecodeTable[ToByte(bytecode)][i];
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandType(i);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandType::kNone;
}
// static
-int Bytecodes::Size(Bytecode bytecode) {
- return 1 + NumberOfOperands(bytecode);
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandSize(i);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
// static
-int Bytecodes::MaximumNumberOfOperands() { return kMaxOperands; }
+int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::GetOperandOffset(i);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
// static
-int Bytecodes::MaximumSize() { return 1 + kMaxOperands; }
+OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, Size) \
+ case OperandType::k##Name: \
+ return Size;
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
+}
+
+
+// static
+bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrue ||
+ bytecode == Bytecode::kJumpIfFalse ||
+ bytecode == Bytecode::kJumpIfToBooleanTrue ||
+ bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfNull ||
+ bytecode == Bytecode::kJumpIfUndefined;
+}
+
+
+// static
+bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstant ||
+ bytecode == Bytecode::kJumpIfFalseConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+ bytecode == Bytecode::kJumpIfNullConstant ||
+ bytecode == Bytecode::kJumpIfUndefinedConstant;
+}
+
+
+// static
+bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstantWide ||
+ bytecode == Bytecode::kJumpIfFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfNullConstantWide ||
+ bytecode == Bytecode::kJumpIfUndefinedConstantWide;
+}
+
+
+// static
+bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
+ return IsConditionalJumpImmediate(bytecode) ||
+ IsConditionalJumpConstant(bytecode) ||
+ IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJump || IsConditionalJumpImmediate(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstant ||
+ IsConditionalJumpConstant(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstantWide ||
+ IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJump(Bytecode bytecode) {
+ return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
+ IsJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn || IsJump(bytecode);
+}
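+
+// Each predicate family above mirrors the three jump encodings: an
+// immediate offset (Imm8), a constant-pool entry (Idx8), and a wide
+// constant-pool entry (presumably Idx16 for the *ConstantWide variants).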
// static
@@ -114,30 +244,44 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
SNPrintF(buf, "%02x ", bytecode_start[i]);
os << buf.start();
}
- for (int i = bytecode_size; i < Bytecodes::MaximumSize(); i++) {
+ const int kBytecodeColumnSize = 6;
+ for (int i = bytecode_size; i < kBytecodeColumnSize; i++) {
os << " ";
}
os << bytecode << " ";
- const uint8_t* operands_start = bytecode_start + 1;
- int operands_size = bytecode_size - 1;
- for (int i = 0; i < operands_size; i++) {
+ int number_of_operands = NumberOfOperands(bytecode);
+ for (int i = 0; i < number_of_operands; i++) {
OperandType op_type = GetOperandType(bytecode, i);
- uint8_t operand = operands_start[i];
+ const uint8_t* operand_start =
+ &bytecode_start[GetOperandOffset(bytecode, i)];
switch (op_type) {
- case interpreter::OperandType::kCount:
- os << "#" << static_cast<unsigned int>(operand);
+ case interpreter::OperandType::kCount8:
+ os << "#" << static_cast<unsigned int>(*operand_start);
break;
- case interpreter::OperandType::kIdx:
- os << "[" << static_cast<unsigned int>(operand) << "]";
+ case interpreter::OperandType::kCount16:
+ os << '#' << ReadUnalignedUInt16(operand_start);
+ break;
+ case interpreter::OperandType::kIdx8:
+ os << "[" << static_cast<unsigned int>(*operand_start) << "]";
+ break;
+ case interpreter::OperandType::kIdx16:
+ os << "[" << ReadUnalignedUInt16(operand_start) << "]";
break;
case interpreter::OperandType::kImm8:
- os << "#" << static_cast<int>(static_cast<int8_t>(operand));
+ os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
break;
- case interpreter::OperandType::kReg: {
- Register reg = Register::FromOperand(operand);
- if (reg.is_parameter()) {
+ case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kMaybeReg8: {
+ Register reg = Register::FromOperand(*operand_start);
+ if (reg.is_function_context()) {
+ os << "<context>";
+ } else if (reg.is_function_closure()) {
+ os << "<closure>";
+ } else if (reg.is_new_target()) {
+ os << "<new.target>";
+ } else if (reg.is_parameter()) {
int parameter_index = reg.ToParameterIndex(parameter_count);
if (parameter_index == 0) {
os << "<this>";
@@ -149,11 +293,34 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
}
break;
}
+ case interpreter::OperandType::kRegPair8: {
+ Register reg = Register::FromOperand(*operand_start);
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ DCHECK_NE(parameter_index, 0);
+ os << "a" << parameter_index - 1 << "-" << parameter_index;
+ } else {
+ os << "r" << reg.index() << "-" << reg.index() + 1;
+ }
+ break;
+ }
+ case interpreter::OperandType::kReg16: {
+ Register reg =
+ Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ DCHECK_NE(parameter_index, 0);
+ os << "a" << parameter_index - 1;
+ } else {
+ os << "r" << reg.index();
+ }
+ break;
+ }
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
}
- if (i != operands_size - 1) {
+ if (i != number_of_operands - 1) {
os << ", ";
}
}
@@ -171,8 +338,19 @@ std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
}
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
+ return os << Bytecodes::OperandSizeToString(operand_size);
+}
+
+
static const int kLastParamRegisterIndex =
    -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+static const int kFunctionClosureRegisterIndex =
+ -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
+static const int kFunctionContextRegisterIndex =
+ -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
+static const int kNewTargetRegisterIndex =
+ -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
// Registers occupy the range 0-127 of an 8-bit operand, leaving 128 values unused.
@@ -187,7 +365,7 @@ Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
DCHECK_LT(register_index, 0);
- DCHECK_GE(register_index, Register::kMinRegisterIndex);
+ DCHECK_GE(register_index, kMinInt8);
return Register(register_index);
}
@@ -198,16 +376,78 @@ int Register::ToParameterIndex(int parameter_count) const {
}
+Register Register::function_closure() {
+ return Register(kFunctionClosureRegisterIndex);
+}
+
+
+bool Register::is_function_closure() const {
+ return index() == kFunctionClosureRegisterIndex;
+}
+
+
+Register Register::function_context() {
+ return Register(kFunctionContextRegisterIndex);
+}
+
+
+bool Register::is_function_context() const {
+ return index() == kFunctionContextRegisterIndex;
+}
+
+
+Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
+
+
+bool Register::is_new_target() const {
+ return index() == kNewTargetRegisterIndex;
+}
+
+
int Register::MaxParameterIndex() { return kMaxParameterIndex; }
-uint8_t Register::ToOperand() const { return static_cast<uint8_t>(-index_); }
+uint8_t Register::ToOperand() const {
+ DCHECK_GE(index_, kMinInt8);
+ DCHECK_LE(index_, kMaxInt8);
+ return static_cast<uint8_t>(-index_);
+}
Register Register::FromOperand(uint8_t operand) {
return Register(-static_cast<int8_t>(operand));
}
+
+uint16_t Register::ToWideOperand() const {
+ DCHECK_GE(index_, kMinInt16);
+ DCHECK_LE(index_, kMaxInt16);
+ return static_cast<uint16_t>(-index_);
+}
+
+
+Register Register::FromWideOperand(uint16_t operand) {
+ return Register(-static_cast<int16_t>(operand));
+}
+
+
+bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5) {
+ if (reg1.index() + 1 != reg2.index()) {
+ return false;
+ }
+ if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
+ return false;
+ }
+ if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
+ return false;
+ }
+ if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
+ return false;
+ }
+ return true;
+}
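+
+// Usage sketch: registers handed out by NextConsecutiveRegister() can be
+// checked with, e.g., DCHECK(Register::AreContiguous(callee, receiver)), as
+// VisitCall() does; reg3 through reg5 presumably default to invalid
+// registers and are then skipped by the is_valid() checks above.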
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/interpreter/bytecodes.h b/chromium/v8/src/interpreter/bytecodes.h
index 38628422772..a9beb6c918e 100644
--- a/chromium/v8/src/interpreter/bytecodes.h
+++ b/chromium/v8/src/interpreter/bytecodes.h
@@ -16,82 +16,234 @@ namespace internal {
namespace interpreter {
// The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V) \
- V(None) \
- V(Count) \
- V(Imm8) \
- V(Idx) \
- V(Reg)
+#define OPERAND_TYPE_LIST(V) \
+ \
+ /* None operand. */ \
+ V(None, OperandSize::kNone) \
+ \
+ /* Byte operands. */ \
+ V(Count8, OperandSize::kByte) \
+ V(Imm8, OperandSize::kByte) \
+ V(Idx8, OperandSize::kByte) \
+ V(MaybeReg8, OperandSize::kByte) \
+ V(Reg8, OperandSize::kByte) \
+ V(RegPair8, OperandSize::kByte) \
+ \
+ /* Short operands. */ \
+ V(Count16, OperandSize::kShort) \
+ V(Idx16, OperandSize::kShort) \
+ V(Reg16, OperandSize::kShort)
// The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V) \
- \
- /* Loading the accumulator */ \
- V(LdaZero, OperandType::kNone) \
- V(LdaSmi8, OperandType::kImm8) \
- V(LdaConstant, OperandType::kIdx) \
- V(LdaUndefined, OperandType::kNone) \
- V(LdaNull, OperandType::kNone) \
- V(LdaTheHole, OperandType::kNone) \
- V(LdaTrue, OperandType::kNone) \
- V(LdaFalse, OperandType::kNone) \
- \
- /* Load globals */ \
- V(LdaGlobal, OperandType::kIdx) \
- \
- /* Register-accumulator transfers */ \
- V(Ldar, OperandType::kReg) \
- V(Star, OperandType::kReg) \
- \
- /* LoadIC operations */ \
- V(LoadIC, OperandType::kReg, OperandType::kIdx) \
- V(KeyedLoadIC, OperandType::kReg, OperandType::kIdx) \
- \
- /* StoreIC operations */ \
- V(StoreIC, OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- V(KeyedStoreIC, OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
- \
- /* Binary Operators */ \
- V(Add, OperandType::kReg) \
- V(Sub, OperandType::kReg) \
- V(Mul, OperandType::kReg) \
- V(Div, OperandType::kReg) \
- V(Mod, OperandType::kReg) \
- \
- /* Call operations. */ \
- V(Call, OperandType::kReg, OperandType::kReg, OperandType::kCount) \
- \
- /* Test Operators */ \
- V(TestEqual, OperandType::kReg) \
- V(TestNotEqual, OperandType::kReg) \
- V(TestEqualStrict, OperandType::kReg) \
- V(TestNotEqualStrict, OperandType::kReg) \
- V(TestLessThan, OperandType::kReg) \
- V(TestGreaterThan, OperandType::kReg) \
- V(TestLessThanOrEqual, OperandType::kReg) \
- V(TestGreaterThanOrEqual, OperandType::kReg) \
- V(TestInstanceOf, OperandType::kReg) \
- V(TestIn, OperandType::kReg) \
- \
- /* Cast operators */ \
- V(ToBoolean, OperandType::kNone) \
- \
- /* Control Flow */ \
- V(Jump, OperandType::kImm8) \
- V(JumpConstant, OperandType::kIdx) \
- V(JumpIfTrue, OperandType::kImm8) \
- V(JumpIfTrueConstant, OperandType::kIdx) \
- V(JumpIfFalse, OperandType::kImm8) \
- V(JumpIfFalseConstant, OperandType::kIdx) \
+#define BYTECODE_LIST(V) \
+ \
+ /* Loading the accumulator */ \
+ V(LdaZero, OperandType::kNone) \
+ V(LdaSmi8, OperandType::kImm8) \
+ V(LdaUndefined, OperandType::kNone) \
+ V(LdaNull, OperandType::kNone) \
+ V(LdaTheHole, OperandType::kNone) \
+ V(LdaTrue, OperandType::kNone) \
+ V(LdaFalse, OperandType::kNone) \
+ V(LdaConstant, OperandType::kIdx8) \
+ V(LdaConstantWide, OperandType::kIdx16) \
+ \
+ /* Globals */ \
+ V(LdaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalInsideTypeofSloppy, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalInsideTypeofStrict, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalInsideTypeofSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalInsideTypeofStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
+ V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
+ V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(StaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ \
+ /* Context operations */ \
+ V(PushContext, OperandType::kReg8) \
+ V(PopContext, OperandType::kReg8) \
+ V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
+ V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
+ V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
+ V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
+ \
+ /* Load-Store lookup slots */ \
+ V(LdaLookupSlot, OperandType::kIdx8) \
+ V(LdaLookupSlotInsideTypeof, OperandType::kIdx8) \
+ V(LdaLookupSlotWide, OperandType::kIdx16) \
+ V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16) \
+ V(StaLookupSlotSloppy, OperandType::kIdx8) \
+ V(StaLookupSlotStrict, OperandType::kIdx8) \
+ V(StaLookupSlotSloppyWide, OperandType::kIdx16) \
+ V(StaLookupSlotStrictWide, OperandType::kIdx16) \
+ \
+ /* Register-accumulator transfers */ \
+ V(Ldar, OperandType::kReg8) \
+ V(Star, OperandType::kReg8) \
+ \
+ /* Register-register transfers */ \
+ V(Mov, OperandType::kReg8, OperandType::kReg8) \
+ V(Exchange, OperandType::kReg8, OperandType::kReg16) \
+ V(ExchangeWide, OperandType::kReg16, OperandType::kReg16) \
+ \
+ /* LoadIC operations */ \
+ V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(KeyedLoadICSloppy, OperandType::kReg8, OperandType::kIdx8) \
+ V(KeyedLoadICStrict, OperandType::kReg8, OperandType::kIdx8) \
+ /* TODO(rmcilroy): Wide register operands too? */ \
+ V(LoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(LoadICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(KeyedLoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16) \
+ V(KeyedLoadICStrictWide, OperandType::kReg8, OperandType::kIdx16) \
+ \
+ /* StoreIC operations */ \
+ V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(StoreICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(KeyedStoreICSloppy, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx8) \
+ V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx8) \
+ /* TODO(rmcilroy): Wide register operands too? */ \
+ V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
+ OperandType::kIdx16) \
+ V(KeyedStoreICSloppyWide, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx16) \
+ V(KeyedStoreICStrictWide, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kIdx16) \
+ \
+ /* Binary Operators */ \
+ V(Add, OperandType::kReg8) \
+ V(Sub, OperandType::kReg8) \
+ V(Mul, OperandType::kReg8) \
+ V(Div, OperandType::kReg8) \
+ V(Mod, OperandType::kReg8) \
+ V(BitwiseOr, OperandType::kReg8) \
+ V(BitwiseXor, OperandType::kReg8) \
+ V(BitwiseAnd, OperandType::kReg8) \
+ V(ShiftLeft, OperandType::kReg8) \
+ V(ShiftRight, OperandType::kReg8) \
+ V(ShiftRightLogical, OperandType::kReg8) \
+ \
+ /* Unary Operators */ \
+ V(Inc, OperandType::kNone) \
+ V(Dec, OperandType::kNone) \
+ V(LogicalNot, OperandType::kNone) \
+ V(TypeOf, OperandType::kNone) \
+ V(DeletePropertyStrict, OperandType::kReg8) \
+ V(DeletePropertySloppy, OperandType::kReg8) \
+ V(DeleteLookupSlot, OperandType::kNone) \
+ \
+ /* Call operations */ \
+ V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8, \
+ OperandType::kIdx8) \
+ V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16, \
+ OperandType::kIdx16) \
+ V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8, \
+ OperandType::kCount8) \
+ V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8, \
+ OperandType::kCount8, OperandType::kRegPair8) \
+ V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8, \
+ OperandType::kCount8) \
+ \
+ /* New operator */ \
+ V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kCount8) \
+ \
+ /* Test Operators */ \
+ V(TestEqual, OperandType::kReg8) \
+ V(TestNotEqual, OperandType::kReg8) \
+ V(TestEqualStrict, OperandType::kReg8) \
+ V(TestNotEqualStrict, OperandType::kReg8) \
+ V(TestLessThan, OperandType::kReg8) \
+ V(TestGreaterThan, OperandType::kReg8) \
+ V(TestLessThanOrEqual, OperandType::kReg8) \
+ V(TestGreaterThanOrEqual, OperandType::kReg8) \
+ V(TestInstanceOf, OperandType::kReg8) \
+ V(TestIn, OperandType::kReg8) \
+ \
+ /* Cast operators */ \
+ V(ToName, OperandType::kNone) \
+ V(ToNumber, OperandType::kNone) \
+ V(ToObject, OperandType::kNone) \
+ \
+ /* Literals */ \
+ V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ \
+ /* Closure allocation */ \
+ V(CreateClosure, OperandType::kIdx8, OperandType::kImm8) \
+ V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8) \
+ \
+ /* Arguments allocation */ \
+ V(CreateMappedArguments, OperandType::kNone) \
+ V(CreateUnmappedArguments, OperandType::kNone) \
+ \
+ /* Control Flow */ \
+ V(Jump, OperandType::kImm8) \
+ V(JumpConstant, OperandType::kIdx8) \
+ V(JumpConstantWide, OperandType::kIdx16) \
+ V(JumpIfTrue, OperandType::kImm8) \
+ V(JumpIfTrueConstant, OperandType::kIdx8) \
+ V(JumpIfTrueConstantWide, OperandType::kIdx16) \
+ V(JumpIfFalse, OperandType::kImm8) \
+ V(JumpIfFalseConstant, OperandType::kIdx8) \
+ V(JumpIfFalseConstantWide, OperandType::kIdx16) \
+ V(JumpIfToBooleanTrue, OperandType::kImm8) \
+ V(JumpIfToBooleanTrueConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16) \
+ V(JumpIfToBooleanFalse, OperandType::kImm8) \
+ V(JumpIfToBooleanFalseConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16) \
+ V(JumpIfNull, OperandType::kImm8) \
+ V(JumpIfNullConstant, OperandType::kIdx8) \
+ V(JumpIfNullConstantWide, OperandType::kIdx16) \
+ V(JumpIfUndefined, OperandType::kImm8) \
+ V(JumpIfUndefinedConstant, OperandType::kIdx8) \
+ V(JumpIfUndefinedConstantWide, OperandType::kIdx16) \
+ \
+ /* Complex flow control For..in */ \
+ V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInDone, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kReg8) \
+ V(ForInStep, OperandType::kReg8) \
+ \
+ /* Non-local flow control */ \
+ V(Throw, OperandType::kNone) \
V(Return, OperandType::kNone)
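Editorial orientation (a sketch, not part of the patch): an X-macro list like BYTECODE_LIST above is consumed by defining V before expanding the list. V8 uses this pattern to declare the Bytecode enum; a minimal, self-contained version:

// Each list entry V(Name, operands...) becomes one enumerator kName.
#define DECLARE_BYTECODE(Name, ...) k##Name,
enum class Bytecode : uint8_t { BYTECODE_LIST(DECLARE_BYTECODE) };
#undef DECLARE_BYTECODE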
+// Enumeration of the size classes of operand types used by bytecodes.
+enum class OperandSize : uint8_t {
+ kNone = 0,
+ kByte = 1,
+ kShort = 2,
+};
+
+
// Enumeration of operand types used by bytecodes.
enum class OperandType : uint8_t {
-#define DECLARE_OPERAND_TYPE(Name) k##Name,
+#define DECLARE_OPERAND_TYPE(Name, _) k##Name,
OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE)
#undef DECLARE_OPERAND_TYPE
-#define COUNT_OPERAND_TYPES(x) +1
+#define COUNT_OPERAND_TYPES(x, _) +1
// The COUNT_OPERAND_TYPES macro will turn this into kLast = -1 +1 +1... which
// will evaluate to the same value as the last operand type.
kLast = -1 OPERAND_TYPE_LIST(COUNT_OPERAND_TYPES)
@@ -112,33 +264,61 @@ enum class Bytecode : uint8_t {
};
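To see why the kLast trick above works, expand it on a hypothetical three-entry list (editorial sketch, not part of the patch):

#define COLOR_LIST(V) V(Red, 0) V(Green, 0) V(Blue, 0)
#define COUNT(Name, _) +1
// kLast = -1 COLOR_LIST(COUNT) expands to kLast = -1 +1 +1 +1, i.e. 2,
// which equals the value of the last enumerator (kBlue == 2).
#undef COUNT
#undef COLOR_LIST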
-// An interpreter register which is located in the function's register file
+// An interpreter Register which is located in the function's Register file
// in its stack-frame. Registers hold parameters, this, and expression values.
class Register {
public:
- static const int kMaxRegisterIndex = 127;
- static const int kMinRegisterIndex = -128;
-
Register() : index_(kIllegalIndex) {}
- explicit Register(int index) : index_(index) {
- DCHECK_LE(index_, kMaxRegisterIndex);
- DCHECK_GE(index_, kMinRegisterIndex);
- }
+ explicit Register(int index) : index_(index) {}
int index() const {
DCHECK(index_ != kIllegalIndex);
return index_;
}
bool is_parameter() const { return index() < 0; }
+ bool is_valid() const { return index_ != kIllegalIndex; }
static Register FromParameterIndex(int index, int parameter_count);
int ToParameterIndex(int parameter_count) const;
static int MaxParameterIndex();
+ // Returns the register for the function's closure object.
+ static Register function_closure();
+ bool is_function_closure() const;
+
+ // Returns the register for the function's outer context.
+ static Register function_context();
+ bool is_function_context() const;
+
+ // Returns the register for the incoming new target value.
+ static Register new_target();
+ bool is_new_target() const;
+
static Register FromOperand(uint8_t operand);
uint8_t ToOperand() const;
+ static Register FromWideOperand(uint16_t operand);
+ uint16_t ToWideOperand() const;
+
+ static bool AreContiguous(Register reg1, Register reg2,
+ Register reg3 = Register(),
+ Register reg4 = Register(),
+ Register reg5 = Register());
+
+ bool operator==(const Register& other) const {
+ return index() == other.index();
+ }
+ bool operator!=(const Register& other) const {
+ return index() != other.index();
+ }
+ bool operator<(const Register& other) const {
+ return index() < other.index();
+ }
+ bool operator<=(const Register& other) const {
+ return index() <= other.index();
+ }
+
private:
static const int kIllegalIndex = kMaxInt;
@@ -157,6 +337,9 @@ class Bytecodes {
// Returns string representation of |operand_type|.
static const char* OperandTypeToString(OperandType operand_type);
+ // Returns string representation of |operand_size|.
+ static const char* OperandSizeToString(OperandSize operand_size);
+
// Returns byte value of bytecode.
static uint8_t ToByte(Bytecode bytecode);
@@ -169,14 +352,53 @@ class Bytecodes {
// Return the i-th operand of |bytecode|.
static OperandType GetOperandType(Bytecode bytecode, int i);
+ // Return the size of the i-th operand of |bytecode|.
+ static OperandSize GetOperandSize(Bytecode bytecode, int i);
+
+ // Returns the offset of the i-th operand of |bytecode| relative to the start
+ // of the bytecode.
+ static int GetOperandOffset(Bytecode bytecode, int i);
+
// Returns the size of the bytecode including its operands.
static int Size(Bytecode bytecode);
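  // Worked example (editorial, derived from BYTECODE_LIST above): Call has
  // operands (kReg8, kReg8, kCount8, kIdx8), all one byte wide, and byte 0 is
  // the bytecode itself; so GetOperandOffset(Bytecode::kCall, 2) == 3 and
  // Size(Bytecode::kCall) == 1 + 4 == 5.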
- // The maximum number of operands across all bytecodes.
- static int MaximumNumberOfOperands();
+ // Returns the size of |operand|.
+ static OperandSize SizeOfOperand(OperandType operand);
+
+ // Return true if the bytecode is a conditional jump taking
+ // an immediate byte operand (OperandType::kImm8).
+ static bool IsConditionalJumpImmediate(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // a constant pool entry (OperandType::kIdx8).
+ static bool IsConditionalJumpConstant(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // a constant pool entry (OperandType::kIdx16).
+ static bool IsConditionalJumpConstantWide(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // any kind of operand.
+ static bool IsConditionalJump(Bytecode bytecode);
+
+ // Return true if the bytecode is a jump or a conditional jump taking
+ // an immediate byte operand (OperandType::kImm8).
+ static bool IsJumpImmediate(Bytecode bytecode);
+
+ // Return true if the bytecode is a jump or conditional jump taking a
+ // constant pool entry (OperandType::kIdx8).
+ static bool IsJumpConstant(Bytecode bytecode);
+
+ // Return true if the bytecode is a jump or conditional jump taking a
+ // constant pool entry (OperandType::kIdx16).
+ static bool IsJumpConstantWide(Bytecode bytecode);
+
+ // Return true if the bytecode is a jump or conditional jump taking
+ // any kind of operand.
+ static bool IsJump(Bytecode bytecode);
- // Maximum size of a bytecode and its operands.
- static int MaximumSize();
+ // Return true if the bytecode is a conditional jump, a jump, or a return.
+ static bool IsJumpOrReturn(Bytecode bytecode);
// Decode a single bytecode and operands to |os|.
static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
@@ -188,6 +410,7 @@ class Bytecodes {
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+std::ostream& operator<<(std::ostream& os, const OperandSize& operand_type);
} // namespace interpreter
} // namespace internal
diff --git a/chromium/v8/src/interpreter/constant-array-builder.cc b/chromium/v8/src/interpreter/constant-array-builder.cc
new file mode 100644
index 00000000000..2586e1ff4d5
--- /dev/null
+++ b/chromium/v8/src/interpreter/constant-array-builder.cc
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/constant-array-builder.h"
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
+ size_t start_index,
+ size_t capacity)
+ : start_index_(start_index),
+ capacity_(capacity),
+ reserved_(0),
+ constants_(zone) {}
+
+
+void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
+ DCHECK_GT(available(), 0u);
+ reserved_++;
+ DCHECK_LE(reserved_, capacity() - constants_.size());
+}
+
+
+void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
+ DCHECK_GT(reserved_, 0u);
+ reserved_--;
+}
+
+
+size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
+ Handle<Object> object) {
+ DCHECK_GT(available(), 0u);
+ size_t index = constants_.size();
+ DCHECK_LT(index, capacity());
+ constants_.push_back(object);
+ return index + start_index();
+}
+
+
+Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
+ size_t index) const {
+ return constants_[index - start_index()];
+}
+
+
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
+
+
+ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ idx8_slice_(zone, 0, kLowCapacity),
+ idx16_slice_(zone, kLowCapacity, kHighCapacity),
+ constants_map_(isolate->heap(), zone) {
+ STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
+ DCHECK_EQ(idx8_slice_.start_index(), 0u);
+ DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
+ DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
+ DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+}
+
+
+size_t ConstantArrayBuilder::size() const {
+ if (idx16_slice_.size() > 0) {
+ return idx16_slice_.start_index() + idx16_slice_.size();
+ } else {
+ return idx8_slice_.size();
+ }
+}
+
+
+Handle<Object> ConstantArrayBuilder::At(size_t index) const {
+ if (index >= idx16_slice_.start_index()) {
+ return idx16_slice_.At(index);
+ } else if (index < idx8_slice_.size()) {
+ return idx8_slice_.At(index);
+ } else {
+ return isolate_->factory()->the_hole_value();
+ }
+}
+
+
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+ for (int i = 0; i < fixed_array->length(); i++) {
+ fixed_array->set(i, *At(static_cast<size_t>(i)));
+ }
+ return fixed_array;
+}
+
+
+size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
+ index_t* entry = constants_map_.Find(object);
+ return (entry == nullptr) ? AllocateEntry(object) : *entry;
+}
+
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
+ Handle<Object> object) {
+ DCHECK(!object->IsOddball());
+ size_t index;
+ index_t* entry = constants_map_.Get(object);
+ if (idx8_slice_.available() > 0) {
+ index = idx8_slice_.Allocate(object);
+ } else {
+ index = idx16_slice_.Allocate(object);
+ }
+ CHECK_LT(index, kMaxCapacity);
+ *entry = static_cast<index_t>(index);
+ return *entry;
+}
+
+
+OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+ if (idx8_slice_.available() > 0) {
+ idx8_slice_.Reserve();
+ return OperandSize::kByte;
+ } else if (idx16_slice_.available() > 0) {
+ idx16_slice_.Reserve();
+ return OperandSize::kShort;
+ } else {
+ UNREACHABLE();
+ return OperandSize::kNone;
+ }
+}
+
+
+size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
+ Handle<Object> object) {
+ DiscardReservedEntry(operand_size);
+ size_t index;
+ index_t* entry = constants_map_.Find(object);
+ if (nullptr == entry) {
+ index = AllocateEntry(object);
+ } else {
+ if (operand_size == OperandSize::kByte &&
+ *entry >= idx8_slice_.capacity()) {
+ // The object is already in the constant array, but has an index
+ // outside the range of an idx8 operand so we need to create a
+ // duplicate entry in the idx8 operand range to satisfy the
+ // commitment.
+ *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+ }
+ index = *entry;
+ }
+ DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
+ DCHECK_LT(index, kMaxCapacity);
+ return index;
+}
+
+
+void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kByte:
+ idx8_slice_.Unreserve();
+ return;
+ case OperandSize::kShort:
+ idx16_slice_.Unreserve();
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
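Editorial note: a hedged sketch of the reservation protocol implemented above, as a caller might drive it (EmitConstantOperand and |operand_needed| are hypothetical names, not part of the patch):

// Reserve first so the operand width is known before choosing a bytecode.
void EmitConstantOperand(ConstantArrayBuilder* builder, Handle<Object> object,
                         bool operand_needed) {
  OperandSize size = builder->CreateReservedEntry();  // idx8 slice if free
  if (operand_needed) {
    // CommitReservedEntry may duplicate an idx16 entry into the idx8 slice
    // to honour a kByte reservation (see the comment in the code above).
    size_t index = builder->CommitReservedEntry(size, object);
    USE(index);  // ...emit |index| as an operand of width |size|...
  } else {
    builder->DiscardReservedEntry(size);  // release the reservation
  }
}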
diff --git a/chromium/v8/src/interpreter/constant-array-builder.h b/chromium/v8/src/interpreter/constant-array-builder.h
new file mode 100644
index 00000000000..c882b1d540a
--- /dev/null
+++ b/chromium/v8/src/interpreter/constant-array-builder.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+
+#include "src/identity-map.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Factory;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing constant arrays for the interpreter.
+class ConstantArrayBuilder final : public ZoneObject {
+ public:
+ // Capacity of the 8-bit operand slice.
+ static const size_t kLowCapacity = 1u << kBitsPerByte;
+
+ // Capacity of the combined 8-bit and 16-bit operand slices.
+ static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+
+ // Capacity of the 16-bit operand slice.
+ static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+
+ ConstantArrayBuilder(Isolate* isolate, Zone* zone);
+
+ // Generate a fixed array of constants based on inserted objects.
+ Handle<FixedArray> ToFixedArray(Factory* factory) const;
+
+ // Returns the object in the constant pool array at index |index|.
+ Handle<Object> At(size_t index) const;
+
+ // Returns the number of elements in the array.
+ size_t size() const;
+
+ // Insert an object into the constants array if it is not already
+ // present. Returns the array index associated with the object.
+ size_t Insert(Handle<Object> object);
+
+ // Creates a reserved entry in the constant pool and returns the size of
+ // the operand that will be required to hold the entry when committed.
+ OperandSize CreateReservedEntry();
+
+ // Commits a reserved entry and returns the constant pool index for the
+ // object.
+ size_t CommitReservedEntry(OperandSize operand_size, Handle<Object> object);
+
+ // Discards a constant pool reservation.
+ void DiscardReservedEntry(OperandSize operand_size);
+
+ private:
+ typedef uint16_t index_t;
+
+ index_t AllocateEntry(Handle<Object> object);
+
+ struct ConstantArraySlice final {
+ ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+ void Reserve();
+ void Unreserve();
+ size_t Allocate(Handle<Object> object);
+ Handle<Object> At(size_t index) const;
+
+ inline size_t available() const { return capacity() - reserved() - size(); }
+ inline size_t reserved() const { return reserved_; }
+ inline size_t capacity() const { return capacity_; }
+ inline size_t size() const { return constants_.size(); }
+ inline size_t start_index() const { return start_index_; }
+
+ private:
+ const size_t start_index_;
+ const size_t capacity_;
+ size_t reserved_;
+ ZoneVector<Handle<Object>> constants_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
+ };
+
+ Isolate* isolate_;
+ ConstantArraySlice idx8_slice_;
+ ConstantArraySlice idx16_slice_;
+ IdentityMap<index_t> constants_map_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
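Editorial check of the capacities above (V8 defines kBitsPerByte == 8, so these are compile-time facts, shown here as a sketch):

static_assert(ConstantArrayBuilder::kLowCapacity == 256,
              "idx8 slice: indices 0..255 fit a one-byte operand");
static_assert(ConstantArrayBuilder::kMaxCapacity == 65536,
              "total pool: every index fits a two-byte operand");
static_assert(ConstantArrayBuilder::kHighCapacity == 65536 - 256,
              "idx16 slice: the remaining 65280 entries");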
diff --git a/chromium/v8/src/interpreter/control-flow-builders.cc b/chromium/v8/src/interpreter/control-flow-builders.cc
new file mode 100644
index 00000000000..99066e8c7e9
--- /dev/null
+++ b/chromium/v8/src/interpreter/control-flow-builders.cc
@@ -0,0 +1,142 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/control-flow-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+
+BreakableControlFlowBuilder::~BreakableControlFlowBuilder() {
+ DCHECK(break_sites_.empty());
+}
+
+
+void BreakableControlFlowBuilder::SetBreakTarget(const BytecodeLabel& target) {
+ BindLabels(target, &break_sites_);
+}
+
+
+void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->Jump(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfTrue(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfTrue(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfFalse(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfUndefined(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfUndefined(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfNull(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfNull(&sites->back());
+}
+
+
+void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites,
+ int index) {
+ builder()->Jump(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfTrue(
+ ZoneVector<BytecodeLabel>* sites, int index) {
+ builder()->JumpIfTrue(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+ ZoneVector<BytecodeLabel>* sites, int index) {
+ builder()->JumpIfFalse(&sites->at(index));
+}
+
+
+void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
+ ZoneVector<BytecodeLabel>* sites) {
+ for (size_t i = 0; i < sites->size(); i++) {
+ BytecodeLabel& site = sites->at(i);
+ builder()->Bind(target, &site);
+ }
+ sites->clear();
+}
+
+
+void BlockBuilder::EndBlock() {
+ builder()->Bind(&block_end_);
+ SetBreakTarget(block_end_);
+}
+
+
+LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
+
+
+void LoopBuilder::LoopHeader() {
+ // Jumps from before the loop header into the loop violate ordering
+ // requirements of bytecode basic blocks. The only entry into a loop
+ // must be the loop header. Surely breaks are okay? Not if they are
+ // nested and misplaced between the headers.
+ DCHECK(break_sites_.empty() && continue_sites_.empty());
+ builder()->Bind(&loop_header_);
+}
+
+
+void LoopBuilder::EndLoop() {
+ // Loop must have closed form, i.e. all loop elements are within the loop,
+ // the loop header precedes the body and next elements in the loop.
+ DCHECK(loop_header_.is_bound());
+ builder()->Bind(&loop_end_);
+ SetBreakTarget(loop_end_);
+ if (next_.is_bound()) {
+ DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
+ SetContinueTarget(next_);
+ } else {
+ DCHECK(condition_.is_bound());
+ DCHECK_GE(condition_.offset(), loop_header_.offset());
+ DCHECK_LE(condition_.offset(), loop_end_.offset());
+ SetContinueTarget(condition_);
+ }
+}
+
+
+void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
+ BindLabels(target, &continue_sites_);
+}
+
+
+SwitchBuilder::~SwitchBuilder() {
+#ifdef DEBUG
+ for (auto site : case_sites_) {
+ DCHECK(site.is_bound());
+ }
+#endif
+}
+
+
+void SwitchBuilder::SetCaseTarget(int index) {
+ BytecodeLabel& site = case_sites_.at(index);
+ builder()->Bind(&site);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
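Editorial sketch of how a bytecode generator might drive LoopBuilder for `while (cond) body` (the visit steps are assumptions, not part of the patch):

LoopBuilder loop(builder);
loop.LoopHeader();    // binds loop_header_; the only entry into the loop
loop.Condition();     // binds condition_, the default continue target
// ...evaluate |cond| into the accumulator (generator detail)...
loop.BreakIfFalse();  // unbound jump; patched to loop_end_ by EndLoop()
// ...visit |body|; break/continue statements call loop.Break()/Continue()...
loop.JumpToHeader();  // back edge
loop.EndLoop();       // binds loop_end_ and patches break/continue sites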
diff --git a/chromium/v8/src/interpreter/control-flow-builders.h b/chromium/v8/src/interpreter/control-flow-builders.h
new file mode 100644
index 00000000000..24a7dfe3e5b
--- /dev/null
+++ b/chromium/v8/src/interpreter/control-flow-builders.h
@@ -0,0 +1,151 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
+#define V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ControlFlowBuilder BASE_EMBEDDED {
+ public:
+ explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
+ : builder_(builder) {}
+ virtual ~ControlFlowBuilder() {}
+
+ protected:
+ BytecodeArrayBuilder* builder() const { return builder_; }
+
+ private:
+ BytecodeArrayBuilder* builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(ControlFlowBuilder);
+};
+
+class BreakableControlFlowBuilder : public ControlFlowBuilder {
+ public:
+ explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
+ : ControlFlowBuilder(builder),
+ break_sites_(builder->zone()) {}
+ virtual ~BreakableControlFlowBuilder();
+
+ // This method should be called by the control flow owner before
+ // destruction to update sites that emit jumps for break.
+ void SetBreakTarget(const BytecodeLabel& break_target);
+
+ // This method is called when visiting break statements in the AST.
+ // Inserts a jump to an unbound label that is patched when the corresponding
+ // SetBreakTarget is called.
+ void Break() { EmitJump(&break_sites_); }
+ void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
+ void BreakIfFalse() { EmitJumpIfFalse(&break_sites_); }
+ void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
+ void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
+
+ protected:
+ void EmitJump(ZoneVector<BytecodeLabel>* labels);
+ void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
+
+ void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
+
+ // Unbound labels that identify jumps for break statements in the code.
+ ZoneVector<BytecodeLabel> break_sites_;
+};
+
+
+// Class to track control flow for block statements (which can break in JS).
+class BlockBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit BlockBuilder(BytecodeArrayBuilder* builder)
+ : BreakableControlFlowBuilder(builder) {}
+
+ void EndBlock();
+
+ private:
+ BytecodeLabel block_end_;
+};
+
+
+// A class to help with co-ordinating break and continue statements with
+// their loop.
+class LoopBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit LoopBuilder(BytecodeArrayBuilder* builder)
+ : BreakableControlFlowBuilder(builder),
+ continue_sites_(builder->zone()) {}
+ ~LoopBuilder();
+
+ void LoopHeader();
+ void Condition() { builder()->Bind(&condition_); }
+ void Next() { builder()->Bind(&next_); }
+ void JumpToHeader() { builder()->Jump(&loop_header_); }
+ void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+ void EndLoop();
+
+ // This method is called when visiting continue statements in the AST.
+ // Inserts a jump to an unbound label that is patched when the corresponding
+ // SetContinueTarget is called.
+ void Continue() { EmitJump(&continue_sites_); }
+ void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
+ void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
+ void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
+
+ private:
+ void SetContinueTarget(const BytecodeLabel& continue_target);
+
+ BytecodeLabel loop_header_;
+ BytecodeLabel condition_;
+ BytecodeLabel next_;
+ BytecodeLabel loop_end_;
+
+ // Unbound labels that identify jumps for continue statements in the code.
+ ZoneVector<BytecodeLabel> continue_sites_;
+};
+
+
+// A class to help with co-ordinating break statements with their switch.
+class SwitchBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
+ : BreakableControlFlowBuilder(builder),
+ case_sites_(builder->zone()) {
+ case_sites_.resize(number_of_cases);
+ }
+ ~SwitchBuilder();
+
+ // This method should be called by the SwitchBuilder owner when the case
+ // statement with |index| is emitted to update the case jump site.
+ void SetCaseTarget(int index);
+
+ // This method is called when visiting the case comparison operation for
+ // |index|. Inserts a JumpIfTrue to an unbound label that is patched when
+ // the corresponding SetCaseTarget is called.
+ void Case(int index) { EmitJumpIfTrue(&case_sites_, index); }
+
+ // This method is called when all case comparisons have been emitted, if
+ // there is a default case statement. Inserts a Jump to an unbound label
+ // that is patched when the corresponding SetCaseTarget is called.
+ void DefaultAt(int index) { EmitJump(&case_sites_, index); }
+
+ private:
+ // Unbound labels that identify jumps for case statements in the code.
+ ZoneVector<BytecodeLabel> case_sites_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_CONTROL_FLOW_BUILDERS_H_
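Editorial sketch of SwitchBuilder in use (the visit steps, |clause_count|, |has_default| and |default_index| are assumptions, not part of the patch):

SwitchBuilder switch_builder(builder, clause_count);
for (int i = 0; i < clause_count; i++) {
  // ...compare the switch tag against clause i, result in the accumulator...
  switch_builder.Case(i);           // JumpIfTrue to the unbound label i
}
if (has_default) switch_builder.DefaultAt(default_index);
for (int i = 0; i < clause_count; i++) {
  switch_builder.SetCaseTarget(i);  // bind label i before emitting its body
  // ...visit the statements of clause i...
}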
diff --git a/chromium/v8/src/interpreter/interpreter.cc b/chromium/v8/src/interpreter/interpreter.cc
index 2d97fc8ef28..574602b0ed8 100644
--- a/chromium/v8/src/interpreter/interpreter.cc
+++ b/chromium/v8/src/interpreter/interpreter.cc
@@ -17,6 +17,7 @@ namespace internal {
namespace interpreter {
using compiler::Node;
+
#define __ assembler->
@@ -59,21 +60,17 @@ void Interpreter::Initialize() {
bool Interpreter::MakeBytecode(CompilationInfo* info) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
-
BytecodeGenerator generator(info->isolate(), info->zone());
info->EnsureFeedbackVector();
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
if (FLAG_print_bytecode) {
- bytecodes->Print();
- }
-
- DCHECK(shared_info->function_data()->IsUndefined());
- if (!shared_info->function_data()->IsUndefined()) {
- return false;
+ OFStream os(stdout);
+ os << "Function: " << info->GetDebugName().get() << std::endl;
+ bytecodes->Print(os);
+ os << std::flush;
}
- shared_info->set_function_data(*bytecodes);
+ info->SetBytecodeArray(bytecodes);
info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
return true;
}
@@ -100,17 +97,14 @@ void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
//
// Load an 8-bit integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
- Node* raw_int = __ BytecodeOperandImm8(0);
+ Node* raw_int = __ BytecodeOperandImm(0);
Node* smi_int = __ SmiTag(raw_int);
__ SetAccumulator(smi_int);
__ Dispatch();
}
-// LdaConstant <idx>
-//
-// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
__ SetAccumulator(constant);
@@ -118,6 +112,22 @@ void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
}
+// LdaConstant <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+ DoLoadConstant(assembler);
+}
+
+
+// LdaConstantWide <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+ DoLoadConstant(assembler);
+}
+
+
// LdaUndefined
//
// Load Undefined into the accumulator.
@@ -191,20 +201,439 @@ void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
}
-// LdaGlobal <slot_index>
+// Exchange <reg8> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
+ Node* reg0_index = __ BytecodeOperandReg(0);
+ Node* reg1_index = __ BytecodeOperandReg(1);
+ Node* reg0_value = __ LoadRegister(reg0_index);
+ Node* reg1_value = __ LoadRegister(reg1_index);
+ __ StoreRegister(reg1_value, reg0_index);
+ __ StoreRegister(reg0_value, reg1_index);
+ __ Dispatch();
+}
+
+
+// ExchangeWide <reg16> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
+ return DoExchange(assembler);
+}
+
+
+// Mov <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+ Node* src_index = __ BytecodeOperandReg(0);
+ Node* src_value = __ LoadRegister(src_index);
+ Node* dst_index = __ BytecodeOperandReg(1);
+ __ StoreRegister(src_value, dst_index);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoLoadGlobal(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ // Get the global object.
+ Node* context = __ GetContext();
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+
+ // Load the global via the LoadIC.
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* constant_index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
+ type_feedback_vector);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
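Editorial illustration of what DoLoadGlobal services (the exact generator output is an assumption): for sloppy-mode code that reads a global x, the generator would emit roughly

  LdaGlobalSloppy [name_index for "x"], [feedback slot]

and this handler resolves the global object from the native context, then performs the load through the LoadIC stub.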
+
+
+// LdaGlobalSloppy <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalStrict <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofSloppy <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalInsideTypeofSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofStrict <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalInsideTypeofStrict(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalSloppyWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalStrictWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+// LdaGlobalInsideTypeofStrictWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadGlobal(ic, assembler);
+}
+
+
+void Interpreter::DoStoreGlobal(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ // Get the global object.
+ Node* context = __ GetContext();
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+
+ // Store the global via the StoreIC.
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* constant_index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* value = __ GetAccumulator();
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
+ type_feedback_vector);
+
+ __ Dispatch();
+}
+
+
+// StaGlobalSloppy <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalStrict <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalSloppyWide <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
+void Interpreter::DoStaGlobalSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// StaGlobalStrictWide <name_index> <slot>
+//
+// Store the value in the accumulator into the global with name in constant pool
+// entry <name_index> using FeedBackVector slot <slot> in strict mode.
+void Interpreter::DoStaGlobalStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreGlobal(ic, assembler);
+}
+
+
+// LdaContextSlot <context> <slot_index>
//
-// Load the global at |slot_index| into the accumulator.
-void Interpreter::DoLdaGlobal(compiler::InterpreterAssembler* assembler) {
- Node* slot_index = __ BytecodeOperandIdx(0);
- Node* smi_slot_index = __ SmiTag(slot_index);
- Node* result = __ CallRuntime(Runtime::kLoadGlobalViaContext, smi_slot_index);
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* result = __ LoadContextSlot(context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
-void Interpreter::DoPropertyLoadIC(Callable ic,
+// LdaContextSlotWide <context> <slot_index>
+//
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaContextSlot(assembler);
+}
+
+
+// StaContextSlot <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ __ StoreContextSlot(context, slot_index, value);
+ __ Dispatch();
+}
+
+
+// StaContextSlotWide <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaContextSlot(assembler);
+}
+
+
+void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+ Node* result_pair = __ CallRuntime(function_id, context, name);
+ Node* result = __ Projection(0, result_pair);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
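+// Editorial note: the lookup-slot runtime entries return a pair; Projection(0)
+// above selects its first element (the value), while the second element
+// (presumably the receiver) is unused for plain loads.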
+
+
+// LdaLookupSlot <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+
+// LdaLookupSlotInsideTypeof <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeof(
+ compiler::InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+}
+
+
+// LdaLookupSlotWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaLookupSlot(assembler);
+}
+
+
+// LdaLookupSlotInsideTypeofWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeofWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaLookupSlotInsideTypeof(assembler);
+}
+
+
+void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
+ compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+ Node* language_mode_node = __ NumberConstant(language_mode);
+ Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
+ language_mode_node);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// StaLookupSlotSloppy <name_index>
+//
+// Store the object in the accumulator to the object with the name in
+// constant pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+}
+
+
+// StaLookupSlotStrict <name_index>
+//
+// Store the object in the accumulator to the object with the name in
+// constant pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrict(
+ compiler::InterpreterAssembler* assembler) {
+ DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+}
+
+
+// StaLookupSlotSloppyWide <name_index>
+//
+// Store the object in the accumulator to the object with the name in
+// constant pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaLookupSlotSloppy(assembler);
+}
+
+
+// StaLookupSlotStrictWide <name_index>
+//
+// Store the object in the accumulator to the object with the name in
+// constant pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaLookupSlotStrict(assembler);
+}
+
+
+void Interpreter::DoLoadIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* register_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(register_index);
+ Node* constant_index = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* raw_slot = __ BytecodeOperandIdx(2);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
+ type_feedback_vector);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// LoadICSloppy <object> <name_index> <slot>
+//
+// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+// LoadICStrict <object> <name_index> <slot>
+//
+// Calls the strict mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+// LoadICSloppyWide <object> <name_index> <slot>
+//
+// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+// LoadICStrictWide <object> <name_index> <slot>
+//
+// Calls the strict mode LoadIC at FeedBackVector slot <slot> for <object> and
+// the name at constant pool entry <name_index>.
+void Interpreter::DoLoadICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ STRICT, UNINITIALIZED);
+ DoLoadIC(ic, assembler);
+}
+
+
+void Interpreter::DoKeyedLoadIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -219,30 +648,123 @@ void Interpreter::DoPropertyLoadIC(Callable ic,
}
-// LoadIC <object> <slot>
+// KeyedLoadICSloppy <object> <slot>
//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name
-// in the accumulator.
-void Interpreter::DoLoadIC(compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
- DoPropertyLoadIC(ic, assembler);
+// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoKeyedLoadIC(ic, assembler);
}
-// KeyedLoadIC <object> <slot>
+// KeyedLoadICStrict <object> <slot>
//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoKeyedLoadIC(compiler::InterpreterAssembler* assembler) {
+// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICStrict(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadICSloppyWide <object> <slot>
+//
+// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoPropertyLoadIC(ic, assembler);
+ DoKeyedLoadIC(ic, assembler);
}
-void Interpreter::DoPropertyStoreIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+// KeyedLoadICStrictWide <object> <slot>
+//
+// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
+// and the key in the accumulator.
+void Interpreter::DoKeyedLoadICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedLoadIC(ic, assembler);
+}
+
+
+void Interpreter::DoStoreIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* object_reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(object_reg_index);
+ Node* constant_index = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(constant_index);
+ Node* value = __ GetAccumulator();
+ Node* raw_slot = __ BytecodeOperandIdx(2);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
+ type_feedback_vector);
+ __ Dispatch();
+}
+
+
+// StoreICSloppy <object> <name_index> <slot>
+//
+// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+// StoreICStrict <object> <name_index> <slot>
+//
+// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+// StoreICSloppyWide <object> <name_index> <slot>
+//
+// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+// StoreICStrictWide <object> <name_index> <slot>
+//
+// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStoreICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoStoreIC(ic, assembler);
+}
+
+
+void Interpreter::DoKeyedStoreIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -252,32 +774,80 @@ void Interpreter::DoPropertyStoreIC(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(2);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Node* result = __ CallIC(ic.descriptor(), code_target, object, name, value,
- smi_slot, type_feedback_vector);
- __ SetAccumulator(result);
+ __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
+ type_feedback_vector);
__ Dispatch();
}
-// StoreIC <object> <name> <slot>
+// KeyedStoreICSloppy <object> <key> <slot>
//
-// Calls the StoreIC at FeedBackVector slot <slot> for <object> and the name
-// <name> with the value in the accumulator.
-void Interpreter::DoStoreIC(compiler::InterpreterAssembler* assembler) {
+// Calls the sloppy mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICSloppy(
+ compiler::InterpreterAssembler* assembler) {
Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoPropertyStoreIC(ic, assembler);
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoKeyedStoreIC(ic, assembler);
}
-// KeyedStoreIC <object> <key> <slot>
+// KeyedStoreICStrict <object> <key> <slot>
//
-// Calls the KeyStoreIC at FeedBackVector slot <slot> for <object> and the key
-// <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreIC(compiler::InterpreterAssembler* assembler) {
+// Calls the strict mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICStrict(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICSloppyWide <object> <key> <slot>
+//
+// Calls the sloppy mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoPropertyStoreIC(ic, assembler);
+ DoKeyedStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreICStrictWide <object> <key> <slot>
+//
+// Calls the strict mode KeyedStoreIC at FeedBackVector slot <slot> for <object>
+// and the key <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreICStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ DoKeyedStoreIC(ic, assembler);
+}
+
+
+// PushContext <context>
+//
+// Pushes the accumulator as the current context, and saves it in <context>.
+void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ GetAccumulator();
+ __ SetContext(context);
+ __ StoreRegister(context, reg_index);
+ __ Dispatch();
+}
+
+
+// PopContext <context>
+//
+// Pops the current context and sets <context> as the new context.
+void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ __ SetContext(context);
+ __ Dispatch();
}
@@ -334,22 +904,275 @@ void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
}
-// Call <receiver> <arg_count>
+// BitwiseOr <src>
//
-// Call a JS function with receiver and |arg_count| arguments in subsequent
-// registers. The JSfunction or Callable to call is in the accumulator.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+// BitwiseOr register <src> to accumulator.
+void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kBitwiseOr, assembler);
+}
+
+
+// BitwiseXor <src>
+//
+// BitwiseXor register <src> to accumulator.
+void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kBitwiseXor, assembler);
+}
+
+
+// BitwiseAnd <src>
+//
+// BitwiseAnd register <src> to accumulator.
+void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kBitwiseAnd, assembler);
+}
+
+
+// ShiftLeft <src>
+//
+// Left shifts register <src> by the count specified in the accumulator.
+// Register <src> is converted to an int32 and the accumulator to uint32
+// before the operation. The 5 least-significant bits of the accumulator are
+// used as the count, i.e. <src> << (accumulator & 0x1F).
+void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kShiftLeft, assembler);
+}
+
+
+// ShiftRight <src>
+//
+// Right shifts register <src> by the count specified in the accumulator.
+// Result is sign extended. Register <src> is converted to an int32 and the
+// accumulator to uint32 before the operation. The 5 least-significant bits of
+// the accumulator are used as the count, i.e. <src> >> (accumulator & 0x1F).
+void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kShiftRight, assembler);
+}
+
+
+// ShiftRightLogical <src>
+//
+// Right shifts register <src> by the count specified in the accumulator.
+// Result is zero-filled. The accumulator and register <src> are converted to
+// uint32 before the operation. The 5 least significant bits of the
+// accumulator are used as the shift count, i.e.
+// <src> >>> (accumulator & 0x1F).
+void Interpreter::DoShiftRightLogical(
+ compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kShiftRightLogical, assembler);
+}
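+
+// In JS terms, the 5-bit count mask shared by the three shift handlers above
+// is observable as:
+//   3 << 34    // 12, since 34 & 0x1F == 2
+//   -8 >> 1    // -4  (sign-extending)
+//   -8 >>> 1   // 2147483644, i.e. 0x7FFFFFFC (zero-filling)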
+
+
+void Interpreter::DoCountOp(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* one = __ NumberConstant(1);
+ Node* result = __ CallRuntime(function_id, value, one);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// Inc
+//
+// Increments value in the accumulator by one.
+void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+ DoCountOp(Runtime::kAdd, assembler);
+}
+
+
+// Dec
+//
+// Decrements value in the accumulator by one.
+void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+ DoCountOp(Runtime::kSubtract, assembler);
+}
+
+
+// LogicalNot
+//
+// Perform logical-not on the accumulator, first casting the
+// accumulator to a boolean value if required.
+void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// TypeOf
+//
+// Load the accumulator with the string representing the type of the
+// object in the accumulator.
+void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoDelete(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* key = __ GetAccumulator();
+ Node* result = __ CallRuntime(function_id, object, key);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// DeletePropertyStrict
+//
+// Delete the property specified in the accumulator from the object
+// referenced by the register operand following strict mode semantics.
+void Interpreter::DoDeletePropertyStrict(
+ compiler::InterpreterAssembler* assembler) {
+ DoDelete(Runtime::kDeleteProperty_Strict, assembler);
+}
+
+
+// DeletePropertySloppy
+//
+// Delete the property specified in the accumulator from the object
+// referenced by the register operand following sloppy mode semantics.
+void Interpreter::DoDeletePropertySloppy(
+ compiler::InterpreterAssembler* assembler) {
+ DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
+}
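+
+// In JS terms, the two modes differ only on failure: deleting a
+// non-configurable property returns false in sloppy mode but throws a
+// TypeError in strict mode, e.g.
+//   delete Object.freeze({x: 1}).x   // sloppy: false, strict: TypeError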
+
+
+// DeleteLookupSlot
+//
+// Delete the variable with the name specified in the accumulator by dynamically
+// looking it up.
+void Interpreter::DoDeleteLookupSlot(
+ compiler::InterpreterAssembler* assembler) {
+ Node* name = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
Node* function_reg = __ BytecodeOperandReg(0);
Node* function = __ LoadRegister(function_reg);
Node* receiver_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(receiver_reg);
Node* args_count = __ BytecodeOperandCount(2);
+ // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
+ Node* result = __ CallJS(function, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
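+
+// For example (register numbers hypothetical), f.call(recv, a, b) could be
+// encoded with f in r1 and recv, a, b in the consecutive registers r2-r4;
+// the bytecode records only the first register and an argument count.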
+
+
+// Call <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+ DoJSCall(assembler);
+}
+
+
+// CallWide <callable> <receiver> <arg_count>
+//
+// Call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
+ DoJSCall(assembler);
+}
+
+
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+ Node* function_id = __ BytecodeOperandIdx(0);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result = __ CallRuntime(function_id, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>.
+void Interpreter::DoCallRuntimeForPair(
+ compiler::InterpreterAssembler* assembler) {
+ // Call the runtime function.
+ Node* function_id = __ BytecodeOperandIdx(0);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
+
+  // Store the results in <first_return> and <first_return + 1>.
+ Node* first_return_reg = __ BytecodeOperandReg(3);
+ Node* second_return_reg = __ NextRegister(first_return_reg);
+ Node* result0 = __ Projection(0, result_pair);
+ Node* result1 = __ Projection(1, result_pair);
+ __ StoreRegister(result0, first_return_reg);
+ __ StoreRegister(result1, second_return_reg);
+
+ __ Dispatch();
+}
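+
+// A pair-returning runtime function is, for example, a lookup-slot load that
+// produces both the looked-up value and the receiver to call it with; the
+// two Projection nodes above unpack such a pair into consecutive registers.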
+
+
+// CallJSRuntime <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function stored at |context_index| in the native
+// context, with the receiver in register |receiver| and |arg_count| arguments
+// in subsequent registers.
+void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+ Node* context_index = __ BytecodeOperandIdx(0);
+ Node* receiver_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(receiver_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+
+ // Get the function to call from the native context.
+ Node* context = __ GetContext();
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* function = __ LoadContextSlot(native_context, context_index);
+
+ // Call the function.
Node* result = __ CallJS(function, first_arg, args_count);
__ SetAccumulator(result);
__ Dispatch();
}
+// New <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+ Node* constructor_reg = __ BytecodeOperandReg(0);
+ Node* constructor = __ LoadRegister(constructor_reg);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result =
+ __ CallConstruct(constructor, constructor, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
@@ -438,28 +1261,51 @@ void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
}
-// ToBoolean
+// ToName
+//
+// Cast the object referenced by the accumulator to a name.
+void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
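+
+// In JS terms, ToName produces a valid property key: e.g. the number 1
+// becomes the string "1", while symbol values pass through unchanged.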
+
+
+// ToNumber
//
-// Cast the object referenced by the accumulator to a boolean.
-void Interpreter::DoToBoolean(compiler::InterpreterAssembler* assembler) {
- // TODO(oth): The next CL for test operations has interpreter specific
- // runtime calls. This looks like another candidate.
+// Cast the object referenced by the accumulator to a number.
+void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ToObject
+//
+// Cast the object referenced by the accumulator to a JSObject.
+void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+ __ SetAccumulator(result);
__ Dispatch();
}
// Jump <imm8>
//
-// Jump by number of bytes represented by an immediate operand.
+// Jump by number of bytes represented by the immediate operand |imm8|.
void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* relative_jump = __ BytecodeOperandImm(0);
__ Jump(relative_jump);
}
-// JumpConstant <idx>
+// JumpConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
@@ -468,21 +1314,31 @@ void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
}
+// JumpConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the
+// constant pool.
+void Interpreter::DoJumpConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpConstant(assembler);
+}
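+
+// Note that the *Wide bytecodes delegate to their narrow handlers; the
+// InterpreterAssembler operand accessors presumably decode 8- or 16-bit
+// operands according to the bytecode currently being dispatched.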
+
+
// JumpIfTrue <imm8>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains true.
void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
-// JumpIfTrueConstant <idx>
+// JumpIfTrueConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(
compiler::InterpreterAssembler* assembler) {
@@ -495,21 +1351,31 @@ void Interpreter::DoJumpIfTrueConstant(
}
+// JumpIfTrueConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the accumulator contains true.
+void Interpreter::DoJumpIfTrueConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfTrueConstant(assembler);
+}
+
+
// JumpIfFalse <imm8>
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
-// JumpIfFalseConstant <idx>
+// JumpIfFalseConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(
compiler::InterpreterAssembler* assembler) {
@@ -522,14 +1388,393 @@ void Interpreter::DoJumpIfFalseConstant(
}
+// JumpIfFalseConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the accumulator contains false.
+void Interpreter::DoJumpIfFalseConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfFalseConstant(assembler);
+}
+
+
+// JumpIfToBooleanTrue <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is true when the object is cast to boolean.
+void Interpreter::DoJumpIfToBooleanTrue(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* true_value = __ BooleanConstant(true);
+ __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+}
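+
+// Unlike JumpIfTrue, which compares the accumulator word-for-word against
+// the true value, the ToBoolean variants first coerce, so truthy values such
+// as 1, "x" or {} also take the jump.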
+
+
+// JumpIfToBooleanTrueConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is true when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ Node* true_value = __ BooleanConstant(true);
+ __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+}
+
+
+// JumpIfToBooleanTrueConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is true when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfToBooleanTrueConstant(assembler);
+}
+
+
+// JumpIfToBooleanFalse <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is false when the object is cast to boolean.
+void Interpreter::DoJumpIfToBooleanFalse(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* false_value = __ BooleanConstant(false);
+ __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+}
+
+
+// JumpIfToBooleanFalseConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is false when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* to_boolean_value =
+ __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ Node* false_value = __ BooleanConstant(false);
+ __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+}
+
+
+// JumpIfToBooleanFalseConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is false when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfToBooleanFalseConstant(assembler);
+}
+
+
+// JumpIfNull <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ __ JumpIfWordEqual(accumulator, null_value, relative_jump);
+}
+
+
+// JumpIfNullConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ __ JumpIfWordEqual(accumulator, null_value, relative_jump);
+}
+
+
+// JumpIfNullConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfNullConstant(assembler);
+}
+
+
+// JumpIfUndefined <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+}
+
+
+// JumpIfUndefinedConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
+}
+
+
+// JumpIfUndefinedConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfUndefinedConstant(assembler);
+}
+
+
+void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* flags_raw = __ BytecodeOperandImm(2);
+ Node* flags = __ SmiTag(flags_raw);
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* result = __ CallRuntime(function_id, closure, literal_index,
+ constant_elements, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
+//
+// Creates a regular expression literal for literal index <literal_idx> with
+// <flags> and the pattern in <pattern_idx>.
+void Interpreter::DoCreateRegExpLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+}
+
+
+// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
+//
+// Creates a regular expression literal for literal index <literal_idx> with
+// <flags> and the pattern in <pattern_idx>.
+void Interpreter::DoCreateRegExpLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+}
+
+
+// CreateArrayLiteral <element_idx> <literal_idx> <flags>
+//
+// Creates an array literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateArrayLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+}
+
+
+// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
+//
+// Creates an array literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateArrayLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+}
+
+
+// CreateObjectLiteral <element_idx> <literal_idx> <flags>
+//
+// Creates an object literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateObjectLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+}
+
+
+// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
+//
+// Creates an object literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateObjectLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+}
+
+
+// CreateClosure <index> <tenured>
+//
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
+void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
+ // calling into the runtime.
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* shared = __ LoadConstantPoolEntry(index);
+ Node* tenured_raw = __ BytecodeOperandImm(1);
+ Node* tenured = __ SmiTag(tenured_raw);
+ Node* result =
+ __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CreateClosureWide <index> <tenured>
+//
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
+void Interpreter::DoCreateClosureWide(
+ compiler::InterpreterAssembler* assembler) {
+  DoCreateClosure(assembler);
+}
+
+
+// CreateMappedArguments
+//
+// Creates a new mapped arguments object.
+void Interpreter::DoCreateMappedArguments(
+ compiler::InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// CreateUnmappedArguments
+//
+// Creates a new unmapped arguments object.
+void Interpreter::DoCreateUnmappedArguments(
+ compiler::InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// Throw
+//
+// Throws the exception in the accumulator.
+void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
+ Node* exception = __ GetAccumulator();
+ __ CallRuntime(Runtime::kThrow, exception);
+ // We shouldn't ever return from a throw.
+ __ Abort(kUnexpectedReturnFromThrow);
+}
+
+
// Return
//
-// Return the value in register 0.
+// Return the value in the accumulator.
void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
__ Return();
}
+// ForInPrepare <cache_type> <cache_array> <cache_length>
+//
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The registers |cache_type|, |cache_array|, and
+// |cache_length| represent output parameters.
+void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+ Node* object = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+ for (int i = 0; i < 3; i++) {
+ // 0 == cache_type, 1 == cache_array, 2 == cache_length
+ Node* cache_info = __ LoadFixedArrayElement(result, i);
+ Node* cache_info_reg = __ BytecodeOperandReg(i);
+ __ StoreRegister(cache_info, cache_info_reg);
+ }
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ForInNext <receiver> <cache_type> <cache_array> <index>
+//
+// Returns the next enumerable property in the accumulator.
+void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
+ Node* receiver_reg = __ BytecodeOperandReg(0);
+ Node* receiver = __ LoadRegister(receiver_reg);
+ Node* cache_type_reg = __ BytecodeOperandReg(1);
+ Node* cache_type = __ LoadRegister(cache_type_reg);
+ Node* cache_array_reg = __ BytecodeOperandReg(2);
+ Node* cache_array = __ LoadRegister(cache_array_reg);
+ Node* index_reg = __ BytecodeOperandReg(3);
+ Node* index = __ LoadRegister(index_reg);
+ Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
+ cache_type, index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ForInDone <index> <cache_length>
+//
+// Returns true if the end of the enumerable properties has been reached.
+void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+ // TODO(oth): Implement directly rather than making a runtime call.
+ Node* index_reg = __ BytecodeOperandReg(0);
+ Node* index = __ LoadRegister(index_reg);
+ Node* cache_length_reg = __ BytecodeOperandReg(1);
+ Node* cache_length = __ LoadRegister(cache_length_reg);
+ Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// ForInStep <index>
+//
+// Increments the loop counter in register |index| and stores the result
+// in the accumulator.
+void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+ // TODO(oth): Implement directly rather than making a runtime call.
+ Node* index_reg = __ BytecodeOperandReg(0);
+ Node* index = __ LoadRegister(index_reg);
+ Node* result = __ CallRuntime(Runtime::kForInStep, index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
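+
+// Taken together, a for..in loop plausibly lowers to a sequence like the
+// following (register names and the index write-back are hypothetical):
+//
+//   ForInPrepare r_type, r_array, r_len        (acc holds the object)
+//  loop:
+//   ForInDone r_idx, r_len                     (acc <- true when exhausted)
+//   JumpIfTrue done
+//   ForInNext r_recv, r_type, r_array, r_idx   (acc <- next property name)
+//   ...loop body, write acc back into r_idx after ForInStep...
+//   ForInStep r_idx                            (acc <- r_idx + 1)
+//   Jump loop
+//  done: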
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/interpreter/interpreter.h b/chromium/v8/src/interpreter/interpreter.h
index c32b6831d0a..ef9b5d1fe3e 100644
--- a/chromium/v8/src/interpreter/interpreter.h
+++ b/chromium/v8/src/interpreter/interpreter.h
@@ -11,8 +11,8 @@
#include "src/base/macros.h"
#include "src/builtins.h"
#include "src/interpreter/bytecodes.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
-#include "src/token.h"
namespace v8 {
namespace internal {
@@ -54,16 +54,53 @@ class Interpreter {
void DoBinaryOp(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
+ // Generates code to perform the count operations via |function_id|.
+ void DoCountOp(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
// Generates code to perform the comparison operation associated with
// |compare_op|.
void DoCompareOp(Token::Value compare_op,
compiler::InterpreterAssembler* assembler);
- // Generates code to perform a property load via |ic|.
- void DoPropertyLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ // Generates code to load a constant from the constant pool.
+ void DoLoadConstant(compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a global load via |ic|.
+ void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a global store via |ic|.
+ void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a named property load via |ic|.
+ void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a keyed property load via |ic|.
+ void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+  // Generates code to perform a named property store via |ic|.
+ void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a keyed property store via |ic|.
+ void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a JS call.
+ void DoJSCall(compiler::InterpreterAssembler* assembler);
+
+  // Generates code to create a literal via |function_id|.
+ void DoCreateLiteral(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
+  // Generates code to perform delete via |function_id|.
+ void DoDelete(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a lookup slot load via |function_id|.
+ void DoLoadLookupSlot(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
- // Generates code to perform a property store via |ic|.
- void DoPropertyStoreIC(Callable ic,
+ // Generates code to perform a lookup slot store depending on |language_mode|.
+ void DoStoreLookupSlot(LanguageMode language_mode,
compiler::InterpreterAssembler* assembler);
bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
diff --git a/chromium/v8/src/isolate-inl.h b/chromium/v8/src/isolate-inl.h
index c281c246394..c27b7a700d4 100644
--- a/chromium/v8/src/isolate-inl.h
+++ b/chromium/v8/src/isolate-inl.h
@@ -72,8 +72,8 @@ bool Isolate::is_catchable_by_javascript(Object* exception) {
}
-Handle<GlobalObject> Isolate::global_object() {
- return Handle<GlobalObject>(context()->global_object());
+Handle<JSGlobalObject> Isolate::global_object() {
+ return Handle<JSGlobalObject>(context()->global_object(), this);
}
diff --git a/chromium/v8/src/isolate.cc b/chromium/v8/src/isolate.cc
index 3fff6b2ef7a..4e42b436b13 100644
--- a/chromium/v8/src/isolate.cc
+++ b/chromium/v8/src/isolate.cc
@@ -9,7 +9,8 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
@@ -18,14 +19,13 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
+#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/hydrogen.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
-#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
@@ -33,7 +33,6 @@
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
-#include "src/scopeinfo.h"
#include "src/simulator.h"
#include "src/snapshot/serialize.h"
#include "src/v8.h"
@@ -136,6 +135,22 @@ Isolate::PerIsolateThreadData*
}
+void Isolate::DiscardPerThreadDataForThisThread() {
+ int thread_id_int = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+ if (thread_id_int) {
+ ThreadId thread_id = ThreadId(thread_id_int);
+ DCHECK(!thread_manager_->mutex_owner_.Equals(thread_id));
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
+ PerIsolateThreadData* per_thread =
+ thread_data_table_->Lookup(this, thread_id);
+ if (per_thread) {
+ DCHECK(!per_thread->thread_state_);
+ thread_data_table_->Remove(per_thread);
+ }
+ }
+}
+
+
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
return FindPerThreadDataForThread(thread_id);
@@ -317,9 +332,8 @@ static bool IsVisibleInStackTrace(JSFunction* fun,
// exposed, in which case the native flag is set.
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces) {
- if (receiver->IsJSBuiltinsObject()) return false;
- if (fun->IsBuiltin()) return fun->shared()->native();
+ if (!FLAG_builtins_in_stack_traces && fun->shared()->IsBuiltin()) {
+ return fun->shared()->native();
}
return true;
}
@@ -774,25 +788,17 @@ bool Isolate::IsInternallyUsedPropertyName(Handle<Object> name) {
}
-bool Isolate::IsInternallyUsedPropertyName(Object* name) {
- if (name->IsSymbol()) {
- return Symbol::cast(name)->is_private();
- }
- return name == heap()->hidden_string();
-}
-
-
-bool Isolate::MayAccess(Handle<JSObject> receiver) {
+bool Isolate::MayAccess(Handle<Context> accessing_context,
+ Handle<JSObject> receiver) {
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
- DCHECK(context());
+ // During bootstrapping, callback functions are not enabled yet.
+ if (bootstrapper()->IsActive()) return true;
{
DisallowHeapAllocation no_gc;
- // During bootstrapping, callback functions are not enabled yet.
- if (bootstrapper()->IsActive()) return true;
if (receiver->IsJSGlobalProxy()) {
Object* receiver_context =
@@ -801,7 +807,8 @@ bool Isolate::MayAccess(Handle<JSObject> receiver) {
// Get the native context of current top context.
// avoid using Isolate::native_context() because it uses Handle.
- Context* native_context = context()->global_object()->native_context();
+ Context* native_context =
+ accessing_context->global_object()->native_context();
if (receiver_context == native_context) return true;
if (Context::cast(receiver_context)->security_token() ==
@@ -812,23 +819,34 @@ bool Isolate::MayAccess(Handle<JSObject> receiver) {
HandleScope scope(this);
Handle<Object> data;
- v8::NamedSecurityCallback callback;
+ v8::AccessCheckCallback callback = nullptr;
+ v8::NamedSecurityCallback named_callback = nullptr;
{ DisallowHeapAllocation no_gc;
AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
if (!access_check_info) return false;
- Object* fun_obj = access_check_info->named_callback();
- callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
- if (!callback) return false;
- data = handle(access_check_info->data(), this);
+ Object* fun_obj = access_check_info->callback();
+ callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
+ if (!callback) {
+ fun_obj = access_check_info->named_callback();
+ named_callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+ if (!named_callback) return false;
+ data = handle(access_check_info->data(), this);
+ }
}
LOG(this, ApiSecurityCheck());
- // Leaving JavaScript.
- VMState<EXTERNAL> state(this);
- Handle<Object> key = factory()->undefined_value();
- return callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
- v8::ACCESS_HAS, v8::Utils::ToLocal(data));
+ {
+ // Leaving JavaScript.
+ VMState<EXTERNAL> state(this);
+ if (callback) {
+ return callback(v8::Utils::ToLocal(accessing_context),
+ v8::Utils::ToLocal(receiver));
+ }
+ Handle<Object> key = factory()->undefined_value();
+ return named_callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
+ v8::ACCESS_HAS, v8::Utils::ToLocal(data));
+ }
}
@@ -1008,13 +1026,21 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
Handle<Object> message_obj = CreateMessage(exception_handle, location);
thread_local_top()->pending_message_obj_ = *message_obj;
- // If the abort-on-uncaught-exception flag is specified, abort on any
- // exception not caught by JavaScript, even when an external handler is
- // present. This flag is intended for use by JavaScript developers, so
- // print a user-friendly stack trace (not an internal one).
+ // For any exception not caught by JavaScript, even when an external
+ // handler is present:
+ // If the abort-on-uncaught-exception flag is specified, and if the
+ // embedder didn't specify a custom uncaught exception callback,
+ // or if the custom callback determined that V8 should abort, then
+ // abort.
if (FLAG_abort_on_uncaught_exception &&
- PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) {
- FLAG_abort_on_uncaught_exception = false; // Prevent endless recursion.
+ PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT &&
+ (!abort_on_uncaught_exception_callback_ ||
+ abort_on_uncaught_exception_callback_(
+ reinterpret_cast<v8::Isolate*>(this)))) {
+ // Prevent endless recursion.
+ FLAG_abort_on_uncaught_exception = false;
+ // This flag is intended for use by JavaScript developers, so
+ // print a user-friendly stack trace (not an internal one).
PrintF(stderr, "%s\n\nFROM\n",
MessageHandler::GetLocalizedMessage(this, message_obj).get());
PrintCurrentStackTrace(stderr);
@@ -1323,7 +1349,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
for (int i = 1; i < elements_limit; i += 4) {
Handle<JSFunction> fun =
handle(JSFunction::cast(elements->get(i + 1)), this);
- if (!fun->IsSubjectToDebugging()) continue;
+ if (!fun->shared()->IsSubjectToDebugging()) continue;
Object* script = fun->shared()->script();
if (script->IsScript() &&
@@ -1338,29 +1364,11 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
}
-// Traverse prototype chain to find out whether the object is derived from
-// the Error object.
-bool Isolate::IsErrorObject(Handle<Object> obj) {
- if (!obj->IsJSObject()) return false;
- Handle<Object> error_constructor = error_function();
- DisallowHeapAllocation no_gc;
- for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) return false;
- if (iter.GetCurrent<JSObject>()->map()->GetConstructor() ==
- *error_constructor) {
- return true;
- }
- }
- return false;
-}
-
-
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
- if (IsErrorObject(exception)) {
+ if (Object::IsErrorObject(this, exception)) {
// We fetch the stack trace that corresponds to this error object.
// If the lookup fails, the exception is probably not a valid Error
// object. In that case, we fall through and capture the stack trace
@@ -1602,6 +1610,12 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
}
+void Isolate::SetAbortOnUncaughtExceptionCallback(
+ v8::Isolate::AbortOnUncaughtExceptionCallback callback) {
+ abort_on_uncaught_exception_callback_ = callback;
+}
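+
+// Hypothetical embedder usage: under --abort-on-uncaught-exception, the
+// callback decides per throw whether V8 should still abort, e.g.
+//
+//   isolate->SetAbortOnUncaughtExceptionCallback(
+//       [](v8::Isolate*) { return true; /* keep aborting */ });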
+
+
Handle<Context> Isolate::native_context() {
return handle(context()->native_context());
}
@@ -1764,13 +1778,17 @@ Isolate::Isolate(bool enable_serializer)
deferred_handles_head_(NULL),
optimizing_compile_dispatcher_(NULL),
stress_deopt_count_(0),
- vector_store_virtual_register_(NULL),
+ virtual_handler_register_(NULL),
+ virtual_slot_register_(NULL),
next_optimization_id_(0),
+ js_calls_from_api_counter_(0),
#if TRACE_MAPS
next_unique_sfi_id_(0),
#endif
use_counter_callback_(NULL),
- basic_block_profiler_(NULL) {
+ basic_block_profiler_(NULL),
+ cancelable_task_manager_(new CancelableTaskManager()),
+ abort_on_uncaught_exception_callback_(NULL) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
CHECK(thread_data_table_);
@@ -1808,6 +1826,8 @@ Isolate::Isolate(bool enable_serializer)
InitializeLoggingAndCounters();
debug_ = new Debug(this);
+
+ init_memcopy_functions(this);
}
@@ -1819,7 +1839,9 @@ void Isolate::TearDown() {
// direct pointer. We don't use Enter/Exit here to avoid
// initializing the thread data.
PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- Isolate* saved_isolate = UncheckedCurrent();
+ DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
+ Isolate* saved_isolate =
+ reinterpret_cast<Isolate*>(base::Thread::GetThreadLocal(isolate_key_));
SetIsolateThreadLocals(this, NULL);
Deinit();
@@ -1847,8 +1869,6 @@ void Isolate::ClearSerializerData() {
external_reference_table_ = NULL;
delete external_reference_map_;
external_reference_map_ = NULL;
- delete root_index_map_;
- root_index_map_ = NULL;
}
@@ -1875,6 +1895,10 @@ void Isolate::Deinit() {
PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
}
+ if (cpu_profiler_) {
+ cpu_profiler_->DeleteAllProfiles();
+ }
+
// We must stop the logger before we tear down other components.
Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
@@ -1895,19 +1919,19 @@ void Isolate::Deinit() {
delete basic_block_profiler_;
basic_block_profiler_ = NULL;
- for (Cancelable* task : cancelable_tasks_) {
- task->Cancel();
- }
- cancelable_tasks_.clear();
-
heap_.TearDown();
logger_->TearDown();
+ cancelable_task_manager()->CancelAndWait();
+
delete heap_profiler_;
heap_profiler_ = NULL;
delete cpu_profiler_;
cpu_profiler_ = NULL;
+ delete root_index_map_;
+ root_index_map_ = NULL;
+
ClearSerializerData();
}
@@ -2000,6 +2024,9 @@ Isolate::~Isolate() {
delete debug_;
debug_ = NULL;
+ delete cancelable_task_manager_;
+ cancelable_task_manager_ = nullptr;
+
#if USE_SIMULATOR
Simulator::TearDown(simulator_i_cache_, simulator_redirection_);
simulator_i_cache_ = nullptr;
@@ -2120,7 +2147,7 @@ bool Isolate::Init(Deserializer* des) {
#endif
#endif
- code_aging_helper_ = new CodeAgingHelper();
+ code_aging_helper_ = new CodeAgingHelper(this);
{ // NOLINT
// Ensure that the thread has a valid stack guard. The v8::Locker object
@@ -2169,12 +2196,6 @@ bool Isolate::Init(Deserializer* des) {
// occur, clearing/updating ICs.
runtime_profiler_ = new RuntimeProfiler(this);
- if (create_heap_objects) {
- if (!bootstrapper_->CreateCodeStubContext(this)) {
- return false;
- }
- }
-
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize(this);
@@ -2214,7 +2235,7 @@ bool Isolate::Init(Deserializer* des) {
heap_.amount_of_external_allocated_memory_at_last_global_gc_)),
Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
- time_millis_at_init_ = base::OS::TimeCurrentMillis();
+ time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
heap_.NotifyDeserializationComplete();
@@ -2384,18 +2405,15 @@ CodeTracer* Isolate::GetCodeTracer() {
Map* Isolate::get_initial_js_array_map(ElementsKind kind, Strength strength) {
- Context* native_context = context()->native_context();
- Object* maybe_map_array = is_strong(strength)
- ? native_context->js_array_strong_maps()
- : native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- return Map::cast(maybe_transitioned_map);
+ if (IsFastElementsKind(kind)) {
+ DisallowHeapAllocation no_gc;
+ Object* const initial_js_array_map = context()->native_context()->get(
+ Context::ArrayMapIndex(kind, strength));
+ if (!initial_js_array_map->IsUndefined()) {
+ return Map::cast(initial_js_array_map);
}
}
- return NULL;
+ return nullptr;
}
@@ -2556,6 +2574,7 @@ Handle<JSObject> Isolate::GetSymbolRegistry() {
SetUpSubregistry(registry, map, "for");
SetUpSubregistry(registry, map, "for_api");
SetUpSubregistry(registry, map, "keyFor");
+ SetUpSubregistry(registry, map, "private_api");
}
return Handle<JSObject>::cast(factory()->symbol_registry());
}
@@ -2653,9 +2672,9 @@ void Isolate::RunMicrotasks() {
SaveContext save(this);
set_context(microtask_function->context()->native_context());
MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> result =
- Execution::TryCall(microtask_function, factory()->undefined_value(),
- 0, NULL, &maybe_exception);
+ MaybeHandle<Object> result = Execution::TryCall(
+ this, microtask_function, factory()->undefined_value(), 0, NULL,
+ &maybe_exception);
// If execution is terminating, just bail out.
Handle<Object> exception;
if (result.is_null() && maybe_exception.is_null()) {
@@ -2769,18 +2788,6 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
-void Isolate::RegisterCancelableTask(Cancelable* task) {
- cancelable_tasks_.insert(task);
-}
-
-
-void Isolate::RemoveCancelableTask(Cancelable* task) {
- auto removed = cancelable_tasks_.erase(task);
- USE(removed);
- DCHECK(removed == 1);
-}
-
-
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/chromium/v8/src/isolate.h b/chromium/v8/src/isolate.h
index 035b4b363c2..40c81571656 100644
--- a/chromium/v8/src/isolate.h
+++ b/chromium/v8/src/isolate.h
@@ -481,14 +481,9 @@ class Isolate {
return isolate;
}
- INLINE(static Isolate* UncheckedCurrent()) {
- DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
- return reinterpret_cast<Isolate*>(
- base::Thread::GetThreadLocal(isolate_key_));
- }
-
- // Like UncheckedCurrent, but skips the check that |isolate_key_| was
- // initialized. Callers have to ensure that themselves.
+ // Like Current, but skips the check that |isolate_key_| was initialized.
+ // Callers have to ensure that themselves.
+ // DO NOT USE. The only remaining callsite will be deleted soon.
INLINE(static Isolate* UnsafeCurrent()) {
return reinterpret_cast<Isolate*>(
base::Thread::GetThreadLocal(isolate_key_));
@@ -523,6 +518,10 @@ class Isolate {
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
+  // Discards the per-thread data for this particular (isolate, thread)
+  // combination. If none exists, this is a no-op.
+ void DiscardPerThreadDataForThisThread();
+
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
@@ -617,7 +616,7 @@ class Isolate {
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
- inline Handle<GlobalObject> global_object();
+ inline Handle<JSGlobalObject> global_object();
// Returns the global proxy object of the current context.
JSObject* global_proxy() {
@@ -655,6 +654,9 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
+ void SetAbortOnUncaughtExceptionCallback(
+ v8::Isolate::AbortOnUncaughtExceptionCallback callback);
+
enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator,
@@ -676,13 +678,12 @@ class Isolate {
Handle<JSArray> GetDetailedFromSimpleStackTrace(
Handle<JSObject> error_object);
- // Returns if the top context may access the given global object. If
+  // Returns whether the given context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
+ bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
- bool MayAccess(Handle<JSObject> receiver);
bool IsInternallyUsedPropertyName(Handle<Object> name);
- bool IsInternallyUsedPropertyName(Object* name);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(Handle<JSObject> receiver);
@@ -936,7 +937,7 @@ class Isolate {
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
- return base::OS::TimeCurrentMillis() - time_millis_at_init_;
+ return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
}
DateCache* date_cache() {
@@ -950,10 +951,6 @@ class Isolate {
date_cache_ = date_cache;
}
- ErrorToStringHelper* error_tostring_helper() {
- return &error_tostring_helper_;
- }
-
Map* get_initial_js_array_map(ElementsKind kind,
Strength strength = Strength::WEAK);
@@ -1024,10 +1021,12 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- void* vector_store_virtual_register_address() {
- return &vector_store_virtual_register_;
+ void* virtual_handler_register_address() {
+ return &virtual_handler_register_;
}
+ void* virtual_slot_register_address() { return &virtual_slot_register_; }
+
base::RandomNumberGenerator* random_number_generator();
// Given an address occupied by a live code object, return that object.
@@ -1041,6 +1040,12 @@ class Isolate {
return id;
}
+ void IncrementJsCallsFromApiCounter() { ++js_calls_from_api_counter_; }
+
+ unsigned int js_calls_from_api_counter() {
+ return js_calls_from_api_counter_;
+ }
+
// Get (and lazily initialize) the registry for per-isolate symbols.
Handle<JSObject> GetSymbolRegistry();
@@ -1082,8 +1087,9 @@ class Isolate {
FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
- void RegisterCancelableTask(Cancelable* task);
- void RemoveCancelableTask(Cancelable* task);
+ CancelableTaskManager* cancelable_task_manager() {
+ return cancelable_task_manager_;
+ }
interpreter::Interpreter* interpreter() const { return interpreter_; }
@@ -1195,10 +1201,6 @@ class Isolate {
// the frame.
void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);
- // Traverse prototype chain to find out whether the object is derived from
- // the Error object.
- bool IsErrorObject(Handle<Object> obj);
-
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
@@ -1243,7 +1245,6 @@ class Isolate {
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
- ErrorToStringHelper error_tostring_helper_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
@@ -1303,10 +1304,14 @@ class Isolate {
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
- Address vector_store_virtual_register_;
+ Address virtual_handler_register_;
+ Address virtual_slot_register_;
int next_optimization_id_;
+  // Counts JavaScript calls from the API. Wraps around on overflow.
+ unsigned int js_calls_from_api_counter_;
+
#if TRACE_MAPS
int next_unique_sfi_id_;
#endif
@@ -1323,7 +1328,10 @@ class Isolate {
FutexWaitListNode futex_wait_list_node_;
- std::set<Cancelable*> cancelable_tasks_;
+ CancelableTaskManager* cancelable_task_manager_;
+
+ v8::Isolate::AbortOnUncaughtExceptionCallback
+ abort_on_uncaught_exception_callback_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
@@ -1550,6 +1558,7 @@ class CodeTracer final : public Malloced {
int scope_depth_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ISOLATE_H_
diff --git a/chromium/v8/src/js/OWNERS b/chromium/v8/src/js/OWNERS
new file mode 100644
index 00000000000..f7002c723b3
--- /dev/null
+++ b/chromium/v8/src/js/OWNERS
@@ -0,0 +1,11 @@
+set noparent
+
+adamk@chromium.org
+bmeurer@chromium.org
+cbruni@chromium.org
+ishell@chromium.org
+jkummerow@chromium.org
+littledan@chromium.org
+rossberg@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/chromium/v8/src/array-iterator.js b/chromium/v8/src/js/array-iterator.js
index bf17a0ac8c8..2609ebdd736 100644
--- a/chromium/v8/src/array-iterator.js
+++ b/chromium/v8/src/js/array-iterator.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $arrayValues;
-
(function(global, utils) {
"use strict";
@@ -20,26 +18,15 @@ var arrayIteratorNextIndexSymbol =
var arrayIteratorObjectSymbol =
utils.ImportNow("array_iterator_object_symbol");
var GlobalArray = global.Array;
+var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var GlobalTypedArray = global.Uint8Array.__proto__;
-macro TYPED_ARRAYS(FUNCTION)
- FUNCTION(Uint8Array)
- FUNCTION(Int8Array)
- FUNCTION(Uint16Array)
- FUNCTION(Int16Array)
- FUNCTION(Uint32Array)
- FUNCTION(Int32Array)
- FUNCTION(Float32Array)
- FUNCTION(Float64Array)
- FUNCTION(Uint8ClampedArray)
-endmacro
-
-macro COPY_FROM_GLOBAL(NAME)
- var GlobalNAME = global.NAME;
-endmacro
-
-TYPED_ARRAYS(COPY_FROM_GLOBAL)
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
// -----------------------------------------------------------------------
@@ -74,7 +61,7 @@ function ArrayIteratorNext() {
var value = UNDEFINED;
var done = true;
- if (!IS_SPEC_OBJECT(iterator) ||
+ if (!IS_RECEIVER(iterator) ||
!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'Array Iterator.prototype.next', this);
@@ -123,7 +110,7 @@ function ArrayKeys() {
}
-%FunctionSetPrototype(ArrayIterator, {__proto__: $iteratorPrototype});
+%FunctionSetPrototype(ArrayIterator, {__proto__: IteratorPrototype});
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
@@ -148,20 +135,19 @@ utils.SetFunctionName(ArrayValues, 'values');
%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
DONT_ENUM);
-macro EXTEND_TYPED_ARRAY(NAME)
- %AddNamedProperty(GlobalNAME.prototype, 'entries', ArrayEntries, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, 'values', ArrayValues, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, 'keys', ArrayKeys, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, iteratorSymbol, ArrayValues,
- DONT_ENUM);
-endmacro
-
-TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
+%AddNamedProperty(GlobalTypedArray.prototype,
+ 'entries', ArrayEntries, DONT_ENUM);
+%AddNamedProperty(GlobalTypedArray.prototype, 'values', ArrayValues, DONT_ENUM);
+%AddNamedProperty(GlobalTypedArray.prototype, 'keys', ArrayKeys, DONT_ENUM);
+%AddNamedProperty(GlobalTypedArray.prototype,
+ iteratorSymbol, ArrayValues, DONT_ENUM);
// -------------------------------------------------------------------
// Exports
-$arrayValues = ArrayValues;
+utils.Export(function(to) {
+ to.ArrayValues = ArrayValues;
+});
%InstallToContext(["array_values_iterator", ArrayValues]);
diff --git a/chromium/v8/src/array.js b/chromium/v8/src/js/array.js
index bf04bb7e7b5..f9cf161191a 100644
--- a/chromium/v8/src/array.js
+++ b/chromium/v8/src/js/array.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, utils) {
+(function(global, utils, extrasUtils) {
"use strict";
@@ -11,32 +11,73 @@
// -------------------------------------------------------------------
// Imports
-var Delete;
+var AddIndexedProperty;
+var FLAG_harmony_tolength;
+var FLAG_harmony_species;
+var GetIterator;
+var GetMethod;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-var MathMin;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
+var ObjectDefineProperty;
var ObjectHasOwnProperty;
-var ObjectIsFrozen;
-var ObjectIsSealed;
-var ObjectToString;
-var ToNumber;
-var ToString;
+var ObjectToString = utils.ImportNow("object_to_string");
+var ObserveBeginPerformSplice;
+var ObserveEndPerformSplice;
+var ObserveEnqueueSpliceRecord;
+var SameValueZero;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
- Delete = from.Delete;
- MathMin = from.MathMin;
+ AddIndexedProperty = from.AddIndexedProperty;
+ GetIterator = from.GetIterator;
+ GetMethod = from.GetMethod;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ ObjectDefineProperty = from.ObjectDefineProperty;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- ObjectIsFrozen = from.ObjectIsFrozen;
- ObjectIsSealed = from.ObjectIsSealed;
- ObjectToString = from.ObjectToString;
- ToNumber = from.ToNumber;
- ToString = from.ToString;
+ ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
+ ObserveEndPerformSplice = from.ObserveEndPerformSplice;
+ ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
+ SameValueZero = from.SameValueZero;
+});
+
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tolength = from.FLAG_harmony_tolength;
+ FLAG_harmony_species = from.FLAG_harmony_species;
});
// -------------------------------------------------------------------
+
+function ArraySpeciesCreate(array, length) {
+ var constructor;
+ if (FLAG_harmony_species) {
+ constructor = %ArraySpeciesConstructor(array);
+ } else {
+ constructor = GlobalArray;
+ }
+ return new constructor(length);
+}
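+
+// With --harmony_species, array methods that allocate a result respect
+// subclassing, e.g.
+//   class MyArray extends Array {}
+//   new MyArray().slice() instanceof MyArray  // true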
+
+
+function DefineIndexedProperty(array, i, value) {
+ if (FLAG_harmony_species) {
+ var result = ObjectDefineProperty(array, i, {
+ value: value, writable: true, configurable: true, enumerable: true
+ });
+ if (!result) throw MakeTypeError(kStrictCannotAssign, i);
+ } else {
+ AddIndexedProperty(array, i, value);
+ }
+}
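+
+// A define (rather than a plain set) matches the spec's CreateDataProperty:
+// it must not trigger accessors inherited from the prototype chain, such as
+// a setter installed on Array.prototype for the index being written.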
+
+
// Global list of arrays visited during toString, toLocaleString and
// join invocations.
var visited_arrays = new InternalArray();
@@ -221,11 +262,7 @@ function ConvertToLocaleString(e) {
if (IS_NULL_OR_UNDEFINED(e)) {
return '';
} else {
- // According to ES5, section 15.4.4.3, the toLocaleString conversion
- // must throw a TypeError if ToObject(e).toLocaleString isn't
- // callable.
- var e_obj = TO_OBJECT(e);
- return TO_STRING(e_obj.toLocaleString());
+ return TO_STRING(e.toLocaleString());
}
}
@@ -240,7 +277,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
- %AddElement(deleted_elements, i - start_i, current);
+ DefineIndexedProperty(deleted_elements, i - start_i, current);
}
}
} else {
@@ -251,7 +288,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
- %AddElement(deleted_elements, key - start_i, current);
+ DefineIndexedProperty(deleted_elements, key - start_i, current);
}
}
}
@@ -268,7 +305,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
- MathMin(len - del_count + num_additional_args, 0xffffffff));
+ MinSimple(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
@@ -331,9 +368,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
var index = start_i + i;
if (HAS_INDEX(array, index, is_array)) {
var current = array[index];
- // The spec requires [[DefineOwnProperty]] here, %AddElement is close
- // enough (in that it ignores the prototype).
- %AddElement(deleted_elements, i, current);
+ DefineIndexedProperty(deleted_elements, i, current);
}
}
}
@@ -389,7 +424,7 @@ function ArrayToString() {
func = array.join;
}
if (!IS_CALLABLE(func)) {
- return %_CallFunction(array, ObjectToString);
+ return %_Call(ObjectToString, array);
}
return %_Call(func, array);
}
@@ -445,12 +480,12 @@ function ObservedArrayPop(n) {
var value = this[n];
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, n, [value], 0);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, n, [value], 0);
}
return value;
@@ -474,7 +509,7 @@ function ArrayPop() {
n--;
var value = array[n];
- Delete(array, n, true);
+ %DeleteProperty_Strict(array, n);
array.length = n;
return value;
}
@@ -485,15 +520,15 @@ function ObservedArrayPush() {
var m = %_ArgumentsLength();
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
var new_length = n + m;
this.length = new_length;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, n, [], m);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, n, [], m);
}
return new_length;
@@ -512,6 +547,15 @@ function ArrayPush() {
var n = TO_LENGTH_OR_UINT32(array.length);
var m = %_ArgumentsLength();
+ // It appears that there is no enforced, absolute limit on the number of
+ // arguments, but it would surely blow the stack to use 2**30 or more.
+ // To avoid integer overflow, do the comparison to the max safe integer
+ // after subtracting 2**30 from both sides. (2**31 would seem like a
+ // natural value, but it is negative in JS, and 2**32 is 1.)
+ if (m > (1 << 30) || (n - (1 << 30)) + m > kMaxSafeInteger - (1 << 30)) {
+ throw MakeTypeError(kPushPastSafeLength, m, n);
+ }
+
for (var i = 0; i < m; i++) {
array[i+n] = %_Arguments(i);
}
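[editor's note: the rebasing by 2**30 in the guard above exists because JS bitwise shifts operate on int32; a short illustration:]

1 << 30;  //  1073741824: still a positive int32
1 << 31;  // -2147483648: 2**31 wraps negative
1 << 32;  //  1: shift counts are taken mod 32
// Subtracting 2**30 from both sides keeps every operand a positive
// safe integer before the comparison against kMaxSafeInteger.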
@@ -625,12 +669,12 @@ function ObservedArrayShift(len) {
var first = this[0];
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, 0, [first], 0);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
@@ -648,7 +692,7 @@ function ArrayShift() {
return;
}
- if (ObjectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
+ if (%object_is_sealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (%IsObserved(array))
return ObservedArrayShift.call(array, len);
@@ -672,7 +716,7 @@ function ObservedArrayUnshift() {
var num_arguments = %_ArgumentsLength();
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
for (var i = 0; i < num_arguments; i++) {
this[i] = %_Arguments(i);
@@ -680,8 +724,8 @@ function ObservedArrayUnshift() {
var new_length = len + num_arguments;
this.length = new_length;
} finally {
- $observeEndPerformSplice(this);
- $observeEnqueueSpliceRecord(this, 0, [], num_arguments);
+ ObserveEndPerformSplice(this);
+ ObserveEnqueueSpliceRecord(this, 0, [], num_arguments);
}
return new_length;
@@ -699,7 +743,7 @@ function ArrayUnshift(arg1) { // length == 1
var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
- !ObjectIsSealed(array)) {
+ !%object_is_sealed(array)) {
SparseMove(array, 0, 0, len, num_arguments);
} else {
SimpleMove(array, 0, 0, len, num_arguments);
@@ -739,7 +783,7 @@ function ArraySlice(start, end) {
if (end_i > len) end_i = len;
}
- var result = [];
+ var result = ArraySpeciesCreate(array, MaxSimple(end_i - start_i, 0));
if (end_i < start_i) return result;
@@ -799,7 +843,7 @@ function ObservedArraySplice(start, delete_count) {
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
try {
- $observeBeginPerformSplice(this);
+ ObserveBeginPerformSplice(this);
SimpleSlice(this, start_i, del_count, len, deleted_elements);
SimpleMove(this, start_i, del_count, len, num_elements_to_add);
@@ -815,12 +859,12 @@ function ObservedArraySplice(start, delete_count) {
this.length = len - del_count + num_elements_to_add;
} finally {
- $observeEndPerformSplice(this);
+ ObserveEndPerformSplice(this);
if (deleted_elements.length || num_elements_to_add) {
- $observeEnqueueSpliceRecord(this,
- start_i,
- deleted_elements.slice(),
- num_elements_to_add);
+ ObserveEnqueueSpliceRecord(this,
+ start_i,
+ deleted_elements.slice(),
+ num_elements_to_add);
}
}
@@ -841,13 +885,13 @@ function ArraySplice(start, delete_count) {
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
- var deleted_elements = [];
+ var deleted_elements = ArraySpeciesCreate(array, del_count);
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
- if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
+ if (del_count != num_elements_to_add && %object_is_sealed(array)) {
throw MakeTypeError(kArrayFunctionsOnSealed);
- } else if (del_count > 0 && ObjectIsFrozen(array)) {
+ } else if (del_count > 0 && %object_is_frozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
@@ -892,8 +936,8 @@ function InnerArraySort(array, length, comparefn) {
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
- x = ToString(x);
- y = ToString(y);
+ x = TO_STRING(x);
+ y = TO_STRING(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
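[editor's note: behavior is unchanged by the TO_STRING switch — the default comparator still sorts by string:]

[10, 9, 1].sort();  // [1, 10, 9]: '10' < '9' lexicographically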
@@ -1181,28 +1225,23 @@ function ArraySort(comparefn) {
// The following functions cannot be made efficient on sparse arrays while
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
-function InnerArrayFilter(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
-
- var accumulator = new InternalArray();
- var accumulator_length = 0;
+function InnerArrayFilter(f, receiver, array, length, result) {
+ var result_length = 0;
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
if (%_Call(f, receiver, element, i, array)) {
- accumulator[accumulator_length++] = element;
+ DefineIndexedProperty(result, result_length, element);
+ result_length++;
}
}
}
- var result = new GlobalArray();
- %MoveArrayContents(accumulator, result);
return result;
}
+
+
function ArrayFilter(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
@@ -1210,24 +1249,25 @@ function ArrayFilter(f, receiver) {
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
var length = TO_LENGTH_OR_UINT32(array.length);
- return InnerArrayFilter(f, receiver, array, length);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ var result = ArraySpeciesCreate(array, 0);
+ return InnerArrayFilter(f, receiver, array, length, result);
}
+
function InnerArrayForEach(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
%_Call(f, receiver, element, i, array);
}
}
}
+
function ArrayForEach(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
@@ -1243,12 +1283,9 @@ function InnerArraySome(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
if (%_Call(f, receiver, element, i, array)) return true;
}
}
@@ -1273,12 +1310,9 @@ function InnerArrayEvery(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
if (!%_Call(f, receiver, element, i, array)) return false;
}
}
@@ -1296,37 +1330,26 @@ function ArrayEvery(f, receiver) {
}
-function InnerArrayMap(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+function ArrayMap(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
- var accumulator = new InternalArray(length);
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH_OR_UINT32(array.length);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ var result = ArraySpeciesCreate(array, length);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
- accumulator[i] = %_Call(f, receiver, element, i, array);
+ DefineIndexedProperty(result, i, %_Call(f, receiver, element, i, array));
}
}
- var result = new GlobalArray();
- %MoveArrayContents(accumulator, result);
return result;
}
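[editor's note: the HAS_INDEX guard means the rewritten ArrayMap still skips holes while preserving them in the species-constructed result; sketch:]

[1, , 3].map(function(x) { return x * 2; });  // [2, <hole>, 6]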
-function ArrayMap(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
- return InnerArrayMap(f, receiver, array, length);
-}
-
-
// For .indexOf, we don't need to pass in the number of arguments
// at the callsite since ToInteger(undefined) == 0; however, for
// .lastIndexOf, we need to pass it, since the behavior for passing
@@ -1469,12 +1492,9 @@ function InnerArrayReduce(callback, current, array, length, argumentsLength) {
throw MakeTypeError(kReduceNoInitial);
}
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
for (; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(callback);
current = callback(current, element, i, array);
}
}
@@ -1512,12 +1532,9 @@ function InnerArrayReduceRight(callback, current, array, length,
throw MakeTypeError(kReduceNoInitial);
}
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
for (; i >= 0; i--) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(callback);
current = callback(current, element, i, array);
}
}
@@ -1536,12 +1553,289 @@ function ArrayReduceRight(callback, current) {
%_ArgumentsLength());
}
-// ES5, 15.4.3.2
-function ArrayIsArray(obj) {
- return IS_ARRAY(obj);
+
+function InnerArrayCopyWithin(target, start, end, array, length) {
+ target = TO_INTEGER(target);
+ var to;
+ if (target < 0) {
+ to = MaxSimple(length + target, 0);
+ } else {
+ to = MinSimple(target, length);
+ }
+
+ start = TO_INTEGER(start);
+ var from;
+ if (start < 0) {
+ from = MaxSimple(length + start, 0);
+ } else {
+ from = MinSimple(start, length);
+ }
+
+ end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+ var final;
+ if (end < 0) {
+ final = MaxSimple(length + end, 0);
+ } else {
+ final = MinSimple(end, length);
+ }
+
+ var count = MinSimple(final - from, length - to);
+ var direction = 1;
+ if (from < to && to < (from + count)) {
+ direction = -1;
+ from = from + count - 1;
+ to = to + count - 1;
+ }
+
+ while (count > 0) {
+ if (from in array) {
+ array[to] = array[from];
+ } else {
+ delete array[to];
+ }
+ from = from + direction;
+ to = to + direction;
+ count--;
+ }
+
+ return array;
+}
+
+
+// ES6 draft 03-17-15, section 22.1.3.3
+function ArrayCopyWithin(target, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+
+ return InnerArrayCopyWithin(target, start, end, array, length);
+}
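[editor's note: expected copyWithin behavior, for reference — negative indices count from the end, as in slice:]

[1, 2, 3, 4, 5].copyWithin(0, 3);     // [4, 5, 3, 4, 5]
[1, 2, 3, 4, 5].copyWithin(1, 3, 4);  // [1, 4, 3, 4, 5]
[1, 2, 3, 4, 5].copyWithin(-2);       // [1, 2, 3, 1, 2]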
+
+
+function InnerArrayFind(predicate, thisArg, array, length) {
+ if (!IS_CALLABLE(predicate)) {
+ throw MakeTypeError(kCalledNonCallable, predicate);
+ }
+
+ for (var i = 0; i < length; i++) {
+ var element = array[i];
+ if (%_Call(predicate, thisArg, element, i, array)) {
+ return element;
+ }
+ }
+
+ return;
+}
+
+
+// ES6 draft 07-15-13, section 15.4.3.23
+function ArrayFind(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
+
+ var array = TO_OBJECT(this);
+ var length = TO_INTEGER(array.length);
+
+ return InnerArrayFind(predicate, thisArg, array, length);
}
+function InnerArrayFindIndex(predicate, thisArg, array, length) {
+ if (!IS_CALLABLE(predicate)) {
+ throw MakeTypeError(kCalledNonCallable, predicate);
+ }
+
+ for (var i = 0; i < length; i++) {
+ var element = array[i];
+ if (%_Call(predicate, thisArg, element, i, array)) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+
+// ES6 draft 07-15-13, section 15.4.3.24
+function ArrayFindIndex(predicate, thisArg) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
+
+ var array = TO_OBJECT(this);
+ var length = TO_INTEGER(array.length);
+
+ return InnerArrayFindIndex(predicate, thisArg, array, length);
+}
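[editor's note: usage sketch — per the loops above, the predicate also visits holes (it sees undefined), unlike indexOf:]

[5, 12, 8, 130].find(function(x) { return x > 10; });       // 12
[5, 12, 8, 130].findIndex(function(x) { return x > 10; });  // 1
[5, 12, 8, 130].find(function(x) { return x > 200; });      // undefined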
+
+
+// ES6, draft 04-05-14, section 22.1.3.6
+function InnerArrayFill(value, start, end, array, length) {
+ var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
+ var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
+
+ if (i < 0) {
+ i += length;
+ if (i < 0) i = 0;
+ } else {
+ if (i > length) i = length;
+ }
+
+ if (end < 0) {
+ end += length;
+ if (end < 0) end = 0;
+ } else {
+ if (end > length) end = length;
+ }
+
+ if ((end - i) > 0 && %object_is_frozen(array)) {
+ throw MakeTypeError(kArrayFunctionsOnFrozen);
+ }
+
+ for (; i < end; i++)
+ array[i] = value;
+ return array;
+}
+
+
+// ES6, draft 04-05-14, section 22.1.3.6
+function ArrayFill(value, start, end) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH_OR_UINT32(array.length);
+
+ return InnerArrayFill(value, start, end, array, length);
+}
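[editor's note: fill clamps its range like copyWithin and, per the check above, throws on frozen arrays only when the range is non-empty:]

[0, 0, 0, 0].fill(7, 1, 3);  // [0, 7, 7, 0]
new Array(3).fill(1);        // [1, 1, 1]
[1, 2, 3].fill(9, -2);       // [1, 9, 9]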
+
+
+function InnerArrayIncludes(searchElement, fromIndex, array, length) {
+ if (length === 0) {
+ return false;
+ }
+
+ var n = TO_INTEGER(fromIndex);
+
+ var k;
+ if (n >= 0) {
+ k = n;
+ } else {
+ k = length + n;
+ if (k < 0) {
+ k = 0;
+ }
+ }
+
+ while (k < length) {
+ var elementK = array[k];
+ if (SameValueZero(searchElement, elementK)) {
+ return true;
+ }
+
+ ++k;
+ }
+
+ return false;
+}
+
+
+// ES2016 draft, section 22.1.3.11
+function ArrayIncludes(searchElement, fromIndex) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+
+ return InnerArrayIncludes(searchElement, fromIndex, array, length);
+}
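[editor's note: includes uses SameValueZero, which is the practical difference from indexOf:]

[1, 2, NaN].includes(NaN);  // true: SameValueZero matches NaN
[1, 2, NaN].indexOf(NaN);   // -1: strict equality does not
[-0].includes(0);           // true: +0 and -0 compare equal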
+
+
+function AddArrayElement(constructor, array, i, value) {
+ if (constructor === GlobalArray) {
+ AddIndexedProperty(array, i, value);
+ } else {
+ ObjectDefineProperty(array, i, {
+ value: value, writable: true, configurable: true, enumerable: true
+ });
+ }
+}
+
+
+// ES6, draft 10-14-14, section 22.1.2.1
+function ArrayFrom(arrayLike, mapfn, receiver) {
+ var items = TO_OBJECT(arrayLike);
+ var mapping = !IS_UNDEFINED(mapfn);
+
+ if (mapping) {
+ if (!IS_CALLABLE(mapfn)) {
+ throw MakeTypeError(kCalledNonCallable, mapfn);
+ }
+ }
+
+ var iterable = GetMethod(items, iteratorSymbol);
+ var k;
+ var result;
+ var mappedValue;
+ var nextValue;
+
+ if (!IS_UNDEFINED(iterable)) {
+ result = %IsConstructor(this) ? new this() : [];
+
+ var iterator = GetIterator(items, iterable);
+
+ k = 0;
+ while (true) {
+ var next = iterator.next();
+
+ if (!IS_RECEIVER(next)) {
+ throw MakeTypeError(kIteratorResultNotAnObject, next);
+ }
+
+ if (next.done) {
+ result.length = k;
+ return result;
+ }
+
+ nextValue = next.value;
+ if (mapping) {
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
+ } else {
+ mappedValue = nextValue;
+ }
+ AddArrayElement(this, result, k, mappedValue);
+ k++;
+ }
+ } else {
+ var len = TO_LENGTH(items.length);
+ result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
+
+ for (k = 0; k < len; ++k) {
+ nextValue = items[k];
+ if (mapping) {
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
+ } else {
+ mappedValue = nextValue;
+ }
+ AddArrayElement(this, result, k, mappedValue);
+ }
+
+ result.length = k;
+ return result;
+ }
+}
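[editor's note: Array.from takes the iterator path when items[@@iterator] is present, else the array-like path; `this` is used as the constructor when it is one. Sketch:]

Array.from('abc');                                      // ['a', 'b', 'c']
Array.from({length: 3}, function(v, k) { return k; });  // [0, 1, 2]
function MyCtor(n) {}  // any constructor works, not only Array subclasses
Array.from.call(MyCtor, [1, 2]) instanceof MyCtor;      // true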
+
+
+// ES6, draft 05-22-14, section 22.1.2.3
+function ArrayOf() {
+ var length = %_ArgumentsLength();
+ var constructor = this;
+ // TODO: Implement IsConstructor (ES6 section 7.2.5)
+ var array = %IsConstructor(constructor) ? new constructor(length) : [];
+ for (var i = 0; i < length; i++) {
+ AddArrayElement(constructor, array, i, %_Arguments(i));
+ }
+ array.length = length;
+ return array;
+}
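[editor's note: Array.of exists to avoid the single-numeric-argument quirk of the Array constructor:]

Array.of(7);     // [7]: one element
Array(7);        // a length-7 array with no elements
Array.of(1, 2);  // [1, 2]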
+
// -------------------------------------------------------------------
// Set up non-enumerable constructor property on the Array.prototype
@@ -1563,9 +1857,12 @@ var unscopables = {
%AddNamedProperty(GlobalArray.prototype, unscopablesSymbol, unscopables,
DONT_ENUM | READ_ONLY);
+%FunctionSetLength(ArrayFrom, 1);
+
// Set up non-enumerable functions on the Array object.
utils.InstallFunctions(GlobalArray, DONT_ENUM, [
- "isArray", ArrayIsArray
+ "from", ArrayFrom,
+ "of", ArrayOf
]);
var specialFunctions = %SpecialArrayFunctions();
@@ -1605,7 +1902,12 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"indexOf", getFunction("indexOf", ArrayIndexOf, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"reduce", getFunction("reduce", ArrayReduce, 1),
- "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
+ "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1),
+ "copyWithin", getFunction("copyWithin", ArrayCopyWithin, 2),
+ "find", getFunction("find", ArrayFind, 1),
+ "findIndex", getFunction("findIndex", ArrayFindIndex, 1),
+ "fill", getFunction("fill", ArrayFill, 1),
+ "includes", getFunction("includes", ArrayIncludes, 1),
]);
%FinishArrayPrototypeSetup(GlobalArray.prototype);
@@ -1630,21 +1932,37 @@ utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
"shift", getFunction("shift", ArrayShift)
]);
+// V8 extras get a separate copy of InternalPackedArray. We give them the basic
+// manipulation methods.
+utils.SetUpLockedPrototype(extrasUtils.InternalPackedArray, GlobalArray(), [
+ "push", getFunction("push", ArrayPush),
+ "pop", getFunction("pop", ArrayPop),
+ "shift", getFunction("shift", ArrayShift),
+ "unshift", getFunction("unshift", ArrayUnshift),
+ "splice", getFunction("splice", ArraySplice),
+ "slice", getFunction("slice", ArraySlice)
+]);
+
// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
+ to.ArrayFrom = ArrayFrom;
to.ArrayIndexOf = ArrayIndexOf;
to.ArrayJoin = ArrayJoin;
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
+ to.InnerArrayCopyWithin = InnerArrayCopyWithin;
to.InnerArrayEvery = InnerArrayEvery;
+ to.InnerArrayFill = InnerArrayFill;
to.InnerArrayFilter = InnerArrayFilter;
+ to.InnerArrayFind = InnerArrayFind;
+ to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayForEach = InnerArrayForEach;
+ to.InnerArrayIncludes = InnerArrayIncludes;
to.InnerArrayIndexOf = InnerArrayIndexOf;
to.InnerArrayJoin = InnerArrayJoin;
to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
- to.InnerArrayMap = InnerArrayMap;
to.InnerArrayReduce = InnerArrayReduce;
to.InnerArrayReduceRight = InnerArrayReduceRight;
to.InnerArraySome = InnerArraySome;
diff --git a/chromium/v8/src/arraybuffer.js b/chromium/v8/src/js/arraybuffer.js
index 0db0c2bf046..f0273c71ed7 100644
--- a/chromium/v8/src/arraybuffer.js
+++ b/chromium/v8/src/js/arraybuffer.js
@@ -12,29 +12,20 @@
// Imports
var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalObject = global.Object;
-var MathMax;
-var MathMin;
-var ToNumber;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
+var SpeciesConstructor;
utils.Import(function(from) {
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- ToNumber = from.ToNumber;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ SpeciesConstructor = from.SpeciesConstructor;
});
// -------------------------------------------------------------------
-function ArrayBufferConstructor(length) { // length = 1
- if (%_IsConstructCall()) {
- var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
- %ArrayBufferInitialize(this, byteLength, kNotShared);
- } else {
- throw MakeTypeError(kConstructorNotFunction, "ArrayBuffer");
- }
-}
-
function ArrayBufferGetByteLen() {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -57,52 +48,43 @@ function ArrayBufferSlice(start, end) {
var first;
var byte_length = %_ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
- first = MathMax(byte_length + relativeStart, 0);
+ first = MaxSimple(byte_length + relativeStart, 0);
} else {
- first = MathMin(relativeStart, byte_length);
+ first = MinSimple(relativeStart, byte_length);
}
var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
- fin = MathMax(byte_length + relativeEnd, 0);
+ fin = MaxSimple(byte_length + relativeEnd, 0);
} else {
- fin = MathMin(relativeEnd, byte_length);
+ fin = MinSimple(relativeEnd, byte_length);
}
if (fin < first) {
fin = first;
}
var newLen = fin - first;
- // TODO(dslomov): implement inheritance
- var result = new GlobalArrayBuffer(newLen);
+ var constructor = SpeciesConstructor(this, GlobalArrayBuffer, true);
+ var result = new constructor(newLen);
+ if (!IS_ARRAYBUFFER(result)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'ArrayBuffer.prototype.slice', result);
+ }
+ // TODO(littledan): Check for a detached ArrayBuffer
+ if (result === this) {
+ throw MakeTypeError(kArrayBufferSpeciesThis);
+ }
+ if (%_ArrayBufferGetByteLength(result) < newLen) {
+ throw MakeTypeError(kArrayBufferTooShort);
+ }
- %ArrayBufferSliceImpl(this, result, first);
+ %ArrayBufferSliceImpl(this, result, first, newLen);
return result;
}
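[editor's note: a sketch of what the new SpeciesConstructor path allows and the added checks enforce — illustration only, assuming class syntax is available:]

class MyBuffer extends ArrayBuffer {}
var buf = new MyBuffer(16);
buf.slice(4, 12) instanceof MyBuffer;  // true: species picks the subclass
// A species constructor that yields a non-ArrayBuffer, a too-short
// buffer, or `this` itself now throws a TypeError instead of misbehaving.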
-function ArrayBufferIsViewJS(obj) {
- return %ArrayBufferIsView(obj);
-}
-
-
-// Set up the ArrayBuffer constructor function.
-%SetCode(GlobalArrayBuffer, ArrayBufferConstructor);
-%FunctionSetPrototype(GlobalArrayBuffer, new GlobalObject());
-
-// Set up the constructor property on the ArrayBuffer prototype object.
-%AddNamedProperty(
- GlobalArrayBuffer.prototype, "constructor", GlobalArrayBuffer, DONT_ENUM);
-
-%AddNamedProperty(GlobalArrayBuffer.prototype,
- toStringTagSymbol, "ArrayBuffer", DONT_ENUM | READ_ONLY);
-
utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength",
ArrayBufferGetByteLen);
-utils.InstallFunctions(GlobalArrayBuffer, DONT_ENUM, [
- "isView", ArrayBufferIsViewJS
-]);
-
utils.InstallFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
"slice", ArrayBufferSlice
]);
diff --git a/chromium/v8/src/collection-iterator.js b/chromium/v8/src/js/collection-iterator.js
index c799d6f9cd2..621d7266fc6 100644
--- a/chromium/v8/src/collection-iterator.js
+++ b/chromium/v8/src/js/collection-iterator.js
@@ -2,21 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $mapEntries;
-var $mapIteratorNext;
-var $setIteratorNext;
-var $setValues;
-
(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
var GlobalMap = global.Map;
var GlobalSet = global.Set;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
+var MapIterator = utils.ImportNow("MapIterator");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var SetIterator = utils.ImportNow("SetIterator");
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
// -------------------------------------------------------------------
@@ -70,7 +75,6 @@ function SetValues() {
// -------------------------------------------------------------------
%SetCode(SetIterator, SetIteratorConstructor);
-%FunctionSetPrototype(SetIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(SetIterator, 'Set Iterator');
utils.InstallFunctions(SetIterator.prototype, DONT_ENUM, [
'next', SetIteratorNextJS
@@ -87,9 +91,6 @@ utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
%AddNamedProperty(GlobalSet.prototype, iteratorSymbol, SetValues, DONT_ENUM);
-$setIteratorNext = SetIteratorNextJS;
-$setValues = SetValues;
-
// -------------------------------------------------------------------
function MapIteratorConstructor(map, kind) {
@@ -152,7 +153,6 @@ function MapValues() {
// -------------------------------------------------------------------
%SetCode(MapIterator, MapIteratorConstructor);
-%FunctionSetPrototype(MapIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(MapIterator, 'Map Iterator');
utils.InstallFunctions(MapIterator.prototype, DONT_ENUM, [
'next', MapIteratorNextJS
@@ -170,7 +170,14 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
%AddNamedProperty(GlobalMap.prototype, iteratorSymbol, MapEntries, DONT_ENUM);
-$mapEntries = MapEntries;
-$mapIteratorNext = MapIteratorNextJS;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.MapEntries = MapEntries;
+ to.MapIteratorNext = MapIteratorNextJS;
+ to.SetIteratorNext = SetIteratorNextJS;
+ to.SetValues = SetValues;
+});
})
diff --git a/chromium/v8/src/collection.js b/chromium/v8/src/js/collection.js
index 8bf6ec3515b..0d7195d53e2 100644
--- a/chromium/v8/src/collection.js
+++ b/chromium/v8/src/js/collection.js
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $getHash;
-var $getExistingHash;
-
(function(global, utils) {
"use strict";
@@ -18,16 +15,18 @@ var GlobalObject = global.Object;
var GlobalSet = global.Set;
var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
var IntRandom;
+var MakeTypeError;
+var MapIterator;
+var NumberIsNaN;
+var SetIterator;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
IntRandom = from.IntRandom;
-});
-
-var NumberIsNaN;
-
-utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+ MapIterator = from.MapIterator;
NumberIsNaN = from.NumberIsNaN;
+ SetIterator = from.SetIterator;
});
// -------------------------------------------------------------------
@@ -101,7 +100,7 @@ function GetExistingHash(key) {
if ((field & 1 /* Name::kHashNotComputedMask */) === 0) {
return field >>> 2 /* Name::kHashShift */;
}
- } else if (IS_SPEC_OBJECT(key) && !%_IsJSProxy(key) && !IS_GLOBAL(key)) {
+ } else if (IS_RECEIVER(key) && !IS_PROXY(key) && !IS_GLOBAL(key)) {
var hash = GET_PRIVATE(key, hashCodeSymbol);
return hash;
}
@@ -126,7 +125,7 @@ function GetHash(key) {
// Harmony Set
function SetConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "Set");
}
@@ -135,7 +134,7 @@ function SetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'add', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'add', this);
}
for (var value of iterable) {
@@ -249,10 +248,8 @@ function SetForEach(f, receiver) {
var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
var key;
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
var value_array = [UNDEFINED];
while (%SetIteratorNext(iterator, value_array)) {
- if (stepping) %DebugPrepareStepInIfStepping(f);
key = value_array[0];
%_Call(f, receiver, key, key, this);
}
@@ -284,7 +281,7 @@ utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
// Harmony Map
function MapConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "Map");
}
@@ -293,11 +290,11 @@ function MapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'set', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'set', this);
}
for (var nextItem of iterable) {
- if (!IS_SPEC_OBJECT(nextItem)) {
+ if (!IS_RECEIVER(nextItem)) {
throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
}
%_Call(adder, this, nextItem[0], nextItem[1]);
@@ -432,10 +429,8 @@ function MapForEach(f, receiver) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
- var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
var value_array = [UNDEFINED, UNDEFINED];
while (%MapIteratorNext(iterator, value_array)) {
- if (stepping) %DebugPrepareStepInIfStepping(f);
%_Call(f, receiver, value_array[1], value_array[0], this);
}
}
@@ -462,30 +457,6 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
"forEach", MapForEach
]);
-// Expose to the global scope.
-$getHash = GetHash;
-$getExistingHash = GetExistingHash;
-
-function MapFromArray(array) {
- var map = new GlobalMap;
- var length = array.length;
- for (var i = 0; i < length; i += 2) {
- var key = array[i];
- var value = array[i + 1];
- %_Call(MapSet, map, key, value);
- }
- return map;
-};
-
-function SetFromArray(array) {
- var set = new GlobalSet;
- var length = array.length;
- for (var i = 0; i < length; ++i) {
- %_Call(SetAdd, set, array[i]);
- }
- return set;
-};
-
// -----------------------------------------------------------------------
// Exports
@@ -497,8 +468,11 @@ function SetFromArray(array) {
"set_add", SetAdd,
"set_has", SetHas,
"set_delete", SetDelete,
- "map_from_array", MapFromArray,
- "set_from_array",SetFromArray,
]);
+utils.Export(function(to) {
+ to.GetExistingHash = GetExistingHash;
+ to.GetHash = GetHash;
+});
+
})
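[editor's note: the %_IsConstructCall() → new.target migration keeps the observable behavior — plain calls still throw. Quick check:]

new Map([['a', 1]]).get('a');  // 1
Map();  // TypeError: in a plain [[Call]], new.target is undefined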
diff --git a/chromium/v8/src/generator.js b/chromium/v8/src/js/generator.js
index 56579c59d40..7f43656ebcf 100644
--- a/chromium/v8/src/generator.js
+++ b/chromium/v8/src/js/generator.js
@@ -11,12 +11,14 @@
// -------------------------------------------------------------------
// Imports
+var GeneratorFunctionPrototype = utils.ImportNow("GeneratorFunctionPrototype");
+var GeneratorFunction = utils.ImportNow("GeneratorFunction");
var GlobalFunction = global.Function;
-var NewFunctionString;
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- NewFunctionString = from.NewFunctionString;
+ MakeTypeError = from.MakeTypeError;
});
// ----------------------------------------------------------------------------
@@ -33,7 +35,7 @@ function GeneratorObjectNext(value) {
var continuation = %GeneratorGetContinuation(this);
if (continuation > 0) {
// Generator is suspended.
- if (DEBUG_IS_ACTIVE) %DebugPrepareStepInIfStepping(this);
+ DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
try {
return %_GeneratorNext(this, value);
} catch (e) {
@@ -74,17 +76,6 @@ function GeneratorObjectThrow(exn) {
}
}
-
-function GeneratorFunctionConstructor(arg1) { // length == 1
- var source = NewFunctionString(arguments, 'function*');
- var global_proxy = %GlobalProxy(GeneratorFunctionConstructor);
- // Compile the string in the constructor and not a helper so that errors
- // appear to come from here.
- var f = %_CallFunction(global_proxy, %CompileString(source, true));
- %FunctionMarkNameShouldPrintAsAnonymous(f);
- return f;
-}
-
// ----------------------------------------------------------------------------
// Both Runtime_GeneratorNext and Runtime_GeneratorThrow are supported by
@@ -109,6 +100,5 @@ utils.InstallFunctions(GeneratorObjectPrototype,
%AddNamedProperty(GeneratorFunctionPrototype, "constructor",
GeneratorFunction, DONT_ENUM | READ_ONLY);
%InternalSetPrototype(GeneratorFunction, GlobalFunction);
-%SetCode(GeneratorFunction, GeneratorFunctionConstructor);
})
diff --git a/chromium/v8/src/harmony-atomics.js b/chromium/v8/src/js/harmony-atomics.js
index b1b529fe866..b861a2a4718 100644
--- a/chromium/v8/src/harmony-atomics.js
+++ b/chromium/v8/src/js/harmony-atomics.js
@@ -12,13 +12,13 @@
// Imports
var GlobalObject = global.Object;
-var MathMax;
-var ToNumber;
+var MakeTypeError;
+var MaxSimple;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MathMax = from.MathMax;
- ToNumber = from.ToNumber;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
});
// -------------------------------------------------------------------
@@ -32,7 +32,7 @@ function CheckSharedIntegerTypedArray(ia) {
function CheckSharedInteger32TypedArray(ia) {
CheckSharedIntegerTypedArray(ia);
- if (%_ClassOf(ia) !== 'Int32Array') {
+ if (!%IsSharedInteger32TypedArray(ia)) {
throw MakeTypeError(kNotInt32SharedTypedArray, ia);
}
}
@@ -45,8 +45,8 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
- oldValue = ToNumber(oldValue);
- newValue = ToNumber(newValue);
+ oldValue = TO_NUMBER(oldValue);
+ newValue = TO_NUMBER(newValue);
return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
}
@@ -65,7 +65,7 @@ function AtomicsStoreJS(sta, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsStore(sta, index, value);
}
@@ -75,7 +75,7 @@ function AtomicsAddJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsAdd(ia, index, value);
}
@@ -85,7 +85,7 @@ function AtomicsSubJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsSub(ia, index, value);
}
@@ -95,7 +95,7 @@ function AtomicsAndJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsAnd(ia, index, value);
}
@@ -105,7 +105,7 @@ function AtomicsOrJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsOr(ia, index, value);
}
@@ -115,7 +115,7 @@ function AtomicsXorJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsXor(ia, index, value);
}
@@ -125,7 +125,7 @@ function AtomicsExchangeJS(ia, index, value) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return %_AtomicsExchange(ia, index, value);
}
@@ -144,11 +144,11 @@ function AtomicsFutexWaitJS(ia, index, value, timeout) {
if (IS_UNDEFINED(timeout)) {
timeout = INFINITY;
} else {
- timeout = ToNumber(timeout);
+ timeout = TO_NUMBER(timeout);
if (NUMBER_IS_NAN(timeout)) {
timeout = INFINITY;
} else {
- timeout = MathMax(0, timeout);
+ timeout = MaxSimple(0, timeout);
}
}
return %AtomicsFutexWait(ia, index, value, timeout);
@@ -160,14 +160,14 @@ function AtomicsFutexWakeJS(ia, index, count) {
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- count = MathMax(0, TO_INTEGER(count));
+ count = MaxSimple(0, TO_INTEGER(count));
return %AtomicsFutexWake(ia, index, count);
}
function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
CheckSharedInteger32TypedArray(ia);
index1 = TO_INTEGER(index1);
- count = MathMax(0, TO_INTEGER(count));
+ count = MaxSimple(0, TO_INTEGER(count));
value = TO_INT32(value);
index2 = TO_INTEGER(index2);
if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
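[editor's note: the ToNumber → TO_NUMBER change keeps the eager coercion of operands; a sketch, assuming the usual Atomics.* installation of these builtins:]

var ia = new Int32Array(new SharedArrayBuffer(4));
Atomics.store(ia, 0, '42');  // '42' coerces to 42
Atomics.add(ia, 0, true);    // true coerces to 1; returns the old value, 42
Atomics.load(ia, 0);         // 43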
diff --git a/chromium/v8/src/js/harmony-object-observe.js b/chromium/v8/src/js/harmony-object-observe.js
new file mode 100644
index 00000000000..95dd298f0db
--- /dev/null
+++ b/chromium/v8/src/js/harmony-object-observe.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var ObserveArrayMethods = utils.ImportNow("ObserveArrayMethods");
+var ObserveObjectMethods = utils.ImportNow("ObserveObjectMethods");
+
+utils.InstallFunctions(global.Object, DONT_ENUM, ObserveObjectMethods);
+utils.InstallFunctions(global.Array, DONT_ENUM, ObserveArrayMethods);
+
+})
diff --git a/chromium/v8/src/js/harmony-reflect.js b/chromium/v8/src/js/harmony-reflect.js
new file mode 100644
index 00000000000..dcadad522f6
--- /dev/null
+++ b/chromium/v8/src/js/harmony-reflect.js
@@ -0,0 +1,37 @@
+// Copyright 2013-2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalReflect = global.Reflect;
+var MakeTypeError;
+var ReflectApply = utils.ImportNow("reflect_apply");
+var ReflectConstruct = utils.ImportNow("reflect_construct");
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+function ReflectEnumerate(obj) {
+ if (!IS_RECEIVER(obj))
+ throw MakeTypeError(kCalledOnNonObject, "Reflect.enumerate");
+ return (function* () { for (var x in obj) yield x })();
+}
+
+utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
+ "apply", ReflectApply,
+ "construct", ReflectConstruct,
+ "enumerate", ReflectEnumerate
+]);
+
+})
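[editor's note: Reflect.enumerate, as implemented above, returns a generator over for-in, so inherited enumerable keys are included:]

var proto = {a: 1};
var obj = Object.create(proto, {b: {value: 2, enumerable: true}});
var it = Reflect.enumerate(obj);
it.next();  // {value: 'b', done: false}
it.next();  // {value: 'a', done: false}: inherited from proto
it.next();  // {value: undefined, done: true}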
diff --git a/chromium/v8/src/js/harmony-regexp.js b/chromium/v8/src/js/harmony-regexp.js
new file mode 100644
index 00000000000..f76ef86ec7e
--- /dev/null
+++ b/chromium/v8/src/js/harmony-regexp.js
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalRegExp = global.RegExp;
+var GlobalRegExpPrototype = GlobalRegExp.prototype;
+var MakeTypeError;
+var regExpFlagsSymbol = utils.ImportNow("regexp_flags_symbol");
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+// ES6 draft 12-06-13, section 21.2.5.3
+// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
+function RegExpGetFlags() {
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(
+ kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
+ }
+ var result = '';
+ if (this.global) result += 'g';
+ if (this.ignoreCase) result += 'i';
+ if (this.multiline) result += 'm';
+ if (this.unicode) result += 'u';
+ if (this.sticky) result += 'y';
+ return result;
+}
+
+// ES6 21.2.5.12.
+function RegExpGetSticky() {
+ if (!IS_REGEXP(this)) {
+ // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
+ // TODO(littledan): Remove this workaround or standardize it
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeStickyGetter);
+ return UNDEFINED;
+ }
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
+ }
+ return !!REGEXP_STICKY(this);
+}
+%FunctionSetName(RegExpGetSticky, "RegExp.prototype.sticky");
+%SetNativeFlag(RegExpGetSticky);
+
+utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
+utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
+
+})
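[editor's note: observable behavior of the two getters — the sticky one shows the prototype compat path:]

/foo/gimy.flags;          // 'gimy': assembled in g, i, m, u, y order
/abc/y.sticky;            // true
RegExp.prototype.sticky;  // undefined: UseCounter compat path, no throw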
diff --git a/chromium/v8/src/js/harmony-sharedarraybuffer.js b/chromium/v8/src/js/harmony-sharedarraybuffer.js
new file mode 100644
index 00000000000..10ceb70d27b
--- /dev/null
+++ b/chromium/v8/src/js/harmony-sharedarraybuffer.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+})
+
+// -------------------------------------------------------------------
+
+function SharedArrayBufferGetByteLen() {
+ if (!IS_SHAREDARRAYBUFFER(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'SharedArrayBuffer.prototype.byteLength', this);
+ }
+ return %_ArrayBufferGetByteLength(this);
+}
+
+utils.InstallGetter(GlobalSharedArrayBuffer.prototype, "byteLength",
+ SharedArrayBufferGetByteLen);
+
+})
diff --git a/chromium/v8/src/harmony-simd.js b/chromium/v8/src/js/harmony-simd.js
index ef3d9948b15..4df2f437ecb 100644
--- a/chromium/v8/src/harmony-simd.js
+++ b/chromium/v8/src/js/harmony-simd.js
@@ -12,8 +12,15 @@
// Imports
var GlobalSIMD = global.SIMD;
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
macro SIMD_FLOAT_TYPES(FUNCTION)
FUNCTION(Float32x4, float32x4, 4)
endmacro
@@ -55,11 +62,11 @@ function NAMECheckJS(a) {
}
function NAMEToString() {
- if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ var value = %_ValueOf(this);
+ if (typeof(value) !== 'TYPE') {
throw MakeTypeError(kIncompatibleMethodReceiver,
"NAME.prototype.toString", this);
}
- var value = %_ValueOf(this);
var str = "SIMD.NAME(";
str += %NAMEExtractLane(value, 0);
for (var i = 1; i < LANES; i++) {
@@ -69,11 +76,11 @@ function NAMEToString() {
}
function NAMEToLocaleString() {
- if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ var value = %_ValueOf(this);
+ if (typeof(value) !== 'TYPE') {
throw MakeTypeError(kIncompatibleMethodReceiver,
"NAME.prototype.toLocaleString", this);
}
- var value = %_ValueOf(this);
var str = "SIMD.NAME(";
str += %NAMEExtractLane(value, 0).toLocaleString();
for (var i = 1; i < LANES; i++) {
@@ -83,11 +90,12 @@ function NAMEToLocaleString() {
}
function NAMEValueOf() {
- if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ var value = %_ValueOf(this);
+ if (typeof(value) !== 'TYPE') {
throw MakeTypeError(kIncompatibleMethodReceiver,
"NAME.prototype.valueOf", this);
}
- return %_ValueOf(this);
+ return value;
}
function NAMEExtractLaneJS(instance, lane) {
@@ -162,7 +170,7 @@ endmacro
macro DECLARE_NUMERIC_FUNCTIONS(NAME, TYPE, LANES)
function NAMEReplaceLaneJS(instance, lane, value) {
- return %NAMEReplaceLane(instance, lane, TO_NUMBER_INLINE(value));
+ return %NAMEReplaceLane(instance, lane, TO_NUMBER(value));
}
function NAMESelectJS(selector, a, b) {
@@ -425,87 +433,107 @@ SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
//-------------------------------------------------------------------
function Float32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Float32x4");
- return %CreateFloat32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Float32x4");
+ }
+ return %CreateFloat32x4(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3));
}
function Int32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int32x4");
- return %CreateInt32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Int32x4");
+ }
+ return %CreateInt32x4(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3));
}
function Uint32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint32x4");
- return %CreateUint32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Uint32x4");
+ }
+ return %CreateUint32x4(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3));
}
function Bool32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool32x4");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Bool32x4");
+ }
return %CreateBool32x4(c0, c1, c2, c3);
}
function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int16x8");
- return %CreateInt16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Int16x8");
+ }
+ return %CreateInt16x8(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7));
}
function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint16x8");
- return %CreateUint16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Uint16x8");
+ }
+ return %CreateUint16x8(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7));
}
function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool16x8");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Bool16x8");
+ }
return %CreateBool16x8(c0, c1, c2, c3, c4, c5, c6, c7);
}
function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int8x16");
- return %CreateInt8x16(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7),
- TO_NUMBER_INLINE(c8), TO_NUMBER_INLINE(c9),
- TO_NUMBER_INLINE(c10), TO_NUMBER_INLINE(c11),
- TO_NUMBER_INLINE(c12), TO_NUMBER_INLINE(c13),
- TO_NUMBER_INLINE(c14), TO_NUMBER_INLINE(c15));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Int8x16");
+ }
+ return %CreateInt8x16(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7),
+ TO_NUMBER(c8), TO_NUMBER(c9),
+ TO_NUMBER(c10), TO_NUMBER(c11),
+ TO_NUMBER(c12), TO_NUMBER(c13),
+ TO_NUMBER(c14), TO_NUMBER(c15));
}
function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint8x16");
- return %CreateUint8x16(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
- TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
- TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7),
- TO_NUMBER_INLINE(c8), TO_NUMBER_INLINE(c9),
- TO_NUMBER_INLINE(c10), TO_NUMBER_INLINE(c11),
- TO_NUMBER_INLINE(c12), TO_NUMBER_INLINE(c13),
- TO_NUMBER_INLINE(c14), TO_NUMBER_INLINE(c15));
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Uint8x16");
+ }
+ return %CreateUint8x16(TO_NUMBER(c0), TO_NUMBER(c1),
+ TO_NUMBER(c2), TO_NUMBER(c3),
+ TO_NUMBER(c4), TO_NUMBER(c5),
+ TO_NUMBER(c6), TO_NUMBER(c7),
+ TO_NUMBER(c8), TO_NUMBER(c9),
+ TO_NUMBER(c10), TO_NUMBER(c11),
+ TO_NUMBER(c12), TO_NUMBER(c13),
+ TO_NUMBER(c14), TO_NUMBER(c15));
}
function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool8x16");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Bool8x16");
+ }
return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
c13, c14, c15);
}
diff --git a/chromium/v8/src/js/harmony-species.js b/chromium/v8/src/js/harmony-species.js
new file mode 100644
index 00000000000..426ac466e74
--- /dev/null
+++ b/chromium/v8/src/js/harmony-species.js
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils, extrasUtils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalArray = global.Array;
+// It is important that this file is run after src/js/typedarray.js,
+// otherwise GlobalTypedArray would be Object, and we would break
+// old versions of Zepto.
+var GlobalTypedArray = global.Uint8Array.__proto__;
+var GlobalMap = global.Map;
+var GlobalSet = global.Set;
+var GlobalArrayBuffer = global.ArrayBuffer;
+var GlobalPromise = global.Promise;
+var GlobalRegExp = global.RegExp;
+var speciesSymbol = utils.ImportNow("species_symbol");
+
+function ArraySpecies() {
+ return this;
+}
+
+function TypedArraySpecies() {
+ return this;
+}
+
+function MapSpecies() {
+ return this;
+}
+
+function SetSpecies() {
+ return this;
+}
+
+function ArrayBufferSpecies() {
+ return this;
+}
+
+function PromiseSpecies() {
+ return this;
+}
+
+function RegExpSpecies() {
+ return this;
+}
+
+utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies, DONT_ENUM);
+utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies, DONT_ENUM);
+utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies, DONT_ENUM);
+utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies, DONT_ENUM);
+utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies,
+ DONT_ENUM);
+utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies, DONT_ENUM);
+utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies, DONT_ENUM);
+
+});
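[editor's note: each getter returns `this`, the spec's default @@species; subclasses inherit it and can override to opt out of subclass-typed results. Sketch:]

Array[Symbol.species] === Array;  // true
class Plain extends Array {
  static get [Symbol.species]() { return Array; }
}
new Plain(1, 2).map(function(x) { return x; }) instanceof Plain;  // false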
diff --git a/chromium/v8/src/js/harmony-unicode-regexps.js b/chromium/v8/src/js/harmony-unicode-regexps.js
new file mode 100644
index 00000000000..aa8fc76bd5e
--- /dev/null
+++ b/chromium/v8/src/js/harmony-unicode-regexps.js
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalRegExp = global.RegExp;
+var GlobalRegExpPrototype = GlobalRegExp.prototype;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+// ES6 21.2.5.15.
+function RegExpGetUnicode() {
+ if (!IS_REGEXP(this)) {
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
+ }
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
+ }
+ return !!REGEXP_UNICODE(this);
+}
+%FunctionSetName(RegExpGetUnicode, "RegExp.prototype.unicode");
+%SetNativeFlag(RegExpGetUnicode);
+
+utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
+
+})
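[editor's note: unlike the sticky getter, this one throws even for RegExp.prototype — the UseCounter is recorded first, then the TypeError still fires:]

/foo/u.unicode;  // true
/foo/.unicode;   // false
// RegExp.prototype.unicode: increments kRegExpPrototypeUnicodeGetter,
// then throws kRegExpNonRegExp.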
diff --git a/chromium/v8/src/i18n.js b/chromium/v8/src/js/i18n.js
index b9d659c4423..7e00fcdac45 100644
--- a/chromium/v8/src/i18n.js
+++ b/chromium/v8/src/js/i18n.js
@@ -27,10 +27,15 @@ var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
+var MakeError;
+var MakeRangeError;
+var MakeTypeError;
var MathFloor;
var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
+var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var RegExpTest;
+var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var StringIndexOf;
var StringLastIndexOf;
var StringMatch;
@@ -45,6 +50,9 @@ utils.Import(function(from) {
ArrayPush = from.ArrayPush;
IsFinite = from.IsFinite;
IsNaN = from.IsNaN;
+ MakeError = from.MakeError;
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
MathFloor = from.MathFloor;
RegExpTest = from.RegExpTest;
StringIndexOf = from.StringIndexOf;
@@ -54,7 +62,6 @@ utils.Import(function(from) {
StringSplit = from.StringSplit;
StringSubstr = from.StringSubstr;
StringSubstring = from.StringSubstring;
- ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
@@ -171,13 +178,26 @@ var TIMEZONE_NAME_CHECK_RE = UNDEFINED;
function GetTimezoneNameCheckRE() {
if (IS_UNDEFINED(TIMEZONE_NAME_CHECK_RE)) {
- TIMEZONE_NAME_CHECK_RE =
- new GlobalRegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+ TIMEZONE_NAME_CHECK_RE = new GlobalRegExp(
+ '^([A-Za-z]+)/([A-Za-z_-]+)((?:\/[A-Za-z_-]+)+)*$');
}
return TIMEZONE_NAME_CHECK_RE;
}
/**
+ * Matches valid location parts of IANA time zone names.
+ */
+var TIMEZONE_NAME_LOCATION_PART_RE = UNDEFINED;
+
+function GetTimezoneNameLocationPartRE() {
+ if (IS_UNDEFINED(TIMEZONE_NAME_LOCATION_PART_RE)) {
+ TIMEZONE_NAME_LOCATION_PART_RE =
+ new GlobalRegExp('^([A-Za-z]+)((?:[_-][A-Za-z]+)+)*$');
+ }
+ return TIMEZONE_NAME_LOCATION_PART_RE;
+}
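[editor's note: the widened check pattern (together with the new per-part pattern above) accepts multi-part and hyphenated IANA zone names that the old two-part regexp rejected; for instance:]

var re = new RegExp('^([A-Za-z]+)/([A-Za-z_-]+)((?:\/[A-Za-z_-]+)+)*$');
re.test('America/New_York');                // true (as before)
re.test('America/Argentina/Buenos_Aires');  // true: extra /Part segment
re.test('America/North_Dakota/New_Salem');  // true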
+
+/**
* Adds bound method to the prototype of the given object.
*/
function addBoundMethod(obj, methodName, implementation, length) {
@@ -192,21 +212,21 @@ function addBoundMethod(obj, methodName, implementation, length) {
var boundMethod;
if (IS_UNDEFINED(length) || length === 2) {
boundMethod = function(x, y) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return implementation(that, x, y);
}
} else if (length === 1) {
boundMethod = function(x) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return implementation(that, x);
}
} else {
boundMethod = function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
      // DateTimeFormat.format needs to be 0 arg method, but can still
@@ -244,7 +264,7 @@ function addBoundMethod(obj, methodName, implementation, length) {
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (IS_NULL(%_CallFunction(service, GetServiceRE(), StringMatch))) {
+ if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
throw MakeError(kWrongServiceType, service);
}
@@ -292,20 +312,22 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
var matchedLocales = [];
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
- var locale = %_CallFunction(requestedLocales[i], GetUnicodeExtensionRE(),
- '', StringReplace);
+ var locale = %_Call(StringReplace,
+ requestedLocales[i],
+ GetUnicodeExtensionRE(),
+ '');
do {
if (!IS_UNDEFINED(availableLocales[locale])) {
// Push requested locale not the resolved one.
- %_CallFunction(matchedLocales, requestedLocales[i], ArrayPush);
+ %_Call(ArrayPush, matchedLocales, requestedLocales[i]);
break;
}
// Truncate locale if possible, if not break.
- var pos = %_CallFunction(locale, '-', StringLastIndexOf);
+ var pos = %_Call(StringLastIndexOf, locale, '-');
if (pos === -1) {
break;
}
- locale = %_CallFunction(locale, 0, pos, StringSubstring);
+ locale = %_Call(StringSubstring, locale, 0, pos);
} while (true);
}
@@ -350,8 +372,7 @@ function getGetOption(options, caller) {
throw MakeError(kWrongValueType);
}
- if (!IS_UNDEFINED(values) &&
- %_CallFunction(values, value, ArrayIndexOf) === -1) {
+ if (!IS_UNDEFINED(values) && %_Call(ArrayIndexOf, values, value) === -1) {
throw MakeRangeError(kValueOutOfRange, value, caller, property);
}
@@ -400,7 +421,7 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (IS_NULL(%_CallFunction(service, GetServiceRE(), StringMatch))) {
+ if (IS_NULL(%_Call(StringMatch, service, GetServiceRE()))) {
throw MakeError(kWrongServiceType, service);
}
@@ -411,23 +432,22 @@ function lookupMatcher(service, requestedLocales) {
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
- var locale = %_CallFunction(requestedLocales[i], GetAnyExtensionRE(), '',
- StringReplace);
+ var locale = %_Call(StringReplace, requestedLocales[i],
+ GetAnyExtensionRE(), '');
do {
if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
// Return the resolved locale and extension.
var extensionMatch =
- %_CallFunction(requestedLocales[i], GetUnicodeExtensionRE(),
- StringMatch);
+ %_Call(StringMatch, requestedLocales[i], GetUnicodeExtensionRE());
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
}
// Truncate locale if possible.
- var pos = %_CallFunction(locale, '-', StringLastIndexOf);
+ var pos = %_Call(StringLastIndexOf, locale, '-');
if (pos === -1) {
break;
}
- locale = %_CallFunction(locale, 0, pos, StringSubstring);
+ locale = %_Call(StringSubstring, locale, 0, pos);
} while (true);
}
@@ -456,7 +476,7 @@ function bestFitMatcher(service, requestedLocales) {
* We are not concerned with the validity of the values at this point.
*/
function parseExtension(extension) {
- var extensionSplit = %_CallFunction(extension, '-', StringSplit);
+ var extensionSplit = %_Call(StringSplit, extension, '-');
// Assume ['', 'u', ...] input, but don't throw.
if (extensionSplit.length <= 2 ||
@@ -593,7 +613,7 @@ function getOptimalLanguageTag(original, resolved) {
// Preserve extensions of resolved locale, but swap base tags with original.
var resolvedBase = new GlobalRegExp('^' + locales[1].base);
- return %_CallFunction(resolved, resolvedBase, locales[0].base, StringReplace);
+ return %_Call(StringReplace, resolved, resolvedBase, locales[0].base);
}
@@ -608,8 +628,8 @@ function getAvailableLocalesOf(service) {
for (var i in available) {
if (%HasOwnProperty(available, i)) {
- var parts = %_CallFunction(i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/,
- StringMatch);
+ var parts =
+ %_Call(StringMatch, i, /^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/);
if (parts !== null) {
// Build xx-ZZ. We don't care about the actual value,
      // as long as it's not undefined.
@@ -669,8 +689,36 @@ function addWECPropertyIfDefined(object, property, value) {
* Returns titlecased word, aMeRricA -> America.
*/
function toTitleCaseWord(word) {
- return %StringToUpperCase(%_CallFunction(word, 0, 1, StringSubstr)) +
- %StringToLowerCase(%_CallFunction(word, 1, StringSubstr));
+ return %StringToUpperCase(%_Call(StringSubstr, word, 0, 1)) +
+ %StringToLowerCase(%_Call(StringSubstr, word, 1));
+}
+
+/**
+ * Returns a titlecased location, e.g. bueNos_airES -> Buenos_Aires
+ * or ho_cHi_minH -> Ho_Chi_Minh. It is locale-agnostic and deals
+ * only with ASCII characters.
+ * 'of', 'au' and 'es' are special-cased and always lowercased.
+ */
+function toTitleCaseTimezoneLocation(location) {
+ var match = %_Call(StringMatch, location, GetTimezoneNameLocationPartRE());
+ if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, location);
+
+ var result = toTitleCaseWord(match[1]);
+ if (!IS_UNDEFINED(match[2]) && 2 < match.length) {
+ // The first character is a separator, '_' or '-'.
+    // No IANA zone name contains both '_' and '-'.
+ var separator = %_Call(StringSubstring, match[2], 0, 1);
+ var parts = %_Call(StringSplit, match[2], separator);
+ for (var i = 1; i < parts.length; i++) {
+      var part = parts[i];
+ var lowercasedPart = %StringToLowerCase(part);
+ result = result + separator +
+ ((lowercasedPart !== 'es' &&
+ lowercasedPart !== 'of' && lowercasedPart !== 'au') ?
+ toTitleCaseWord(part) : lowercasedPart);
+ }
+ }
+ return result;
}
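+// Illustrative example (not part of this change):
+// toTitleCaseTimezoneLocation('isle_OF_man') returns 'Isle_of_Man',
+// since 'of' is one of the special-cased lowercase parts.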
/**
@@ -714,7 +762,7 @@ function initializeLocaleList(locales) {
} else {
// We allow single string localeID.
if (typeof locales === 'string') {
- %_CallFunction(seen, canonicalizeLanguageTag(locales), ArrayPush);
+ %_Call(ArrayPush, seen, canonicalizeLanguageTag(locales));
return freezeArray(seen);
}
@@ -727,8 +775,8 @@ function initializeLocaleList(locales) {
var tag = canonicalizeLanguageTag(value);
- if (%_CallFunction(seen, tag, ArrayIndexOf) === -1) {
- %_CallFunction(seen, tag, ArrayPush);
+ if (%_Call(ArrayIndexOf, seen, tag) === -1) {
+ %_Call(ArrayPush, seen, tag);
}
}
}
@@ -749,40 +797,40 @@ function initializeLocaleList(locales) {
*/
function isValidLanguageTag(locale) {
  // Check if it's well-formed, including grandfathered tags.
- if (!%_CallFunction(GetLanguageTagRE(), locale, RegExpTest)) {
+ if (!%_Call(RegExpTest, GetLanguageTagRE(), locale)) {
return false;
}
// Just return if it's a x- form. It's all private.
- if (%_CallFunction(locale, 'x-', StringIndexOf) === 0) {
+ if (%_Call(StringIndexOf, locale, 'x-') === 0) {
return true;
}
// Check if there are any duplicate variants or singletons (extensions).
// Remove private use section.
- locale = %_CallFunction(locale, /-x-/, StringSplit)[0];
+ locale = %_Call(StringSplit, locale, /-x-/)[0];
// Skip language since it can match variant regex, so we start from 1.
// We are matching i-klingon here, but that's ok, since i-klingon-klingon
// is not valid and would fail LANGUAGE_TAG_RE test.
var variants = [];
var extensions = [];
- var parts = %_CallFunction(locale, /-/, StringSplit);
+ var parts = %_Call(StringSplit, locale, /-/);
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
- if (%_CallFunction(GetLanguageVariantRE(), value, RegExpTest) &&
+ if (%_Call(RegExpTest, GetLanguageVariantRE(), value) &&
extensions.length === 0) {
- if (%_CallFunction(variants, value, ArrayIndexOf) === -1) {
- %_CallFunction(variants, value, ArrayPush);
+ if (%_Call(ArrayIndexOf, variants, value) === -1) {
+ %_Call(ArrayPush, variants, value);
} else {
return false;
}
}
- if (%_CallFunction(GetLanguageSingletonRE(), value, RegExpTest)) {
- if (%_CallFunction(extensions, value, ArrayIndexOf) === -1) {
- %_CallFunction(extensions, value, ArrayPush);
+ if (%_Call(RegExpTest, GetLanguageSingletonRE(), value)) {
+ if (%_Call(ArrayIndexOf, extensions, value) === -1) {
+ %_Call(ArrayPush, extensions, value);
} else {
return false;
}
@@ -832,6 +880,16 @@ function BuildLanguageTagREs() {
LANGUAGE_TAG_RE = new GlobalRegExp(languageTag, 'i');
}
+var resolvedAccessor = {
+ get() {
+ %IncrementUseCounter(kIntlResolved);
+ return this[resolvedSymbol];
+ },
+ set(value) {
+ this[resolvedSymbol] = value;
+ }
+};
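+// Illustrative sketch of the accessor's effect (not part of this change):
+// reading collator.resolved returns the record stored under
+// resolvedSymbol, but bumps the kIntlResolved use counter first.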
+
/**
* Initializes the given object so it's a valid Collator instance.
* Useful for subclassing.
@@ -896,8 +954,7 @@ function initializeCollator(collator, locales, options) {
'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
];
- if (%_CallFunction(ALLOWED_CO_VALUES, extensionMap.co, ArrayIndexOf) !==
- -1) {
+ if (%_Call(ArrayIndexOf, ALLOWED_CO_VALUES, extensionMap.co) !== -1) {
extension = '-u-co-' + extensionMap.co;
// ICU can't tell us what the collation is, so save user's input.
collation = extensionMap.co;
@@ -931,7 +988,8 @@ function initializeCollator(collator, locales, options) {
// Writable, configurable and enumerable are set to false by default.
%MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
- ObjectDefineProperty(collator, 'resolved', {value: resolved});
+ collator[resolvedSymbol] = resolved;
+ ObjectDefineProperty(collator, 'resolved', resolvedAccessor);
return collator;
}
@@ -962,7 +1020,7 @@ function initializeCollator(collator, locales, options) {
* Collator resolvedOptions method.
*/
%AddNamedProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -971,17 +1029,17 @@ function initializeCollator(collator, locales, options) {
}
var coll = this;
- var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
- coll.resolved.locale);
+ var locale = getOptimalLanguageTag(coll[resolvedSymbol].requestedLocale,
+ coll[resolvedSymbol].locale);
return {
locale: locale,
- usage: coll.resolved.usage,
- sensitivity: coll.resolved.sensitivity,
- ignorePunctuation: coll.resolved.ignorePunctuation,
- numeric: coll.resolved.numeric,
- caseFirst: coll.resolved.caseFirst,
- collation: coll.resolved.collation
+ usage: coll[resolvedSymbol].usage,
+ sensitivity: coll[resolvedSymbol].sensitivity,
+ ignorePunctuation: coll[resolvedSymbol].ignorePunctuation,
+ numeric: coll[resolvedSymbol].numeric,
+ caseFirst: coll[resolvedSymbol].caseFirst,
+ collation: coll[resolvedSymbol].collation
};
},
DONT_ENUM
@@ -998,7 +1056,7 @@ function initializeCollator(collator, locales, options) {
* Options are optional parameter.
*/
%AddNamedProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1037,7 +1095,7 @@ addBoundMethod(Intl.Collator, 'compare', compare, 2);
function isWellFormedCurrencyCode(currency) {
return typeof currency == "string" &&
currency.length == 3 &&
- %_CallFunction(currency, /[^A-Za-z]/, StringMatch) == null;
+ %_Call(StringMatch, currency, /[^A-Za-z]/) == null;
}
@@ -1058,6 +1116,15 @@ function getNumberOption(options, property, min, max, fallback) {
return fallback;
}
+var patternAccessor = {
+ get() {
+ %IncrementUseCounter(kIntlPattern);
+ return this[patternSymbol];
+ },
+ set(value) {
+ this[patternSymbol] = value;
+ }
+};
/**
* Initializes the given object so it's a valid NumberFormat instance.
@@ -1110,7 +1177,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
if (!IS_UNDEFINED(mxfd) || internalOptions.style !== 'currency') {
var min_mxfd = internalOptions.style === 'percent' ? 0 : 3;
mnfd = IS_UNDEFINED(mnfd) ? 0 : mnfd;
- fallback_limit = (mnfd > min_mxfd) ? mnfd : min_mxfd;
+ var fallback_limit = (mnfd > min_mxfd) ? mnfd : min_mxfd;
mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20, fallback_limit);
defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
}
@@ -1153,6 +1220,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
minimumFractionDigits: {writable: true},
minimumIntegerDigits: {writable: true},
numberingSystem: {writable: true},
+ pattern: patternAccessor,
requestedLocale: {value: requestedLocale, writable: true},
style: {value: internalOptions.style, writable: true},
useGrouping: {writable: true}
@@ -1173,7 +1241,8 @@ function initializeNumberFormat(numberFormat, locales, options) {
}
%MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
- ObjectDefineProperty(numberFormat, 'resolved', {value: resolved});
+ numberFormat[resolvedSymbol] = resolved;
+ ObjectDefineProperty(numberFormat, 'resolved', resolvedAccessor);
return numberFormat;
}
@@ -1204,7 +1273,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
* NumberFormat resolvedOptions method.
*/
%AddNamedProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1213,33 +1282,33 @@ function initializeNumberFormat(numberFormat, locales, options) {
}
var format = this;
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
+ var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
+ format[resolvedSymbol].locale);
var result = {
locale: locale,
- numberingSystem: format.resolved.numberingSystem,
- style: format.resolved.style,
- useGrouping: format.resolved.useGrouping,
- minimumIntegerDigits: format.resolved.minimumIntegerDigits,
- minimumFractionDigits: format.resolved.minimumFractionDigits,
- maximumFractionDigits: format.resolved.maximumFractionDigits,
+ numberingSystem: format[resolvedSymbol].numberingSystem,
+ style: format[resolvedSymbol].style,
+ useGrouping: format[resolvedSymbol].useGrouping,
+ minimumIntegerDigits: format[resolvedSymbol].minimumIntegerDigits,
+ minimumFractionDigits: format[resolvedSymbol].minimumFractionDigits,
+ maximumFractionDigits: format[resolvedSymbol].maximumFractionDigits,
};
if (result.style === 'currency') {
- defineWECProperty(result, 'currency', format.resolved.currency);
+ defineWECProperty(result, 'currency', format[resolvedSymbol].currency);
defineWECProperty(result, 'currencyDisplay',
- format.resolved.currencyDisplay);
+ format[resolvedSymbol].currencyDisplay);
}
- if (%HasOwnProperty(format.resolved, 'minimumSignificantDigits')) {
+ if (%HasOwnProperty(format[resolvedSymbol], 'minimumSignificantDigits')) {
defineWECProperty(result, 'minimumSignificantDigits',
- format.resolved.minimumSignificantDigits);
+ format[resolvedSymbol].minimumSignificantDigits);
}
- if (%HasOwnProperty(format.resolved, 'maximumSignificantDigits')) {
+ if (%HasOwnProperty(format[resolvedSymbol], 'maximumSignificantDigits')) {
defineWECProperty(result, 'maximumSignificantDigits',
- format.resolved.maximumSignificantDigits);
+ format[resolvedSymbol].maximumSignificantDigits);
}
return result;
@@ -1259,7 +1328,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
* Options are optional parameter.
*/
%AddNamedProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1279,7 +1348,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
*/
function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
- var number = ToNumber(value) + 0;
+ var number = TO_NUMBER(value) + 0;
return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter),
number);
@@ -1366,58 +1435,57 @@ function appendToLDMLString(option, pairs) {
*/
function fromLDMLString(ldmlString) {
  // First remove ''-quoted text, so that we lose literal strings like 'Uhr'.
- ldmlString = %_CallFunction(ldmlString, GetQuotedStringRE(), '',
- StringReplace);
+ ldmlString = %_Call(StringReplace, ldmlString, GetQuotedStringRE(), '');
var options = {};
- var match = %_CallFunction(ldmlString, /E{3,5}/g, StringMatch);
+ var match = %_Call(StringMatch, ldmlString, /E{3,5}/g);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
- match = %_CallFunction(ldmlString, /G{3,5}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /G{3,5}/g);
options = appendToDateTimeObject(
options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
- match = %_CallFunction(ldmlString, /y{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /y{1,2}/g);
options = appendToDateTimeObject(
options, 'year', match, {y: 'numeric', yy: '2-digit'});
- match = %_CallFunction(ldmlString, /M{1,5}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /M{1,5}/g);
options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
  // Sometimes we get L instead of M for the month (standalone name).
- match = %_CallFunction(ldmlString, /L{1,5}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /L{1,5}/g);
options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
- match = %_CallFunction(ldmlString, /d{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /d{1,2}/g);
options = appendToDateTimeObject(
options, 'day', match, {d: 'numeric', dd: '2-digit'});
- match = %_CallFunction(ldmlString, /h{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /h{1,2}/g);
if (match !== null) {
options['hour12'] = true;
}
options = appendToDateTimeObject(
options, 'hour', match, {h: 'numeric', hh: '2-digit'});
- match = %_CallFunction(ldmlString, /H{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /H{1,2}/g);
if (match !== null) {
options['hour12'] = false;
}
options = appendToDateTimeObject(
options, 'hour', match, {H: 'numeric', HH: '2-digit'});
- match = %_CallFunction(ldmlString, /m{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /m{1,2}/g);
options = appendToDateTimeObject(
options, 'minute', match, {m: 'numeric', mm: '2-digit'});
- match = %_CallFunction(ldmlString, /s{1,2}/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /s{1,2}/g);
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = %_CallFunction(ldmlString, /z|zzzz/g, StringMatch);
+ match = %_Call(StringMatch, ldmlString, /z|zzzz/g);
options = appendToDateTimeObject(
options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
@@ -1558,7 +1626,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
minute: {writable: true},
month: {writable: true},
numberingSystem: {writable: true},
- pattern: {writable: true},
+ [patternSymbol]: {writable: true},
+ pattern: patternAccessor,
requestedLocale: {value: requestedLocale, writable: true},
second: {writable: true},
timeZone: {writable: true},
@@ -1571,12 +1640,13 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
var formatter = %CreateDateTimeFormat(
requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
- if (!IS_UNDEFINED(tz) && tz !== resolved.timeZone) {
+ if (resolved.timeZone === "Etc/Unknown") {
throw MakeRangeError(kUnsupportedTimeZone, tz);
}
%MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
- ObjectDefineProperty(dateFormat, 'resolved', {value: resolved});
+ dateFormat[resolvedSymbol] = resolved;
+ ObjectDefineProperty(dateFormat, 'resolved', resolvedAccessor);
return dateFormat;
}
@@ -1607,7 +1677,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* DateTimeFormat resolvedOptions method.
*/
%AddNamedProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1635,22 +1705,22 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
};
var format = this;
- var fromPattern = fromLDMLString(format.resolved.pattern);
- var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
+ var fromPattern = fromLDMLString(format[resolvedSymbol][patternSymbol]);
+ var userCalendar = ICU_CALENDAR_MAP[format[resolvedSymbol].calendar];
if (IS_UNDEFINED(userCalendar)) {
// Use ICU name if we don't have a match. It shouldn't happen, but
// it would be too strict to throw for this.
- userCalendar = format.resolved.calendar;
+ userCalendar = format[resolvedSymbol].calendar;
}
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
+ var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
+ format[resolvedSymbol].locale);
var result = {
locale: locale,
- numberingSystem: format.resolved.numberingSystem,
+ numberingSystem: format[resolvedSymbol].numberingSystem,
calendar: userCalendar,
- timeZone: format.resolved.timeZone
+ timeZone: format[resolvedSymbol].timeZone
};
addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
@@ -1681,7 +1751,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* Options are optional parameter.
*/
%AddNamedProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1704,7 +1774,7 @@ function formatDate(formatter, dateValue) {
if (IS_UNDEFINED(dateValue)) {
dateMs = %DateCurrentTime();
} else {
- dateMs = ToNumber(dateValue);
+ dateMs = TO_NUMBER(dateValue);
}
if (!IsFinite(dateMs)) throw MakeRangeError(kDateRange);
@@ -1732,8 +1802,8 @@ addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
/**
- * Returns canonical Area/Location name, or throws an exception if the zone
- * name is invalid IANA name.
+ * Returns the canonical Area/Location(/Location) name, or throws an
+ * exception if the zone name is not a valid IANA name.
*/
function canonicalizeTimeZoneID(tzID) {
// Skip undefined zones.
@@ -1748,16 +1818,22 @@ function canonicalizeTimeZoneID(tzID) {
return 'UTC';
}
- // We expect only _ and / beside ASCII letters.
- // All inputs should conform to Area/Location from now on.
- var match = %_CallFunction(tzID, GetTimezoneNameCheckRE(), StringMatch);
- if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, tzID);
+ // TODO(jshin): Add support for Etc/GMT[+-]([1-9]|1[0-2])
+
+  // We expect only '_', '-' and '/' besides ASCII letters.
+ // All inputs should conform to Area/Location(/Location)* from now on.
+ var match = %_Call(StringMatch, tzID, GetTimezoneNameCheckRE());
+ if (IS_NULL(match)) throw MakeRangeError(kExpectedTimezoneID, tzID);
- var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
- var i = 3;
- while (!IS_UNDEFINED(match[i]) && i < match.length) {
- result = result + '_' + toTitleCaseWord(match[i]);
- i++;
+ var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
+ toTitleCaseTimezoneLocation(match[2]);
+
+ if (!IS_UNDEFINED(match[3]) && 3 < match.length) {
+ var locations = %_Call(StringSplit, match[3], '/');
+    // The 1st element is empty, so start at i = 1.
+ for (var i = 1; i < locations.length; i++) {
+ result = result + '/' + toTitleCaseTimezoneLocation(locations[i]);
+ }
}
return result;
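+// Illustrative example (not part of this change):
+// canonicalizeTimeZoneID('america/argentina/bueNos_airES') returns
+// 'America/Argentina/Buenos_Aires'; non-conforming IDs throw a RangeError.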
@@ -1796,7 +1872,8 @@ function initializeBreakIterator(iterator, locales, options) {
%MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
internalIterator);
- ObjectDefineProperty(iterator, 'resolved', {value: resolved});
+ iterator[resolvedSymbol] = resolved;
+ ObjectDefineProperty(iterator, 'resolved', resolvedAccessor);
return iterator;
}
@@ -1828,7 +1905,7 @@ function initializeBreakIterator(iterator, locales, options) {
*/
%AddNamedProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions',
function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1837,12 +1914,13 @@ function initializeBreakIterator(iterator, locales, options) {
}
var segmenter = this;
- var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
- segmenter.resolved.locale);
+ var locale =
+ getOptimalLanguageTag(segmenter[resolvedSymbol].requestedLocale,
+ segmenter[resolvedSymbol].locale);
return {
locale: locale,
- type: segmenter.resolved.type
+ type: segmenter[resolvedSymbol].type
};
},
DONT_ENUM
@@ -1861,7 +1939,7 @@ function initializeBreakIterator(iterator, locales, options) {
*/
%AddNamedProperty(Intl.v8BreakIterator, 'supportedLocalesOf',
function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1975,7 +2053,7 @@ function OverrideFunction(object, name, f) {
* Overrides the built-in method.
*/
OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2000,7 +2078,7 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
*/
OverrideFunction(GlobalString.prototype, 'normalize', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2012,11 +2090,10 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm =
- %_CallFunction(NORMALIZATION_FORMS, form, ArrayIndexOf);
+ var normalizationForm = %_Call(ArrayIndexOf, NORMALIZATION_FORMS, form);
if (normalizationForm === -1) {
throw MakeRangeError(kNormalizationForm,
- %_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
+ %_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
}
return %StringNormalize(s, normalizationForm);
@@ -2029,7 +2106,7 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
* If locale or options are omitted, defaults are used.
*/
OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2070,7 +2147,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* present in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2088,7 +2165,7 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
* in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2106,7 +2183,7 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
* in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
diff --git a/chromium/v8/src/iterator-prototype.js b/chromium/v8/src/js/iterator-prototype.js
index 2f49d90b1c9..6f2501979d0 100644
--- a/chromium/v8/src/iterator-prototype.js
+++ b/chromium/v8/src/js/iterator-prototype.js
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $iteratorPrototype;
-
(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
var GlobalObject = global.Object;
+ var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
// 25.1.2.1 %IteratorPrototype% [ @@iterator ] ( )
@@ -17,6 +16,6 @@ var $iteratorPrototype;
}
utils.SetFunctionName(IteratorPrototypeIterator, iteratorSymbol);
- %AddNamedProperty($iteratorPrototype, iteratorSymbol,
+ %AddNamedProperty(IteratorPrototype, iteratorSymbol,
IteratorPrototypeIterator, DONT_ENUM);
})
diff --git a/chromium/v8/src/json.js b/chromium/v8/src/js/json.js
index 6f8489088b5..b8836eaddd9 100644
--- a/chromium/v8/src/json.js
+++ b/chromium/v8/src/js/json.js
@@ -11,41 +11,52 @@
// -------------------------------------------------------------------
// Imports
+var GlobalDate = global.Date;
var GlobalJSON = global.JSON;
+var GlobalSet = global.Set;
var InternalArray = utils.InternalArray;
-var MathMax;
-var MathMin;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
var ObjectHasOwnProperty;
-var ToNumber;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MathMax = from.MathMax;
- MathMin = from.MathMin;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------
-function Revive(holder, name, reviver) {
+function CreateDataProperty(o, p, v) {
+ var desc = {value: v, enumerable: true, writable: true, configurable: true};
+ return %reflect_define_property(o, p, desc);
+}
+
+
+function InternalizeJSONProperty(holder, name, reviver) {
var val = holder[name];
- if (IS_OBJECT(val)) {
- if (IS_ARRAY(val)) {
- var length = val.length;
+ if (IS_RECEIVER(val)) {
+ if (%is_arraylike(val)) {
+ var length = TO_LENGTH(val.length);
for (var i = 0; i < length; i++) {
- var newElement = Revive(val, %_NumberToString(i), reviver);
- val[i] = newElement;
+ var newElement =
+ InternalizeJSONProperty(val, %_NumberToString(i), reviver);
+ if (IS_UNDEFINED(newElement)) {
+ %reflect_delete_property(val, i);
+ } else {
+ CreateDataProperty(val, i, newElement);
+ }
}
} else {
- for (var p in val) {
- if (HAS_OWN_PROPERTY(val, p)) {
- var newElement = Revive(val, p, reviver);
- if (IS_UNDEFINED(newElement)) {
- delete val[p];
- } else {
- val[p] = newElement;
- }
+ for (var p of %object_keys(val)) {
+ var newElement = InternalizeJSONProperty(val, p, reviver);
+ if (IS_UNDEFINED(newElement)) {
+ %reflect_delete_property(val, p);
+ } else {
+ CreateDataProperty(val, p, newElement);
}
}
}
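+// Illustrative example (not part of this change): a reviver that returns
+// UNDEFINED prunes the property, e.g.
+//   JSONParse('{"a":1,"b":2}', function(k, v) {
+//     return k === "b" ? UNDEFINED : v;
+//   })
+// yields {a: 1}, with "b" removed via %reflect_delete_property above.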
@@ -57,7 +68,7 @@ function Revive(holder, name, reviver) {
function JSONParse(text, reviver) {
var unfiltered = %ParseJson(text);
if (IS_CALLABLE(reviver)) {
- return Revive({'': unfiltered}, '', reviver);
+ return InternalizeJSONProperty({'': unfiltered}, '', reviver);
} else {
return unfiltered;
}
@@ -69,7 +80,7 @@ function SerializeArray(value, replacer, stack, indent, gap) {
var stepback = indent;
indent += gap;
var partial = new InternalArray();
- var len = value.length;
+ var len = TO_LENGTH(value.length);
for (var i = 0; i < len; i++) {
var strP = JSONSerialize(%_NumberToString(i), value, replacer, stack,
indent, gap);
@@ -101,27 +112,23 @@ function SerializeObject(value, replacer, stack, indent, gap) {
if (IS_ARRAY(replacer)) {
var length = replacer.length;
for (var i = 0; i < length; i++) {
- if (HAS_OWN_PROPERTY(replacer, i)) {
- var p = replacer[i];
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
+ var p = replacer[i];
+ var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+ if (!IS_UNDEFINED(strP)) {
+ var member = %QuoteJSONString(p) + ":";
+ if (gap != "") member += " ";
+ member += strP;
+ partial.push(member);
}
}
} else {
- for (var p in value) {
- if (HAS_OWN_PROPERTY(value, p)) {
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
+ for (var p of %object_keys(value)) {
+ var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+ if (!IS_UNDEFINED(strP)) {
+ var member = %QuoteJSONString(p) + ":";
+ if (gap != "") member += " ";
+ member += strP;
+ partial.push(member);
}
}
}
@@ -142,7 +149,7 @@ function SerializeObject(value, replacer, stack, indent, gap) {
function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
- if (IS_SPEC_OBJECT(value)) {
+ if (IS_RECEIVER(value)) {
var toJSON = value.toJSON;
if (IS_CALLABLE(toJSON)) {
value = %_Call(toJSON, value, key);
@@ -159,12 +166,12 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
return "null";
- } else if (IS_SPEC_OBJECT(value) && !IS_CALLABLE(value)) {
+ } else if (IS_RECEIVER(value) && !IS_CALLABLE(value)) {
// Non-callable object. If it's a primitive wrapper, it must be unwrapped.
- if (IS_ARRAY(value)) {
+ if (%is_arraylike(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
- value = ToNumber(value);
+ value = TO_NUMBER(value);
return JSON_NUMBER_TO_STRING(value);
} else if (IS_STRING_WRAPPER(value)) {
return %QuoteJSONString(TO_STRING(value));
@@ -180,14 +187,13 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
function JSONStringify(value, replacer, space) {
- if (%_ArgumentsLength() == 1) {
+ if (%_ArgumentsLength() == 1 && !IS_PROXY(value)) {
return %BasicJSONStringify(value);
}
- if (IS_ARRAY(replacer)) {
- // Deduplicate replacer array items.
+ if (!IS_CALLABLE(replacer) && %is_arraylike(replacer)) {
var property_list = new InternalArray();
- var seen_properties = { __proto__: null };
- var length = replacer.length;
+ var seen_properties = new GlobalSet();
+ var length = TO_LENGTH(replacer.length);
for (var i = 0; i < length; i++) {
var v = replacer[i];
var item;
@@ -200,9 +206,9 @@ function JSONStringify(value, replacer, space) {
} else {
continue;
}
- if (!seen_properties[item]) {
+ if (!seen_properties.has(item)) {
property_list.push(item);
- seen_properties[item] = true;
+ seen_properties.add(item);
}
}
replacer = property_list;
@@ -210,14 +216,14 @@ function JSONStringify(value, replacer, space) {
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
- space = ToNumber(space);
+ space = TO_NUMBER(space);
} else if (IS_STRING_WRAPPER(space)) {
space = TO_STRING(space);
}
}
var gap;
if (IS_NUMBER(space)) {
- space = MathMax(0, MathMin(TO_INTEGER(space), 10));
+ space = MaxSimple(0, MinSimple(TO_INTEGER(space), 10));
gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
@@ -242,6 +248,24 @@ utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
]);
// -------------------------------------------------------------------
+// Date.toJSON
+
+// 20.3.4.37 Date.prototype.toJSON ( key )
+function DateToJSON(key) {
+ var o = TO_OBJECT(this);
+ var tv = TO_PRIMITIVE_NUMBER(o);
+ if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
+ return null;
+ }
+ return o.toISOString();
+}
+
+// Set up non-enumerable functions of the Date prototype object.
+utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
+ "toJSON", DateToJSON
+]);
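+// Illustrative examples (not part of this change):
+// new GlobalDate(NaN).toJSON() returns null (non-finite time value),
+// while a valid date serializes via o.toISOString().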
+
+// -------------------------------------------------------------------
// JSON Builtins
function JsonSerializeAdapter(key, object) {
diff --git a/chromium/v8/src/macros.py b/chromium/v8/src/js/macros.py
index 6de9120fb29..3bcc8c114e4 100644
--- a/chromium/v8/src/macros.py
+++ b/chromium/v8/src/js/macros.py
@@ -39,32 +39,6 @@ define NEW_TWO_BYTE_STRING = false;
define GETTER = 0;
define SETTER = 1;
-# For date.js.
-define HoursPerDay = 24;
-define MinutesPerHour = 60;
-define SecondsPerMinute = 60;
-define msPerSecond = 1000;
-define msPerMinute = 60000;
-define msPerHour = 3600000;
-define msPerDay = 86400000;
-define msPerMonth = 2592000000;
-
-# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
-define kInvalidDate = 'Invalid Date';
-define kDayZeroInJulianDay = 2440588;
-define kMonthMask = 0x1e0;
-define kDayMask = 0x01f;
-define kYearShift = 9;
-define kMonthShift = 5;
-
-# Limits for parts of the date, so that we support all the dates that
-# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
-# the date (days since 1970) is in SMI range.
-define kMinYear = -1000000;
-define kMaxYear = 1000000;
-define kMinMonth = -10000000;
-define kMaxMonth = 10000000;
-
# Safe maximum number of arguments to push to stack, when multiplied by
# pointer size. Used by Function.prototype.apply(), Reflect.apply() and
# Reflect.construct().
@@ -73,6 +47,9 @@ define kSafeArgumentsLength = 0x800000;
# 2^53 - 1
define kMaxSafeInteger = 9007199254740991;
+# 2^32 - 1
+define kMaxUint32 = 4294967295;
+
# Strict mode flags for passing to %SetProperty
define kSloppyMode = 0;
define kStrictMode = 1;
@@ -85,80 +62,70 @@ define STRING_TO_REGEXP_CACHE_ID = 0;
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
-macro IS_NULL(arg) = (arg === null);
-macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
-macro IS_UNDEFINED(arg) = (arg === (void 0));
-macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
-macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_ARRAY(arg) = (%_IsArray(arg));
+macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
+macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
+macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_DATE(arg) = (%_IsDate(arg));
+macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
-macro IS_REGEXP(arg) = (%_IsRegExp(arg));
-macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
-macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
+macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
+macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
-macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
-macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
+macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
+macro IS_NULL(arg) = (arg === null);
+macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
+macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
-macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
-macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
-macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
-macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
+macro IS_OBJECT(arg) = (typeof(arg) === 'object');
+macro IS_PROXY(arg) = (%_IsJSProxy(arg));
+macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
-macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
-macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
-macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
-macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
-macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
+macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
-macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
+macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
+macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
+macro IS_STRING(arg) = (typeof(arg) === 'string');
+macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_STRONG(arg) = (%IsStrong(arg));
+macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
+macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
+macro IS_UNDEFINED(arg) = (arg === (void 0));
+macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
+macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
-# Macro for ECMAScript 5 queries of the type:
-# "Type(O) is object."
-# This is the same as being either a function or an object in V8 terminology
-# (including proxies).
-# In addition, an undetectable object is also included by this.
-macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
+# Macro for ES queries of the type: "Type(O) is Object."
+macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
-# Macro for ECMAScript 5 queries of the type:
-# "IsCallable(O)"
+# Macro for ES queries of the type: "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName);
-# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
-define kBoundFunctionIndex = 0;
-define kBoundThisIndex = 1;
-define kBoundArgumentsStartIndex = 2;
-
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
+macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
-macro TO_INT32(arg) = (arg | 0);
-macro TO_UINT32(arg) = (arg >>> 0);
-macro TO_LENGTH(arg) = (%ToLength(arg));
-macro TO_LENGTH_OR_UINT32(arg) = (harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
+macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(arg));
+macro TO_INT32(arg) = ((arg) | 0);
+macro TO_UINT32(arg) = ((arg) >>> 0);
+macro TO_LENGTH(arg) = (%_ToLength(arg));
+macro TO_LENGTH_OR_UINT32(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
+macro TO_LENGTH_OR_INTEGER(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_INTEGER(arg));
macro TO_STRING(arg) = (%_ToString(arg));
-macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : $nonNumberToNumber(arg));
+macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro TO_PRIMITIVE(arg) = (%_ToPrimitive(arg));
macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg));
macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
macro TO_NAME(arg) = (%_ToName(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
-macro HAS_OWN_PROPERTY(arg, index) = (%_CallFunction(arg, index, ObjectHasOwnProperty));
+macro HAS_OWN_PROPERTY(arg, index) = (%_Call(ObjectHasOwnProperty, arg, index));
macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
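+# Illustrative (not part of this change): TO_UINT32(-1) evaluates to
+# 4294967295 (kMaxUint32), and TO_LENGTH clamps its argument to a valid
+# array length via %_ToLength.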
-macro MAX_SIMPLE(argA, argB) = (argA < argB ? argB : argA);
-macro MIN_SIMPLE(argA, argB) = (argA < argB ? argA : argB);
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
@@ -168,7 +135,6 @@ macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
# Constants. The compiler constant folds them.
-define NAN = $NaN;
define INFINITY = (1/0);
define UNDEFINED = (void 0);
@@ -179,48 +145,18 @@ python macro CHAR_CODE(str) = ord(str[1]);
define REGEXP_NUMBER_OF_CAPTURES = 0;
define REGEXP_FIRST_CAPTURE = 3;
+# Macros for internal slot access.
+macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1);
+macro REGEXP_IGNORE_CASE(regexp) = (%_RegExpFlags(regexp) & 2);
+macro REGEXP_MULTILINE(regexp) = (%_RegExpFlags(regexp) & 4);
+macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8);
+macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16);
+macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
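+# Illustrative (not part of this change): for /a/gu, REGEXP_GLOBAL(...)
+# yields the truthy bit 1 and REGEXP_UNICODE(...) the truthy bit 16;
+# a missing flag yields 0.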
+
# We can't put macros in macros so we use constants here.
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
-# Limit according to ECMA 262 15.9.1.1
-define MAX_TIME_MS = 8640000000000000;
-# Limit which is MAX_TIME_MS + msPerMonth.
-define MAX_TIME_BEFORE_UTC = 8640002592000000;
-
-# Gets the value of a Date object. If arg is not a Date object
-# a type error is thrown.
-macro CHECK_DATE(arg) = if (!%_IsDate(arg)) %_ThrowNotDateError();
-macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
-macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
-
-macro LOCAL_YEAR(arg) = (%_DateField(arg, 1));
-macro LOCAL_MONTH(arg) = (%_DateField(arg, 2));
-macro LOCAL_DAY(arg) = (%_DateField(arg, 3));
-macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4));
-macro LOCAL_HOUR(arg) = (%_DateField(arg, 5));
-macro LOCAL_MIN(arg) = (%_DateField(arg, 6));
-macro LOCAL_SEC(arg) = (%_DateField(arg, 7));
-macro LOCAL_MS(arg) = (%_DateField(arg, 8));
-macro LOCAL_DAYS(arg) = (%_DateField(arg, 9));
-macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
-
-macro UTC_YEAR(arg) = (%_DateField(arg, 11));
-macro UTC_MONTH(arg) = (%_DateField(arg, 12));
-macro UTC_DAY(arg) = (%_DateField(arg, 13));
-macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14));
-macro UTC_HOUR(arg) = (%_DateField(arg, 15));
-macro UTC_MIN(arg) = (%_DateField(arg, 16));
-macro UTC_SEC(arg) = (%_DateField(arg, 17));
-macro UTC_MS(arg) = (%_DateField(arg, 18));
-macro UTC_DAYS(arg) = (%_DateField(arg, 19));
-macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
-
-macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21));
-
-macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
-macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
-
# Last input and last subject of regexp matches.
define LAST_SUBJECT_INDEX = 1;
macro LAST_SUBJECT(array) = ((array)[1]);
@@ -264,11 +200,11 @@ define COMPILATION_TYPE_JSON = 2;
# Matches Messages::kNoLineNumberInfo from v8.h
define kNoLineNumberInfo = 0;
-# Matches PropertyAttributes from property-details.h
-define PROPERTY_ATTRIBUTES_NONE = 0;
-define PROPERTY_ATTRIBUTES_STRING = 8;
-define PROPERTY_ATTRIBUTES_SYMBOLIC = 16;
-define PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL = 32;
+# Must match PropertyFilter in property-details.h
+define PROPERTY_FILTER_NONE = 0;
+define PROPERTY_FILTER_ONLY_ENUMERABLE = 2;
+define PROPERTY_FILTER_SKIP_STRINGS = 8;
+define PROPERTY_FILTER_SKIP_SYMBOLS = 16;
# Use for keys, values and entries iterators.
define ITERATOR_KIND_KEYS = 1;
@@ -304,9 +240,30 @@ define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
-macro DEBUG_IS_STEPPING(function) = (%_DebugIsActive() != 0 && %DebugCallbackSupportsStepping(function));
-macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (DEBUG_IS_STEPPING(function)) %DebugPrepareStepInIfStepping(function);
+macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (%_DebugIsActive() != 0) %DebugPrepareStepInIfStepping(function);
# SharedFlag equivalents
define kNotShared = false;
define kShared = true;
+
+# UseCounters from include/v8.h
+define kUseAsm = 0;
+define kBreakIterator = 1;
+define kLegacyConst = 2;
+define kMarkDequeOverflow = 3;
+define kStoreBufferOverflow = 4;
+define kSlotsBufferOverflow = 5;
+define kObjectObserve = 6;
+define kForcedGC = 7;
+define kSloppyMode = 8;
+define kStrictMode = 9;
+define kStrongMode = 10;
+define kRegExpPrototypeStickyGetter = 11;
+define kRegExpPrototypeToString = 12;
+define kRegExpPrototypeUnicodeGetter = 13;
+define kIntlV8Parse = 14;
+define kIntlPattern = 15;
+define kIntlResolved = 16;
+define kPromiseChain = 17;
+define kPromiseAccept = 18;
+define kPromiseDefer = 19;
diff --git a/chromium/v8/src/math.js b/chromium/v8/src/js/math.js
index 05eb9e46d7e..990a7e993c4 100644
--- a/chromium/v8/src/math.js
+++ b/chromium/v8/src/js/math.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var rngstate; // Initialized to a Uint32Array during genesis.
-
(function(global, utils) {
"use strict";
@@ -12,9 +10,17 @@ var rngstate; // Initialized to a Uint32Array during genesis.
// -------------------------------------------------------------------
// Imports
+define kRandomBatchSize = 64;
+// The first two slots are reserved to persist PRNG state.
+define kRandomNumberStart = 2;
+
+var GlobalFloat64Array = global.Float64Array;
var GlobalMath = global.Math;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
+var NaN = %GetRootNaN();
+var nextRandomIndex = kRandomBatchSize;
+var randomNumbers = UNDEFINED;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
//-------------------------------------------------------------------
@@ -56,7 +62,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.8
function MathExp(x) {
- return %MathExpRT(TO_NUMBER_INLINE(x));
+ return %MathExpRT(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.9
@@ -66,15 +72,15 @@ function MathFloorJS(x) {
// ECMA 262 - 15.8.2.10
function MathLog(x) {
- return %_MathLogRT(TO_NUMBER_INLINE(x));
+ return %_MathLogRT(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- arg1 = TO_NUMBER_INLINE(arg1);
- arg2 = TO_NUMBER_INLINE(arg2);
+ arg1 = TO_NUMBER(arg1);
+ arg2 = TO_NUMBER(arg2);
if (arg2 > arg1) return arg2;
if (arg1 > arg2) return arg1;
if (arg1 == arg2) {
@@ -82,12 +88,12 @@ function MathMax(arg1, arg2) { // length == 2
return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
}
// All comparisons failed, one of the arguments must be NaN.
- return NAN;
+ return NaN;
}
var r = -INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
- n = TO_NUMBER_INLINE(n);
+ n = TO_NUMBER(n);
// Make sure +0 is considered greater than -0.
if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
r = n;
@@ -100,8 +106,8 @@ function MathMax(arg1, arg2) { // length == 2
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
if (length == 2) {
- arg1 = TO_NUMBER_INLINE(arg1);
- arg2 = TO_NUMBER_INLINE(arg2);
+ arg1 = TO_NUMBER(arg1);
+ arg2 = TO_NUMBER(arg2);
if (arg2 > arg1) return arg1;
if (arg1 > arg2) return arg2;
if (arg1 == arg2) {
@@ -109,12 +115,12 @@ function MathMin(arg1, arg2) { // length == 2
return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
}
// All comparisons failed, one of the arguments must be NaN.
- return NAN;
+ return NaN;
}
var r = INFINITY;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
- n = TO_NUMBER_INLINE(n);
+ n = TO_NUMBER(n);
// Make sure -0 is considered less than +0.
if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
r = n;
@@ -125,32 +131,29 @@ function MathMin(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.13
function MathPowJS(x, y) {
- return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
+ return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
}
// ECMA 262 - 15.8.2.14
function MathRandom() {
- var r0 = (MathImul(18030, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
- rngstate[0] = r0;
- var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
- rngstate[1] = r1;
- var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
- // Division by 0x100000000 through multiplication by reciprocal.
- return (x < 0 ? (x + 0x100000000) : x) * 2.3283064365386962890625e-10;
+ if (nextRandomIndex >= kRandomBatchSize) {
+ randomNumbers = %GenerateRandomNumbers(randomNumbers);
+ nextRandomIndex = kRandomNumberStart;
+ }
+ return randomNumbers[nextRandomIndex++];
}
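+// Illustrative note (not part of this change): slots 0 and 1 of the
+// batch persist the PRNG state across calls to %GenerateRandomNumbers;
+// slots [kRandomNumberStart, kRandomBatchSize) hold doubles in [0, 1).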
function MathRandomRaw() {
- var r0 = (MathImul(18030, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
- rngstate[0] = r0;
- var r1 = (MathImul(36969, rngstate[1] & 0xFFFF) + (rngstate[1] >>> 16)) | 0;
- rngstate[1] = r1;
- var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
- return x & 0x3fffffff;
+ if (nextRandomIndex >= kRandomBatchSize) {
+ randomNumbers = %GenerateRandomNumbers(randomNumbers);
+ nextRandomIndex = kRandomNumberStart;
+ }
+ return %_DoubleLo(randomNumbers[nextRandomIndex++]) & 0x3FFFFFFF;
}
// ECMA 262 - 15.8.2.15
function MathRound(x) {
- return %RoundNumber(TO_NUMBER_INLINE(x));
+ return %RoundNumber(TO_NUMBER(x));
}
// ECMA 262 - 15.8.2.17
@@ -160,7 +163,7 @@ function MathSqrtJS(x) {
// Non-standard extension.
function MathImul(x, y) {
- return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
+ return %NumberImul(TO_NUMBER(x), TO_NUMBER(y));
}
// ES6 draft 09-27-13, section 20.2.2.28.
@@ -181,21 +184,9 @@ function MathTrunc(x) {
return x;
}
-// ES6 draft 09-27-13, section 20.2.2.33.
-function MathTanh(x) {
- x = TO_NUMBER_INLINE(x);
- // Idempotent for +/-0.
- if (x === 0) return x;
- // Returns +/-1 for +/-Infinity.
- if (!NUMBER_IS_FINITE(x)) return MathSign(x);
- var exp1 = MathExp(x);
- var exp2 = MathExp(-x);
- return (exp1 - exp2) / (exp1 + exp2);
-}
-
// ES6 draft 09-27-13, section 20.2.2.5.
function MathAsinh(x) {
- x = TO_NUMBER_INLINE(x);
+ x = TO_NUMBER(x);
// Idempotent for NaN, +/-0 and +/-Infinity.
if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
if (x > 0) return MathLog(x + %_MathSqrt(x * x + 1));
@@ -205,8 +196,8 @@ function MathAsinh(x) {
// ES6 draft 09-27-13, section 20.2.2.3.
function MathAcosh(x) {
- x = TO_NUMBER_INLINE(x);
- if (x < 1) return NAN;
+ x = TO_NUMBER(x);
+ if (x < 1) return NaN;
// Idempotent for NaN and +Infinity.
if (!NUMBER_IS_FINITE(x)) return x;
return MathLog(x + %_MathSqrt(x + 1) * %_MathSqrt(x - 1));
@@ -214,11 +205,11 @@ function MathAcosh(x) {
// ES6 draft 09-27-13, section 20.2.2.7.
function MathAtanh(x) {
- x = TO_NUMBER_INLINE(x);
+ x = TO_NUMBER(x);
// Idempotent for +/-0.
if (x === 0) return x;
// Returns NaN for NaN and +/- Infinity.
- if (!NUMBER_IS_FINITE(x)) return NAN;
+ if (!NUMBER_IS_FINITE(x)) return NaN;
return 0.5 * MathLog((1 + x) / (1 - x));
}
@@ -232,7 +223,7 @@ function MathHypot(x, y) { // Function length is 2.
var max = 0;
for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
- n = TO_NUMBER_INLINE(n);
+ n = TO_NUMBER(n);
if (n === INFINITY || n === -INFINITY) return INFINITY;
n = MathAbs(n);
if (n > max) max = n;
@@ -256,7 +247,7 @@ function MathHypot(x, y) { // Function length is 2.
// ES6 draft 09-27-13, section 20.2.2.16.
function MathFroundJS(x) {
- return %MathFround(TO_NUMBER_INLINE(x));
+ return %MathFround(TO_NUMBER(x));
}
// ES6 draft 07-18-14, section 20.2.2.11
@@ -269,7 +260,7 @@ function MathClz32JS(x) {
// Using initial approximation adapted from Kahan's cbrt and 4 iterations
// of Newton's method.
function MathCbrt(x) {
- x = TO_NUMBER_INLINE(x);
+ x = TO_NUMBER(x);
if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
}
@@ -280,7 +271,7 @@ endmacro
function CubeRoot(x) {
var approx_hi = MathFloorJS(%_DoubleHi(x) / 3) + 0x2A9F7893;
- var approx = %_ConstructDouble(approx_hi, 0);
+ var approx = %_ConstructDouble(approx_hi | 0, 0);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
@@ -328,7 +319,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"imul", MathImul,
"sign", MathSign,
"trunc", MathTrunc,
- "tanh", MathTanh,
"asinh", MathAsinh,
"acosh", MathAcosh,
"atanh", MathAtanh,
diff --git a/chromium/v8/src/messages.js b/chromium/v8/src/js/messages.js
index 5441cfe34a7..6b7306a7d35 100644
--- a/chromium/v8/src/messages.js
+++ b/chromium/v8/src/js/messages.js
@@ -4,15 +4,6 @@
// -------------------------------------------------------------------
-var $errorToString;
-var MakeError;
-var MakeEvalError;
-var MakeRangeError;
-var MakeReferenceError;
-var MakeSyntaxError;
-var MakeTypeError;
-var MakeURIError;
-
(function(global, utils) {
%CheckIsBootstrapping();
@@ -32,10 +23,10 @@ var callSitePositionSymbol =
utils.ImportNow("call_site_position_symbol");
var callSiteStrictSymbol =
utils.ImportNow("call_site_strict_symbol");
+var FLAG_harmony_tostring;
var Float32x4ToString;
var formattedStackTraceSymbol =
utils.ImportNow("formatted_stack_trace_symbol");
-var FunctionSourceString
var GlobalObject = global.Object;
var Int16x8ToString;
var Int32x4ToString;
@@ -43,12 +34,14 @@ var Int8x16ToString;
var InternalArray = utils.InternalArray;
var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
var ObjectDefineProperty;
-var ObjectToString;
+var ObjectToString = utils.ImportNow("object_to_string");
+var Script = utils.ImportNow("Script");
var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
var StringCharAt;
var StringIndexOf;
var StringSubstring;
var SymbolToString;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var Uint16x8ToString;
var Uint32x4ToString;
var Uint8x16ToString;
@@ -59,12 +52,10 @@ utils.Import(function(from) {
Bool32x4ToString = from.Bool32x4ToString;
Bool8x16ToString = from.Bool8x16ToString;
Float32x4ToString = from.Float32x4ToString;
- FunctionSourceString = from.FunctionSourceString;
Int16x8ToString = from.Int16x8ToString;
Int32x4ToString = from.Int32x4ToString;
Int8x16ToString = from.Int8x16ToString;
ObjectDefineProperty = from.ObjectDefineProperty;
- ObjectToString = from.ObjectToString;
StringCharAt = from.StringCharAt;
StringIndexOf = from.StringIndexOf;
StringSubstring = from.StringSubstring;
@@ -74,6 +65,10 @@ utils.Import(function(from) {
Uint8x16ToString = from.Uint8x16ToString;
});
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tostring = from.FLAG_harmony_tostring;
+});
+
// -------------------------------------------------------------------
var GlobalError;
@@ -88,102 +83,83 @@ var GlobalEvalError;
function NoSideEffectsObjectToString() {
if (IS_UNDEFINED(this)) return "[object Undefined]";
if (IS_NULL(this)) return "[object Null]";
- return "[object " + %_ClassOf(TO_OBJECT(this)) + "]";
+ var O = TO_OBJECT(this);
+ var builtinTag = %_ClassOf(O);
+ var tag;
+ if (FLAG_harmony_tostring) {
+ tag = %GetDataProperty(O, toStringTagSymbol);
+ if (!IS_STRING(tag)) {
+ tag = builtinTag;
+ }
+ } else {
+ tag = builtinTag;
+ }
+ return `[object ${tag}]`;
}
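
The rewrite consults @@toStringTag through %GetDataProperty, so no user getter can run while an error message is being formatted. When FLAG_harmony_tostring is on, the result matches standard Object.prototype.toString behavior:

    var o = {};
    console.log(Object.prototype.toString.call(o));  // "[object Object]"
    o[Symbol.toStringTag] = "Custom";
    console.log(Object.prototype.toString.call(o));  // "[object Custom]"
    o[Symbol.toStringTag] = 42;  // non-string tags fall back to the builtin tag
    console.log(Object.prototype.toString.call(o));  // "[object Object]"
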
+function IsErrorObject(obj) {
+ return HAS_PRIVATE(obj, stackTraceSymbol);
+}
+
+function NoSideEffectsErrorToString() {
+ var name = %GetDataProperty(this, "name");
+ var message = %GetDataProperty(this, "message");
+ name = IS_UNDEFINED(name) ? "Error" : NoSideEffectsToString(name);
+ message = IS_UNDEFINED(message) ? "" : NoSideEffectsToString(message);
+ if (name == "") return message;
+ if (message == "") return name;
+ return `${name}: ${message}`;
+}
-function NoSideEffectToString(obj) {
+function NoSideEffectsToString(obj) {
if (IS_STRING(obj)) return obj;
if (IS_NUMBER(obj)) return %_NumberToString(obj);
if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
if (IS_UNDEFINED(obj)) return 'undefined';
if (IS_NULL(obj)) return 'null';
if (IS_FUNCTION(obj)) {
- var str = %_CallFunction(obj, obj, FunctionSourceString);
+ var str = %FunctionToString(obj);
if (str.length > 128) {
str = %_SubString(str, 0, 111) + "...<omitted>..." +
%_SubString(str, str.length - 2, str.length);
}
return str;
}
- if (IS_SYMBOL(obj)) return %_CallFunction(obj, SymbolToString);
+ if (IS_SYMBOL(obj)) return %_Call(SymbolToString, obj);
if (IS_SIMD_VALUE(obj)) {
switch (typeof(obj)) {
- case 'float32x4': return %_CallFunction(obj, Float32x4ToString);
- case 'int32x4': return %_CallFunction(obj, Int32x4ToString);
- case 'int16x8': return %_CallFunction(obj, Int16x8ToString);
- case 'int8x16': return %_CallFunction(obj, Int8x16ToString);
- case 'uint32x4': return %_CallFunction(obj, Uint32x4ToString);
- case 'uint16x8': return %_CallFunction(obj, Uint16x8ToString);
- case 'uint8x16': return %_CallFunction(obj, Uint8x16ToString);
- case 'bool32x4': return %_CallFunction(obj, Bool32x4ToString);
- case 'bool16x8': return %_CallFunction(obj, Bool16x8ToString);
- case 'bool8x16': return %_CallFunction(obj, Bool8x16ToString);
- }
- }
- if (IS_OBJECT(obj)
- && %GetDataProperty(obj, "toString") === ObjectToString) {
- var constructor = %GetDataProperty(obj, "constructor");
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
- }
+ case 'float32x4': return %_Call(Float32x4ToString, obj);
+ case 'int32x4': return %_Call(Int32x4ToString, obj);
+ case 'int16x8': return %_Call(Int16x8ToString, obj);
+ case 'int8x16': return %_Call(Int8x16ToString, obj);
+ case 'uint32x4': return %_Call(Uint32x4ToString, obj);
+ case 'uint16x8': return %_Call(Uint16x8ToString, obj);
+ case 'uint8x16': return %_Call(Uint8x16ToString, obj);
+ case 'bool32x4': return %_Call(Bool32x4ToString, obj);
+ case 'bool16x8': return %_Call(Bool16x8ToString, obj);
+ case 'bool8x16': return %_Call(Bool8x16ToString, obj);
}
}
- if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
- return %_CallFunction(obj, ErrorToString);
- }
-
- return %_CallFunction(obj, NoSideEffectsObjectToString);
-}
-
-// To determine whether we can safely stringify an object using ErrorToString
-// without the risk of side-effects, we need to check whether the object is
-// either an instance of a native error type (via '%_ClassOf'), or has Error
-// in its prototype chain and hasn't overwritten 'toString' with something
-// strange and unusual.
-function CanBeSafelyTreatedAsAnErrorObject(obj) {
- switch (%_ClassOf(obj)) {
- case 'Error':
- case 'EvalError':
- case 'RangeError':
- case 'ReferenceError':
- case 'SyntaxError':
- case 'TypeError':
- case 'URIError':
- return true;
- }
-
- var objToString = %GetDataProperty(obj, "toString");
- return obj instanceof GlobalError && objToString === ErrorToString;
-}
-
-
-// When formatting internally created error messages, do not
-// invoke overwritten error toString methods but explicitly use
-// the error to string method. This is to avoid leaking error
-// objects between script tags in a browser setting.
-function ToStringCheckErrorObject(obj) {
- if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
- return %_CallFunction(obj, ErrorToString);
- } else {
- return TO_STRING(obj);
- }
-}
+ if (IS_RECEIVER(obj)) {
+ // When internally formatting error objects, use a side-effects-free version
+ // of Error.prototype.toString independent of the actually installed
+ // toString method.
+ if (IsErrorObject(obj) ||
+ %GetDataProperty(obj, "toString") === ErrorToString) {
+ return %_Call(NoSideEffectsErrorToString, obj);
+ }
-function ToDetailString(obj) {
- if (obj != null && IS_OBJECT(obj) && obj.toString === ObjectToString) {
- var constructor = obj.constructor;
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
+ if (%GetDataProperty(obj, "toString") === ObjectToString) {
+ var constructor = %GetDataProperty(obj, "constructor");
+ if (IS_FUNCTION(constructor)) {
+ var constructor_name = %FunctionGetName(constructor);
+ if (constructor_name != "") return `#<${constructor_name}>`;
}
}
}
- return ToStringCheckErrorObject(obj);
+
+ return %_Call(NoSideEffectsObjectToString, obj);
}
@@ -208,9 +184,9 @@ function MakeGenericError(constructor, type, arg0, arg1, arg2) {
// Helper functions; called from the runtime system.
function FormatMessage(type, arg0, arg1, arg2) {
- var arg0 = NoSideEffectToString(arg0);
- var arg1 = NoSideEffectToString(arg1);
- var arg2 = NoSideEffectToString(arg2);
+ var arg0 = NoSideEffectsToString(arg0);
+ var arg1 = NoSideEffectsToString(arg1);
+ var arg2 = NoSideEffectsToString(arg2);
try {
return %FormatMessageString(type, arg0, arg1, arg2);
} catch (e) {
@@ -304,7 +280,7 @@ function ScriptLocationFromPosition(position,
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') {
+ if (end > 0 && %_Call(StringCharAt, this.source, end - 1) == '\r') {
end--;
}
var column = position - start;
@@ -427,7 +403,7 @@ function ScriptSourceLine(opt_line) {
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- return %_CallFunction(this.source, start, end, StringSubstring);
+ return %_Call(StringSubstring, this.source, start, end);
}
@@ -456,11 +432,10 @@ function ScriptLineEnd(n) {
* If sourceURL comment is available returns sourceURL comment contents.
* Otherwise, script name is returned. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * and Source Map Revision 3 proposal for details on using //# sourceURL and
- * deprecated //@ sourceURL comment to identify scripts that don't have name.
+ * and Source Map Revision 3 proposal for details on using the //# sourceURL
+ * comment to identify scripts that don't have a name.
*
- * @return {?string} script name if present, value for //# sourceURL or
- * deprecated //@ sourceURL comment otherwise.
+ * @return {?string} script name if present, value for //# sourceURL comment otherwise.
*/
function ScriptNameOrSourceURL() {
if (this.source_url) return this.source_url;
@@ -526,10 +501,7 @@ function SourceLocation(script, position, line, column, start, end) {
* Source text for this location.
*/
function SourceLocationSourceText() {
- return %_CallFunction(this.script.source,
- this.start,
- this.end,
- StringSubstring);
+ return %_Call(StringSubstring, this.script.source, this.start, this.end);
}
@@ -571,10 +543,10 @@ function SourceSlice(script, from_line, to_line, from_position, to_position) {
* the line terminating characters (if any)
*/
function SourceSliceSourceText() {
- return %_CallFunction(this.script.source,
- this.from_position,
- this.to_position,
- StringSubstring);
+ return %_Call(StringSubstring,
+ this.script.source,
+ this.from_position,
+ this.to_position);
}
utils.SetUpLockedPrototype(SourceSlice,
@@ -591,10 +563,18 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// Error implementation
function CallSite(receiver, fun, pos, strict_mode) {
+ if (!IS_FUNCTION(fun)) {
+ throw MakeTypeError(kCallSiteExpectsFunction, typeof fun);
+ }
+
+ if (IS_UNDEFINED(new.target)) {
+ return new CallSite(receiver, fun, pos, strict_mode);
+ }
+
SET_PRIVATE(this, callSiteReceiverSymbol, receiver);
SET_PRIVATE(this, callSiteFunctionSymbol, fun);
- SET_PRIVATE(this, callSitePositionSymbol, pos);
- SET_PRIVATE(this, callSiteStrictSymbol, strict_mode);
+ SET_PRIVATE(this, callSitePositionSymbol, TO_INT32(pos));
+ SET_PRIVATE(this, callSiteStrictSymbol, TO_BOOLEAN(strict_mode));
}
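
CallSite now validates its function argument and re-dispatches through new.target, so a plain call still constructs; pos and strict_mode are coerced eagerly instead of being stored raw. The guard pattern in isolation (Point is a hypothetical name):

    function Point(x, y) {
      if (new.target === undefined) return new Point(x, y);  // plain call
      this.x = x | 0;  // eager coercion, mirroring TO_INT32 above
      this.y = y | 0;
    }
    var p = Point(1.9, 2);                 // no "new" needed
    console.log(p instanceof Point, p.x);  // true 1
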
function CallSiteGetThis() {
@@ -702,13 +682,12 @@ function CallSiteToString() {
var typeName = GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), true);
var methodName = this.getMethodName();
if (functionName) {
- if (typeName &&
- %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
+ if (typeName && %_Call(StringIndexOf, functionName, typeName) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName &&
- (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
+ (%_Call(StringIndexOf, functionName, "." + methodName) !=
functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
@@ -790,7 +769,7 @@ function FormatEvalOrigin(script) {
function FormatErrorString(error) {
try {
- return %_CallFunction(error, ErrorToString);
+ return %_Call(ErrorToString, error);
} catch (e) {
try {
return "<error: " + e + ">";
@@ -856,23 +835,19 @@ function FormatStackTrace(obj, raw_stack) {
}
lines.push(" at " + line);
}
- return %_CallFunction(lines, "\n", ArrayJoin);
+ return %_Call(ArrayJoin, lines, "\n");
}
function GetTypeName(receiver, requireConstructor) {
if (IS_NULL_OR_UNDEFINED(receiver)) return null;
- var constructor = receiver.constructor;
- if (!constructor) {
- return requireConstructor ? null :
- %_CallFunction(receiver, NoSideEffectsObjectToString);
- }
- var constructorName = constructor.name;
- if (!constructorName) {
- return requireConstructor ? null :
- %_CallFunction(receiver, NoSideEffectsObjectToString);
+ if (IS_PROXY(receiver)) return "Proxy";
+
+ var constructor = %GetDataProperty(TO_OBJECT(receiver), "constructor");
+ if (!IS_FUNCTION(constructor)) {
+ return requireConstructor ? null : %_Call(NoSideEffectsToString, receiver);
}
- return constructorName;
+ return %FunctionGetName(constructor);
}
@@ -906,7 +881,7 @@ var StackTraceGetter = function() {
// If the receiver equals the holder, set the formatted stack trace that the
// getter returns.
var StackTraceSetter = function(v) {
- if (HAS_PRIVATE(this, stackTraceSymbol)) {
+ if (IsErrorObject(this)) {
SET_PRIVATE(this, stackTraceSymbol, UNDEFINED);
SET_PRIVATE(this, formattedStackTraceSymbol, v);
}
@@ -918,99 +893,90 @@ var StackTraceSetter = function(v) {
var captureStackTrace = function() {};
-// Define special error type constructors.
-function DefineError(global, f) {
- // Store the error function in both the global object
- // and the runtime object. The function is fetched
- // from the runtime object when throwing errors from
- // within the runtime system to avoid strange side
- // effects when overwriting the error functions from
- // user code.
- var name = f.name;
- %AddNamedProperty(global, name, f, DONT_ENUM);
- // Configure the error function.
- if (name == 'Error') {
- // The prototype of the Error object must itself be an error.
- // However, it can't be an instance of the Error object because
- // it hasn't been properly configured yet. Instead we create a
- // special not-a-true-error-but-close-enough object.
- var ErrorPrototype = function() {};
- %FunctionSetPrototype(ErrorPrototype, GlobalObject.prototype);
- %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
- %FunctionSetPrototype(f, new ErrorPrototype());
- } else {
- %FunctionSetPrototype(f, new GlobalError());
- %InternalSetPrototype(f, GlobalError);
+// Set up special error type constructors.
+function SetUpError(error_function) {
+ %FunctionSetInstanceClassName(error_function, 'Error');
+ var name = error_function.name;
+ var prototype = new GlobalObject();
+ if (name !== 'Error') {
+ %InternalSetPrototype(error_function, GlobalError);
+ %InternalSetPrototype(prototype, GlobalError.prototype);
}
- %FunctionSetInstanceClassName(f, 'Error');
- %AddNamedProperty(f.prototype, 'constructor', f, DONT_ENUM);
- %AddNamedProperty(f.prototype, 'name', name, DONT_ENUM);
- %SetCode(f, function(m) {
- if (%_IsConstructCall()) {
- try { captureStackTrace(this, f); } catch (e) { }
- // Define all the expected properties directly on the error
- // object. This avoids going through getters and setters defined
- // on prototype objects.
- if (!IS_UNDEFINED(m)) {
- %AddNamedProperty(this, 'message', TO_STRING(m), DONT_ENUM);
- }
- } else {
- return new f(m);
+ %FunctionSetPrototype(error_function, prototype);
+
+ %AddNamedProperty(error_function.prototype, 'name', name, DONT_ENUM);
+ %AddNamedProperty(error_function.prototype, 'message', '', DONT_ENUM);
+ %AddNamedProperty(
+ error_function.prototype, 'constructor', error_function, DONT_ENUM);
+
+ %SetCode(error_function, function(m) {
+ if (IS_UNDEFINED(new.target)) return new error_function(m);
+
+ try { captureStackTrace(this, error_function); } catch (e) { }
+ // Define all the expected properties directly on the error
+ // object. This avoids going through getters and setters defined
+ // on prototype objects.
+ if (!IS_UNDEFINED(m)) {
+ %AddNamedProperty(this, 'message', TO_STRING(m), DONT_ENUM);
}
});
- %SetNativeFlag(f);
- return f;
+
+ %SetNativeFlag(error_function);
+ return error_function;
};
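
SetUpError no longer synthesizes the error constructors; it decorates the ones the bootstrapper installed, giving each NativeError the ES6 19.5.6 shape: the constructor inherits from Error, its prototype from Error.prototype, and a plain call constructs via the new.target check. The resulting invariants can be checked with plain reflection:

    console.log(Object.getPrototypeOf(RangeError) === Error);                      // true
    console.log(Object.getPrototypeOf(RangeError.prototype) === Error.prototype);  // true
    console.log(RangeError.prototype.name, RangeError.prototype.message);          // "RangeError" ""
    var e = RangeError("boom");                       // plain call still constructs
    console.log(e instanceof RangeError, e.message);  // true "boom"
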
-GlobalError = DefineError(global, function Error() { });
-GlobalEvalError = DefineError(global, function EvalError() { });
-GlobalRangeError = DefineError(global, function RangeError() { });
-GlobalReferenceError = DefineError(global, function ReferenceError() { });
-GlobalSyntaxError = DefineError(global, function SyntaxError() { });
-GlobalTypeError = DefineError(global, function TypeError() { });
-GlobalURIError = DefineError(global, function URIError() { });
+GlobalError = SetUpError(global.Error);
+GlobalEvalError = SetUpError(global.EvalError);
+GlobalRangeError = SetUpError(global.RangeError);
+GlobalReferenceError = SetUpError(global.ReferenceError);
+GlobalSyntaxError = SetUpError(global.SyntaxError);
+GlobalTypeError = SetUpError(global.TypeError);
+GlobalURIError = SetUpError(global.URIError);
-%AddNamedProperty(GlobalError.prototype, 'message', '', DONT_ENUM);
+utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
+ ['toString', ErrorToString]);
function ErrorToString() {
- if (!IS_SPEC_OBJECT(this)) {
+ if (!IS_RECEIVER(this)) {
throw MakeTypeError(kCalledOnNonObject, "Error.prototype.toString");
}
- return %ErrorToStringRT(this);
-}
+ var name = this.name;
+ name = IS_UNDEFINED(name) ? "Error" : TO_STRING(name);
-utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
- ['toString', ErrorToString]);
+ var message = this.message;
+ message = IS_UNDEFINED(message) ? "" : TO_STRING(message);
-$errorToString = ErrorToString;
+ if (name == "") return message;
+ if (message == "") return name;
+  return `${name}: ${message}`;
+}
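
ErrorToString now inlines the ES6 19.5.3.4 algorithm rather than delegating to the removed %ErrorToStringRT runtime function. The same steps in standalone JS, for comparison:

    function errorToString() {
      if (Object(this) !== this) throw new TypeError("called on non-object");
      var name = this.name === undefined ? "Error" : String(this.name);
      var message = this.message === undefined ? "" : String(this.message);
      if (name === "") return message;
      if (message === "") return name;
      return name + ": " + message;
    }
    console.log(errorToString.call({name: "Oops", message: "bad"}));  // "Oops: bad"
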
-MakeError = function(type, arg0, arg1, arg2) {
+function MakeError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalError, type, arg0, arg1, arg2);
}
-MakeRangeError = function(type, arg0, arg1, arg2) {
+function MakeRangeError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalRangeError, type, arg0, arg1, arg2);
}
-MakeSyntaxError = function(type, arg0, arg1, arg2) {
+function MakeSyntaxError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalSyntaxError, type, arg0, arg1, arg2);
}
-MakeTypeError = function(type, arg0, arg1, arg2) {
+function MakeTypeError(type, arg0, arg1, arg2) {
return MakeGenericError(GlobalTypeError, type, arg0, arg1, arg2);
}
-MakeURIError = function() {
+function MakeURIError() {
return MakeGenericError(GlobalURIError, kURIMalformed);
}
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
var StackOverflowBoilerplate = MakeRangeError(kStackOverflow);
-%DefineAccessorPropertyUnchecked(StackOverflowBoilerplate, 'stack',
- StackTraceGetter, StackTraceSetter,
- DONT_ENUM);
+utils.InstallGetterSetter(StackOverflowBoilerplate, 'stack',
+                          StackTraceGetter, StackTraceSetter);
// Define actual captureStackTrace function after everything has been set up.
captureStackTrace = function captureStackTrace(obj, cons_opt) {
@@ -1024,8 +990,6 @@ captureStackTrace = function captureStackTrace(obj, cons_opt) {
GlobalError.captureStackTrace = captureStackTrace;
%InstallToContext([
- "error_function", GlobalError,
- "eval_error_function", GlobalEvalError,
"get_stack_trace_line_fun", GetStackTraceLine,
"make_error_function", MakeGenericError,
"make_range_error", MakeRangeError,
@@ -1033,14 +997,17 @@ GlobalError.captureStackTrace = captureStackTrace;
"message_get_column_number", GetColumnNumber,
"message_get_line_number", GetLineNumber,
"message_get_source_line", GetSourceLine,
- "no_side_effect_to_string_fun", NoSideEffectToString,
- "range_error_function", GlobalRangeError,
- "reference_error_function", GlobalReferenceError,
+ "no_side_effects_to_string_fun", NoSideEffectsToString,
"stack_overflow_boilerplate", StackOverflowBoilerplate,
- "syntax_error_function", GlobalSyntaxError,
- "to_detail_string_fun", ToDetailString,
- "type_error_function", GlobalTypeError,
- "uri_error_function", GlobalURIError,
]);
+utils.Export(function(to) {
+ to.ErrorToString = ErrorToString;
+ to.MakeError = MakeError;
+ to.MakeRangeError = MakeRangeError;
+ to.MakeSyntaxError = MakeSyntaxError;
+ to.MakeTypeError = MakeTypeError;
+ to.MakeURIError = MakeURIError;
+});
+
});
diff --git a/chromium/v8/src/object-observe.js b/chromium/v8/src/js/object-observe.js
index 80296586d21..5e256bf0bbd 100644
--- a/chromium/v8/src/object-observe.js
+++ b/chromium/v8/src/js/object-observe.js
@@ -2,13 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $observeEnqueueSpliceRecord;
-var $observeBeginPerformSplice;
-var $observeEndPerformSplice;
-
-var $observeObjectMethods;
-var $observeArrayMethods;
-
(function(global, utils) {
"use strict";
@@ -18,16 +11,15 @@ var $observeArrayMethods;
// -------------------------------------------------------------------
// Imports
+var GetHash;
var GlobalArray = global.Array;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
-
-var ObjectFreeze;
-var ObjectIsFrozen;
+var MakeTypeError;
utils.Import(function(from) {
- ObjectFreeze = from.ObjectFreeze;
- ObjectIsFrozen = from.ObjectIsFrozen;
+ GetHash = from.GetHash;
+ MakeTypeError = from.MakeTypeError;
});
// -------------------------------------------------------------------
@@ -196,7 +188,7 @@ function ObserverIsActive(observer, objectInfo) {
function ObjectInfoGetOrCreate(object) {
var objectInfo = ObjectInfoGet(object);
if (IS_UNDEFINED(objectInfo)) {
- if (!%_IsJSProxy(object)) {
+ if (!IS_PROXY(object)) {
%SetIsObserved(object);
}
objectInfo = {
@@ -207,7 +199,7 @@ function ObjectInfoGetOrCreate(object) {
performingCount: 0,
};
%WeakCollectionSet(GetObservationStateJS().objectInfoMap,
- object, objectInfo, $getHash(object));
+ object, objectInfo, GetHash(object));
}
return objectInfo;
}
@@ -215,13 +207,13 @@ function ObjectInfoGetOrCreate(object) {
function ObjectInfoGet(object) {
return %WeakCollectionGet(GetObservationStateJS().objectInfoMap, object,
- $getHash(object));
+ GetHash(object));
}
function ObjectInfoGetFromNotifier(notifier) {
return %WeakCollectionGet(GetObservationStateJS().notifierObjectInfoMap,
- notifier, $getHash(notifier));
+ notifier, GetHash(notifier));
}
@@ -230,7 +222,7 @@ function ObjectInfoGetNotifier(objectInfo) {
var notifier = { __proto__: notifierPrototype };
objectInfo.notifier = notifier;
%WeakCollectionSet(GetObservationStateJS().notifierObjectInfoMap,
- notifier, objectInfo, $getHash(notifier));
+ notifier, objectInfo, GetHash(notifier));
}
return objectInfo.notifier;
@@ -328,7 +320,7 @@ function ConvertAcceptListToTypeMap(arg) {
if (IS_UNDEFINED(arg))
return arg;
- if (!IS_SPEC_OBJECT(arg)) throw MakeTypeError(kObserveInvalidAccept);
+ if (!IS_RECEIVER(arg)) throw MakeTypeError(kObserveInvalidAccept);
var len = TO_INTEGER(arg.length);
if (len < 0) len = 0;
@@ -342,13 +334,13 @@ function ConvertAcceptListToTypeMap(arg) {
// normalizes. When delivery clears any pending change records, it re-optimizes.
function CallbackInfoGet(callback) {
return %WeakCollectionGet(GetObservationStateJS().callbackInfoMap, callback,
- $getHash(callback));
+ GetHash(callback));
}
function CallbackInfoSet(callback, callbackInfo) {
%WeakCollectionSet(GetObservationStateJS().callbackInfoMap,
- callback, callbackInfo, $getHash(callback));
+ callback, callbackInfo, GetHash(callback));
}
@@ -384,7 +376,7 @@ function CallbackInfoNormalize(callback) {
function ObjectObserve(object, callback, acceptList) {
- if (!IS_SPEC_OBJECT(object))
+ if (!IS_RECEIVER(object))
throw MakeTypeError(kObserveNonObject, "observe", "observe");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "observe");
@@ -392,7 +384,7 @@ function ObjectObserve(object, callback, acceptList) {
throw MakeTypeError(kObserveAccessChecked, "observe");
if (!IS_CALLABLE(callback))
throw MakeTypeError(kObserveNonFunction, "observe");
- if (ObjectIsFrozen(callback))
+ if (%object_is_frozen(callback))
throw MakeTypeError(kObserveCallbackFrozen);
var objectObserveFn = %GetObjectContextObjectObserve(object);
@@ -409,7 +401,7 @@ function NativeObjectObserve(object, callback, acceptList) {
function ObjectUnobserve(object, callback) {
- if (!IS_SPEC_OBJECT(object))
+ if (!IS_RECEIVER(object))
throw MakeTypeError(kObserveNonObject, "unobserve", "unobserve");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "unobserve");
@@ -485,7 +477,7 @@ function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
%DefineDataPropertyUnchecked(
newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE);
}
- ObjectFreeze(newRecord);
+ %object_freeze(newRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord);
}
@@ -537,8 +529,8 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
addedCount: addedCount
};
- ObjectFreeze(changeRecord);
- ObjectFreeze(changeRecord.removed);
+ %object_freeze(changeRecord);
+ %object_freeze(changeRecord.removed);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
@@ -562,13 +554,13 @@ function NotifyChange(type, object, name, oldValue) {
};
}
- ObjectFreeze(changeRecord);
+ %object_freeze(changeRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
function ObjectNotifierNotify(changeRecord) {
- if (!IS_SPEC_OBJECT(this))
+ if (!IS_RECEIVER(this))
throw MakeTypeError(kCalledOnNonObject, "notify");
var objectInfo = ObjectInfoGetFromNotifier(this);
@@ -582,7 +574,7 @@ function ObjectNotifierNotify(changeRecord) {
function ObjectNotifierPerformChange(changeType, changeFn) {
- if (!IS_SPEC_OBJECT(this))
+ if (!IS_RECEIVER(this))
throw MakeTypeError(kCalledOnNonObject, "performChange");
var objectInfo = ObjectInfoGetFromNotifier(this);
@@ -608,20 +600,20 @@ function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) {
ObjectInfoRemovePerformingType(objectInfo, changeType);
}
- if (IS_SPEC_OBJECT(changeRecord))
+ if (IS_RECEIVER(changeRecord))
ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, changeType);
}
function ObjectGetNotifier(object) {
- if (!IS_SPEC_OBJECT(object))
+ if (!IS_RECEIVER(object))
throw MakeTypeError(kObserveNonObject, "getNotifier", "getNotifier");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "getNotifier");
if (%IsAccessCheckNeeded(object))
throw MakeTypeError(kObserveAccessChecked, "getNotifier");
- if (ObjectIsFrozen(object)) return null;
+ if (%object_is_frozen(object)) return null;
if (!%ObjectWasCreatedInCurrentOrigin(object)) return null;
@@ -684,13 +676,14 @@ utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
"performChange", ObjectNotifierPerformChange
]);
-$observeObjectMethods = [
+var ObserveObjectMethods = [
"deliverChangeRecords", ObjectDeliverChangeRecords,
"getNotifier", ObjectGetNotifier,
"observe", ObjectObserve,
"unobserve", ObjectUnobserve
];
-$observeArrayMethods = [
+
+var ObserveArrayMethods = [
"observe", ArrayObserve,
"unobserve", ArrayUnobserve
];
@@ -700,12 +693,8 @@ $observeArrayMethods = [
var removePrototypeFn = function(f, i) {
if (i % 2 === 1) %FunctionRemovePrototype(f);
};
-$observeObjectMethods.forEach(removePrototypeFn);
-$observeArrayMethods.forEach(removePrototypeFn);
-
-$observeEnqueueSpliceRecord = EnqueueSpliceRecord;
-$observeBeginPerformSplice = BeginPerformSplice;
-$observeEndPerformSplice = EndPerformSplice;
+ObserveObjectMethods.forEach(removePrototypeFn);
+ObserveArrayMethods.forEach(removePrototypeFn);
%InstallToContext([
"native_object_get_notifier", NativeObjectGetNotifier,
@@ -717,4 +706,12 @@ $observeEndPerformSplice = EndPerformSplice;
"observers_notify_change", NotifyChange,
]);
+utils.Export(function(to) {
+ to.ObserveArrayMethods = ObserveArrayMethods;
+ to.ObserveBeginPerformSplice = BeginPerformSplice;
+ to.ObserveEndPerformSplice = EndPerformSplice;
+ to.ObserveEnqueueSpliceRecord = EnqueueSpliceRecord;
+ to.ObserveObjectMethods = ObserveObjectMethods;
+});
+
})
diff --git a/chromium/v8/src/prologue.js b/chromium/v8/src/js/prologue.js
index a54de365635..2779393bd19 100644
--- a/chromium/v8/src/prologue.js
+++ b/chromium/v8/src/js/prologue.js
@@ -14,6 +14,14 @@
var imports = UNDEFINED;
var imports_from_experimental = UNDEFINED;
var exports_container = %ExportFromRuntime({});
+var typed_array_setup = UNDEFINED;
+
+// Register context value to be initialized with a typed array in
+// Genesis::InitializeBuiltinTypedArrays.
+function SetupTypedArray(f) {
+ f.next = typed_array_setup;
+ typed_array_setup = f;
+}
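
SetupTypedArray threads registration callbacks into an intrusive singly linked list through a .next property on each function; InitializeBuiltinTypedArrays, added further down, walks the chain in LIFO order. The pattern in miniature:

    var setup_list = undefined;
    function register(f) { f.next = setup_list; setup_list = f; }

    register(function a(state) { console.log("a", state); });
    register(function b(state) { console.log("b", state); });

    // Walk as InitializeBuiltinTypedArrays does: most recently registered first.
    for (var f = setup_list; f !== undefined; f = f.next) f("init");
    // logs "b init" then "a init"
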
// Export to other scripts.
// In normal natives, this exports functions to other normal natives.
@@ -94,21 +102,20 @@ function InstallFunctions(object, attributes, functions) {
// Helper function to install a getter-only accessor property.
-function InstallGetter(object, name, getter, attributes) {
+function InstallGetter(object, name, getter, attributes, prefix) {
%CheckIsBootstrapping();
- if (typeof attributes == "undefined") {
- attributes = DONT_ENUM;
- }
- SetFunctionName(getter, name, "get");
+ if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
+ SetFunctionName(getter, name, IS_UNDEFINED(prefix) ? "get" : prefix);
%FunctionRemovePrototype(getter);
- %DefineAccessorPropertyUnchecked(object, name, getter, null, attributes);
+ %DefineGetterPropertyUnchecked(object, name, getter, attributes);
%SetNativeFlag(getter);
}
// Helper function to install a getter/setter accessor property.
-function InstallGetterSetter(object, name, getter, setter) {
+function InstallGetterSetter(object, name, getter, setter, attributes) {
%CheckIsBootstrapping();
+ if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
SetFunctionName(getter, name, "get");
SetFunctionName(setter, name, "set");
%FunctionRemovePrototype(getter);
@@ -163,41 +170,45 @@ function PostNatives(utils) {
// Whitelist of exports from normal natives to experimental natives and debug.
var expose_list = [
"ArrayToString",
- "FunctionSourceString",
+ "ErrorToString",
"GetIterator",
"GetMethod",
- "InnerArrayEvery",
- "InnerArrayFilter",
- "InnerArrayForEach",
- "InnerArrayIndexOf",
- "InnerArrayJoin",
- "InnerArrayLastIndexOf",
- "InnerArrayMap",
- "InnerArrayReduce",
- "InnerArrayReduceRight",
- "InnerArrayReverse",
- "InnerArraySome",
- "InnerArraySort",
- "InnerArrayToLocaleString",
"IsNaN",
+ "MakeError",
+ "MakeTypeError",
+ "MapEntries",
+ "MapIterator",
+ "MapIteratorNext",
"MathMax",
"MathMin",
- "ObjectIsFrozen",
+ "MaxSimple",
+ "MinSimple",
"ObjectDefineProperty",
- "OwnPropertyKeys",
+ "ObserveArrayMethods",
+ "ObserveObjectMethods",
+ "PromiseChain",
+ "PromiseDeferred",
+ "PromiseResolved",
+ "SameValueZero",
+ "SetIterator",
+ "SetIteratorNext",
+ "SetValues",
"SymbolToString",
- "ToNameArray",
- "ToBoolean",
- "ToNumber",
- "ToString",
+ "ToPositiveInteger",
// From runtime:
"is_concat_spreadable_symbol",
"iterator_symbol",
"promise_status_symbol",
"promise_value_symbol",
+ "object_freeze",
+ "object_is_frozen",
+ "object_is_sealed",
"reflect_apply",
"reflect_construct",
+ "regexp_flags_symbol",
"to_string_tag_symbol",
+ "object_to_string",
+ "species_symbol",
];
var filtered_exports = {};
@@ -225,12 +236,13 @@ function PostExperimentals(utils) {
imports_from_experimental(exports_container);
}
- exports_container = UNDEFINED;
+ utils.CreateDoubleResultArray();
+ utils.CreateDoubleResultArray = UNDEFINED;
- utils.PostExperimentals = UNDEFINED;
- utils.PostDebug = UNDEFINED;
- utils.Import = UNDEFINED;
utils.Export = UNDEFINED;
+ utils.PostDebug = UNDEFINED;
+ utils.PostExperimentals = UNDEFINED;
+ typed_array_setup = UNDEFINED;
}
@@ -239,17 +251,32 @@ function PostDebug(utils) {
imports(exports_container);
}
+ utils.CreateDoubleResultArray();
+ utils.CreateDoubleResultArray = UNDEFINED;
+
exports_container = UNDEFINED;
+ utils.Export = UNDEFINED;
+ utils.Import = UNDEFINED;
+ utils.ImportNow = UNDEFINED;
utils.PostDebug = UNDEFINED;
utils.PostExperimentals = UNDEFINED;
- utils.Import = UNDEFINED;
- utils.Export = UNDEFINED;
+ typed_array_setup = UNDEFINED;
+}
+
+
+function InitializeBuiltinTypedArrays(utils, rng_state, rempio2result) {
+ var setup_list = typed_array_setup;
+
+ for ( ; !IS_UNDEFINED(setup_list); setup_list = setup_list.next) {
+ setup_list(rng_state, rempio2result);
+ }
}
+
// -----------------------------------------------------------------------
-%OptimizeObjectForAddingMultipleProperties(utils, 13);
+%OptimizeObjectForAddingMultipleProperties(utils, 14);
utils.Import = Import;
utils.ImportNow = ImportNow;
diff --git a/chromium/v8/src/js/promise-extra.js b/chromium/v8/src/js/promise-extra.js
new file mode 100644
index 00000000000..f6f79592bcd
--- /dev/null
+++ b/chromium/v8/src/js/promise-extra.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalPromise = global.Promise;
+
+var PromiseChain = utils.ImportNow("PromiseChain");
+var PromiseDeferred = utils.ImportNow("PromiseDeferred");
+var PromiseResolved = utils.ImportNow("PromiseResolved");
+
+utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
+ "chain", PromiseChain,
+]);
+
+utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
+ "defer", PromiseDeferred,
+ "accept", PromiseResolved,
+]);
+
+})
diff --git a/chromium/v8/src/promise.js b/chromium/v8/src/js/promise.js
index b509e76e4a9..8cf6a36cef6 100644
--- a/chromium/v8/src/promise.js
+++ b/chromium/v8/src/js/promise.js
@@ -12,6 +12,9 @@
// Imports
var InternalArray = utils.InternalArray;
+var MakeTypeError;
+var promiseCombinedDeferredSymbol =
+ utils.ImportNow("promise_combined_deferred_symbol");
var promiseHasHandlerSymbol =
utils.ImportNow("promise_has_handler_symbol");
var promiseOnRejectSymbol = utils.ImportNow("promise_on_reject_symbol");
@@ -20,28 +23,63 @@ var promiseOnResolveSymbol =
var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var SpeciesConstructor;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+ SpeciesConstructor = from.SpeciesConstructor;
+});
+
// -------------------------------------------------------------------
// Status values: 0 = pending, +1 = resolved, -1 = rejected
var lastMicrotaskId = 0;
+function CreateResolvingFunctions(promise) {
+ var alreadyResolved = false;
+
+ var resolve = value => {
+ if (alreadyResolved === true) return;
+ alreadyResolved = true;
+ PromiseResolve(promise, value);
+ };
+
+ var reject = reason => {
+ if (alreadyResolved === true) return;
+ alreadyResolved = true;
+ PromiseReject(promise, reason);
+ };
+
+ return {
+ __proto__: null,
+ resolve: resolve,
+ reject: reject
+ };
+}
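
CreateResolvingFunctions is ES6 25.4.1.3: the two closures share one alreadyResolved flag, so whichever of resolve/reject runs first wins and every later call is a silent no-op. The same behavior, observed through the standard constructor:

    var p = new Promise(function(resolve, reject) {
      resolve(1);
      reject(new Error("ignored"));  // no effect: already resolved
      resolve(2);                    // no effect either
    });
    p.then(function(v) { console.log(v); });  // logs 1
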
+
+
var GlobalPromise = function Promise(resolver) {
- if (resolver === promiseRawSymbol) return;
- if (!%_IsConstructCall()) throw MakeTypeError(kNotAPromise, this);
+ if (resolver === promiseRawSymbol) {
+ return %NewObject(GlobalPromise, new.target);
+ }
+ if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
if (!IS_CALLABLE(resolver))
throw MakeTypeError(kResolverNotAFunction, resolver);
- var promise = PromiseInit(this);
+
+ var promise = PromiseInit(%NewObject(GlobalPromise, new.target));
+ var callbacks = CreateResolvingFunctions(promise);
+
try {
%DebugPushPromise(promise, Promise);
- resolver(function(x) { PromiseResolve(promise, x) },
- function(r) { PromiseReject(promise, r) });
+ resolver(callbacks.resolve, callbacks.reject);
} catch (e) {
- PromiseReject(promise, e);
+ %_Call(callbacks.reject, UNDEFINED, e);
} finally {
%DebugPopPromise();
}
+
+ return promise;
}
// Core functionality.
@@ -77,38 +115,11 @@ function PromiseDone(promise, status, value, promiseQueue) {
}
}
-function PromiseCoerce(constructor, x) {
- if (!IsPromise(x) && IS_SPEC_OBJECT(x)) {
- var then;
- try {
- then = x.then;
- } catch(r) {
- return %_CallFunction(constructor, r, PromiseRejected);
- }
- if (IS_CALLABLE(then)) {
- var deferred = %_CallFunction(constructor, PromiseDeferred);
- try {
- %_Call(then, x, deferred.resolve, deferred.reject);
- } catch(r) {
- deferred.reject(r);
- }
- return deferred.promise;
- }
- }
- return x;
-}
-
function PromiseHandle(value, handler, deferred) {
try {
%DebugPushPromise(deferred.promise, PromiseHandle);
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(handler);
var result = handler(value);
- if (result === deferred.promise)
- throw MakeTypeError(kPromiseCyclic, result);
- else if (IsPromise(result))
- %_CallFunction(result, deferred.resolve, deferred.reject, PromiseChain);
- else
- deferred.resolve(result);
+ deferred.resolve(result);
} catch (exception) {
try { deferred.reject(exception); } catch (e) { }
} finally {
@@ -147,7 +158,7 @@ function PromiseNopResolver() {}
// For bootstrapper.
function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
+ return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
}
function PromiseCreate() {
@@ -155,7 +166,42 @@ function PromiseCreate() {
}
function PromiseResolve(promise, x) {
- PromiseDone(promise, +1, x, promiseOnResolveSymbol)
+ if (x === promise) {
+ return PromiseReject(promise, MakeTypeError(kPromiseCyclic, x));
+ }
+ if (IS_RECEIVER(x)) {
+ // 25.4.1.3.2 steps 8-12
+ try {
+ var then = x.then;
+ } catch (e) {
+ return PromiseReject(promise, e);
+ }
+ if (IS_CALLABLE(then)) {
+ // PromiseResolveThenableJob
+ var id, name, instrumenting = DEBUG_IS_ACTIVE;
+ %EnqueueMicrotask(function() {
+ if (instrumenting) {
+ %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
+ }
+ var callbacks = CreateResolvingFunctions(promise);
+ try {
+ %_Call(then, x, callbacks.resolve, callbacks.reject);
+ } catch (e) {
+ %_Call(callbacks.reject, UNDEFINED, e);
+ }
+ if (instrumenting) {
+ %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
+ }
+ });
+ if (instrumenting) {
+ id = ++lastMicrotaskId;
+        name = "PromiseResolveThenableJob";
+ %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+ }
+ return;
+ }
+ }
+ PromiseDone(promise, +1, x, promiseOnResolveSymbol);
}
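
PromiseResolve now implements the full spec resolution algorithm: resolving a promise with itself rejects with kPromiseCyclic, and a thenable's then is fetched once and invoked from a microtask (PromiseResolveThenableJob) rather than synchronously. Both are observable in standard JS:

    // Thenable assimilation is asynchronous:
    var thenable = { then: function(resolve) { resolve("from thenable"); } };
    Promise.resolve(thenable).then(function(v) { console.log(v); });
    console.log("sync first");
    // Output: "sync first", then "from thenable".

    // Self-resolution rejects with a TypeError:
    var r;
    var p = new Promise(function(resolve) { r = resolve; });
    r(p);
    p.catch(function(e) { console.log(e instanceof TypeError); });  // true
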
function PromiseReject(promise, r) {
@@ -173,57 +219,70 @@ function PromiseReject(promise, r) {
// Convenience.
-function PromiseDeferred() {
- if (this === GlobalPromise) {
+function NewPromiseCapability(C) {
+ if (C === GlobalPromise) {
// Optimized case, avoid extra closure.
var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+ var callbacks = CreateResolvingFunctions(promise);
return {
promise: promise,
- resolve: function(x) { PromiseResolve(promise, x) },
- reject: function(r) { PromiseReject(promise, r) }
+ resolve: callbacks.resolve,
+ reject: callbacks.reject
};
- } else {
- var result = {promise: UNDEFINED, reject: UNDEFINED, resolve: UNDEFINED};
- result.promise = new this(function(resolve, reject) {
- result.resolve = resolve;
- result.reject = reject;
- });
- return result;
}
+
+ var result = {promise: UNDEFINED, resolve: UNDEFINED, reject: UNDEFINED };
+ result.promise = new C((resolve, reject) => {
+ if (!IS_UNDEFINED(result.resolve) || !IS_UNDEFINED(result.reject))
+ throw MakeTypeError(kPromiseExecutorAlreadyInvoked);
+ result.resolve = resolve;
+ result.reject = reject;
+ });
+
+ return result;
+}
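
NewPromiseCapability is ES6 25.4.1.5: a closure-free fast path for GlobalPromise itself, and a generic path that runs new C(executor) and captures the executor's arguments, throwing kPromiseExecutorAlreadyInvoked on re-entry. A sketch of the generic path against a toy subclass:

    class MyPromise extends Promise {}

    function newCapability(C) {
      var cap = { promise: undefined, resolve: undefined, reject: undefined };
      cap.promise = new C(function(resolve, reject) {
        if (cap.resolve !== undefined || cap.reject !== undefined)
          throw new TypeError("executor already invoked");
        cap.resolve = resolve;
        cap.reject = reject;
      });
      return cap;
    }

    var cap = newCapability(MyPromise);
    console.log(cap.promise instanceof MyPromise);      // true
    cap.promise.then(function(v) { console.log(v); });  // logs "done"
    cap.resolve("done");
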
+
+function PromiseDeferred() {
+ %IncrementUseCounter(kPromiseDefer);
+ return NewPromiseCapability(this);
}
function PromiseResolved(x) {
- if (this === GlobalPromise) {
- // Optimized case, avoid extra closure.
- return PromiseCreateAndSet(+1, x);
- } else {
- return new this(function(resolve, reject) { resolve(x) });
- }
+ %IncrementUseCounter(kPromiseAccept);
+ return %_Call(PromiseCast, this, x);
}
function PromiseRejected(r) {
- var promise;
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, PromiseRejected);
+ }
if (this === GlobalPromise) {
// Optimized case, avoid extra closure.
- promise = PromiseCreateAndSet(-1, r);
+ var promise = PromiseCreateAndSet(-1, r);
// The debug event for this would always be an uncaught promise reject,
// which is usually simply noise. Do not trigger that debug event.
%PromiseRejectEvent(promise, r, false);
+ return promise;
} else {
- promise = new this(function(resolve, reject) { reject(r) });
+ var promiseCapability = NewPromiseCapability(this);
+ %_Call(promiseCapability.reject, UNDEFINED, r);
+ return promiseCapability.promise;
}
- return promise;
}
-// Simple chaining.
+// Multi-unwrapped chaining with thenable coercion.
-function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
- onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
- onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
- var deferred = %_CallFunction(this.constructor, PromiseDeferred);
- switch (GET_PRIVATE(this, promiseStatusSymbol)) {
- case UNDEFINED:
- throw MakeTypeError(kNotAPromise, this);
+function PromiseThen(onResolve, onReject) {
+ var status = GET_PRIVATE(this, promiseStatusSymbol);
+ if (IS_UNDEFINED(status)) {
+ throw MakeTypeError(kNotAPromise, this);
+ }
+
+ var constructor = SpeciesConstructor(this, GlobalPromise);
+ onResolve = IS_CALLABLE(onResolve) ? onResolve : PromiseIdResolveHandler;
+ onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
+ var deferred = NewPromiseCapability(constructor);
+ switch (status) {
case 0: // Pending
GET_PRIVATE(this, promiseOnResolveSymbol).push(onResolve, deferred);
GET_PRIVATE(this, promiseOnRejectSymbol).push(onReject, deferred);
@@ -252,81 +311,88 @@ function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
return deferred.promise;
}
-function PromiseCatch(onReject) {
- return this.then(UNDEFINED, onReject);
+// Chain is left around for now as an alias for then
+function PromiseChain(onResolve, onReject) {
+ %IncrementUseCounter(kPromiseChain);
+ return %_Call(PromiseThen, this, onResolve, onReject);
}
-// Multi-unwrapped chaining with thenable coercion.
-
-function PromiseThen(onResolve, onReject) {
- onResolve = IS_CALLABLE(onResolve) ? onResolve : PromiseIdResolveHandler;
- onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
- var that = this;
- var constructor = this.constructor;
- return %_CallFunction(
- this,
- function(x) {
- x = PromiseCoerce(constructor, x);
- if (x === that) {
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(onReject);
- return onReject(MakeTypeError(kPromiseCyclic, x));
- } else if (IsPromise(x)) {
- return x.then(onResolve, onReject);
- } else {
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(onResolve);
- return onResolve(x);
- }
- },
- onReject,
- PromiseChain
- );
+function PromiseCatch(onReject) {
+ return this.then(UNDEFINED, onReject);
}
// Combinators.
function PromiseCast(x) {
- // TODO(rossberg): cannot do better until we support @@create.
- return IsPromise(x) ? x : new this(function(resolve) { resolve(x) });
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, PromiseCast);
+ }
+ if (IsPromise(x) && x.constructor === this) return x;
+
+ var promiseCapability = NewPromiseCapability(this);
+ var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
+ return promiseCapability.promise;
}
function PromiseAll(iterable) {
- var deferred = %_CallFunction(this, PromiseDeferred);
- var resolutions = [];
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, "Promise.all");
+ }
+
+ var deferred = NewPromiseCapability(this);
+ var resolutions = new InternalArray();
+ var count;
+
+ function CreateResolveElementFunction(index, values, promiseCapability) {
+ var alreadyCalled = false;
+ return (x) => {
+ if (alreadyCalled === true) return;
+ alreadyCalled = true;
+ values[index] = x;
+ if (--count === 0) {
+ var valuesArray = [];
+ %MoveArrayContents(values, valuesArray);
+ %_Call(promiseCapability.resolve, UNDEFINED, valuesArray);
+ }
+ };
+ }
+
try {
- var count = 0;
var i = 0;
+ count = 1;
for (var value of iterable) {
- this.resolve(value).then(
- // Nested scope to get closure over current i.
- // TODO(arv): Use an inner let binding once available.
- (function(i) {
- return function(x) {
- resolutions[i] = x;
- if (--count === 0) deferred.resolve(resolutions);
- }
- })(i),
- function(r) { deferred.reject(r); });
- ++i;
+ var nextPromise = this.resolve(value);
++count;
+ nextPromise.then(
+ CreateResolveElementFunction(i, resolutions, deferred),
+ deferred.reject);
+ SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
+ ++i;
}
- if (count === 0) {
- deferred.resolve(resolutions);
+ // 6.d
+ if (--count === 0) {
+ var valuesArray = [];
+ %MoveArrayContents(resolutions, valuesArray);
+ %_Call(deferred.resolve, UNDEFINED, valuesArray);
}
} catch (e) {
- deferred.reject(e)
+ %_Call(deferred.reject, UNDEFINED, e);
}
return deferred.promise;
}
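
The bookkeeping above is subtle: count starts at 1 as a token held for the iteration itself, each element takes another token before its then() is installed, and the final --count === 0 (spec step 6.d) releases the iteration token, which also covers the empty iterable; the alreadyCalled flag in CreateResolveElementFunction keeps a misbehaving thenable from filling a slot twice. The same scheme over the public API:

    function all(iterable) {
      return new Promise(function(resolve, reject) {
        var values = [];
        var count = 1;  // token held while iterating
        var i = 0;
        function element(index) {
          var called = false;
          return function(x) {
            if (called) return;  // alreadyCalled guard
            called = true;
            values[index] = x;
            if (--count === 0) resolve(values);
          };
        }
        for (var value of iterable) {
          ++count;
          Promise.resolve(value).then(element(i++), reject);
        }
        if (--count === 0) resolve(values);  // step 6.d: also handles []
      });
    }
    all([1, Promise.resolve(2)]).then(function(v) { console.log(v); });  // [1, 2]
    all([]).then(function(v) { console.log(v.length); });                // 0
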
function PromiseRace(iterable) {
- var deferred = %_CallFunction(this, PromiseDeferred);
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, PromiseRace);
+ }
+
+ var deferred = NewPromiseCapability(this);
try {
for (var value of iterable) {
- this.resolve(value).then(
- function(x) { deferred.resolve(x) },
- function(r) { deferred.reject(r) });
+ this.resolve(value).then(deferred.resolve, deferred.reject);
+ SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
}
} catch (e) {
deferred.reject(e)
@@ -341,8 +407,15 @@ function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
var queue = GET_PRIVATE(promise, promiseOnRejectSymbol);
if (IS_UNDEFINED(queue)) return false;
for (var i = 0; i < queue.length; i += 2) {
- if (queue[i] != PromiseIdRejectHandler) return true;
- if (PromiseHasUserDefinedRejectHandlerRecursive(queue[i + 1].promise)) {
+ var handler = queue[i];
+ if (handler !== PromiseIdRejectHandler) {
+ var deferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
+ if (IS_UNDEFINED(deferred)) return true;
+ if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
+ return true;
+ }
+ } else if (PromiseHasUserDefinedRejectHandlerRecursive(
+ queue[i + 1].promise)) {
return true;
}
}
@@ -364,8 +437,6 @@ function PromiseHasUserDefinedRejectHandler() {
DONT_ENUM | READ_ONLY);
utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
- "defer", PromiseDeferred,
- "accept", PromiseResolved,
"reject", PromiseRejected,
"all", PromiseAll,
"race", PromiseRace,
@@ -373,7 +444,6 @@ utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
]);
utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
- "chain", PromiseChain,
"then", PromiseThen,
"catch", PromiseCatch
]);
@@ -397,4 +467,14 @@ utils.InstallFunctions(extrasUtils, 0, [
"rejectPromise", PromiseReject
]);
+// TODO(v8:4567): Allow experimental natives to remove function prototype
+[PromiseChain, PromiseDeferred, PromiseResolved].forEach(
+ fn => %FunctionRemovePrototype(fn));
+
+utils.Export(function(to) {
+ to.PromiseChain = PromiseChain;
+ to.PromiseDeferred = PromiseDeferred;
+ to.PromiseResolved = PromiseResolved;
+});
+
})
diff --git a/chromium/v8/src/js/proxy.js b/chromium/v8/src/js/proxy.js
new file mode 100644
index 00000000000..842bac02525
--- /dev/null
+++ b/chromium/v8/src/js/proxy.js
@@ -0,0 +1,69 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// ----------------------------------------------------------------------------
+// Imports
+//
+var GlobalProxy = global.Proxy;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+//----------------------------------------------------------------------------
+
+function ProxyCreateRevocable(target, handler) {
+ var p = new GlobalProxy(target, handler);
+ return {proxy: p, revoke: () => %JSProxyRevoke(p)};
+}
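
ProxyCreateRevocable backs Proxy.revocable: revoke() invalidates the proxy via %JSProxyRevoke, after which every trap throws. Usage:

    var handle = Proxy.revocable({answer: 42}, {});
    console.log(handle.proxy.answer);  // 42
    handle.revoke();
    try {
      handle.proxy.answer;  // any operation on a revoked proxy throws
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
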
+
+// -------------------------------------------------------------------
+// Proxy Builtins
+
+// Implements part of ES6 9.5.11 Proxy.[[Enumerate]]:
+// Call the trap, which should return an iterator, exhaust the iterator,
+// and return an array containing the values.
+function ProxyEnumerate(trap, handler, target) {
+ // 7. Let trapResult be ? Call(trap, handler, «target»).
+ var trap_result = %_Call(trap, handler, target);
+ // 8. If Type(trapResult) is not Object, throw a TypeError exception.
+ if (!IS_RECEIVER(trap_result)) {
+ throw MakeTypeError(kProxyEnumerateNonObject);
+ }
+ // 9. Return trapResult.
+ var result = [];
+ for (var it = trap_result.next(); !it.done; it = trap_result.next()) {
+ var key = it.value;
+ // Not yet spec'ed as of 2015-11-25, but will be spec'ed soon:
+ // If the iterator returns a non-string value, throw a TypeError.
+ if (!IS_STRING(key)) {
+ throw MakeTypeError(kProxyEnumerateNonString);
+ }
+ result.push(key);
+ }
+ return result;
+}
+
+//-------------------------------------------------------------------
+
+// Set up non-enumerable properties of the Proxy object.
+utils.InstallFunctions(GlobalProxy, DONT_ENUM, [
+ "revocable", ProxyCreateRevocable
+]);
+
+// -------------------------------------------------------------------
+// Exports
+
+%InstallToContext([
+ "proxy_enumerate", ProxyEnumerate,
+]);
+
+})
diff --git a/chromium/v8/src/js/regexp.js b/chromium/v8/src/js/regexp.js
new file mode 100644
index 00000000000..a163952451f
--- /dev/null
+++ b/chromium/v8/src/js/regexp.js
@@ -0,0 +1,581 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var FLAG_harmony_tolength;
+var GlobalObject = global.Object;
+var GlobalRegExp = global.RegExp;
+var GlobalRegExpPrototype;
+var InternalArray = utils.InternalArray;
+var InternalPackedArray = utils.InternalPackedArray;
+var MakeTypeError;
+var matchSymbol = utils.ImportNow("match_symbol");
+var searchSymbol = utils.ImportNow("search_symbol");
+var splitSymbol = utils.ImportNow("split_symbol");
+
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tolength = from.FLAG_harmony_tolength;
+});
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+// Property of the builtins object for recording the result of the last
+// regexp match. The property RegExpLastMatchInfo includes the matchIndices
+// array of the last successful regexp match (an array of start/end index
+// pairs for the match and all the captured substrings); the invariant is
+// that there are at least two capture indices. The array also contains
+// the subject string for the last successful match.
+var RegExpLastMatchInfo = new InternalPackedArray(
+ 2, // REGEXP_NUMBER_OF_CAPTURES
+ "", // Last subject.
+ UNDEFINED, // Last input - settable with RegExpSetInput.
+ 0, // REGEXP_FIRST_CAPTURE + 0
+ 0 // REGEXP_FIRST_CAPTURE + 1
+);
+
+// -------------------------------------------------------------------
+
+function IsRegExp(o) {
+ if (!IS_RECEIVER(o)) return false;
+ var is_regexp = o[matchSymbol];
+ if (!IS_UNDEFINED(is_regexp)) return TO_BOOLEAN(is_regexp);
+ return IS_REGEXP(o);
+}
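
This is ES6 7.2.8 IsRegExp: a defined @@match property overrides the internal-slot check in both directions, which is what lets RegExpConstructor below accept regexp-like objects as patterns. Approximated with public API only (the last line stands in for the IS_REGEXP slot check):

    function isRegExp(o) {
      if (Object(o) !== o) return false;
      var m = o[Symbol.match];
      if (m !== undefined) return Boolean(m);
      return Object.prototype.toString.call(o) === "[object RegExp]";
    }
    console.log(isRegExp(/x/));                       // true
    console.log(isRegExp({ [Symbol.match]: true }));  // true: duck-typed
    var re = /x/;
    re[Symbol.match] = false;
    console.log(isRegExp(re));                        // false: opted out
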
+
+
+// ES6 section 21.2.3.2.2
+function RegExpInitialize(object, pattern, flags) {
+ pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
+ flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
+ %RegExpInitializeAndCompile(object, pattern, flags);
+ return object;
+}
+
+
+function PatternFlags(pattern) {
+ return (REGEXP_GLOBAL(pattern) ? 'g' : '') +
+ (REGEXP_IGNORE_CASE(pattern) ? 'i' : '') +
+ (REGEXP_MULTILINE(pattern) ? 'm' : '') +
+ (REGEXP_UNICODE(pattern) ? 'u' : '') +
+ (REGEXP_STICKY(pattern) ? 'y' : '');
+}
+
+
+function RegExpConstructor(pattern, flags) {
+ var newtarget = new.target;
+ var pattern_is_regexp = IsRegExp(pattern);
+
+ if (IS_UNDEFINED(newtarget)) {
+ newtarget = GlobalRegExp;
+
+ // ES6 section 21.2.3.1 step 3.b
+ if (pattern_is_regexp && IS_UNDEFINED(flags) &&
+ pattern.constructor === newtarget) {
+ return pattern;
+ }
+ }
+
+ if (IS_REGEXP(pattern)) {
+ if (IS_UNDEFINED(flags)) flags = PatternFlags(pattern);
+ pattern = REGEXP_SOURCE(pattern);
+
+ } else if (pattern_is_regexp) {
+ var input_pattern = pattern;
+ pattern = pattern.source;
+ if (IS_UNDEFINED(flags)) flags = input_pattern.flags;
+ }
+
+ var object = %NewObject(GlobalRegExp, newtarget);
+ return RegExpInitialize(object, pattern, flags);
+}
+
+
+function RegExpCompileJS(pattern, flags) {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.compile", this);
+ }
+
+ if (IS_REGEXP(pattern)) {
+ if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
+
+ flags = PatternFlags(pattern);
+ pattern = REGEXP_SOURCE(pattern);
+ }
+
+ return RegExpInitialize(this, pattern, flags);
+}
+
+
+function DoRegExpExec(regexp, string, index) {
+ return %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
+}
+
+
+// This is kind of performance sensitive, so we want to avoid unnecessary
+// type checks on inputs. But we also don't want to inline it several times
+// manually, so we use a macro :-)
+macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING)
+ var numResults = NUMBER_OF_CAPTURES(MATCHINFO) >> 1;
+ var start = MATCHINFO[CAPTURE0];
+ var end = MATCHINFO[CAPTURE1];
+ // Calculate the substring of the first match before creating the result array
+ // to avoid an unnecessary write barrier storing the first result.
+ var first = %_SubString(STRING, start, end);
+ var result = %_RegExpConstructResult(numResults, start, STRING);
+ result[0] = first;
+ if (numResults == 1) return result;
+ var j = REGEXP_FIRST_CAPTURE + 2;
+ for (var i = 1; i < numResults; i++) {
+ start = MATCHINFO[j++];
+ if (start != -1) {
+ end = MATCHINFO[j];
+ result[i] = %_SubString(STRING, start, end);
+ }
+ j++;
+ }
+ return result;
+endmacro
+
+
+function RegExpExecNoTests(regexp, string, start) {
+  // Must be called with a RegExp, a string and a non-negative integer as arguments.
+ var matchInfo = %_RegExpExec(regexp, string, start, RegExpLastMatchInfo);
+ if (matchInfo !== null) {
+ // ES6 21.2.5.2.2 step 18.
+ if (REGEXP_STICKY(regexp)) regexp.lastIndex = matchInfo[CAPTURE1];
+ RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
+ }
+ regexp.lastIndex = 0;
+ return null;
+}
+
+
+function RegExpExecJS(string) {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'RegExp.prototype.exec', this);
+ }
+
+ string = TO_STRING(string);
+ var lastIndex = this.lastIndex;
+
+ // Conversion is required by the ES2015 specification (RegExpBuiltinExec
+ // algorithm, step 4) even if the value is discarded for non-global RegExps.
+ var i = TO_LENGTH_OR_INTEGER(lastIndex);
+
+ var updateLastIndex = REGEXP_GLOBAL(this) || REGEXP_STICKY(this);
+ if (updateLastIndex) {
+ if (i < 0 || i > string.length) {
+ this.lastIndex = 0;
+ return null;
+ }
+ } else {
+ i = 0;
+ }
+
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
+
+ if (IS_NULL(matchIndices)) {
+ this.lastIndex = 0;
+ return null;
+ }
+
+ // Successful match.
+ if (updateLastIndex) {
+ this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
+ }
+ RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
+}
+
+
+// One-element cache for the simplified test regexp.
+var regexp_key;
+var regexp_val;
+
+// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
+// that test is defined in terms of RegExp.prototype.exec. However, it probably
+// means the original value of RegExp.prototype.exec, which is what everybody
+// else implements.
+function RegExpTest(string) {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'RegExp.prototype.test', this);
+ }
+ string = TO_STRING(string);
+
+ var lastIndex = this.lastIndex;
+
+ // Conversion is required by the ES2015 specification (RegExpBuiltinExec
+ // algorithm, step 4) even if the value is discarded for non-global RegExps.
+ var i = TO_LENGTH_OR_INTEGER(lastIndex);
+
+ if (REGEXP_GLOBAL(this) || REGEXP_STICKY(this)) {
+ if (i < 0 || i > string.length) {
+ this.lastIndex = 0;
+ return false;
+ }
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
+ if (IS_NULL(matchIndices)) {
+ this.lastIndex = 0;
+ return false;
+ }
+ this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
+ return true;
+ } else {
+ // Non-global, non-sticky regexp.
+    // Remove an irrelevant preceding '.*' in a test regexp. The expression
+    // checks whether this.source starts with '.*' and that the third char is
+    // not a '?'. But see https://code.google.com/p/v8/issues/detail?id=3560
+ var regexp = this;
+ var source = REGEXP_SOURCE(regexp);
+    if (source.length >= 3 &&
+        %_StringCharCodeAt(source, 0) == 46 &&  // '.'
+        %_StringCharCodeAt(source, 1) == 42 &&  // '*'
+        %_StringCharCodeAt(source, 2) != 63) {  // '?'
+ regexp = TrimRegExp(regexp);
+ }
+ // matchIndices is either null or the RegExpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(regexp, string, 0, RegExpLastMatchInfo);
+ if (IS_NULL(matchIndices)) {
+ this.lastIndex = 0;
+ return false;
+ }
+ return true;
+ }
+}
+
+function TrimRegExp(regexp) {
+ if (!%_ObjectEquals(regexp_key, regexp)) {
+ regexp_key = regexp;
+ regexp_val =
+ new GlobalRegExp(
+ %_SubString(REGEXP_SOURCE(regexp), 2, REGEXP_SOURCE(regexp).length),
+ (REGEXP_IGNORE_CASE(regexp) ? REGEXP_MULTILINE(regexp) ? "im" : "i"
+ : REGEXP_MULTILINE(regexp) ? "m" : ""));
+ }
+ return regexp_val;
+}
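
TrimRegExp memoizes its rewrite in a one-element cache keyed on identity (regexp_key/regexp_val), so repeated test() calls against the same non-global regexp skip the recompilation. The cache shape in isolation (expensive is a stand-in name):

    var cache_key;
    var cache_val;
    function expensive(x) { return x * x; }  // stand-in for building a RegExp
    function cached(x) {
      if (cache_key !== x) {  // identity check, like %_ObjectEquals above
        cache_key = x;
        cache_val = expensive(x);
      }
      return cache_val;
    }
    cached(3);  // computes
    cached(3);  // hit: returns the memoized value
    cached(4);  // recomputes, evicting the entry for 3
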
+
+
+function RegExpToString() {
+ if (!IS_REGEXP(this)) {
+ // RegExp.prototype.toString() returns '/(?:)/' as a compatibility fix;
+ // a UseCounter is incremented to track it.
+ // TODO(littledan): Remove this workaround or standardize it
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeToString);
+ return '/(?:)/';
+ }
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'RegExp.prototype.toString', this);
+ }
+ var result = '/' + REGEXP_SOURCE(this) + '/';
+ if (REGEXP_GLOBAL(this)) result += 'g';
+ if (REGEXP_IGNORE_CASE(this)) result += 'i';
+ if (REGEXP_MULTILINE(this)) result += 'm';
+ if (REGEXP_UNICODE(this)) result += 'u';
+ if (REGEXP_STICKY(this)) result += 'y';
+ return result;
+}
+
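+// Flags are emitted in the fixed order g, i, m, u, y. For example:
+//
+//   /ab+c/ig.toString()        // "/ab+c/gi"
+//   new RegExp("").toString()  // "/(?:)/", the canonical empty source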
+
+// ES6 21.2.5.11.
+function RegExpSplit(string, limit) {
+ // TODO(yangguo): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@split", this);
+ }
+ var separator = this;
+ var subject = TO_STRING(string);
+
+ limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
+ var length = subject.length;
+
+ if (limit === 0) return [];
+
+ if (length === 0) {
+ if (DoRegExpExec(separator, subject, 0, 0) !== null) return [];
+ return [subject];
+ }
+
+ var currentIndex = 0;
+ var startIndex = 0;
+ var startMatch = 0;
+ var result = new InternalArray();
+
+ outer_loop:
+ while (true) {
+ if (startIndex === length) {
+ result[result.length] = %_SubString(subject, currentIndex, length);
+ break;
+ }
+
+ var matchInfo = DoRegExpExec(separator, subject, startIndex);
+ if (matchInfo === null || length === (startMatch = matchInfo[CAPTURE0])) {
+ result[result.length] = %_SubString(subject, currentIndex, length);
+ break;
+ }
+ var endIndex = matchInfo[CAPTURE1];
+
+ // We ignore a zero-length match at the currentIndex.
+ if (startIndex === endIndex && endIndex === currentIndex) {
+ startIndex++;
+ continue;
+ }
+
+ result[result.length] = %_SubString(subject, currentIndex, startMatch);
+
+ if (result.length === limit) break;
+
+ var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
+ for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
+ var start = matchInfo[i++];
+ var end = matchInfo[i++];
+ if (end != -1) {
+ result[result.length] = %_SubString(subject, start, end);
+ } else {
+ result[result.length] = UNDEFINED;
+ }
+ if (result.length === limit) break outer_loop;
+ }
+
+ startIndex = currentIndex = endIndex;
+ }
+
+ var array_result = [];
+ %MoveArrayContents(result, array_result);
+ return array_result;
+}
+
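+// Observable examples of the splitting above (standard @@split behavior):
+//
+//   "a,b,c".split(/,/)      // ["a", "b", "c"]
+//   "a,b,c".split(/,/, 2)   // ["a", "b"], limit caps the result length
+//   "abc".split(/(b)/)      // ["a", "b", "c"], captures are spliced in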
+
+// ES6 21.2.5.6.
+function RegExpMatch(string) {
+ // TODO(yangguo): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@match", this);
+ }
+ var subject = TO_STRING(string);
+
+ if (!REGEXP_GLOBAL(this)) return RegExpExecNoTests(this, subject, 0);
+ this.lastIndex = 0;
+ var result = %StringMatch(subject, this, RegExpLastMatchInfo);
+ return result;
+}
+
+
+// ES6 21.2.5.9.
+function RegExpSearch(string) {
+ // TODO(yangguo): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search", this);
+ }
+ var match = DoRegExpExec(this, TO_STRING(string), 0);
+ if (match) return match[CAPTURE0];
+ return -1;
+}
+
+
+// Getters for the static properties lastMatch, lastParen, leftContext, and
+// rightContext of the RegExp constructor. The properties are computed based
+// on the captures array of the last successful match and the subject string
+// of the last successful match.
+function RegExpGetLastMatch() {
+ var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
+ return %_SubString(regExpSubject,
+ RegExpLastMatchInfo[CAPTURE0],
+ RegExpLastMatchInfo[CAPTURE1]);
+}
+
+
+function RegExpGetLastParen() {
+ var length = NUMBER_OF_CAPTURES(RegExpLastMatchInfo);
+ if (length <= 2) return ''; // There were no captures.
+ // We match the SpiderMonkey behavior: return the substring defined by the
+ // last pair (after the first pair) of elements of the capture array even if
+ // it is empty.
+ var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
+ var start = RegExpLastMatchInfo[CAPTURE(length - 2)];
+ var end = RegExpLastMatchInfo[CAPTURE(length - 1)];
+ if (start != -1 && end != -1) {
+ return %_SubString(regExpSubject, start, end);
+ }
+ return "";
+}
+
+
+function RegExpGetLeftContext() {
+ var start_index;
+ var subject;
+ start_index = RegExpLastMatchInfo[CAPTURE0];
+ subject = LAST_SUBJECT(RegExpLastMatchInfo);
+ return %_SubString(subject, 0, start_index);
+}
+
+
+function RegExpGetRightContext() {
+ var start_index;
+ var subject;
+ start_index = RegExpLastMatchInfo[CAPTURE1];
+ subject = LAST_SUBJECT(RegExpLastMatchInfo);
+ return %_SubString(subject, start_index, subject.length);
+}
+
+
+// The properties $1..$9 are the first nine capturing substrings of the last
+// successful match, or ''. The function RegExpMakeCaptureGetter will be
+// called with indices from 1 to 9.
+function RegExpMakeCaptureGetter(n) {
+ return function foo() {
+ var index = n * 2;
+ if (index >= NUMBER_OF_CAPTURES(RegExpLastMatchInfo)) return '';
+ var matchStart = RegExpLastMatchInfo[CAPTURE(index)];
+ var matchEnd = RegExpLastMatchInfo[CAPTURE(index + 1)];
+ if (matchStart == -1 || matchEnd == -1) return '';
+ return %_SubString(LAST_SUBJECT(RegExpLastMatchInfo), matchStart, matchEnd);
+ };
+}
+
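+// For example, after a successful match (legacy, non-standard properties):
+//
+//   /(\d+)-(\d+)/.exec("13-42");
+//   RegExp.$1         // "13"
+//   RegExp.$2         // "42"
+//   RegExp.$3         // "", unmatched or absent captures read as ''
+//   RegExp.lastMatch  // "13-42"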
+
+// ES6 21.2.5.4.
+function RegExpGetGlobal() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.global");
+ }
+ return !!REGEXP_GLOBAL(this);
+}
+%FunctionSetName(RegExpGetGlobal, "RegExp.prototype.global");
+%SetNativeFlag(RegExpGetGlobal);
+
+
+// ES6 21.2.5.5.
+function RegExpGetIgnoreCase() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
+ }
+ return !!REGEXP_IGNORE_CASE(this);
+}
+%FunctionSetName(RegExpGetIgnoreCase, "RegExp.prototype.ignoreCase");
+%SetNativeFlag(RegExpGetIgnoreCase);
+
+
+// ES6 21.2.5.7.
+function RegExpGetMultiline() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.multiline");
+ }
+ return !!REGEXP_MULTILINE(this);
+}
+%FunctionSetName(RegExpGetMultiline, "RegExp.prototype.multiline");
+%SetNativeFlag(RegExpGetMultiline);
+
+
+// ES6 21.2.5.10.
+function RegExpGetSource() {
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.source");
+ }
+ return REGEXP_SOURCE(this);
+}
+%FunctionSetName(RegExpGetSource, "RegExp.prototype.source");
+%SetNativeFlag(RegExpGetSource);
+
+// -------------------------------------------------------------------
+
+%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
+GlobalRegExpPrototype = new GlobalObject();
+%FunctionSetPrototype(GlobalRegExp, GlobalRegExpPrototype);
+%AddNamedProperty(
+ GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
+%SetCode(GlobalRegExp, RegExpConstructor);
+
+utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
+ "exec", RegExpExecJS,
+ "test", RegExpTest,
+ "toString", RegExpToString,
+ "compile", RegExpCompileJS,
+ matchSymbol, RegExpMatch,
+ searchSymbol, RegExpSearch,
+ splitSymbol, RegExpSplit,
+]);
+
+utils.InstallGetter(GlobalRegExp.prototype, 'global', RegExpGetGlobal);
+utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
+utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
+utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
+
+// The length of compile is 1 in SpiderMonkey.
+%FunctionSetLength(GlobalRegExp.prototype.compile, 1);
+
+// The properties `input` and `$_` are aliases for each other. Any value
+// assigned to either is coerced to a string.
+// Getter and setter for the input.
+var RegExpGetInput = function() {
+ var regExpInput = LAST_INPUT(RegExpLastMatchInfo);
+ return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
+};
+var RegExpSetInput = function(string) {
+ LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
+};
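+
+// For example (legacy, non-standard behavior):
+//
+//   RegExp.input = 1234;  // the setter coerces the value to a string
+//   RegExp.$_             // "1234", since $_ aliases the same storage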
+
+%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
+utils.InstallGetterSetter(GlobalRegExp, 'input', RegExpGetInput, RegExpSetInput,
+ DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$_', RegExpGetInput, RegExpSetInput,
+ DONT_ENUM | DONT_DELETE);
+
+
+var NoOpSetter = function(ignored) {};
+
+
+// Static properties set by a successful match.
+utils.InstallGetterSetter(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$&', RegExpGetLastMatch, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'lastParen', RegExpGetLastParen,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$+', RegExpGetLastParen, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'leftContext', RegExpGetLeftContext,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, '$`', RegExpGetLeftContext, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, 'rightContext', RegExpGetRightContext,
+ NoOpSetter, DONT_DELETE);
+utils.InstallGetterSetter(GlobalRegExp, "$'", RegExpGetRightContext, NoOpSetter,
+ DONT_ENUM | DONT_DELETE);
+
+for (var i = 1; i < 10; ++i) {
+ utils.InstallGetterSetter(GlobalRegExp, '$' + i, RegExpMakeCaptureGetter(i),
+ NoOpSetter, DONT_DELETE);
+}
+%ToFastProperties(GlobalRegExp);
+
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.RegExpExec = DoRegExpExec;
+ to.RegExpExecNoTests = RegExpExecNoTests;
+ to.RegExpLastMatchInfo = RegExpLastMatchInfo;
+ to.RegExpTest = RegExpTest;
+});
+
+})
diff --git a/chromium/v8/src/js/runtime.js b/chromium/v8/src/js/runtime.js
new file mode 100644
index 00000000000..301d75a391c
--- /dev/null
+++ b/chromium/v8/src/js/runtime.js
@@ -0,0 +1,191 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains runtime support implemented in JavaScript.
+
+// CAUTION: Some of the functions specified in this file are called
+// directly from compiled code. These are the functions with names in
+// ALL CAPS. The compiled code passes the first argument in 'this'.
+
+
+// The following declarations are shared with other native JS files.
+// They are all declared at this one spot to avoid redeclaration errors.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+var FLAG_harmony_species;
+var GlobalArray = global.Array;
+var GlobalBoolean = global.Boolean;
+var GlobalString = global.String;
+var MakeRangeError;
+var MakeTypeError;
+var speciesSymbol;
+
+utils.Import(function(from) {
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
+ speciesSymbol = from.species_symbol;
+});
+
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_species = from.FLAG_harmony_species;
+});
+
+// ----------------------------------------------------------------------------
+
+/* -----------------------------
+ - - - H e l p e r s - - -
+ -----------------------------
+*/
+
+function CONCAT_ITERABLE_TO_ARRAY(iterable) {
+ return %concat_iterable_to_array(this, iterable);
+}
+
+
+/* -------------------------------------
+ - - - C o n v e r s i o n s - - -
+ -------------------------------------
+*/
+
+// ES5, section 9.12
+function SameValue(x, y) {
+ if (typeof x != typeof y) return false;
+ if (IS_NUMBER(x)) {
+ if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
+ // x is +0 and y is -0 or vice versa.
+ if (x === 0 && y === 0 && %_IsMinusZero(x) != %_IsMinusZero(y)) {
+ return false;
+ }
+ }
+ if (IS_SIMD_VALUE(x)) return %SimdSameValue(x, y);
+ return x === y;
+}
+
+
+// ES6, section 7.2.4
+function SameValueZero(x, y) {
+ if (typeof x != typeof y) return false;
+ if (IS_NUMBER(x)) {
+ if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
+ }
+ if (IS_SIMD_VALUE(x)) return %SimdSameValueZero(x, y);
+ return x === y;
+}
+
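+// The two predicates above differ only on signed zero (ignoring SIMD values):
+//
+//   SameValue(NaN, NaN);      // true
+//   SameValue(0, -0);         // false
+//   SameValueZero(0, -0);     // true
+//   SameValueZero(NaN, NaN);  // true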
+
+function ConcatIterableToArray(target, iterable) {
+ var index = target.length;
+ for (var element of iterable) {
+ AddIndexedProperty(target, index++, element);
+ }
+ return target;
+}
+
+
+/* ---------------------------------
+ - - - U t i l i t i e s - - -
+ ---------------------------------
+*/
+
+
+// This function should be called rather than %AddElement in contexts where the
+// argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
+// this is a concern at basically all callsites.
+function AddIndexedProperty(obj, index, value) {
+ if (index === TO_UINT32(index) && index !== kMaxUint32) {
+ %AddElement(obj, index, value);
+ } else {
+ %AddNamedProperty(obj, TO_STRING(index), value, NONE);
+ }
+}
+%SetForceInlineFlag(AddIndexedProperty);
+
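+// For example, with placeholder obj/v (kMaxUint32 is 2**32 - 1, from the
+// shared macros):
+//
+//   AddIndexedProperty(obj, 10, v);          // element path via %AddElement
+//   AddIndexedProperty(obj, 4294967296, v);  // named path, key "4294967296"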
+
+function ToPositiveInteger(x, rangeErrorIndex) {
+ var i = TO_INTEGER_MAP_MINUS_ZERO(x);
+ if (i < 0) throw MakeRangeError(rangeErrorIndex);
+ return i;
+}
+
+
+function MaxSimple(a, b) {
+ return a > b ? a : b;
+}
+
+
+function MinSimple(a, b) {
+ return a > b ? b : a;
+}
+
+
+%SetForceInlineFlag(MaxSimple);
+%SetForceInlineFlag(MinSimple);
+
+
+// ES2015 7.3.20
+// For the fallback with --harmony-species off, there are two possible choices:
+// - "conservative": return defaultConstructor
+// - "not conservative": return object.constructor
+// This fallback path is only needed in the transition to ES2015, and the
+// choice is made simply to preserve the previous behavior so that we don't
+// have a three-step upgrade: old behavior, unspecified intermediate behavior,
+// and ES2015.
+// In some cases, we were "conservative" (e.g., ArrayBuffer, RegExp), and in
+// other cases we were "not conservative" (e.g., TypedArray, Promise).
+function SpeciesConstructor(object, defaultConstructor, conservative) {
+ if (FLAG_harmony_species) {
+ var constructor = object.constructor;
+ if (IS_UNDEFINED(constructor)) {
+ return defaultConstructor;
+ }
+ if (!IS_RECEIVER(constructor)) {
+ throw MakeTypeError(kConstructorNotReceiver);
+ }
+ var species = constructor[speciesSymbol];
+ if (IS_NULL_OR_UNDEFINED(species)) {
+ return defaultConstructor;
+ }
+ if (%IsConstructor(species)) {
+ return species;
+ }
+ throw MakeTypeError(kSpeciesNotConstructor);
+ } else {
+ return conservative ? defaultConstructor : object.constructor;
+ }
+}
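+
+// Sketch of the intended @@species behavior once --harmony-species is on
+// (class syntax used purely for illustration):
+//
+//   class MyArray extends Array {
+//     static get [Symbol.species]() { return Array; }
+//   }
+//   // SpeciesConstructor(new MyArray, Array) returns Array, not MyArray.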
+
+//----------------------------------------------------------------------------
+
+// NOTE: Setting the prototype for Array must take place as early as
+// possible due to code generation for array literals. When
+// generating code for a array literal a boilerplate array is created
+// that is cloned when running the code. It is essential that the
+// boilerplate gets the right prototype.
+%FunctionSetPrototype(GlobalArray, new GlobalArray(0));
+
+// ----------------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.AddIndexedProperty = AddIndexedProperty;
+ to.MaxSimple = MaxSimple;
+ to.MinSimple = MinSimple;
+ to.SameValue = SameValue;
+ to.SameValueZero = SameValueZero;
+ to.ToPositiveInteger = ToPositiveInteger;
+ to.SpeciesConstructor = SpeciesConstructor;
+});
+
+%InstallToContext([
+ "concat_iterable_to_array_builtin", CONCAT_ITERABLE_TO_ARRAY,
+]);
+
+%InstallToContext([
+ "concat_iterable_to_array", ConcatIterableToArray,
+]);
+
+})
diff --git a/chromium/v8/src/harmony-spread.js b/chromium/v8/src/js/spread.js
index b271c7efe54..235c91ab792 100644
--- a/chromium/v8/src/harmony-spread.js
+++ b/chromium/v8/src/js/spread.js
@@ -9,6 +9,11 @@
// -------------------------------------------------------------------
// Imports
var InternalArray = utils.InternalArray;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
// -------------------------------------------------------------------
diff --git a/chromium/v8/src/string-iterator.js b/chromium/v8/src/js/string-iterator.js
index 660dc7c98b1..3c331dd1a2d 100644
--- a/chromium/v8/src/string-iterator.js
+++ b/chromium/v8/src/js/string-iterator.js
@@ -12,13 +12,19 @@
// Imports
var GlobalString = global.String;
+var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeTypeError;
var stringIteratorIteratedStringSymbol =
utils.ImportNow("string_iterator_iterated_string_symbol");
var stringIteratorNextIndexSymbol =
utils.ImportNow("string_iterator_next_index_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
// -------------------------------------------------------------------
function StringIterator() {}
@@ -40,7 +46,7 @@ function StringIteratorNext() {
var value = UNDEFINED;
var done = true;
- if (!IS_SPEC_OBJECT(iterator) ||
+ if (!IS_RECEIVER(iterator) ||
!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'String Iterator.prototype.next');
@@ -80,7 +86,7 @@ function StringPrototypeIterator() {
//-------------------------------------------------------------------
-%FunctionSetPrototype(StringIterator, {__proto__: $iteratorPrototype});
+%FunctionSetPrototype(StringIterator, {__proto__: IteratorPrototype});
%FunctionSetInstanceClassName(StringIterator, 'String Iterator');
utils.InstallFunctions(StringIterator.prototype, DONT_ENUM, [
diff --git a/chromium/v8/src/string.js b/chromium/v8/src/js/string.js
index bd20226757d..b220038b74c 100644
--- a/chromium/v8/src/string.js
+++ b/chromium/v8/src/js/string.js
@@ -15,18 +15,27 @@ var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
+var MakeRangeError;
+var MakeTypeError;
+var MathMax;
+var MathMin;
+var matchSymbol = utils.ImportNow("match_symbol");
var RegExpExec;
var RegExpExecNoTests;
var RegExpLastMatchInfo;
-var ToNumber;
+var searchSymbol = utils.ImportNow("search_symbol");
+var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
RegExpExec = from.RegExpExec;
RegExpExecNoTests = from.RegExpExecNoTests;
RegExpLastMatchInfo = from.RegExpLastMatchInfo;
- ToNumber = from.ToNumber;
});
//-------------------------------------------------------------------
@@ -118,7 +127,7 @@ function StringLastIndexOfJS(pat /* position */) { // length == 1
var patLength = pat.length;
var index = subLength - patLength;
if (%_ArgumentsLength() > 1) {
- var position = ToNumber(%_Arguments(1));
+ var position = TO_NUMBER(%_Arguments(1));
if (!NUMBER_IS_NAN(position)) {
position = TO_INTEGER(position);
if (position < 0) {
@@ -147,23 +156,21 @@ function StringLocaleCompareJS(other) {
}
-// ECMA-262 section 15.5.4.10
-function StringMatchJS(regexp) {
+// ES6 21.1.3.11.
+function StringMatchJS(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
- var subject = TO_STRING(this);
- if (IS_REGEXP(regexp)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even though
- // value is discarded.
- var lastIndex = TO_INTEGER(regexp.lastIndex);
- if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
- var result = %StringMatch(subject, regexp, RegExpLastMatchInfo);
- if (result !== null) $regexpLastMatchInfoOverride = null;
- regexp.lastIndex = 0;
- return result;
+ if (!IS_NULL_OR_UNDEFINED(pattern)) {
+ var matcher = pattern[matchSymbol];
+ if (!IS_UNDEFINED(matcher)) {
+ return %_Call(matcher, pattern, this);
+ }
}
+
+ var subject = TO_STRING(this);
+
// Non-regexp argument.
- regexp = new GlobalRegExp(regexp);
+ var regexp = new GlobalRegExp(pattern);
return RegExpExecNoTests(regexp, subject, 0);
}
@@ -181,11 +188,10 @@ function StringNormalizeJS() {
var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm =
- %_CallFunction(NORMALIZATION_FORMS, form, ArrayIndexOf);
+ var normalizationForm = %_Call(ArrayIndexOf, NORMALIZATION_FORMS, form);
if (normalizationForm === -1) {
throw MakeRangeError(kNormalizationForm,
- %_CallFunction(NORMALIZATION_FORMS, ', ', ArrayJoin));
+ %_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
}
return s;
@@ -224,14 +230,10 @@ function StringReplace(search, replace) {
// ...... string replace (with $-expansion)
if (IS_REGEXP(search)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even if
- // value is discarded.
- var lastIndex = TO_INTEGER(search.lastIndex);
-
if (!IS_CALLABLE(replace)) {
replace = TO_STRING(replace);
- if (!search.global) {
+ if (!REGEXP_GLOBAL(search)) {
// Non-global regexp search, string replace.
var match = RegExpExec(search, subject, 0);
if (match == null) {
@@ -249,27 +251,11 @@ function StringReplace(search, replace) {
// Global regexp search, string replace.
search.lastIndex = 0;
- if ($regexpLastMatchInfoOverride == null) {
- return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, RegExpLastMatchInfo);
- } else {
- // We use this hack to detect whether StringReplaceRegExpWithString
- // found at least one hit. In that case we need to remove any
- // override.
- var saved_subject = RegExpLastMatchInfo[LAST_SUBJECT_INDEX];
- RegExpLastMatchInfo[LAST_SUBJECT_INDEX] = 0;
- var answer = %StringReplaceGlobalRegExpWithString(
- subject, search, replace, RegExpLastMatchInfo);
- if (%_IsSmi(RegExpLastMatchInfo[LAST_SUBJECT_INDEX])) {
- RegExpLastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
- } else {
- $regexpLastMatchInfoOverride = null;
- }
- return answer;
- }
+ return %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, RegExpLastMatchInfo);
}
- if (search.global) {
+ if (REGEXP_GLOBAL(search)) {
// Global regexp search, function replace.
return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
}
@@ -443,22 +429,16 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
// input string and some replacements that were returned from the replace
// function.
var match_start = 0;
- var override = new InternalPackedArray(null, 0, subject);
for (var i = 0; i < len; i++) {
var elem = res[i];
if (%_IsSmi(elem)) {
- // Integers represent slices of the original string. Use these to
- // get the offsets we need for the override array (so things like
- // RegExp.leftContext work during the callback function.
+ // Integers represent slices of the original string.
if (elem > 0) {
match_start = (elem >> 11) + (elem & 0x7ff);
} else {
match_start = res[++i] - elem;
}
} else {
- override[0] = elem;
- override[1] = match_start;
- $regexpLastMatchInfoOverride = override;
var func_result = replace(elem, match_start, subject);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
@@ -472,7 +452,6 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
- $regexpLastMatchInfoOverride = elem;
var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
@@ -480,7 +459,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
}
}
}
- var result = %StringBuilderConcat(res, res.length, subject);
+ var result = %StringBuilderConcat(res, len, subject);
resultArray.length = 0;
reusableReplaceArray = resultArray;
return result;
@@ -524,21 +503,20 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
}
-// ECMA-262 section 15.5.4.12
-function StringSearch(re) {
+// ES6 21.1.3.15.
+function StringSearch(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
- var regexp;
- if (IS_REGEXP(re)) {
- regexp = re;
- } else {
- regexp = new GlobalRegExp(re);
- }
- var match = RegExpExec(regexp, TO_STRING(this), 0);
- if (match) {
- return match[CAPTURE0];
+ if (!IS_NULL_OR_UNDEFINED(pattern)) {
+ var searcher = pattern[searchSymbol];
+ if (!IS_UNDEFINED(searcher)) {
+ return %_Call(searcher, pattern, this);
+ }
}
- return -1;
+
+ var subject = TO_STRING(this);
+ var regexp = new GlobalRegExp(pattern);
+ return %_Call(regexp[searchSymbol], regexp, subject);
}
@@ -584,95 +562,35 @@ function StringSlice(start, end) {
}
-// ECMA-262 section 15.5.4.14
+// ES6 21.1.3.17.
function StringSplitJS(separator, limit) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
- var subject = TO_STRING(this);
- limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
-
- var length = subject.length;
- if (!IS_REGEXP(separator)) {
- var separator_string = TO_STRING(separator);
-
- if (limit === 0) return [];
-
- // ECMA-262 says that if separator is undefined, the result should
- // be an array of size 1 containing the entire string.
- if (IS_UNDEFINED(separator)) return [subject];
-
- var separator_length = separator_string.length;
-
- // If the separator string is empty then return the elements in the subject.
- if (separator_length === 0) return %StringToArray(subject, limit);
-
- var result = %StringSplit(subject, separator_string, limit);
-
- return result;
- }
-
- if (limit === 0) return [];
-
- // Separator is a regular expression.
- return StringSplitOnRegExp(subject, separator, limit, length);
-}
-
-
-function StringSplitOnRegExp(subject, separator, limit, length) {
- if (length === 0) {
- if (RegExpExec(separator, subject, 0, 0) != null) {
- return [];
+ if (!IS_NULL_OR_UNDEFINED(separator)) {
+ var splitter = separator[splitSymbol];
+ if (!IS_UNDEFINED(splitter)) {
+ return %_Call(splitter, separator, this, limit);
}
- return [subject];
}
- var currentIndex = 0;
- var startIndex = 0;
- var startMatch = 0;
- var result = new InternalArray();
-
- outer_loop:
- while (true) {
-
- if (startIndex === length) {
- result[result.length] = %_SubString(subject, currentIndex, length);
- break;
- }
+ var subject = TO_STRING(this);
+ limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
- var matchInfo = RegExpExec(separator, subject, startIndex);
- if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result[result.length] = %_SubString(subject, currentIndex, length);
- break;
- }
- var endIndex = matchInfo[CAPTURE1];
+ var length = subject.length;
+ var separator_string = TO_STRING(separator);
- // We ignore a zero-length match at the currentIndex.
- if (startIndex === endIndex && endIndex === currentIndex) {
- startIndex++;
- continue;
- }
+ if (limit === 0) return [];
- result[result.length] = %_SubString(subject, currentIndex, startMatch);
+ // ECMA-262 says that if separator is undefined, the result should
+ // be an array of size 1 containing the entire string.
+ if (IS_UNDEFINED(separator)) return [subject];
- if (result.length === limit) break;
+ var separator_length = separator_string.length;
- var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
- for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
- var start = matchInfo[i++];
- var end = matchInfo[i++];
- if (end != -1) {
- result[result.length] = %_SubString(subject, start, end);
- } else {
- result[result.length] = UNDEFINED;
- }
- if (result.length === limit) break outer_loop;
- }
+ // If the separator string is empty then return the elements in the subject.
+ if (separator_length === 0) return %StringToArray(subject, limit);
- startIndex = currentIndex = endIndex;
- }
- var array_result = [];
- %MoveArrayContents(result, array_result);
- return array_result;
+ return %StringSplit(subject, separator_string, limit);
}
@@ -803,17 +721,12 @@ function StringTrimRight() {
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
- if (n == 1) {
- if (!%_IsSmi(code)) code = ToNumber(code);
- return %_StringCharFromCode(code & 0xffff);
- }
+ if (n == 1) return %_StringCharFromCode(code & 0xffff);
var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
var i;
for (i = 0; i < n; i++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- if (code < 0) code = code & 0xffff;
+ code = %_Arguments(i) & 0xffff;
if (code > 0xff) break;
%_OneByteSeqStringSetChar(i, code, one_byte);
}
@@ -821,9 +734,10 @@ function StringFromCharCode(code) {
one_byte = %TruncateString(one_byte, i);
var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
- for (var j = 0; i < n; i++, j++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ %_TwoByteSeqStringSetChar(0, code, two_byte);
+ i++;
+ for (var j = 1; i < n; i++, j++) {
+ code = %_Arguments(i) & 0xffff;
%_TwoByteSeqStringSetChar(j, code, two_byte);
}
return one_byte + two_byte;
@@ -832,7 +746,7 @@ function StringFromCharCode(code) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
- return %_CallFunction(TO_STRING(str), /"/g, "&quot;", StringReplace);
+ return %_Call(StringReplace, TO_STRING(str), /"/g, "&quot;");
}
@@ -929,15 +843,21 @@ function StringSup() {
return "<sup>" + TO_STRING(this) + "</sup>";
}
-// ES6 draft 01-20-14, section 21.1.3.13
+// ES6, section 21.1.3.13
function StringRepeat(count) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
var s = TO_STRING(this);
var n = TO_INTEGER(count);
+
+ if (n < 0 || n === INFINITY) throw MakeRangeError(kInvalidCountValue);
+
+ // Early return to allow an arbitrarily-large repeat of the empty string.
+ if (s.length === 0) return "";
+
// The maximum string length is stored in a smi, so a longer repeat
// must result in a range error.
- if (n < 0 || n > %_MaxSmi()) throw MakeRangeError(kInvalidCountValue);
+ if (n > %_MaxSmi()) throw MakeRangeError(kInvalidCountValue);
var r = "";
while (true) {
@@ -969,21 +889,13 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
var s_len = s.length;
- if (pos < 0) pos = 0;
- if (pos > s_len) pos = s_len;
+ var start = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
-
- if (ss_len + pos > s_len) {
+ if (ss_len + start > s_len) {
return false;
}
- for (var i = 0; i < ss_len; i++) {
- if (%_StringCharCodeAt(s, pos + i) !== %_StringCharCodeAt(ss, i)) {
- return false;
- }
- }
-
- return true;
+ return %_SubString(s, start, start + ss_len) === ss;
}
@@ -1007,22 +919,14 @@ function StringEndsWith(searchString /* position */) { // length == 1
}
}
- if (pos < 0) pos = 0;
- if (pos > s_len) pos = s_len;
+ var end = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
- pos = pos - ss_len;
-
- if (pos < 0) {
+ var start = end - ss_len;
+ if (start < 0) {
return false;
}
- for (var i = 0; i < ss_len; i++) {
- if (%_StringCharCodeAt(s, pos + i) !== %_StringCharCodeAt(ss, i)) {
- return false;
- }
- }
-
- return true;
+ return %_SubString(s, start, start + ss_len) === ss;
}
@@ -1087,7 +991,7 @@ function StringFromCodePoint(_) { // length = 1
for (index = 0; index < length; index++) {
code = %_Arguments(index);
if (!%_IsSmi(code)) {
- code = ToNumber(code);
+ code = TO_NUMBER(code);
}
if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
throw MakeRangeError(kInvalidCodePoint, code);
diff --git a/chromium/v8/src/symbol.js b/chromium/v8/src/js/symbol.js
index 1596169685e..5be6e0168d7 100644
--- a/chromium/v8/src/symbol.js
+++ b/chromium/v8/src/js/symbol.js
@@ -16,15 +16,14 @@ var GlobalSymbol = global.Symbol;
var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
var isConcatSpreadableSymbol =
utils.ImportNow("is_concat_spreadable_symbol");
-var isRegExpSymbol = utils.ImportNow("is_regexp_symbol");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var ObjectGetOwnPropertyKeys;
+var MakeTypeError;
var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
- ObjectGetOwnPropertyKeys = from.ObjectGetOwnPropertyKeys;
+ MakeTypeError = from.MakeTypeError;
});
// -------------------------------------------------------------------
@@ -79,9 +78,7 @@ function SymbolKeyFor(symbol) {
function ObjectGetOwnPropertySymbols(obj) {
obj = TO_OBJECT(obj);
- // TODO(arv): Proxies use a shared trap for String and Symbol keys.
-
- return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_STRING);
+ return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_STRINGS);
}
// -------------------------------------------------------------------
@@ -92,8 +89,12 @@ utils.InstallConstants(GlobalSymbol, [
// TODO(rossberg): expose when implemented.
// "hasInstance", hasInstanceSymbol,
// "isConcatSpreadable", isConcatSpreadableSymbol,
- // "isRegExp", isRegExpSymbol,
"iterator", iteratorSymbol,
+ // TODO(yangguo): expose when implemented.
+ // "match", matchSymbol,
+ // "replace", replaceSymbol,
+ // "search", searchSymbol,
+ // "split, splitSymbol,
"toPrimitive", toPrimitiveSymbol,
// TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
// Move here when shipping
diff --git a/chromium/v8/src/templates.js b/chromium/v8/src/js/templates.js
index b273bc39e81..7236d5c130a 100644
--- a/chromium/v8/src/templates.js
+++ b/chromium/v8/src/js/templates.js
@@ -38,7 +38,7 @@ function SameCallSiteElements(rawStrings, other) {
function GetCachedCallSite(siteObj, hash) {
- var obj = %_CallFunction(callSiteCache, hash, mapGetFn);
+ var obj = %_Call(mapGetFn, callSiteCache, hash);
if (IS_UNDEFINED(obj)) return;
@@ -50,13 +50,13 @@ function GetCachedCallSite(siteObj, hash) {
function SetCachedCallSite(siteObj, hash) {
- var obj = %_CallFunction(callSiteCache, hash, mapGetFn);
+ var obj = %_Call(mapGetFn, callSiteCache, hash);
var array;
if (IS_UNDEFINED(obj)) {
array = new InternalArray(1);
array[0] = siteObj;
- %_CallFunction(callSiteCache, hash, array, mapSetFn);
+ %_Call(mapSetFn, callSiteCache, hash, array);
} else {
obj.push(siteObj);
}
@@ -70,10 +70,10 @@ function GetTemplateCallSite(siteObj, rawStrings, hash) {
if (!IS_UNDEFINED(cached)) return cached;
- %AddNamedProperty(siteObj, "raw", %ObjectFreeze(rawStrings),
+ %AddNamedProperty(siteObj, "raw", %object_freeze(rawStrings),
READ_ONLY | DONT_ENUM | DONT_DELETE);
- return SetCachedCallSite(%ObjectFreeze(siteObj), hash);
+ return SetCachedCallSite(%object_freeze(siteObj), hash);
}
// ----------------------------------------------------------------------------
diff --git a/chromium/v8/src/js/typedarray.js b/chromium/v8/src/js/typedarray.js
new file mode 100644
index 00000000000..b3e1c829dd6
--- /dev/null
+++ b/chromium/v8/src/js/typedarray.js
@@ -0,0 +1,969 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var ArrayFrom;
+var ArrayToString;
+var ArrayValues;
+var GlobalArray = global.Array;
+var GlobalArrayBuffer = global.ArrayBuffer;
+var GlobalDataView = global.DataView;
+var GlobalObject = global.Object;
+var InternalArray = utils.InternalArray;
+var InnerArrayCopyWithin;
+var InnerArrayEvery;
+var InnerArrayFill;
+var InnerArrayFilter;
+var InnerArrayFind;
+var InnerArrayFindIndex;
+var InnerArrayForEach;
+var InnerArrayIncludes;
+var InnerArrayIndexOf;
+var InnerArrayJoin;
+var InnerArrayLastIndexOf;
+var InnerArrayReduce;
+var InnerArrayReduceRight;
+var InnerArraySome;
+var InnerArraySort;
+var InnerArrayToLocaleString;
+var IsNaN;
+var MakeRangeError;
+var MakeTypeError;
+var MaxSimple;
+var MinSimple;
+var PackedArrayReverse;
+var SpeciesConstructor;
+var ToPositiveInteger;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+
+macro TYPED_ARRAYS(FUNCTION)
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+FUNCTION(1, Uint8Array, 1)
+FUNCTION(2, Int8Array, 1)
+FUNCTION(3, Uint16Array, 2)
+FUNCTION(4, Int16Array, 2)
+FUNCTION(5, Uint32Array, 4)
+FUNCTION(6, Int32Array, 4)
+FUNCTION(7, Float32Array, 4)
+FUNCTION(8, Float64Array, 8)
+FUNCTION(9, Uint8ClampedArray, 1)
+endmacro
+
+macro DECLARE_GLOBALS(INDEX, NAME, SIZE)
+var GlobalNAME = global.NAME;
+endmacro
+
+TYPED_ARRAYS(DECLARE_GLOBALS)
+
+utils.Import(function(from) {
+ ArrayFrom = from.ArrayFrom;
+ ArrayToString = from.ArrayToString;
+ ArrayValues = from.ArrayValues;
+ InnerArrayCopyWithin = from.InnerArrayCopyWithin;
+ InnerArrayEvery = from.InnerArrayEvery;
+ InnerArrayFill = from.InnerArrayFill;
+ InnerArrayFilter = from.InnerArrayFilter;
+ InnerArrayFind = from.InnerArrayFind;
+ InnerArrayFindIndex = from.InnerArrayFindIndex;
+ InnerArrayForEach = from.InnerArrayForEach;
+ InnerArrayIncludes = from.InnerArrayIncludes;
+ InnerArrayIndexOf = from.InnerArrayIndexOf;
+ InnerArrayJoin = from.InnerArrayJoin;
+ InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
+ InnerArrayReduce = from.InnerArrayReduce;
+ InnerArrayReduceRight = from.InnerArrayReduceRight;
+ InnerArraySome = from.InnerArraySome;
+ InnerArraySort = from.InnerArraySort;
+ InnerArrayToLocaleString = from.InnerArrayToLocaleString;
+ IsNaN = from.IsNaN;
+ MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
+ PackedArrayReverse = from.PackedArrayReverse;
+ SpeciesConstructor = from.SpeciesConstructor;
+ ToPositiveInteger = from.ToPositiveInteger;
+});
+
+// --------------- Typed Arrays ---------------------
+
+function TypedArrayDefaultConstructor(typedArray) {
+ switch (%_ClassOf(typedArray)) {
+macro TYPED_ARRAY_CONSTRUCTOR_CASE(ARRAY_ID, NAME, ELEMENT_SIZE)
+ case "NAME":
+ return GlobalNAME;
+endmacro
+TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR_CASE)
+ }
+ // The TypeError should not be generated since all callers should
+ // have already called ValidateTypedArray.
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "TypedArrayDefaultConstructor", this);
+}
+
+function TypedArrayCreate(constructor, arg0, arg1, arg2) {
+ if (IS_UNDEFINED(arg1)) {
+ var newTypedArray = new constructor(arg0);
+ } else {
+ var newTypedArray = new constructor(arg0, arg1, arg2);
+ }
+ if (!%_IsTypedArray(newTypedArray)) throw MakeTypeError(kNotTypedArray);
+ // TODO(littledan): Check for being detached, here and elsewhere
+ // All callers where the first argument is a Number have no additional
+ // arguments.
+ if (IS_NUMBER(arg0) && %_TypedArrayGetLength(newTypedArray) < arg0) {
+ throw MakeTypeError(kTypedArrayTooShort);
+ }
+ return newTypedArray;
+}
+
+function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2, conservative) {
+ var defaultConstructor = TypedArrayDefaultConstructor(exemplar);
+ var constructor = SpeciesConstructor(exemplar, defaultConstructor,
+ conservative);
+ return TypedArrayCreate(constructor, arg0, arg1, arg2);
+}
+
+macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
+function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset = ToPositiveInteger(byteOffset, kInvalidTypedArrayLength);
+ }
+ if (!IS_UNDEFINED(length)) {
+ length = ToPositiveInteger(length, kInvalidTypedArrayLength);
+ }
+
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+ var offset;
+ if (IS_UNDEFINED(byteOffset)) {
+ offset = 0;
+ } else {
+ offset = byteOffset;
+
+ if (offset % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError(kInvalidTypedArrayAlignment,
+ "start offset", "NAME", ELEMENT_SIZE);
+ }
+ if (offset > bufferByteLength) {
+ throw MakeRangeError(kInvalidTypedArrayOffset);
+ }
+ }
+
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError(kInvalidTypedArrayAlignment,
+ "byte length", "NAME", ELEMENT_SIZE);
+ }
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / ELEMENT_SIZE;
+ } else {
+ newLength = length;
+ newByteLength = newLength * ELEMENT_SIZE;
+ }
+ if ((offset + newByteLength > bufferByteLength)
+ || (newLength > %_MaxSmi())) {
+ throw MakeRangeError(kInvalidTypedArrayLength);
+ }
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength, true);
+}
+
+function NAMEConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ?
+ 0 : ToPositiveInteger(length, kInvalidTypedArrayLength);
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError(kInvalidTypedArrayLength);
+ }
+ var byteLength = l * ELEMENT_SIZE;
+ if (byteLength > %_TypedArrayMaxSizeInHeap()) {
+ var buffer = new GlobalArrayBuffer(byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength, true);
+ } else {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength, true);
+ }
+}
+
+function NAMEConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = ToPositiveInteger(length, kInvalidTypedArrayLength);
+
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError(kInvalidTypedArrayLength);
+ }
+ var initialized = false;
+ var byteLength = l * ELEMENT_SIZE;
+ if (byteLength <= %_TypedArrayMaxSizeInHeap()) {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength, false);
+ } else {
+ initialized =
+ %TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l);
+ }
+ if (!initialized) {
+ for (var i = 0; i < l; i++) {
+ // It is crucial that we let any exceptions from arrayLike[i]
+ // propagate outside the function.
+ obj[i] = arrayLike[i];
+ }
+ }
+}
+
+function NAMEConstructByIterable(obj, iterable, iteratorFn) {
+ var list = new InternalArray();
+ // Reading the Symbol.iterator property of iterable twice would be
+ // observable with getters, so instead, we call the function which
+ // was already looked up, and wrap it in another iterable. The
+ // __proto__ of the new iterable is set to null to avoid any chance
+ // of modifications to Object.prototype being observable here.
+ var iterator = %_Call(iteratorFn, iterable);
+ var newIterable = {
+ __proto__: null
+ };
+ // TODO(littledan): Computed properties don't work yet in nosnap.
+ // Rephrase when they do.
+ newIterable[iteratorSymbol] = function() { return iterator; };
+ for (var value of newIterable) {
+ list.push(value);
+ }
+ NAMEConstructByArrayLike(obj, list);
+}
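+
+// For example, a generator takes this path (illustrative only; NAME expands
+// to each concrete typed array constructor):
+//
+//   function* gen() { yield 1; yield 2; }
+//   new Uint8Array(gen());  // Uint8Array of [1, 2]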
+
+function NAMEConstructor(arg1, arg2, arg3) {
+ if (!IS_UNDEFINED(new.target)) {
+ if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
+ NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
+ IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
+ NAMEConstructByLength(this, arg1);
+ } else {
+ // TODO(littledan): If arg1 is a TypedArray, follow the constructor
+ // path in ES2015 22.2.4.3, and call SpeciesConstructor, in a
+ // path that seems to be an optimized version of what's below, but
+ // in an observably different way.
+ var iteratorFn = arg1[iteratorSymbol];
+ if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
+ NAMEConstructByArrayLike(this, arg1);
+ } else {
+ NAMEConstructByIterable(this, arg1, iteratorFn);
+ }
+ }
+ } else {
+ throw MakeTypeError(kConstructorNotFunction, "NAME")
+ }
+}
+
+function NAMESubArray(begin, end) {
+ var beginInt = TO_INTEGER(begin);
+ if (!IS_UNDEFINED(end)) {
+ var endInt = TO_INTEGER(end);
+ var srcLength = %_TypedArrayGetLength(this);
+ } else {
+ var srcLength = %_TypedArrayGetLength(this);
+ var endInt = srcLength;
+ }
+
+ if (beginInt < 0) {
+ beginInt = MaxSimple(0, srcLength + beginInt);
+ } else {
+ beginInt = MinSimple(beginInt, srcLength);
+ }
+
+ if (endInt < 0) {
+ endInt = MaxSimple(0, srcLength + endInt);
+ } else {
+ endInt = MinSimple(endInt, srcLength);
+ }
+
+ if (endInt < beginInt) {
+ endInt = beginInt;
+ }
+
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
+ return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
+ beginByteOffset, newLength, true);
+}
+endmacro
+
+TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
+
+function TypedArraySubArray(begin, end) {
+ switch (%_ClassOf(this)) {
+macro TYPED_ARRAY_SUBARRAY_CASE(ARRAY_ID, NAME, ELEMENT_SIZE)
+ case "NAME":
+ return %_Call(NAMESubArray, this, begin, end);
+endmacro
+TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
+ }
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.subarray", this);
+}
+%SetForceInlineFlag(TypedArraySubArray);
+
+function TypedArrayGetBuffer() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.buffer", this);
+ }
+ return %TypedArrayGetBuffer(this);
+}
+%SetForceInlineFlag(TypedArrayGetBuffer);
+
+function TypedArrayGetByteLength() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.byteLength", this);
+ }
+ return %_ArrayBufferViewGetByteLength(this);
+}
+%SetForceInlineFlag(TypedArrayGetByteLength);
+
+function TypedArrayGetByteOffset() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.byteOffset", this);
+ }
+ return %_ArrayBufferViewGetByteOffset(this);
+}
+%SetForceInlineFlag(TypedArrayGetByteOffset);
+
+function TypedArrayGetLength() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.length", this);
+ }
+ return %_TypedArrayGetLength(this);
+}
+%SetForceInlineFlag(TypedArrayGetLength);
+
+
+function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
+ if (offset > 0) {
+ for (var i = 0; i < sourceLength; i++) {
+ target[offset + i] = source[i];
+ }
+ } else {
+ for (var i = 0; i < sourceLength; i++) {
+ target[i] = source[i];
+ }
+ }
+}
+
+function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
+ var sourceElementSize = source.BYTES_PER_ELEMENT;
+ var targetElementSize = target.BYTES_PER_ELEMENT;
+ var sourceLength = source.length;
+
+ // Copy left part.
+ function CopyLeftPart() {
+ // First unmutated byte after the next write.
+ var targetPtr = target.byteOffset + (offset + 1) * targetElementSize;
+ // Next read at sourcePtr. We do not care about memory changing before
+ // sourcePtr - we have already copied it.
+ var sourcePtr = source.byteOffset;
+ for (var leftIndex = 0;
+ leftIndex < sourceLength && targetPtr <= sourcePtr;
+ leftIndex++) {
+ target[offset + leftIndex] = source[leftIndex];
+ targetPtr += targetElementSize;
+ sourcePtr += sourceElementSize;
+ }
+ return leftIndex;
+ }
+ var leftIndex = CopyLeftPart();
+
+ // Copy right part.
+ function CopyRightPart() {
+ // First unmutated byte before the next write.
+ var targetPtr =
+ target.byteOffset + (offset + sourceLength - 1) * targetElementSize;
+ // Next read before sourcePtr. We do not care about memory changing after
+ // sourcePtr - we have already copied it.
+ var sourcePtr =
+ source.byteOffset + sourceLength * sourceElementSize;
+ for (var rightIndex = sourceLength - 1;
+ rightIndex >= leftIndex && targetPtr >= sourcePtr;
+ rightIndex--) {
+ target[offset + rightIndex] = source[rightIndex];
+ targetPtr -= targetElementSize;
+ sourcePtr -= sourceElementSize;
+ }
+ return rightIndex;
+ }
+ var rightIndex = CopyRightPart();
+
+ var temp = new GlobalArray(rightIndex + 1 - leftIndex);
+ for (var i = leftIndex; i <= rightIndex; i++) {
+ temp[i - leftIndex] = source[i];
+ }
+ for (i = leftIndex; i <= rightIndex; i++) {
+ target[offset + i] = temp[i - leftIndex];
+ }
+}
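+
+// Overlap arises when source and target views share an ArrayBuffer but have
+// different element sizes. A minimal sketch:
+//
+//   var buf = new ArrayBuffer(8);
+//   var u8  = new Uint8Array(buf);
+//   var u16 = new Uint16Array(buf);
+//   u8.set(u16);  // reads and writes alias buf, so copy order matters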
+
+function TypedArraySet(obj, offset) {
+ var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
+ if (intOffset < 0) throw MakeTypeError(kTypedArraySetNegativeOffset);
+
+ if (intOffset > %_MaxSmi()) {
+ throw MakeRangeError(kTypedArraySetSourceTooLarge);
+ }
+ switch (%TypedArraySetFastCases(this, obj, intOffset)) {
+ // These numbers should be synchronized with runtime.cc.
+ case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
+ return;
+ case 1: // TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING
+ TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
+ return;
+ case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
+ TypedArraySetFromArrayLike(this, obj, obj.length, intOffset);
+ return;
+ case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
+ var l = obj.length;
+ if (IS_UNDEFINED(l)) {
+ if (IS_NUMBER(obj)) {
+ // For number as a first argument, throw TypeError
+ // instead of silently ignoring the call, so that
+ // the user knows (s)he did something wrong.
+ // (Consistent with Firefox and Blink/WebKit)
+ throw MakeTypeError(kInvalidArgument);
+ }
+ return;
+ }
+ l = TO_LENGTH(l);
+ if (intOffset + l > this.length) {
+ throw MakeRangeError(kTypedArraySetSourceTooLarge);
+ }
+ TypedArraySetFromArrayLike(this, obj, l, intOffset);
+ return;
+ }
+}
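+
+// Usage sketch covering the cases above:
+//
+//   var t = new Uint8Array(4);
+//   t.set([1, 2], 1);               // array-like case: t is [0, 1, 2, 0]
+//   t.set(new Uint8Array([9]), 3);  // same-type fast case: t is [0, 1, 2, 9]
+//   t.set([7], 4);                  // RangeError, source does not fit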
+
+function TypedArrayGetToStringTag() {
+ if (!%_IsTypedArray(this)) return;
+ var name = %_ClassOf(this);
+ if (IS_UNDEFINED(name)) return;
+ return name;
+}
+
+
+function TypedArrayCopyWithin(target, start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ // TODO(littledan): Replace with a memcpy for better performance
+ return InnerArrayCopyWithin(target, start, end, this, length);
+}
+%FunctionSetLength(TypedArrayCopyWithin, 2);
+
+
+// ES6 draft 05-05-15, section 22.2.3.7
+function TypedArrayEvery(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayEvery(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArrayEvery, 1);
+
+
+// ES6 draft 08-24-14, section 22.2.3.12
+function TypedArrayForEach(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ InnerArrayForEach(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArrayForEach, 1);
+
+
+// ES6 draft 04-05-14 section 22.2.3.8
+function TypedArrayFill(value, start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFill(value, start, end, this, length);
+}
+%FunctionSetLength(TypedArrayFill, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.9
+function TypedArrayFilter(f, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ var result = new InternalArray();
+ InnerArrayFilter(f, thisArg, this, length, result);
+ var captured = result.length;
+ var output = TypedArraySpeciesCreate(this, captured);
+ for (var i = 0; i < captured; i++) {
+ output[i] = result[i];
+ }
+ return output;
+}
+%FunctionSetLength(TypedArrayFilter, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.10
+function TypedArrayFind(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFind(predicate, thisArg, this, length);
+}
+%FunctionSetLength(TypedArrayFind, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.11
+function TypedArrayFindIndex(predicate, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayFindIndex(predicate, thisArg, this, length);
+}
+%FunctionSetLength(TypedArrayFindIndex, 1);
+
+
+// ES6 draft 05-18-15, section 22.2.3.21
+function TypedArrayReverse() {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return PackedArrayReverse(this, length);
+}
+
+
+function TypedArrayComparefn(x, y) {
+ if (IsNaN(x) && IsNaN(y)) {
+ return 0;
+ }
+ if (IsNaN(x)) {
+ return 1;
+ }
+ if (x === 0 && x === y) {
+ if (%_IsMinusZero(x)) {
+ if (!%_IsMinusZero(y)) {
+ return -1;
+ }
+ } else if (%_IsMinusZero(y)) {
+ return 1;
+ }
+ }
+ return x - y;
+}
+
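+// Used as the default comparator for sort below: it orders numerically,
+// placing -0 before +0 and NaNs last. For example:
+//
+//   new Float64Array([3, NaN, -0, 1]).sort();  // [-0, 1, 3, NaN]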
+
+// ES6 draft 05-18-15, section 22.2.3.25
+function TypedArraySort(comparefn) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ if (IS_UNDEFINED(comparefn)) {
+ comparefn = TypedArrayComparefn;
+ }
+
+ return InnerArraySort(this, length, comparefn);
+}
+
+
+// ES6 section 22.2.3.13
+function TypedArrayIndexOf(element, index) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayIndexOf(this, element, index, length);
+}
+%FunctionSetLength(TypedArrayIndexOf, 1);
+
+
+// ES6 section 22.2.3.16
+function TypedArrayLastIndexOf(element, index) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayLastIndexOf(this, element, index, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayLastIndexOf, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.18
+function TypedArrayMap(f, thisArg) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ var result = TypedArraySpeciesCreate(this, length);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ for (var i = 0; i < length; i++) {
+ var element = this[i];
+ result[i] = %_Call(f, thisArg, element, i, this);
+ }
+ return result;
+}
+%FunctionSetLength(TypedArrayMap, 1);
+
+
+// ES6 draft 05-05-15, section 22.2.3.24
+function TypedArraySome(f, receiver) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArraySome(f, receiver, this, length);
+}
+%FunctionSetLength(TypedArraySome, 1);
+
+
+// ES6 section 22.2.3.27
+function TypedArrayToLocaleString() {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayToLocaleString(this, length);
+}
+
+
+// ES6 section 22.2.3.28
+function TypedArrayToString() {
+ return %_Call(ArrayToString, this);
+}
+
+
+// ES6 section 22.2.3.14
+function TypedArrayJoin(separator) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayJoin(separator, this, length);
+}
+
+
+// ES6 draft 07-15-13, section 22.2.3.19
+function TypedArrayReduce(callback, current) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayReduce(callback, current, this, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayReduce, 1);
+
+
+// ES6 draft 07-15-13, section 22.2.3.19
+function TypedArrayReduceRight(callback, current) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+ return InnerArrayReduceRight(callback, current, this, length,
+ %_ArgumentsLength());
+}
+%FunctionSetLength(TypedArrayReduceRight, 1);
+
+
+function TypedArraySlice(start, end) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+ var len = %_TypedArrayGetLength(this);
+
+ var relativeStart = TO_INTEGER(start);
+
+ var k;
+ if (relativeStart < 0) {
+ k = MaxSimple(len + relativeStart, 0);
+ } else {
+ k = MinSimple(relativeStart, len);
+ }
+
+ var relativeEnd;
+ if (IS_UNDEFINED(end)) {
+ relativeEnd = len;
+ } else {
+ relativeEnd = TO_INTEGER(end);
+ }
+
+ var final;
+ if (relativeEnd < 0) {
+ final = MaxSimple(len + relativeEnd, 0);
+ } else {
+ final = MinSimple(relativeEnd, len);
+ }
+
+ var count = MaxSimple(final - k, 0);
+ var array = TypedArraySpeciesCreate(this, count);
+ // The code below is the 'then' branch; the 'else' branch specifies
+ // a memcpy. Because V8 doesn't canonicalize NaN, the difference is
+ // unobservable.
+ var n = 0;
+ while (k < final) {
+ var kValue = this[k];
+ array[n] = kValue;
+ k++;
+ n++;
+ }
+ return array;
+}
+
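+// For example:
+//
+//   var t = new Int8Array([10, 20, 30, 40]);
+//   t.slice(1, 3);  // Int8Array of [20, 30]
+//   t.slice(-2);    // Int8Array of [30, 40]
+//   t.slice(2, 1);  // empty, count clamps to 0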
+
+// ES2016 draft, section 22.2.3.14
+function TypedArrayIncludes(searchElement, fromIndex) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayIncludes(searchElement, fromIndex, this, length);
+}
+%FunctionSetLength(TypedArrayIncludes, 1);
+
+
+// ES6 draft 08-24-14, section 22.2.2.2
+function TypedArrayOf() {
+ var length = %_ArgumentsLength();
+ var array = TypedArrayCreate(this, length);
+ for (var i = 0; i < length; i++) {
+ array[i] = %_Arguments(i);
+ }
+ return array;
+}
+
+
+function TypedArrayFrom(source, mapfn, thisArg) {
+ // TODO(littledan): Investigate if there is a receiver which could be
+ // faster to accumulate on than Array, e.g., a TypedVector.
+ // TODO(littledan): Rewrite this code to ensure that things happen
+ // in the right order, e.g., the constructor needs to be called before
+ // the mapping function on array-likes.
+ var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
+ return TypedArrayCreate(this, array);
+}
+%FunctionSetLength(TypedArrayFrom, 1);
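
Illustrative use of the two statics installed below: `of` copies its
arguments element by element, while `from` funnels iterables and array-likes
through ArrayFrom before the typed copy (a sketch, assuming standard
%TypedArray%.of / %TypedArray%.from semantics):

  Uint8Array.of(1, 2, 3);                     // Uint8Array [1, 2, 3]
  Uint16Array.from("123", Number);            // Uint16Array [1, 2, 3] (mapped)
  Int8Array.from({ length: 2, 0: 7, 1: 9 });  // Int8Array [7, 9] (array-like)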
+
+function TypedArray() {
+ if (IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kConstructorNonCallable, "TypedArray");
+ }
+ if (new.target === TypedArray) {
+ throw MakeTypeError(kConstructAbstractClass, "TypedArray");
+ }
+}
+
+// -------------------------------------------------------------------
+
+%FunctionSetPrototype(TypedArray, new GlobalObject());
+%AddNamedProperty(TypedArray.prototype,
+ "constructor", TypedArray, DONT_ENUM);
+utils.InstallFunctions(TypedArray, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+ "from", TypedArrayFrom,
+ "of", TypedArrayOf
+]);
+utils.InstallGetter(TypedArray.prototype, "buffer", TypedArrayGetBuffer);
+utils.InstallGetter(TypedArray.prototype, "byteOffset", TypedArrayGetByteOffset,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetter(TypedArray.prototype, "byteLength",
+ TypedArrayGetByteLength, DONT_ENUM | DONT_DELETE);
+utils.InstallGetter(TypedArray.prototype, "length", TypedArrayGetLength,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetter(TypedArray.prototype, toStringTagSymbol,
+ TypedArrayGetToStringTag);
+utils.InstallFunctions(TypedArray.prototype, DONT_ENUM, [
+ "subarray", TypedArraySubArray,
+ "set", TypedArraySet,
+ "copyWithin", TypedArrayCopyWithin,
+ "every", TypedArrayEvery,
+ "fill", TypedArrayFill,
+ "filter", TypedArrayFilter,
+ "find", TypedArrayFind,
+ "findIndex", TypedArrayFindIndex,
+ "includes", TypedArrayIncludes,
+ "indexOf", TypedArrayIndexOf,
+ "join", TypedArrayJoin,
+ "lastIndexOf", TypedArrayLastIndexOf,
+ "forEach", TypedArrayForEach,
+ "map", TypedArrayMap,
+ "reduce", TypedArrayReduce,
+ "reduceRight", TypedArrayReduceRight,
+ "reverse", TypedArrayReverse,
+ "slice", TypedArraySlice,
+ "some", TypedArraySome,
+ "sort", TypedArraySort,
+ "toString", TypedArrayToString,
+ "toLocaleString", TypedArrayToLocaleString
+]);
+
+
+macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
+ %SetCode(GlobalNAME, NAMEConstructor);
+ %FunctionSetPrototype(GlobalNAME, new GlobalObject());
+ %InternalSetPrototype(GlobalNAME, TypedArray);
+ %InternalSetPrototype(GlobalNAME.prototype, TypedArray.prototype);
+
+ %AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+
+ %AddNamedProperty(GlobalNAME.prototype,
+ "constructor", global.NAME, DONT_ENUM);
+ %AddNamedProperty(GlobalNAME.prototype,
+ "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+endmacro
+
+TYPED_ARRAYS(SETUP_TYPED_ARRAY)
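
The macro above hangs each concrete constructor and its prototype off the
abstract TypedArray pair. The hierarchy is observable from script even though
%TypedArray% has no global name; a sketch (AbstractTA is an illustrative
local):

  var AbstractTA = Object.getPrototypeOf(Uint8Array);
  AbstractTA === Object.getPrototypeOf(Float64Array);  // true (shared parent)
  Uint8Array.BYTES_PER_ELEMENT;                        // 1
  Float64Array.prototype.BYTES_PER_ELEMENT;            // 8
  // new AbstractTA()  -> TypeError, per the new.target check in TypedArray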
+
+// --------------------------- DataView -----------------------------
+
+function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
+ if (IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kConstructorNotFunction, "DataView");
+ }
+
+ // TODO(binji): support SharedArrayBuffers?
+ if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset = ToPositiveInteger(byteOffset, kInvalidDataViewOffset);
+ }
+ if (!IS_UNDEFINED(byteLength)) {
+ byteLength = TO_INTEGER(byteLength);
+ }
+
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+
+ var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
+ if (offset > bufferByteLength) throw MakeRangeError(kInvalidDataViewOffset);
+
+ var length = IS_UNDEFINED(byteLength)
+ ? bufferByteLength - offset
+ : byteLength;
+ if (length < 0 || offset + length > bufferByteLength) {
+ throw MakeRangeError(kInvalidDataViewLength);
+ }
+ var result = %NewObject(GlobalDataView, new.target);
+ %_DataViewInitialize(result, buffer, offset, length);
+ return result;
+}
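
Boundary behaviour of the offset/length validation above (illustrative;
note that offset == byteLength is allowed and yields an empty view):

  var buf = new ArrayBuffer(8);
  new DataView(buf, 8).byteLength;  // 0 (offset may equal the buffer length)
  new DataView(buf, 2).byteLength;  // 6 (length defaults to the remainder)
  // new DataView(buf, 9)           -> RangeError (offset > buffer length)
  // new DataView(buf, 4, 8)        -> RangeError (offset + length > 8)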
+
+function DataViewGetBufferJS() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver, 'DataView.buffer', this);
+ }
+ return %DataViewGetBuffer(this);
+}
+
+function DataViewGetByteOffset() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'DataView.byteOffset', this);
+ }
+ return %_ArrayBufferViewGetByteOffset(this);
+}
+
+function DataViewGetByteLength() {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'DataView.byteLength', this);
+ }
+ return %_ArrayBufferViewGetByteLength(this);
+}
+
+macro DATA_VIEW_TYPES(FUNCTION)
+ FUNCTION(Int8)
+ FUNCTION(Uint8)
+ FUNCTION(Int16)
+ FUNCTION(Uint16)
+ FUNCTION(Int32)
+ FUNCTION(Uint32)
+ FUNCTION(Float32)
+ FUNCTION(Float64)
+endmacro
+
+
+macro DATA_VIEW_GETTER_SETTER(TYPENAME)
+function DataViewGetTYPENAMEJS(offset, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'DataView.getTYPENAME', this);
+ }
+ if (%_ArgumentsLength() < 1) throw MakeTypeError(kInvalidArgument);
+ offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
+ return %DataViewGetTYPENAME(this, offset, !!little_endian);
+}
+
+function DataViewSetTYPENAMEJS(offset, value, little_endian) {
+ if (!IS_DATAVIEW(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'DataView.setTYPENAME', this);
+ }
+ if (%_ArgumentsLength() < 2) throw MakeTypeError(kInvalidArgument);
+ offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
+ %DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
+}
+endmacro
+
+DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
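
The macro expands into the sixteen get/set pairs installed below. Accesses
default to big-endian; any truthy final argument selects little-endian via
the !!little_endian coercion. A sketch:

  var dv = new DataView(new ArrayBuffer(4));
  dv.setUint16(0, 0x1234);  // big-endian by default: bytes 0x12 0x34
  dv.getUint16(0, true);    // 0x3412 when read back little-endian
  // dv.getUint16()         -> TypeError, from the %_ArgumentsLength() check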
+
+// Set up the DataView constructor.
+%SetCode(GlobalDataView, DataViewConstructor);
+%FunctionSetPrototype(GlobalDataView, new GlobalObject());
+
+// Set up constructor property on the DataView prototype.
+%AddNamedProperty(GlobalDataView.prototype, "constructor", GlobalDataView,
+ DONT_ENUM);
+%AddNamedProperty(GlobalDataView.prototype, toStringTagSymbol, "DataView",
+ READ_ONLY|DONT_ENUM);
+
+utils.InstallGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
+utils.InstallGetter(GlobalDataView.prototype, "byteOffset",
+ DataViewGetByteOffset);
+utils.InstallGetter(GlobalDataView.prototype, "byteLength",
+ DataViewGetByteLength);
+
+utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
+ "getInt8", DataViewGetInt8JS,
+ "setInt8", DataViewSetInt8JS,
+
+ "getUint8", DataViewGetUint8JS,
+ "setUint8", DataViewSetUint8JS,
+
+ "getInt16", DataViewGetInt16JS,
+ "setInt16", DataViewSetInt16JS,
+
+ "getUint16", DataViewGetUint16JS,
+ "setUint16", DataViewSetUint16JS,
+
+ "getInt32", DataViewGetInt32JS,
+ "setInt32", DataViewSetInt32JS,
+
+ "getUint32", DataViewGetUint32JS,
+ "setUint32", DataViewSetUint32JS,
+
+ "getFloat32", DataViewGetFloat32JS,
+ "setFloat32", DataViewSetFloat32JS,
+
+ "getFloat64", DataViewGetFloat64JS,
+ "setFloat64", DataViewSetFloat64JS
+]);
+
+})
diff --git a/chromium/v8/src/uri.js b/chromium/v8/src/js/uri.js
index bdb83d14310..712d7e60f37 100644
--- a/chromium/v8/src/uri.js
+++ b/chromium/v8/src/js/uri.js
@@ -17,6 +17,12 @@
var GlobalObject = global.Object;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
+var MakeURIError;
+
+utils.Import(function(from) {
+ MakeURIError = from.MakeURIError;
+});
+
// -------------------------------------------------------------------
// Define internal helper functions.
diff --git a/chromium/v8/src/v8natives.js b/chromium/v8/src/js/v8natives.js
index 37e6f1bcceb..26447dac5de 100644
--- a/chromium/v8/src/v8natives.js
+++ b/chromium/v8/src/js/v8natives.js
@@ -9,53 +9,52 @@
// ----------------------------------------------------------------------------
// Imports
-var FLAG_harmony_tostring;
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
-var GlobalFunction = global.Function;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var MakeRangeError;
+var MakeSyntaxError;
+var MakeTypeError;
var MathAbs;
-var ProxyDelegateCallAndConstruct;
-var ProxyDerivedHasOwnTrap;
-var ProxyDerivedKeysTrap;
-var StringIndexOf;
-var ToBoolean = utils.ImportNow("ToBoolean");
-var ToNumber = utils.ImportNow("ToNumber");
+var NaN = %GetRootNaN();
+var ObjectToString = utils.ImportNow("object_to_string");
+var ObserveBeginPerformSplice;
+var ObserveEndPerformSplice;
+var ObserveEnqueueSpliceRecord;
+var SameValue = utils.ImportNow("SameValue");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
+ MakeRangeError = from.MakeRangeError;
+ MakeSyntaxError = from.MakeSyntaxError;
+ MakeTypeError = from.MakeTypeError;
MathAbs = from.MathAbs;
- StringIndexOf = from.StringIndexOf;
-});
-
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_tostring = from.FLAG_harmony_tostring;
- ProxyDelegateCallAndConstruct = from.ProxyDelegateCallAndConstruct;
- ProxyDerivedHasOwnTrap = from.ProxyDerivedHasOwnTrap;
- ProxyDerivedKeysTrap = from.ProxyDerivedKeysTrap;
+ ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
+ ObserveEndPerformSplice = from.ObserveEndPerformSplice;
+ ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
});
// ----------------------------------------------------------------------------
-// ECMA 262 - 15.1.4
+// ES6 18.2.3 isNaN(number)
function GlobalIsNaN(number) {
- number = TO_NUMBER_INLINE(number);
+ number = TO_NUMBER(number);
return NUMBER_IS_NAN(number);
}
-// ECMA 262 - 15.1.5
+// ES6 18.2.2 isFinite(number)
function GlobalIsFinite(number) {
- number = TO_NUMBER_INLINE(number);
+ number = TO_NUMBER(number);
return NUMBER_IS_FINITE(number);
}
-// ECMA-262 - 15.1.2.2
+// ES6 18.2.5 parseInt(string, radix)
function GlobalParseInt(string, radix) {
if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
// Some people use parseInt instead of Math.floor. This
@@ -77,7 +76,7 @@ function GlobalParseInt(string, radix) {
string = TO_STRING(string);
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return NAN;
+ return NaN;
}
}
@@ -89,37 +88,26 @@ function GlobalParseInt(string, radix) {
}
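
Illustrative results of the radix handling above (an undefined or zero radix
enables the 0x prefix detour; anything outside [2, 36] yields NaN):

  parseInt("ff", 16);   // 255
  parseInt("0x1f");     // 31  (radix undefined: hex prefix honored)
  parseInt("10", 37);   // NaN (radix outside [2, 36])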
-// ECMA-262 - 15.1.2.3
+// ES6 18.2.4 parseFloat(string)
function GlobalParseFloat(string) {
+ // 1. Let inputString be ? ToString(string).
string = TO_STRING(string);
if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
return %StringParseFloat(string);
}
-function GlobalEval(x) {
- if (!IS_STRING(x)) return x;
-
- var global_proxy = %GlobalProxy(GlobalEval);
-
- var f = %CompileString(x, false);
- if (!IS_FUNCTION(f)) return f;
-
- return %_CallFunction(global_proxy, f);
-}
-
-
// ----------------------------------------------------------------------------
// Set up global object.
var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
utils.InstallConstants(global, [
- // ECMA 262 - 15.1.1.1.
- "NaN", NAN,
- // ECMA-262 - 15.1.1.2.
+ // ES6 18.1.1
"Infinity", INFINITY,
- // ECMA-262 - 15.1.1.2.
+ // ES6 18.1.2
+ "NaN", NaN,
+ // ES6 18.1.3
"undefined", UNDEFINED,
]);
@@ -129,83 +117,45 @@ utils.InstallFunctions(global, DONT_ENUM, [
"isFinite", GlobalIsFinite,
"parseInt", GlobalParseInt,
"parseFloat", GlobalParseFloat,
- "eval", GlobalEval
]);
// ----------------------------------------------------------------------------
// Object
-// ECMA-262 - 15.2.4.2
-function ObjectToString() {
- if (IS_UNDEFINED(this)) return "[object Undefined]";
- if (IS_NULL(this)) return "[object Null]";
- var O = TO_OBJECT(this);
- var builtinTag = %_ClassOf(O);
- var tag;
-
- // TODO(caitp): cannot wait to get rid of this flag :>
- if (FLAG_harmony_tostring) {
- tag = O[toStringTagSymbol];
- if (!IS_STRING(tag)) {
- tag = builtinTag;
- }
- } else {
- tag = builtinTag;
- }
-
- return `[object ${tag}]`;
-}
-
-
-// ECMA-262 - 15.2.4.3
+// ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
function ObjectToLocaleString() {
CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
return this.toString();
}
-// ECMA-262 - 15.2.4.4
+// ES6 19.1.3.7 Object.prototype.valueOf()
function ObjectValueOf() {
return TO_OBJECT(this);
}
-// ECMA-262 - 15.2.4.5
+// ES6 7.3.11
function ObjectHasOwnProperty(value) {
var name = TO_NAME(value);
var object = TO_OBJECT(this);
-
- if (%_IsJSProxy(object)) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(value)) return false;
-
- var handler = %GetHandler(object);
- return CallTrap1(handler, "hasOwn", ProxyDerivedHasOwnTrap, name);
- }
return %HasOwnProperty(object, name);
}
-// ECMA-262 - 15.2.4.6
+// ES6 19.1.3.3 Object.prototype.isPrototypeOf(V)
function ObjectIsPrototypeOf(V) {
- if (!IS_SPEC_OBJECT(V)) return false;
+ if (!IS_RECEIVER(V)) return false;
var O = TO_OBJECT(this);
- return %_HasInPrototypeChain(V, O);
+ return %HasInPrototypeChain(V, O);
}
-// ECMA-262 - 15.2.4.6
+// ES6 19.1.3.4
function ObjectPropertyIsEnumerable(V) {
var P = TO_NAME(V);
- if (%_IsJSProxy(this)) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(V)) return false;
-
- var desc = GetOwnPropertyJS(this, P);
- return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
- }
- return %IsPropertyEnumerable(TO_OBJECT(this), P);
+ return %PropertyIsEnumerable(TO_OBJECT(this), P);
}
@@ -260,32 +210,21 @@ function ObjectLookupSetter(name) {
}
-function ObjectKeys(obj) {
- obj = TO_OBJECT(obj);
- if (%_IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "keys", ProxyDerivedKeysTrap);
- return ToNameArray(names, "keys", false);
- }
- return %OwnKeys(obj);
-}
-
-
-// ES5 8.10.1.
+// ES6 6.2.4.1
function IsAccessorDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return desc.hasGetter() || desc.hasSetter();
}
-// ES5 8.10.2.
+// ES6 6.2.4.2
function IsDataDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return desc.hasValue() || desc.hasWritable();
}
-// ES5 8.10.3.
+// ES6 6.2.4.3
function IsGenericDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
@@ -297,24 +236,6 @@ function IsInconsistentDescriptor(desc) {
}
-// ES5 8.10.4
-function FromPropertyDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return desc;
-
- if (IsDataDescriptor(desc)) {
- return { value: desc.getValue(),
- writable: desc.isWritable(),
- enumerable: desc.isEnumerable(),
- configurable: desc.isConfigurable() };
- }
- // Must be an AccessorDescriptor then. We never return a generic descriptor.
- return { get: desc.getGet(),
- set: desc.getSet(),
- enumerable: desc.isEnumerable(),
- configurable: desc.isConfigurable() };
-}
-
-
// Harmony Proxies
function FromGenericPropertyDescriptor(desc) {
if (IS_UNDEFINED(desc)) return desc;
@@ -342,18 +263,18 @@ function FromGenericPropertyDescriptor(desc) {
}
-// ES5 8.10.5.
+// ES6 6.2.4.5
function ToPropertyDescriptor(obj) {
- if (!IS_SPEC_OBJECT(obj)) throw MakeTypeError(kPropertyDescObject, obj);
+ if (!IS_RECEIVER(obj)) throw MakeTypeError(kPropertyDescObject, obj);
var desc = new PropertyDescriptor();
if ("enumerable" in obj) {
- desc.setEnumerable(ToBoolean(obj.enumerable));
+ desc.setEnumerable(TO_BOOLEAN(obj.enumerable));
}
if ("configurable" in obj) {
- desc.setConfigurable(ToBoolean(obj.configurable));
+ desc.setConfigurable(TO_BOOLEAN(obj.configurable));
}
if ("value" in obj) {
@@ -361,7 +282,7 @@ function ToPropertyDescriptor(obj) {
}
if ("writable" in obj) {
- desc.setWritable(ToBoolean(obj.writable));
+ desc.setWritable(TO_BOOLEAN(obj.writable));
}
if ("get" in obj) {
@@ -386,8 +307,7 @@ function ToPropertyDescriptor(obj) {
return desc;
}
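
Because ToPropertyDescriptor reads the boolean fields with TO_BOOLEAN, truthy
non-boolean attribute values behave like true; a sketch of the observable
effect:

  var o = {};
  Object.defineProperty(o, "x", { value: 1, enumerable: "yes" });
  Object.getOwnPropertyDescriptor(o, "x").enumerable;  // true (coerced)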
-
-// For Harmony proxies.
+// TODO(cbruni): remove once callers have been removed
function ToCompletePropertyDescriptor(obj) {
var desc = ToPropertyDescriptor(obj);
if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
@@ -531,46 +451,42 @@ function GetTrap(handler, name, defaultTrap) {
var trap = handler[name];
if (IS_UNDEFINED(trap)) {
if (IS_UNDEFINED(defaultTrap)) {
- throw MakeTypeError(kProxyHandlerTrapMissing, handler, name);
+ throw MakeTypeError(kIllegalInvocation);
}
trap = defaultTrap;
} else if (!IS_CALLABLE(trap)) {
- throw MakeTypeError(kProxyHandlerTrapMustBeCallable, handler, name);
+ throw MakeTypeError(kIllegalInvocation);
}
return trap;
}
-function CallTrap0(handler, name, defaultTrap) {
- return %_CallFunction(handler, GetTrap(handler, name, defaultTrap));
-}
-
-
function CallTrap1(handler, name, defaultTrap, x) {
- return %_CallFunction(handler, x, GetTrap(handler, name, defaultTrap));
+ return %_Call(GetTrap(handler, name, defaultTrap), handler, x);
}
function CallTrap2(handler, name, defaultTrap, x, y) {
- return %_CallFunction(handler, x, y, GetTrap(handler, name, defaultTrap));
+ return %_Call(GetTrap(handler, name, defaultTrap), handler, x, y);
}
// ES5 section 8.12.1.
+// TODO(jkummerow): Deprecated. Migrate all callers to
+// ObjectGetOwnPropertyDescriptor and delete this.
function GetOwnPropertyJS(obj, v) {
var p = TO_NAME(v);
- if (%_IsJSProxy(obj)) {
+ if (IS_PROXY(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(v)) return UNDEFINED;
- var handler = %GetHandler(obj);
+ var handler = %JSProxyGetHandler(obj);
var descriptor = CallTrap1(
handler, "getOwnPropertyDescriptor", UNDEFINED, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
- throw MakeTypeError(kProxyPropNotConfigurable,
- handler, p, "getOwnPropertyDescriptor");
+ throw MakeTypeError(kIllegalInvocation);
}
return desc;
}
@@ -578,28 +494,13 @@ function GetOwnPropertyJS(obj, v) {
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(TO_OBJECT(obj), p);
+ var props = %GetOwnProperty_Legacy(TO_OBJECT(obj), p);
return ConvertDescriptorArrayToDescriptor(props);
}
-// ES5 section 8.12.7.
-function Delete(obj, p, should_throw) {
- var desc = GetOwnPropertyJS(obj, p);
- if (IS_UNDEFINED(desc)) return true;
- if (desc.isConfigurable()) {
- %DeleteProperty_Sloppy(obj, p);
- return true;
- } else if (should_throw) {
- throw MakeTypeError(kDefineDisallowed, p);
- } else {
- return;
- }
-}
-
-
-// ES6, draft 12-24-14, section 7.3.8
+// ES6 7.3.9
function GetMethod(obj, p) {
var func = obj[p];
if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
@@ -613,12 +514,11 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(p)) return false;
- var handler = %GetHandler(obj);
+ var handler = %JSProxyGetHandler(obj);
var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
- if (!ToBoolean(result)) {
+ if (!result) {
if (should_throw) {
- throw MakeTypeError(kProxyHandlerReturned,
- handler, "false", "defineProperty");
+ throw MakeTypeError(kIllegalInvocation);
} else {
return false;
}
@@ -627,14 +527,12 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
}
-// ES5 8.12.9.
+// ES6 9.1.6 [[DefineOwnProperty]](P, Desc)
function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_array = %GetOwnProperty(obj, TO_NAME(p));
+ var current_array = %GetOwnProperty_Legacy(obj, TO_NAME(p));
var current = ConvertDescriptorArrayToDescriptor(current_array);
- var extensible = %IsExtensible(obj);
+ var extensible = %object_is_extensible(obj);
- // Error handling according to spec.
- // Step 3
if (IS_UNDEFINED(current) && !extensible) {
if (should_throw) {
throw MakeTypeError(kDefineDisallowed, p);
@@ -644,21 +542,20 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
if (!IS_UNDEFINED(current)) {
- // Step 5 and 6
if ((IsGenericDescriptor(desc) ||
IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
(!desc.hasEnumerable() ||
- $sameValue(desc.isEnumerable(), current.isEnumerable())) &&
+ SameValue(desc.isEnumerable(), current.isEnumerable())) &&
(!desc.hasConfigurable() ||
- $sameValue(desc.isConfigurable(), current.isConfigurable())) &&
+ SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() ||
- $sameValue(desc.isWritable(), current.isWritable())) &&
+ SameValue(desc.isWritable(), current.isWritable())) &&
(!desc.hasValue() ||
- $sameValue(desc.getValue(), current.getValue())) &&
+ SameValue(desc.getValue(), current.getValue())) &&
(!desc.hasGetter() ||
- $sameValue(desc.getGet(), current.getGet())) &&
+ SameValue(desc.getGet(), current.getGet())) &&
(!desc.hasSetter() ||
- $sameValue(desc.getSet(), current.getSet()))) {
+ SameValue(desc.getSet(), current.getSet()))) {
return true;
}
if (!current.isConfigurable()) {
@@ -697,7 +594,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
}
if (!currentIsWritable && desc.hasValue() &&
- !$sameValue(desc.getValue(), current.getValue())) {
+ !SameValue(desc.getValue(), current.getValue())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
@@ -708,14 +605,14 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
// Step 11
if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
if (desc.hasSetter() &&
- !$sameValue(desc.getSet(), current.getSet())) {
+ !SameValue(desc.getSet(), current.getSet())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
return false;
}
}
- if (desc.hasGetter() && !$sameValue(desc.getGet(),current.getGet())) {
+ if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
@@ -808,14 +705,14 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
var length = obj.length;
if (index >= length && %IsObserved(obj)) {
emit_splice = true;
- $observeBeginPerformSplice(obj);
+ ObserveBeginPerformSplice(obj);
}
var length_desc = GetOwnPropertyJS(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
!DefineObjectProperty(obj, p, desc, true)) {
if (emit_splice)
- $observeEndPerformSplice(obj);
+ ObserveEndPerformSplice(obj);
if (should_throw) {
throw MakeTypeError(kDefineDisallowed, p);
} else {
@@ -826,8 +723,8 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
obj.length = index + 1;
}
if (emit_splice) {
- $observeEndPerformSplice(obj);
- $observeEnqueueSpliceRecord(obj, length, [], index + 1 - length);
+ ObserveEndPerformSplice(obj);
+ ObserveEnqueueSpliceRecord(obj, length, [], index + 1 - length);
}
return true;
}
@@ -840,7 +737,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
function DefineOwnProperty(obj, p, desc, should_throw) {
- if (%_IsJSProxy(obj)) {
+ if (IS_PROXY(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(p)) return false;
@@ -854,31 +751,20 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
}
-function DefineOwnPropertyFromAPI(obj, p, value, desc) {
- return DefineOwnProperty(obj, p, ToPropertyDescriptor({
- value: value,
- writable: desc[0],
- enumerable: desc[1],
- configurable: desc[2]
- }),
- false);
-}
-
-
// ES6 section 19.1.2.9
function ObjectGetPrototypeOf(obj) {
return %_GetPrototype(TO_OBJECT(obj));
}
-// ES6 section 19.1.2.19.
+// ES6 section 19.1.2.18.
function ObjectSetPrototypeOf(obj, proto) {
CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
- if (proto !== null && !IS_SPEC_OBJECT(proto)) {
+ if (proto !== null && !IS_RECEIVER(proto)) {
throw MakeTypeError(kProtoObjectOrNull, proto);
}
- if (IS_SPEC_OBJECT(obj)) {
+ if (IS_RECEIVER(obj)) {
%SetPrototype(obj, proto);
}
@@ -888,432 +774,85 @@ function ObjectSetPrototypeOf(obj, proto) {
// ES6 section 19.1.2.6
function ObjectGetOwnPropertyDescriptor(obj, p) {
- var desc = GetOwnPropertyJS(TO_OBJECT(obj), p);
- return FromPropertyDescriptor(desc);
-}
-
-
-// For Harmony proxies
-function ToNameArray(obj, trap, includeSymbols) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError(kProxyNonObjectPropNames, trap, obj);
- }
- var n = TO_UINT32(obj.length);
- var array = new GlobalArray(n);
- var realLength = 0;
- var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
- for (var index = 0; index < n; index++) {
- var s = TO_NAME(obj[index]);
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(s) && !includeSymbols) continue;
- if (%HasOwnProperty(names, s)) {
- throw MakeTypeError(kProxyRepeatedPropName, trap, s);
- }
- array[realLength] = s;
- ++realLength;
- names[s] = 0;
- }
- array.length = realLength;
- return array;
-}
-
-
-function ObjectGetOwnPropertyKeys(obj, filter) {
- var nameArrays = new InternalArray();
- filter |= PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL;
- var interceptorInfo = %GetInterceptorInfo(obj);
-
- // Find all the indexed properties.
-
- // Only get own element names if we want to include string keys.
- if ((filter & PROPERTY_ATTRIBUTES_STRING) === 0) {
- var ownElementNames = %GetOwnElementNames(obj);
- for (var i = 0; i < ownElementNames.length; ++i) {
- ownElementNames[i] = %_NumberToString(ownElementNames[i]);
- }
- nameArrays.push(ownElementNames);
- // Get names for indexed interceptor properties.
- if ((interceptorInfo & 1) != 0) {
- var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
- if (!IS_UNDEFINED(indexedInterceptorNames)) {
- nameArrays.push(indexedInterceptorNames);
- }
- }
- }
-
- // Find all the named properties.
-
- // Get own property names.
- nameArrays.push(%GetOwnPropertyNames(obj, filter));
-
- // Get names for named interceptor properties if any.
- if ((interceptorInfo & 2) != 0) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(obj);
- if (!IS_UNDEFINED(namedInterceptorNames)) {
- nameArrays.push(namedInterceptorNames);
- }
- }
-
- var propertyNames =
- %Apply(InternalArray.prototype.concat,
- nameArrays[0], nameArrays, 1, nameArrays.length - 1);
-
- // Property names are expected to be unique strings,
- // but interceptors can interfere with that assumption.
- if (interceptorInfo != 0) {
- var seenKeys = { __proto__: null };
- var j = 0;
- for (var i = 0; i < propertyNames.length; ++i) {
- var name = propertyNames[i];
- if (IS_SYMBOL(name)) {
- if ((filter & PROPERTY_ATTRIBUTES_SYMBOLIC) || IS_PRIVATE(name)) {
- continue;
- }
- } else {
- if (filter & PROPERTY_ATTRIBUTES_STRING) continue;
- name = TO_STRING(name);
- }
- if (seenKeys[name]) continue;
- seenKeys[name] = true;
- propertyNames[j++] = name;
- }
- propertyNames.length = j;
- }
-
- return propertyNames;
-}
-
-
-// ES6 section 9.1.12 / 9.5.12
-function OwnPropertyKeys(obj) {
- if (%_IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- // TODO(caitp): Proxy.[[OwnPropertyKeys]] can not be implemented to spec
- // without an implementation of Direct Proxies.
- var names = CallTrap0(handler, "ownKeys", UNDEFINED);
- return ToNameArray(names, "getOwnPropertyNames", false);
- }
- return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
+ return %GetOwnProperty(obj, p);
}
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
obj = TO_OBJECT(obj);
- // Special handling for proxies.
- if (%_IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
- return ToNameArray(names, "getOwnPropertyNames", false);
- }
-
- return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_SYMBOLIC);
-}
-
-
-// ES5 section 15.2.3.5.
-function ObjectCreate(proto, properties) {
- if (!IS_SPEC_OBJECT(proto) && proto !== null) {
- throw MakeTypeError(kProtoObjectOrNull, proto);
- }
- var obj = {};
- %InternalSetPrototype(obj, proto);
- if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
- return obj;
+ return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_SYMBOLS);
}
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
- }
- var name = TO_NAME(p);
- if (%_IsJSProxy(obj)) {
- // Clone the attributes object for protection.
- // TODO(rossberg): not spec'ed yet, so not sure if this should involve
- // non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = { __proto__: null };
- for (var a in attributes) {
- attributesClone[a] = attributes[a];
+ // The new pure-C++ implementation doesn't support O.o.
+ // TODO(jkummerow): Implement missing features and remove fallback path.
+ if (%IsObserved(obj)) {
+ if (!IS_RECEIVER(obj)) {
+ throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
}
- DefineProxyProperty(obj, name, attributesClone, true);
- // The following would implement the spec as in the current proposal,
- // but after recent comments on es-discuss, is most likely obsolete.
- /*
- var defineObj = FromGenericPropertyDescriptor(desc);
- var names = ObjectGetOwnPropertyNames(attributes);
- var standardNames =
- {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
- for (var i = 0; i < names.length; i++) {
- var N = names[i];
- if (!(%HasOwnProperty(standardNames, N))) {
- var attr = GetOwnPropertyJS(attributes, N);
- DefineOwnProperty(descObj, N, attr, true);
- }
- }
- // This is really confusing the types, but it is what the proxies spec
- // currently requires:
- desc = descObj;
- */
- } else {
+ var name = TO_NAME(p);
var desc = ToPropertyDescriptor(attributes);
DefineOwnProperty(obj, name, desc, true);
+ return obj;
}
- return obj;
+ return %ObjectDefineProperty(obj, p, attributes);
}
function GetOwnEnumerablePropertyNames(object) {
- var names = new InternalArray();
- for (var key in object) {
- if (%HasOwnProperty(object, key)) {
- names.push(key);
- }
- }
-
- var filter = PROPERTY_ATTRIBUTES_STRING | PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL;
- var symbols = %GetOwnPropertyNames(object, filter);
- for (var i = 0; i < symbols.length; ++i) {
- var symbol = symbols[i];
- if (IS_SYMBOL(symbol)) {
- var desc = ObjectGetOwnPropertyDescriptor(object, symbol);
- if (desc.enumerable) names.push(symbol);
- }
- }
-
- return names;
+ return %GetOwnPropertyKeys(object, PROPERTY_FILTER_ONLY_ENUMERABLE);
}
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
- }
- var props = TO_OBJECT(properties);
- var names = GetOwnEnumerablePropertyNames(props);
- var descriptors = new InternalArray();
- for (var i = 0; i < names.length; i++) {
- descriptors.push(ToPropertyDescriptor(props[names[i]]));
- }
- for (var i = 0; i < names.length; i++) {
- DefineOwnProperty(obj, names[i], descriptors[i], true);
- }
- return obj;
-}
-
-
-// Harmony proxies.
-function ProxyFix(obj) {
- var handler = %GetHandler(obj);
- var props = CallTrap0(handler, "fix", UNDEFINED);
- if (IS_UNDEFINED(props)) {
- throw MakeTypeError(kProxyHandlerReturned, handler, "undefined", "fix");
- }
-
- if (%IsJSFunctionProxy(obj)) {
- var callTrap = %GetCallTrap(obj);
- var constructTrap = %GetConstructTrap(obj);
- var code = ProxyDelegateCallAndConstruct(callTrap, constructTrap);
- %Fix(obj); // becomes a regular function
- %SetCode(obj, code);
- // TODO(rossberg): What about length and other properties? Not specified.
- // We just put in some half-reasonable defaults for now.
- var prototype = new GlobalObject();
- ObjectDefineProperty(prototype, "constructor",
- {value: obj, writable: true, enumerable: false, configurable: true});
- // TODO(v8:1530): defineProperty does not handle prototype and length.
- %FunctionSetPrototype(obj, prototype);
- obj.length = 0;
- } else {
- %Fix(obj);
- }
- ObjectDefineProperties(obj, props);
-}
-
-
-// ES5 section 15.2.3.8.
-function ObjectSealJS(obj) {
- if (!IS_SPEC_OBJECT(obj)) return obj;
- var isProxy = %_IsJSProxy(obj);
- if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
- if (isProxy) {
- ProxyFix(obj);
+ // The new pure-C++ implementation doesn't support O.o.
+ // TODO(jkummerow): Implement missing features and remove fallback path.
+ if (%IsObserved(obj)) {
+ if (!IS_RECEIVER(obj)) {
+ throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
}
- var names = OwnPropertyKeys(obj);
+ var props = TO_OBJECT(properties);
+ var names = GetOwnEnumerablePropertyNames(props);
+ var descriptors = new InternalArray();
for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (desc.isConfigurable()) {
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- }
- %PreventExtensions(obj);
- } else {
- // TODO(adamk): Is it worth going to this fast path if the
- // object's properties are already in dictionary mode?
- %ObjectSeal(obj);
- }
- return obj;
-}
-
-
-// ES5 section 15.2.3.9.
-function ObjectFreezeJS(obj) {
- if (!IS_SPEC_OBJECT(obj)) return obj;
- var isProxy = %_IsJSProxy(obj);
- // TODO(conradw): Investigate modifying the fast path to accommodate strong
- // objects.
- if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj) ||
- IS_STRONG(obj)) {
- if (isProxy) {
- ProxyFix(obj);
+ descriptors.push(ToPropertyDescriptor(props[names[i]]));
}
- var names = OwnPropertyKeys(obj);
for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (desc.isWritable() || desc.isConfigurable()) {
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- }
- %PreventExtensions(obj);
- } else {
- // TODO(adamk): Is it worth going to this fast path if the
- // object's properties are already in dictionary mode?
- %ObjectFreeze(obj);
- }
- return obj;
-}
-
-
-// ES5 section 15.2.3.10
-function ObjectPreventExtension(obj) {
- if (!IS_SPEC_OBJECT(obj)) return obj;
- if (%_IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- %PreventExtensions(obj);
- return obj;
-}
-
-
-// ES5 section 15.2.3.11
-function ObjectIsSealed(obj) {
- if (!IS_SPEC_OBJECT(obj)) return true;
- if (%_IsJSProxy(obj)) {
- return false;
- }
- if (%IsExtensible(obj)) {
- return false;
- }
- var names = OwnPropertyKeys(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (desc.isConfigurable()) {
- return false;
- }
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.12
-function ObjectIsFrozen(obj) {
- if (!IS_SPEC_OBJECT(obj)) return true;
- if (%_IsJSProxy(obj)) {
- return false;
- }
- if (%IsExtensible(obj)) {
- return false;
- }
- var names = OwnPropertyKeys(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (IsDataDescriptor(desc) && desc.isWritable()) return false;
- if (desc.isConfigurable()) return false;
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.13
-function ObjectIsExtensible(obj) {
- if (!IS_SPEC_OBJECT(obj)) return false;
- if (%_IsJSProxy(obj)) {
- return true;
- }
- return %IsExtensible(obj);
-}
-
-
-// ECMA-262, Edition 6, section 19.1.2.10
-function ObjectIs(obj1, obj2) {
- return $sameValue(obj1, obj2);
-}
-
-
-// ECMA-262, Edition 6, section 19.1.2.1
-function ObjectAssign(target, sources) {
- // TODO(bmeurer): Move this to toplevel.
- "use strict";
- var to = TO_OBJECT(target);
- var argsLen = %_ArgumentsLength();
- if (argsLen < 2) return to;
-
- for (var i = 1; i < argsLen; ++i) {
- var nextSource = %_Arguments(i);
- if (IS_NULL_OR_UNDEFINED(nextSource)) {
- continue;
- }
-
- var from = TO_OBJECT(nextSource);
- var keys = OwnPropertyKeys(from);
- var len = keys.length;
-
- for (var j = 0; j < len; ++j) {
- var key = keys[j];
- if (%IsPropertyEnumerable(from, key)) {
- var propValue = from[key];
- to[key] = propValue;
- }
+ DefineOwnProperty(obj, names[i], descriptors[i], true);
}
+ return obj;
}
- return to;
+ return %ObjectDefineProperties(obj, properties);
}
-// ECMA-262, Edition 6, section B.2.2.1.1
+// ES6 B.2.2.1.1
function ObjectGetProto() {
return %_GetPrototype(TO_OBJECT(this));
}
-// ECMA-262, Edition 6, section B.2.2.1.2
+// ES6 B.2.2.1.2
function ObjectSetProto(proto) {
CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
- if ((IS_SPEC_OBJECT(proto) || IS_NULL(proto)) && IS_SPEC_OBJECT(this)) {
+ if ((IS_RECEIVER(proto) || IS_NULL(proto)) && IS_RECEIVER(this)) {
%SetPrototype(this, proto);
}
}
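
Per B.2.2.1.2, the setter above silently ignores the assignment unless the
receiver is an object and the new prototype is an object or null (a sketch):

  var o = {};
  o.__proto__ = 42;                               // ignored, no throw
  Object.getPrototypeOf(o) === Object.prototype;  // true (unchanged)
  o.__proto__ = null;
  Object.getPrototypeOf(o);                       // null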
+// ES6 19.1.1.1
function ObjectConstructor(x) {
- if (%_IsConstructCall()) {
- if (x == null) return this;
- return TO_OBJECT(x);
- } else {
- if (x == null) return { };
- return TO_OBJECT(x);
+ if (GlobalObject != new.target && !IS_UNDEFINED(new.target)) {
+ return this;
}
+ if (IS_NULL(x) || IS_UNDEFINED(x)) return {};
+ return TO_OBJECT(x);
}
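
The new.target dispatch above distinguishes three call shapes; illustrative
results (Point is a hypothetical subclass):

  Object();                      // {}  (undefined and null map to a fresh object)
  Object(42) instanceof Number;  // true (primitives are boxed via TO_OBJECT)
  class Point extends Object {}
  new Point() instanceof Point;  // true (subclass construction returns `this`)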
@@ -1344,23 +883,16 @@ utils.InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
// Set up non-enumerable functions in the Object object.
utils.InstallFunctions(GlobalObject, DONT_ENUM, [
- "assign", ObjectAssign,
- "keys", ObjectKeys,
- "create", ObjectCreate,
+ // assign is added in bootstrapper.cc.
+ // keys is added in bootstrapper.cc.
"defineProperty", ObjectDefineProperty,
"defineProperties", ObjectDefineProperties,
- "freeze", ObjectFreezeJS,
"getPrototypeOf", ObjectGetPrototypeOf,
"setPrototypeOf", ObjectSetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
// getOwnPropertySymbols is added in symbol.js.
- "is", ObjectIs,
- "isExtensible", ObjectIsExtensible,
- "isFrozen", ObjectIsFrozen,
- "isSealed", ObjectIsSealed,
- "preventExtensions", ObjectPreventExtension,
- "seal", ObjectSealJS
+ "is", SameValue, // ECMA-262, Edition 6, section 19.1.2.10
// deliverChangeRecords, getNotifier, observe and unobserve are added
// in object-observe.js.
]);
@@ -1372,10 +904,10 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
function BooleanConstructor(x) {
// TODO(bmeurer): Move this to toplevel.
"use strict";
- if (%_IsConstructCall()) {
- %_SetValueOf(this, ToBoolean(x));
+ if (!IS_UNDEFINED(new.target)) {
+ %_SetValueOf(this, TO_BOOLEAN(x));
} else {
- return ToBoolean(x);
+ return TO_BOOLEAN(x);
}
}
@@ -1420,19 +952,7 @@ utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
// ----------------------------------------------------------------------------
// Number
-function NumberConstructor(x) {
- // TODO(bmeurer): Move this to toplevel.
- "use strict";
- var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
- if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
- }
-}
-
-
-// ECMA-262 section 15.7.4.2.
+// ES6 Number.prototype.toString([ radix ])
function NumberToStringJS(radix) {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
@@ -1457,13 +977,13 @@ function NumberToStringJS(radix) {
}
-// ECMA-262 section 15.7.4.3
+// ES6 20.1.3.4 Number.prototype.toLocaleString([reserved1 [, reserved2]])
function NumberToLocaleString() {
- return %_CallFunction(this, NumberToStringJS);
+ return %_Call(NumberToStringJS, this);
}
-// ECMA-262 section 15.7.4.4
+// ES6 20.1.3.7 Number.prototype.valueOf()
function NumberValueOf() {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
@@ -1474,7 +994,7 @@ function NumberValueOf() {
}
-// ECMA-262 section 15.7.4.5
+// ES6 20.1.3.3 Number.prototype.toFixed(fractionDigits)
function NumberToFixedJS(fractionDigits) {
var x = this;
if (!IS_NUMBER(this)) {
@@ -1499,7 +1019,7 @@ function NumberToFixedJS(fractionDigits) {
}
-// ECMA-262 section 15.7.4.6
+// ES6 20.1.3.2 Number.prototype.toExponential(fractionDigits)
function NumberToExponentialJS(fractionDigits) {
var x = this;
if (!IS_NUMBER(this)) {
@@ -1525,7 +1045,7 @@ function NumberToExponentialJS(fractionDigits) {
}
-// ECMA-262 section 15.7.4.7
+// ES6 20.1.3.5 Number.prototype.toPrecision(precision)
function NumberToPrecisionJS(precision) {
var x = this;
if (!IS_NUMBER(this)) {
@@ -1582,7 +1102,6 @@ function NumberIsSafeInteger(number) {
// ----------------------------------------------------------------------------
-%SetCode(GlobalNumber, NumberConstructor);
%FunctionSetPrototype(GlobalNumber, new GlobalNumber(0));
%OptimizeObjectForAddingMultipleProperties(GlobalNumber.prototype, 8);
@@ -1596,7 +1115,7 @@ utils.InstallConstants(GlobalNumber, [
// ECMA-262 section 15.7.3.2.
"MIN_VALUE", 5e-324,
// ECMA-262 section 15.7.3.3.
- "NaN", NAN,
+ "NaN", NaN,
// ECMA-262 section 15.7.3.4.
"NEGATIVE_INFINITY", -INFINITY,
// ECMA-262 section 15.7.3.5.
@@ -1633,176 +1152,9 @@ utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
// ----------------------------------------------------------------------------
-// Function
-
-function NativeCodeFunctionSourceString(func) {
- var name = %FunctionGetName(func);
- if (name) {
- // Mimic what KJS does.
- return 'function ' + name + '() { [native code] }';
- }
-
- return 'function () { [native code] }';
-}
-
-function FunctionSourceString(func) {
- while (%IsJSFunctionProxy(func)) {
- func = %GetCallTrap(func);
- }
-
- if (!IS_FUNCTION(func)) {
- throw MakeTypeError(kNotGeneric, 'Function.prototype.toString');
- }
-
- if (%FunctionHidesSource(func)) {
- return NativeCodeFunctionSourceString(func);
- }
-
- var classSource = %ClassGetSourceCode(func);
- if (IS_STRING(classSource)) {
- return classSource;
- }
-
- var source = %FunctionGetSourceCode(func);
- if (!IS_STRING(source)) {
- return NativeCodeFunctionSourceString(func);
- }
-
- if (%FunctionIsArrow(func)) {
- return source;
- }
-
- var name = %FunctionNameShouldPrintAsAnonymous(func)
- ? 'anonymous'
- : %FunctionGetName(func);
-
- var isGenerator = %FunctionIsGenerator(func);
- var head = %FunctionIsConciseMethod(func)
- ? (isGenerator ? '*' : '')
- : (isGenerator ? 'function* ' : 'function ');
- return head + name + source;
-}
-
-
-function FunctionToString() {
- return FunctionSourceString(this);
-}
-
-
-// ES5 15.3.4.5
-function FunctionBind(this_arg) { // Length is 1.
- if (!IS_CALLABLE(this)) throw MakeTypeError(kFunctionBind);
-
- var boundFunction = function () {
- // Poison .arguments and .caller, but is otherwise not detectable.
- "use strict";
- // This function must not use any object literals (Object, Array, RegExp),
- // since the literals-array is being used to store the bound data.
- if (%_IsConstructCall()) {
- return %NewObjectFromBound(boundFunction);
- }
- var bindings = %BoundFunctionGetBindings(boundFunction);
-
- var argc = %_ArgumentsLength();
- if (argc == 0) {
- return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
- }
- if (bindings.length === 2) {
- return %Apply(bindings[0], bindings[1], arguments, 0, argc);
- }
- var bound_argc = bindings.length - 2;
- var argv = new InternalArray(bound_argc + argc);
- for (var i = 0; i < bound_argc; i++) {
- argv[i] = bindings[i + 2];
- }
- for (var j = 0; j < argc; j++) {
- argv[i++] = %_Arguments(j);
- }
- return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
- };
-
- var new_length = 0;
- var old_length = this.length;
- // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
- if ((typeof old_length === "number") &&
- ((old_length >>> 0) === old_length)) {
- var argc = %_ArgumentsLength();
- if (argc > 0) argc--; // Don't count the thisArg as parameter.
- new_length = old_length - argc;
- if (new_length < 0) new_length = 0;
- }
- // This runtime function finds any remaining arguments on the stack,
- // so we don't pass the arguments object.
- var result = %FunctionBindArguments(boundFunction, this,
- this_arg, new_length);
-
- var name = this.name;
- var bound_name = IS_STRING(name) ? name : "";
- %DefineDataPropertyUnchecked(result, "name", "bound " + bound_name,
- DONT_ENUM | READ_ONLY);
-
- // We already have caller and arguments properties on functions,
- // which are non-configurable. It therefore makes no sence to
- // try to redefine these as defined by the spec. The spec says
- // that bind should make these throw a TypeError if get or set
- // is called and make them non-enumerable and non-configurable.
- // To be consistent with our normal functions we leave this as it is.
- // TODO(lrn): Do set these to be thrower.
- return result;
-}
-
-
-function NewFunctionString(args, function_token) {
- var n = args.length;
- var p = '';
- if (n > 1) {
- p = TO_STRING(args[0]);
- for (var i = 1; i < n - 1; i++) {
- p += ',' + TO_STRING(args[i]);
- }
- // If the formal parameters string include ) - an illegal
- // character - it may make the combined function expression
- // compile. We avoid this problem by checking for this early on.
- if (%_CallFunction(p, ')', StringIndexOf) != -1) {
- throw MakeSyntaxError(kParenthesisInArgString);
- }
- // If the formal parameters include an unbalanced block comment, the
- // function must be rejected. Since JavaScript does not allow nested
- // comments we can include a trailing block comment to catch this.
- p += '\n/' + '**/';
- }
- var body = (n > 0) ? TO_STRING(args[n - 1]) : '';
- return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
-}
-
-
-function FunctionConstructor(arg1) { // length == 1
- var source = NewFunctionString(arguments, 'function');
- var global_proxy = %GlobalProxy(FunctionConstructor);
- // Compile the string in the constructor and not a helper so that errors
- // appear to come from here.
- var f = %_CallFunction(global_proxy, %CompileString(source, true));
- %FunctionMarkNameShouldPrintAsAnonymous(f);
- return f;
-}
-
-
-// ----------------------------------------------------------------------------
-
-%SetCode(GlobalFunction, FunctionConstructor);
-%AddNamedProperty(GlobalFunction.prototype, "constructor", GlobalFunction,
- DONT_ENUM);
-
-utils.InstallFunctions(GlobalFunction.prototype, DONT_ENUM, [
- "bind", FunctionBind,
- "toString", FunctionToString
-]);
-
-// ----------------------------------------------------------------------------
// Iterator related spec functions.
-// ES6 rev 33, 2015-02-12
-// 7.4.1 GetIterator ( obj, method )
+// ES6 7.4.1 GetIterator(obj, method)
function GetIterator(obj, method) {
if (IS_UNDEFINED(method)) {
method = obj[iteratorSymbol];
@@ -1810,8 +1162,8 @@ function GetIterator(obj, method) {
if (!IS_CALLABLE(method)) {
throw MakeTypeError(kNotIterable, obj);
}
- var iterator = %_CallFunction(obj, method);
- if (!IS_SPEC_OBJECT(iterator)) {
+ var iterator = %_Call(method, obj);
+ if (!IS_RECEIVER(iterator)) {
throw MakeTypeError(kNotAnIterator, iterator);
}
return iterator;
@@ -1821,32 +1173,18 @@ function GetIterator(obj, method) {
// Exports
utils.Export(function(to) {
- to.Delete = Delete;
- to.FunctionSourceString = FunctionSourceString;
to.GetIterator = GetIterator;
to.GetMethod = GetMethod;
to.IsFinite = GlobalIsFinite;
to.IsNaN = GlobalIsNaN;
- to.NewFunctionString = NewFunctionString;
to.NumberIsNaN = NumberIsNaN;
to.ObjectDefineProperties = ObjectDefineProperties;
to.ObjectDefineProperty = ObjectDefineProperty;
- to.ObjectFreeze = ObjectFreezeJS;
- to.ObjectGetOwnPropertyKeys = ObjectGetOwnPropertyKeys;
to.ObjectHasOwnProperty = ObjectHasOwnProperty;
- to.ObjectIsFrozen = ObjectIsFrozen;
- to.ObjectIsSealed = ObjectIsSealed;
- to.ObjectToString = ObjectToString;
- to.ToNameArray = ToNameArray;
});
%InstallToContext([
- "global_eval_fun", GlobalEval,
"object_value_of", ObjectValueOf,
- "object_to_string", ObjectToString,
- "object_define_own_property", DefineOwnPropertyFromAPI,
- "object_get_own_property_descriptor", ObjectGetOwnPropertyDescriptor,
- "to_complete_property_descriptor", ToCompletePropertyDescriptor,
]);
})
diff --git a/chromium/v8/src/weak-collection.js b/chromium/v8/src/js/weak-collection.js
index 1c60a2f47a6..308b9edef7c 100644
--- a/chromium/v8/src/weak-collection.js
+++ b/chromium/v8/src/js/weak-collection.js
@@ -8,16 +8,28 @@
%CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
+
+var GetExistingHash;
+var GetHash;
var GlobalObject = global.Object;
var GlobalWeakMap = global.WeakMap;
var GlobalWeakSet = global.WeakSet;
+var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+utils.Import(function(from) {
+ GetExistingHash = from.GetExistingHash;
+ GetHash = from.GetHash;
+ MakeTypeError = from.MakeTypeError;
+});
+
// -------------------------------------------------------------------
// Harmony WeakMap
function WeakMapConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "WeakMap");
}
@@ -26,10 +38,10 @@ function WeakMapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'set', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'set', this);
}
for (var nextItem of iterable) {
- if (!IS_SPEC_OBJECT(nextItem)) {
+ if (!IS_RECEIVER(nextItem)) {
throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
}
%_Call(adder, this, nextItem[0], nextItem[1]);
@@ -43,8 +55,8 @@ function WeakMapGet(key) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.get', this);
}
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
- var hash = $getExistingHash(key);
+ if (!IS_RECEIVER(key)) return UNDEFINED;
+ var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return UNDEFINED;
return %WeakCollectionGet(this, key, hash);
}
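
The IS_RECEIVER guards in these methods reject primitive keys cheaply:
lookups miss without computing a hash, and only the mutating methods throw
(illustrative):

  var wm = new WeakMap();
  wm.get("key");   // undefined (a primitive can never be a weak map key)
  wm.has(1);       // false
  // wm.set(1, 2)  -> TypeError (kInvalidWeakMapKey)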
@@ -55,8 +67,8 @@ function WeakMapSet(key, value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.set', this);
}
- if (!IS_SPEC_OBJECT(key)) throw MakeTypeError(kInvalidWeakMapKey);
- return %WeakCollectionSet(this, key, value, $getHash(key));
+ if (!IS_RECEIVER(key)) throw MakeTypeError(kInvalidWeakMapKey);
+ return %WeakCollectionSet(this, key, value, GetHash(key));
}
@@ -65,8 +77,8 @@ function WeakMapHas(key) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.has', this);
}
- if (!IS_SPEC_OBJECT(key)) return false;
- var hash = $getExistingHash(key);
+ if (!IS_RECEIVER(key)) return false;
+ var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionHas(this, key, hash);
}
@@ -77,8 +89,8 @@ function WeakMapDelete(key) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.delete', this);
}
- if (!IS_SPEC_OBJECT(key)) return false;
- var hash = $getExistingHash(key);
+ if (!IS_RECEIVER(key)) return false;
+ var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionDelete(this, key, hash);
}
@@ -106,7 +118,7 @@ utils.InstallFunctions(GlobalWeakMap.prototype, DONT_ENUM, [
// Harmony WeakSet
function WeakSetConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "WeakSet");
}
@@ -115,7 +127,7 @@ function WeakSetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'add', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'add', this);
}
for (var value of iterable) {
%_Call(adder, this, value);
@@ -129,8 +141,8 @@ function WeakSetAdd(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.add', this);
}
- if (!IS_SPEC_OBJECT(value)) throw MakeTypeError(kInvalidWeakSetValue);
- return %WeakCollectionSet(this, value, true, $getHash(value));
+ if (!IS_RECEIVER(value)) throw MakeTypeError(kInvalidWeakSetValue);
+ return %WeakCollectionSet(this, value, true, GetHash(value));
}
@@ -139,8 +151,8 @@ function WeakSetHas(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.has', this);
}
- if (!IS_SPEC_OBJECT(value)) return false;
- var hash = $getExistingHash(value);
+ if (!IS_RECEIVER(value)) return false;
+ var hash = GetExistingHash(value);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionHas(this, value, hash);
}
@@ -151,8 +163,8 @@ function WeakSetDelete(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.delete', this);
}
- if (!IS_SPEC_OBJECT(value)) return false;
- var hash = $getExistingHash(value);
+ if (!IS_RECEIVER(value)) return false;
+ var hash = GetExistingHash(value);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionDelete(this, value, hash);
}
diff --git a/chromium/v8/src/json-stringifier.h b/chromium/v8/src/json-stringifier.h
index fa4946dad7d..5c0459eb1b0 100644
--- a/chromium/v8/src/json-stringifier.h
+++ b/chromium/v8/src/json-stringifier.h
@@ -223,6 +223,7 @@ MaybeHandle<Object> BasicJsonStringifier::StringifyString(
SerializeStringUnchecked_(object->GetFlatContent().ToOneByteVector(),
&no_extend);
no_extend.Append('\"');
+ return no_extend.Finalize();
} else {
result = isolate->factory()
->NewRawTwoByteString(worst_case_length)
@@ -233,8 +234,8 @@ MaybeHandle<Object> BasicJsonStringifier::StringifyString(
SerializeStringUnchecked_(object->GetFlatContent().ToUC16Vector(),
&no_extend);
no_extend.Append('\"');
+ return no_extend.Finalize();
}
- return result;
}
@@ -336,14 +337,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
case JS_VALUE_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSValue(Handle<JSValue>::cast(object));
- case JS_FUNCTION_TYPE:
- return UNCHANGED;
default:
if (object->IsString()) {
if (deferred_string_key) SerializeDeferredKey(comma, key);
SerializeString(Handle<String>::cast(object));
return SUCCESS;
} else if (object->IsJSObject()) {
+ if (object->IsCallable()) return UNCHANGED;
// Go to slow path for global proxy and objects requiring access checks.
if (object->IsAccessCheckNeeded() || object->IsJSGlobalProxy()) break;
if (deferred_string_key) SerializeDeferredKey(comma, key);
@@ -396,9 +396,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
DCHECK(value->IsBoolean());
builder_.AppendCString(value->IsTrue() ? "true" : "false");
} else {
- // Fail gracefully for special value wrappers.
- isolate_->ThrowIllegalOperation();
- return EXCEPTION;
+ // ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
+ CHECK(!object->IsAccessCheckNeeded());
+ CHECK(!object->IsJSGlobalProxy());
+ return SerializeJSObject(object);
}
return SUCCESS;
}
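
The rewritten branch changes what scripts observe for exotic value wrappers:
instead of throwing an illegal-operation error, JSON.stringify now serializes
them like ordinary objects. A sketch:

  JSON.stringify(Object(Symbol("s")));  // "{}" (no enumerable own properties;
                                        //  previously this threw)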
@@ -524,7 +525,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
HandleScope handle_scope(isolate_);
Result stack_push = StackPush(object);
if (stack_push != SUCCESS) return stack_push;
- DCHECK(!object->IsJSGlobalProxy() && !object->IsGlobalObject());
+ DCHECK(!object->IsJSGlobalProxy() && !object->IsJSGlobalObject());
builder_.AppendCharacter('{');
bool comma = false;
@@ -566,7 +567,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
Handle<FixedArray> contents;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, contents,
- JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY),
+ JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS),
EXCEPTION);
for (int i = 0; i < contents->length(); i++) {
@@ -681,6 +682,7 @@ void BasicJsonStringifier::SerializeString(Handle<String> object) {
}
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_JSON_STRINGIFIER_H_
diff --git a/chromium/v8/src/key-accumulator.cc b/chromium/v8/src/key-accumulator.cc
new file mode 100644
index 00000000000..e7a9c3ccebc
--- /dev/null
+++ b/chromium/v8/src/key-accumulator.cc
@@ -0,0 +1,315 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/key-accumulator.h"
+
+#include "src/elements.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
+#include "src/objects-inl.h"
+#include "src/property-descriptor.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+KeyAccumulator::~KeyAccumulator() {
+ for (size_t i = 0; i < elements_.size(); i++) {
+ delete elements_[i];
+ }
+}
+
+
+Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
+ if (length_ == 0) {
+ return isolate_->factory()->empty_fixed_array();
+ }
+ // Make sure we have all the lengths collected.
+ NextPrototype();
+
+ // Assemble the result array by first adding the element keys and then the
+ // property keys. We use the total number of String + Symbol keys per level in
+ // |level_lengths_| and the available element keys in the corresponding bucket
+ // in |elements_| to deduce the number of keys to take from the
+ // |string_properties_| and |symbol_properties_| set.
+ Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
+ int insertion_index = 0;
+ int string_properties_index = 0;
+ int symbol_properties_index = 0;
+ // String and Symbol lengths always come in pairs:
+ size_t max_level = level_lengths_.size() / 2;
+ for (size_t level = 0; level < max_level; level++) {
+ int num_string_properties = level_lengths_[level * 2];
+ int num_symbol_properties = level_lengths_[level * 2 + 1];
+ if (num_string_properties < 0) {
+ // If the |num_string_properties| is negative, the current level contains
+ // properties from a proxy, hence we skip the integer keys in |elements_|
+ // since proxies define the complete ordering.
+ num_string_properties = -num_string_properties;
+ } else if (level < elements_.size()) {
+ // Add the element indices for this prototype level.
+ std::vector<uint32_t>* elements = elements_[level];
+ int num_elements = static_cast<int>(elements->size());
+ for (int i = 0; i < num_elements; i++) {
+ Handle<Object> key;
+ if (convert == KEEP_NUMBERS) {
+ key = isolate_->factory()->NewNumberFromUint(elements->at(i));
+ } else {
+ key = isolate_->factory()->Uint32ToString(elements->at(i));
+ }
+ result->set(insertion_index, *key);
+ insertion_index++;
+ }
+ }
+ // Add the string property keys for this prototype level.
+ for (int i = 0; i < num_string_properties; i++) {
+ Object* key = string_properties_->KeyAt(string_properties_index);
+ result->set(insertion_index, key);
+ insertion_index++;
+ string_properties_index++;
+ }
+ // Add the symbol property keys for this prototype level.
+ for (int i = 0; i < num_symbol_properties; i++) {
+ Object* key = symbol_properties_->KeyAt(symbol_properties_index);
+ result->set(insertion_index, key);
+ insertion_index++;
+ symbol_properties_index++;
+ }
+ }
+
+ DCHECK_EQ(insertion_index, length_);
+ return result;
+}
+
+
+namespace {
+
+bool AccumulatorHasKey(std::vector<uint32_t>* sub_elements, uint32_t key) {
+ return std::binary_search(sub_elements->begin(), sub_elements->end(), key);
+}
+
+} // namespace
+
+bool KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
+ return AddKey(handle(key, isolate_), convert);
+}
+
+
+bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
+ if (key->IsSymbol()) {
+ if (filter_ & SKIP_SYMBOLS) return false;
+ if (Handle<Symbol>::cast(key)->is_private()) return false;
+ return AddSymbolKey(key);
+ }
+ if (filter_ & SKIP_STRINGS) return false;
+ // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
+ DCHECK_LE(0, level_string_length_);
+  // In some cases (e.g. proxies) we might get String-converted integer keys
+  // which should be added to the elements list instead of the properties. For
+  // proxies we have to convert as well but also respect the original order.
+  // Therefore we add a converted key to both sides.
+ if (convert == CONVERT_TO_ARRAY_INDEX || convert == PROXY_MAGIC) {
+ uint32_t index = 0;
+ int prev_length = length_;
+ int prev_proto = level_string_length_;
+ if ((key->IsString() && Handle<String>::cast(key)->AsArrayIndex(&index)) ||
+ key->ToArrayIndex(&index)) {
+ bool key_was_added = AddIntegerKey(index);
+ if (convert == CONVERT_TO_ARRAY_INDEX) return key_was_added;
+ if (convert == PROXY_MAGIC) {
+ // If we had an array index (number) and it wasn't added, the key
+ // already existed before, hence we cannot add it to the properties
+ // keys as it would lead to duplicate entries.
+ if (!key_was_added) {
+ return false;
+ }
+ length_ = prev_length;
+ level_string_length_ = prev_proto;
+ }
+ }
+ }
+ return AddStringKey(key, convert);
+}
+
+
+bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
+
+
+bool KeyAccumulator::AddIntegerKey(uint32_t key) {
+ // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
+  // We mark proxy levels with a negative length.
+ DCHECK_LE(0, level_string_length_);
+ // Binary search over all but the last level. The last one might not be
+ // sorted yet.
+ for (size_t i = 1; i < elements_.size(); i++) {
+ if (AccumulatorHasKey(elements_[i - 1], key)) return false;
+ }
+ elements_.back()->push_back(key);
+ length_++;
+ return true;
+}
+
+
+bool KeyAccumulator::AddStringKey(Handle<Object> key,
+ AddKeyConversion convert) {
+ if (string_properties_.is_null()) {
+ string_properties_ = OrderedHashSet::Allocate(isolate_, 16);
+ }
+  // TODO(cbruni): remove this conversion once we throw the correct TypeError
+  // for non-string/symbol elements returned by proxies.
+ if (convert == PROXY_MAGIC && key->IsNumber()) {
+ key = isolate_->factory()->NumberToString(key);
+ }
+ int prev_size = string_properties_->NumberOfElements();
+ string_properties_ = OrderedHashSet::Add(string_properties_, key);
+ if (prev_size < string_properties_->NumberOfElements()) {
+ length_++;
+ level_string_length_++;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
+ if (symbol_properties_.is_null()) {
+ symbol_properties_ = OrderedHashSet::Allocate(isolate_, 16);
+ }
+ int prev_size = symbol_properties_->NumberOfElements();
+ symbol_properties_ = OrderedHashSet::Add(symbol_properties_, key);
+ if (prev_size < symbol_properties_->NumberOfElements()) {
+ length_++;
+ level_symbol_length_++;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+void KeyAccumulator::AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert) {
+ int add_length = array->length();
+ if (add_length == 0) return;
+ for (int i = 0; i < add_length; i++) {
+ Handle<Object> current(array->get(i), isolate_);
+ AddKey(current, convert);
+ }
+}
+
+
+void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
+ AddKeyConversion convert) {
+ DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
+ ElementsAccessor* accessor = array_like->GetElementsAccessor();
+ accessor->AddElementsToKeyAccumulator(array_like, this, convert);
+}
+
+
+void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
+ // Proxies define a complete list of keys with no distinction of
+ // elements and properties, which breaks the normal assumption for the
+ // KeyAccumulator.
+ AddKeys(array_like, PROXY_MAGIC);
+ // Invert the current length to indicate a present proxy, so we can ignore
+ // element keys for this level. Otherwise we would not fully respect the order
+ // given by the proxy.
+ level_string_length_ = -level_string_length_;
+}
+
+
+MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
+ Handle<FixedArray> keys,
+ PropertyFilter filter) {
+ if (filter == ALL_PROPERTIES) {
+ // Nothing to do.
+ return keys;
+ }
+ int store_position = 0;
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key(Name::cast(keys->get(i)), isolate);
+ if (key->FilterKey(filter)) continue; // Skip this key.
+ if (filter & ONLY_ENUMERABLE) {
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSProxy::GetOwnPropertyDescriptor(isolate, owner, key, &desc);
+ MAYBE_RETURN(found, MaybeHandle<FixedArray>());
+ if (!found.FromJust() || !desc.enumerable()) continue; // Skip this key.
+ }
+ // Keep this key.
+ if (store_position != i) {
+ keys->set(store_position, *key);
+ }
+ store_position++;
+ }
+ if (store_position == 0) return isolate->factory()->empty_fixed_array();
+ keys->Shrink(store_position);
+ return keys;
+}
+
+
+// Returns "nothing" in case of exception, "true" on success.
+Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
+ Handle<FixedArray> keys) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
+ Nothing<bool>());
+ // Proxies define a complete list of keys with no distinction of
+ // elements and properties, which breaks the normal assumption for the
+ // KeyAccumulator.
+ AddKeys(keys, PROXY_MAGIC);
+ // Invert the current length to indicate a present proxy, so we can ignore
+ // element keys for this level. Otherwise we would not fully respect the order
+ // given by the proxy.
+ level_string_length_ = -level_string_length_;
+ return Just(true);
+}
+
+
+void KeyAccumulator::AddElementKeysFromInterceptor(
+ Handle<JSObject> array_like) {
+ AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
+  // The interceptor might introduce duplicates for the current level, since
+  // these keys get added after the object's normal element keys.
+ SortCurrentElementsListRemoveDuplicates();
+}
+
+
+void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
+  // Sort and remove duplicates from the current elements level and adjust
+  // the lengths accordingly.
+ auto last_level = elements_.back();
+ size_t nof_removed_keys = last_level->size();
+ std::sort(last_level->begin(), last_level->end());
+ last_level->erase(std::unique(last_level->begin(), last_level->end()),
+ last_level->end());
+ // Adjust total length by the number of removed duplicates.
+ nof_removed_keys -= last_level->size();
+ length_ -= static_cast<int>(nof_removed_keys);
+}
+
+
+void KeyAccumulator::SortCurrentElementsList() {
+ if (elements_.empty()) return;
+ auto element_keys = elements_.back();
+ std::sort(element_keys->begin(), element_keys->end());
+}
+
+
+void KeyAccumulator::NextPrototype() {
+  // Store the current level lengths, except on the first call, when no level
+  // has been processed yet.
+ if (!elements_.empty()) {
+ level_lengths_.push_back(level_string_length_);
+ level_lengths_.push_back(level_symbol_length_);
+ }
+ elements_.push_back(new std::vector<uint32_t>());
+ level_string_length_ = 0;
+ level_symbol_length_ = 0;
+}
+
+
+} // namespace internal
+} // namespace v8
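
The |level_lengths_| bookkeeping above is compact but easy to misread: for each
prototype level, GetKeys consumes a (String count, Symbol count) pair, and a
negated String count flags a proxy level whose element bucket must be skipped.
A minimal standalone sketch of that decoding, using only standard C++ and
hypothetical sample data (independent of V8's internal types):

    #include <cstdlib>
    #include <iostream>
    #include <vector>

    int main() {
      // Two levels: a normal object with 2 string keys and 1 symbol key,
      // then a proxy level with 3 string keys (stored negated) and none.
      std::vector<int> level_lengths = {2, 1, -3, 0};
      size_t max_level = level_lengths.size() / 2;  // Counts come in pairs.
      for (size_t level = 0; level < max_level; level++) {
        int num_strings = level_lengths[level * 2];
        int num_symbols = level_lengths[level * 2 + 1];
        bool from_proxy = num_strings < 0;
        if (from_proxy) num_strings = -num_strings;
        std::cout << "level " << level << ": " << num_strings
                  << " string keys, " << num_symbols << " symbol keys"
                  << (from_proxy ? " (proxy: element keys skipped)" : "")
                  << "\n";
      }
      return EXIT_SUCCESS;
    }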
diff --git a/chromium/v8/src/key-accumulator.h b/chromium/v8/src/key-accumulator.h
new file mode 100644
index 00000000000..8a4d886f515
--- /dev/null
+++ b/chromium/v8/src/key-accumulator.h
@@ -0,0 +1,93 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_KEY_ACCUMULATOR_H_
+#define V8_KEY_ACCUMULATOR_H_
+
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
+
+// This is a helper class for JSReceiver::GetKeys which collects and sorts
+// keys. GetKeys needs to sort keys per prototype level, first showing the
+// integer indices from elements, then the strings from the properties.
+// However, this does not apply to proxies, which are in full control of how
+// the keys are sorted.
+//
+// For performance reasons the KeyAccumulator internally separates integer keys
+// in |elements_| into sorted lists per prototype level. String keys are
+// collected in |string_properties_|, a single OrderedHashSet (similarly for
+// Symbols in |symbol_properties_|). To separate the keys per level later when
+// assembling the final list, |level_lengths_| keeps track of the number of
+// String and Symbol keys per level.
+//
+// Only unique keys are kept by the KeyAccumulator; strings are stored in a
+// HashSet for inexpensive lookups. Integer keys are kept in sorted lists,
+// which are more compact and allow for reasonably fast membership checks.
+class KeyAccumulator final BASE_EMBEDDED {
+ public:
+ KeyAccumulator(Isolate* isolate, PropertyFilter filter)
+ : isolate_(isolate), filter_(filter) {}
+ ~KeyAccumulator();
+
+ bool AddKey(uint32_t key);
+ bool AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
+ bool AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeys(Handle<FixedArray> array,
+ AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeys(Handle<JSObject> array,
+ AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKeysFromProxy(Handle<JSObject> array);
+ Maybe<bool> AddKeysFromProxy(Handle<JSProxy> proxy, Handle<FixedArray> keys);
+ void AddElementKeysFromInterceptor(Handle<JSObject> array);
+  // Jump to the next level, pushing the current |level_string_length_| and
+  // |level_symbol_length_| to |level_lengths_| and adding a new list to
+  // |elements_|.
+ void NextPrototype();
+  // Sort the integer indices in the last list in |elements_|.
+ void SortCurrentElementsList();
+ Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+ int length() { return length_; }
+ Isolate* isolate() { return isolate_; }
+
+ private:
+ bool AddIntegerKey(uint32_t key);
+ bool AddStringKey(Handle<Object> key, AddKeyConversion convert);
+  bool AddSymbolKey(Handle<Object> key);
+ void SortCurrentElementsListRemoveDuplicates();
+
+ Isolate* isolate_;
+ PropertyFilter filter_;
+ // |elements_| contains the sorted element keys (indices) per level.
+ std::vector<std::vector<uint32_t>*> elements_;
+  // |level_lengths_| contains the number of String and Symbol property keys
+  // per level, stored as a pair of counts. A negative String count marks a
+  // level whose keys come from a proxy.
+ std::vector<int> level_lengths_;
+ // |string_properties_| contains the unique String property keys for all
+ // levels in insertion order per level.
+ Handle<OrderedHashSet> string_properties_;
+ // |symbol_properties_| contains the unique Symbol property keys for all
+ // levels in insertion order per level.
+ Handle<OrderedHashSet> symbol_properties_;
+  // |length_| keeps track of the total number of element and property keys.
+  int length_ = 0;
+  // |level_string_length_| keeps track of the number of String keys in the
+  // current level.
+  int level_string_length_ = 0;
+  // |level_symbol_length_| keeps track of the number of Symbol keys in the
+  // current level.
+ int level_symbol_length_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
+};
+
+
+} // namespace internal
+} // namespace v8
+
+
+#endif // V8_KEY_ACCUMULATOR_H_
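
AddIntegerKey relies on an invariant worth spelling out: every closed level in
|elements_| is sorted and deduplicated, so the duplicate check for a new key is
one binary search per earlier level, and only the open (last) level is cleaned
up when it closes. A standalone sketch of that scheme with hypothetical names,
not V8's actual types:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Sorted per-level buckets with a binary-search duplicate check, mirroring
    // KeyAccumulator::AddIntegerKey: all closed levels are sorted; the current
    // (last) level is sorted and deduplicated only when the next level opens.
    class IntegerKeyLevels {
     public:
      void NextLevel() {
        if (!levels_.empty()) CloseCurrentLevel();
        levels_.emplace_back();
      }
      bool AddKey(uint32_t key) {
        for (size_t i = 0; i + 1 < levels_.size(); i++) {
          if (std::binary_search(levels_[i].begin(), levels_[i].end(), key))
            return false;  // Shadowed by an earlier prototype level.
        }
        levels_.back().push_back(key);
        return true;
      }

     private:
      void CloseCurrentLevel() {
        std::vector<uint32_t>& last = levels_.back();
        std::sort(last.begin(), last.end());
        last.erase(std::unique(last.begin(), last.end()), last.end());
      }
      std::vector<std::vector<uint32_t>> levels_;
    };

    int main() {
      IntegerKeyLevels levels;
      levels.NextLevel();  // Receiver level.
      levels.AddKey(3);
      levels.AddKey(1);
      levels.NextLevel();  // Prototype level; closes and sorts {1, 3}.
      std::cout << levels.AddKey(3) << "\n";  // 0: present on level 0.
      std::cout << levels.AddKey(7) << "\n";  // 1: new key.
    }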
diff --git a/chromium/v8/src/layout-descriptor-inl.h b/chromium/v8/src/layout-descriptor-inl.h
index 3771064c8fa..3f150658e7f 100644
--- a/chromium/v8/src/layout-descriptor-inl.h
+++ b/chromium/v8/src/layout-descriptor-inl.h
@@ -62,8 +62,8 @@ LayoutDescriptor* LayoutDescriptor::SetRawData(int field_index) {
LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
- int layout_word_index;
- int layout_bit_index;
+ int layout_word_index = 0;
+ int layout_bit_index = 0;
if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
CHECK(false);
@@ -250,7 +250,7 @@ bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
return layout_descriptor_->IsTagged(field_index);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LAYOUT_DESCRIPTOR_INL_H_
diff --git a/chromium/v8/src/layout-descriptor.h b/chromium/v8/src/layout-descriptor.h
index 11d8d35f268..5a80e73f1f0 100644
--- a/chromium/v8/src/layout-descriptor.h
+++ b/chromium/v8/src/layout-descriptor.h
@@ -153,7 +153,7 @@ class LayoutDescriptorHelper {
int header_size_;
LayoutDescriptor* layout_descriptor_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LAYOUT_DESCRIPTOR_H_
diff --git a/chromium/v8/src/libplatform/default-platform.cc b/chromium/v8/src/libplatform/default-platform.cc
index ddceab54572..e8c15572ad1 100644
--- a/chromium/v8/src/libplatform/default-platform.cc
+++ b/chromium/v8/src/libplatform/default-platform.cc
@@ -168,4 +168,30 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
return base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
-} } // namespace v8::platform
+
+
+uint64_t DefaultPlatform::AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ uint64_t id, uint64_t bind_id, int num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values, unsigned int flags) {
+ return 0;
+}
+
+
+void DefaultPlatform::UpdateTraceEventDuration(
+ const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {}
+
+
+const uint8_t* DefaultPlatform::GetCategoryGroupEnabled(const char* name) {
+ static uint8_t no = 0;
+ return &no;
+}
+
+
+const char* DefaultPlatform::GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) {
+ static const char dummy[] = "dummy";
+ return dummy;
+}
+} // namespace platform
+} // namespace v8
diff --git a/chromium/v8/src/libplatform/default-platform.h b/chromium/v8/src/libplatform/default-platform.h
index 94ef9c5055d..8bdda95be67 100644
--- a/chromium/v8/src/libplatform/default-platform.h
+++ b/chromium/v8/src/libplatform/default-platform.h
@@ -34,16 +34,25 @@ class DefaultPlatform : public Platform {
bool PumpMessageLoop(v8::Isolate* isolate);
// v8::Platform implementation.
- virtual void CallOnBackgroundThread(
- Task* task, ExpectedRuntime expected_runtime) override;
- virtual void CallOnForegroundThread(v8::Isolate* isolate,
- Task* task) override;
- virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
- double delay_in_seconds) override;
- virtual void CallIdleOnForegroundThread(Isolate* isolate,
- IdleTask* task) override;
- virtual bool IdleTasksEnabled(Isolate* isolate) override;
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override;
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;
+ void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) override;
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override;
+ bool IdleTasksEnabled(Isolate* isolate) override;
double MonotonicallyIncreasingTime() override;
+ const uint8_t* GetCategoryGroupEnabled(const char* name) override;
+ const char* GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) override;
+ uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
+ const char* name, uint64_t id, uint64_t bind_id,
+ int32_t num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values,
+ unsigned int flags) override;
+ void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) override;
+
private:
static const int kMaxThreadPoolSize;
@@ -68,7 +77,8 @@ class DefaultPlatform : public Platform {
};
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
#endif // V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
diff --git a/chromium/v8/src/libplatform/task-queue.cc b/chromium/v8/src/libplatform/task-queue.cc
index 7a9071f3620..0a630ed3c39 100644
--- a/chromium/v8/src/libplatform/task-queue.cc
+++ b/chromium/v8/src/libplatform/task-queue.cc
@@ -53,4 +53,5 @@ void TaskQueue::Terminate() {
process_queue_semaphore_.Signal();
}
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
diff --git a/chromium/v8/src/libplatform/task-queue.h b/chromium/v8/src/libplatform/task-queue.h
index eb9d6987e95..efe9e07e066 100644
--- a/chromium/v8/src/libplatform/task-queue.h
+++ b/chromium/v8/src/libplatform/task-queue.h
@@ -41,7 +41,8 @@ class TaskQueue {
DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
#endif // V8_LIBPLATFORM_TASK_QUEUE_H_
diff --git a/chromium/v8/src/libplatform/worker-thread.cc b/chromium/v8/src/libplatform/worker-thread.cc
index 99637151e2f..a8e714a896b 100644
--- a/chromium/v8/src/libplatform/worker-thread.cc
+++ b/chromium/v8/src/libplatform/worker-thread.cc
@@ -28,4 +28,5 @@ void WorkerThread::Run() {
}
}
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
diff --git a/chromium/v8/src/libplatform/worker-thread.h b/chromium/v8/src/libplatform/worker-thread.h
index 730e039ca1e..6a55a6bc89f 100644
--- a/chromium/v8/src/libplatform/worker-thread.h
+++ b/chromium/v8/src/libplatform/worker-thread.h
@@ -32,7 +32,8 @@ class WorkerThread : public base::Thread {
DISALLOW_COPY_AND_ASSIGN(WorkerThread);
};
-} } // namespace v8::platform
+} // namespace platform
+} // namespace v8
#endif // V8_LIBPLATFORM_WORKER_THREAD_H_
diff --git a/chromium/v8/src/list-inl.h b/chromium/v8/src/list-inl.h
index 5a247d5fd76..9a2d11f96aa 100644
--- a/chromium/v8/src/list-inl.h
+++ b/chromium/v8/src/list-inl.h
@@ -286,6 +286,7 @@ int SortedListBSearch(const List<T>& list, T elem) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LIST_INL_H_
diff --git a/chromium/v8/src/list.h b/chromium/v8/src/list.h
index d935f764b86..8b8a5dd1edd 100644
--- a/chromium/v8/src/list.h
+++ b/chromium/v8/src/list.h
@@ -229,7 +229,8 @@ template <typename T>
int SortedListBSearch(const List<T>& list, T elem);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LIST_H_
diff --git a/chromium/v8/src/locked-queue-inl.h b/chromium/v8/src/locked-queue-inl.h
new file mode 100644
index 00000000000..8b3e9d02bb7
--- /dev/null
+++ b/chromium/v8/src/locked-queue-inl.h
@@ -0,0 +1,91 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOCKED_QUEUE_INL_
+#define V8_LOCKED_QUEUE_INL_
+
+#include "src/atomic-utils.h"
+#include "src/locked-queue.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename Record>
+struct LockedQueue<Record>::Node : Malloced {
+ Node() : next(nullptr) {}
+ Record value;
+ AtomicValue<Node*> next;
+};
+
+
+template <typename Record>
+inline LockedQueue<Record>::LockedQueue() {
+ head_ = new Node();
+ CHECK(head_ != nullptr);
+ tail_ = head_;
+}
+
+
+template <typename Record>
+inline LockedQueue<Record>::~LockedQueue() {
+ // Destroy all remaining nodes. Note that we do not destroy the actual values.
+ Node* old_node = nullptr;
+ Node* cur_node = head_;
+ while (cur_node != nullptr) {
+ old_node = cur_node;
+ cur_node = cur_node->next.Value();
+ delete old_node;
+ }
+}
+
+
+template <typename Record>
+inline void LockedQueue<Record>::Enqueue(const Record& record) {
+ Node* n = new Node();
+ CHECK(n != nullptr);
+ n->value = record;
+ {
+ base::LockGuard<base::Mutex> guard(&tail_mutex_);
+ tail_->next.SetValue(n);
+ tail_ = n;
+ }
+}
+
+
+template <typename Record>
+inline bool LockedQueue<Record>::Dequeue(Record* record) {
+ Node* old_head = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&head_mutex_);
+ old_head = head_;
+ Node* const next_node = head_->next.Value();
+ if (next_node == nullptr) return false;
+ *record = next_node->value;
+ head_ = next_node;
+ }
+ delete old_head;
+ return true;
+}
+
+
+template <typename Record>
+inline bool LockedQueue<Record>::IsEmpty() const {
+ base::LockGuard<base::Mutex> guard(&head_mutex_);
+ return head_->next.Value() == nullptr;
+}
+
+
+template <typename Record>
+inline bool LockedQueue<Record>::Peek(Record* record) const {
+ base::LockGuard<base::Mutex> guard(&head_mutex_);
+ Node* const next_node = head_->next.Value();
+ if (next_node == nullptr) return false;
+ *record = next_node->value;
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOCKED_QUEUE_INL_
diff --git a/chromium/v8/src/locked-queue.h b/chromium/v8/src/locked-queue.h
new file mode 100644
index 00000000000..5bb97c8a125
--- /dev/null
+++ b/chromium/v8/src/locked-queue.h
@@ -0,0 +1,43 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOCKED_QUEUE_
+#define V8_LOCKED_QUEUE_
+
+#include "src/allocation.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+// Simple lock-based unbounded-size queue (multi-producer; multi-consumer)
+// based on "Simple, Fast, and Practical Non-Blocking and Blocking Concurrent
+// Queue Algorithms" by M. M. Michael and M. L. Scott.
+// See:
+// https://www.cs.rochester.edu/research/synchronization/pseudocode/queues.html
+template <typename Record>
+class LockedQueue final BASE_EMBEDDED {
+ public:
+ inline LockedQueue();
+ inline ~LockedQueue();
+ inline void Enqueue(const Record& record);
+ inline bool Dequeue(Record* record);
+ inline bool IsEmpty() const;
+ inline bool Peek(Record* record) const;
+
+ private:
+ struct Node;
+
+ mutable base::Mutex head_mutex_;
+ base::Mutex tail_mutex_;
+ Node* head_;
+ Node* tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockedQueue);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOCKED_QUEUE_
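
The shape of LockedQueue (dummy head node, one mutex per end, atomic |next|) is
the blocking two-lock queue from the Michael & Scott paper. A minimal
standalone sketch of the idea using only the standard library, with std::mutex
and std::atomic standing in for V8's base::Mutex and AtomicValue; this is an
illustration of the algorithm, not V8's implementation:

    #include <atomic>
    #include <mutex>

    // Two-lock queue with a dummy head node. Enqueue only touches tail_ under
    // tail_mutex_; Dequeue only touches head_ under head_mutex_. Node::next is
    // atomic because an empty queue has head_ == tail_, so the dummy node's
    // |next| can be written by an enqueuer while a dequeuer reads it.
    template <typename Record>
    class TwoLockQueue {
     public:
      TwoLockQueue() : head_(new Node()), tail_(head_) {}
      ~TwoLockQueue() {
        Node* cur = head_;
        while (cur != nullptr) {
          Node* old = cur;
          cur = cur->next.load();
          delete old;
        }
      }
      void Enqueue(const Record& record) {
        Node* n = new Node();
        n->value = record;
        std::lock_guard<std::mutex> guard(tail_mutex_);
        tail_->next.store(n);
        tail_ = n;
      }
      bool Dequeue(Record* record) {
        Node* old_head;
        {
          std::lock_guard<std::mutex> guard(head_mutex_);
          Node* next = head_->next.load();
          if (next == nullptr) return false;  // Only the dummy node is left.
          *record = next->value;
          old_head = head_;
          head_ = next;  // The dequeued node becomes the new dummy.
        }
        delete old_head;
        return true;
      }

     private:
      struct Node {
        Record value{};
        std::atomic<Node*> next{nullptr};
      };
      std::mutex head_mutex_;
      std::mutex tail_mutex_;
      Node* head_;
      Node* tail_;
    };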
diff --git a/chromium/v8/src/log-inl.h b/chromium/v8/src/log-inl.h
index 520d05c4d33..d47a24b96af 100644
--- a/chromium/v8/src/log-inl.h
+++ b/chromium/v8/src/log-inl.h
@@ -8,6 +8,7 @@
#include "src/log.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -37,7 +38,21 @@ void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
isolate->event_logger()(name, se);
}
}
+ if (expose_to_api) {
+ if (se == START) {
+ TRACE_EVENT_BEGIN0("v8", name);
+ } else {
+ TRACE_EVENT_END0("v8", name);
+ }
+ } else {
+ if (se == START) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
+ } else {
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
+ }
+ }
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOG_INL_H_
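
In the CallEventLogger change above, events either go to the always-visible
"v8" category or, via TRACE_DISABLED_BY_DEFAULT, to a category that tracing
frontends hide unless explicitly enabled. Under the Chromium tracing
convention that macro simply prepends a fixed prefix to the category name; a
minimal sketch of the convention (the macro body is assumed from that
convention, since it is not shown in this diff):

    #include <cstdio>

    // Chromium tracing convention: a disabled-by-default category is an
    // ordinary category string carrying a well-known prefix that tracing
    // frontends filter on.
    #define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name

    int main() {
      std::printf("%s\n", "v8");                             // Always on.
      std::printf("%s\n", TRACE_DISABLED_BY_DEFAULT("v8"));  // Hidden.
    }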
diff --git a/chromium/v8/src/log-utils.h b/chromium/v8/src/log-utils.h
index 67143078a9b..7621668552d 100644
--- a/chromium/v8/src/log-utils.h
+++ b/chromium/v8/src/log-utils.h
@@ -5,6 +5,8 @@
#ifndef V8_LOG_UTILS_H_
#define V8_LOG_UTILS_H_
+#include <stdio.h>
+
#include <cstdarg>
#include "src/allocation.h"
@@ -136,6 +138,7 @@ class Log {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOG_UTILS_H_
diff --git a/chromium/v8/src/log.cc b/chromium/v8/src/log.cc
index ea69fb4bee3..a10d9621d32 100644
--- a/chromium/v8/src/log.cc
+++ b/chromium/v8/src/log.cc
@@ -1497,7 +1497,11 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
+#if USES_FUNCTION_DESCRIPTORS
+ msg.AppendAddress(*FUNCTION_ENTRYPOINT_ADDRESS(sample->external_callback));
+#else
msg.AppendAddress(sample->external_callback);
+#endif
} else {
msg.Append(",0,");
msg.AppendAddress(sample->tos);
diff --git a/chromium/v8/src/log.h b/chromium/v8/src/log.h
index 33c1b29d961..064115b3aa9 100644
--- a/chromium/v8/src/log.h
+++ b/chromium/v8/src/log.h
@@ -526,7 +526,8 @@ class CodeEventLogger : public CodeEventListener {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOG_H_
diff --git a/chromium/v8/src/lookup-inl.h b/chromium/v8/src/lookup-inl.h
deleted file mode 100644
index 3df2194d3f9..00000000000
--- a/chromium/v8/src/lookup-inl.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LOOKUP_INL_H_
-#define V8_LOOKUP_INL_H_
-
-#include "src/lookup.h"
-
-#include "src/elements.h"
-
-namespace v8 {
-namespace internal {
-
-
-JSReceiver* LookupIterator::NextHolder(Map* map) {
- DisallowHeapAllocation no_gc;
- if (!map->prototype()->IsJSReceiver()) return NULL;
-
- JSReceiver* next = JSReceiver::cast(map->prototype());
- DCHECK(!next->map()->IsGlobalObjectMap() ||
- next->map()->is_hidden_prototype());
-
- if (!check_prototype_chain() &&
- !(check_hidden() && next->map()->is_hidden_prototype()) &&
- // Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
- // when not checking other hidden prototypes.
- !map->IsJSGlobalProxyMap()) {
- return NULL;
- }
-
- return next;
-}
-
-
-LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
- JSReceiver* const holder) {
- STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
- DisallowHeapAllocation no_gc;
- if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
- return LookupNonMaskingInterceptorInHolder(map, holder);
- }
- switch (state_) {
- case NOT_FOUND:
- if (map->IsJSProxyMap()) return JSPROXY;
- if (map->is_access_check_needed() &&
- (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
- return ACCESS_CHECK;
- }
- // Fall through.
- case ACCESS_CHECK:
- if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
- IsIntegerIndexedExotic(holder)) {
- return INTEGER_INDEXED_EXOTIC;
- }
- if (check_interceptor() && HasInterceptor(map) &&
- !SkipInterceptor(JSObject::cast(holder))) {
- return INTERCEPTOR;
- }
- // Fall through.
- case INTERCEPTOR:
- if (IsElement()) {
- // TODO(verwaest): Optimize.
- if (holder->IsStringObjectWithCharacterAt(index_)) {
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
- PropertyCellType::kNoCell);
- } else {
- JSObject* js_object = JSObject::cast(holder);
- if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
- return NOT_FOUND;
- }
-
- ElementsAccessor* accessor = js_object->GetElementsAccessor();
- FixedArrayBase* backing_store = js_object->elements();
- number_ =
- accessor->GetEntryForIndex(js_object, backing_store, index_);
- if (number_ == kMaxUInt32) return NOT_FOUND;
- property_details_ = accessor->GetDetails(backing_store, number_);
- }
- } else if (!map->is_dictionary_map()) {
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(*name_, map);
- if (number == DescriptorArray::kNotFound) return NOT_FOUND;
- number_ = static_cast<uint32_t>(number);
- property_details_ = descriptors->GetDetails(number_);
- } else if (map->IsGlobalObjectMap()) {
- GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
- int number = dict->FindEntry(name_);
- if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
- number_ = static_cast<uint32_t>(number);
- DCHECK(dict->ValueAt(number_)->IsPropertyCell());
- PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
- if (cell->value()->IsTheHole()) return NOT_FOUND;
- property_details_ = cell->property_details();
- } else {
- NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
- int number = dict->FindEntry(name_);
- if (number == NameDictionary::kNotFound) return NOT_FOUND;
- number_ = static_cast<uint32_t>(number);
- property_details_ = dict->DetailsAt(number_);
- }
- has_property_ = true;
- switch (property_details_.kind()) {
- case v8::internal::kData:
- return DATA;
- case v8::internal::kAccessor:
- return ACCESSOR;
- }
- case ACCESSOR:
- case DATA:
- return NOT_FOUND;
- case INTEGER_INDEXED_EXOTIC:
- case JSPROXY:
- case TRANSITION:
- UNREACHABLE();
- }
- UNREACHABLE();
- return state_;
-}
-
-
-LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
- Map* const map, JSReceiver* const holder) {
- switch (state_) {
- case NOT_FOUND:
- if (check_interceptor() && HasInterceptor(map) &&
- !SkipInterceptor(JSObject::cast(holder))) {
- return INTERCEPTOR;
- }
- // Fall through.
- default:
- return NOT_FOUND;
- }
- UNREACHABLE();
- return state_;
-}
-}
-} // namespace v8::internal
-
-#endif // V8_LOOKUP_INL_H_
diff --git a/chromium/v8/src/lookup.cc b/chromium/v8/src/lookup.cc
index 809c35e4a53..48da4fabeea 100644
--- a/chromium/v8/src/lookup.cc
+++ b/chromium/v8/src/lookup.cc
@@ -6,13 +6,45 @@
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
+#include "src/elements.h"
#include "src/isolate-inl.h"
-#include "src/lookup-inl.h"
namespace v8 {
namespace internal {
+// static
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> key,
+ bool* success,
+ Configuration configuration) {
+ uint32_t index = 0;
+ if (key->ToArrayIndex(&index)) {
+ *success = true;
+ return LookupIterator(isolate, receiver, index, configuration);
+ }
+
+ Handle<Name> name;
+ *success = Object::ToName(isolate, key).ToHandle(&name);
+ if (!*success) {
+ DCHECK(isolate->has_pending_exception());
+ // Return an unusable dummy.
+ return LookupIterator(receiver, isolate->factory()->empty_string());
+ }
+
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it(isolate, receiver, index, configuration);
+ // Here we try to avoid having to rebuild the string later
+ // by storing it on the indexed LookupIterator.
+ it.name_ = name;
+ return it;
+ }
+
+ return LookupIterator(receiver, name, configuration);
+}
+
+
void LookupIterator::Next() {
DCHECK_NE(JSPROXY, state_);
DCHECK_NE(TRANSITION, state_);
@@ -48,22 +80,20 @@ void LookupIterator::Next() {
}
-void LookupIterator::RestartLookupForNonMaskingInterceptors() {
- interceptor_state_ = InterceptorState::kProcessNonMasking;
+void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
state_ = NOT_FOUND;
+ interceptor_state_ = interceptor_state;
property_details_ = PropertyDetails::Empty();
- number_ = DescriptorArray::kNotFound;
holder_ = initial_holder_;
holder_map_ = handle(holder_->map(), isolate_);
+ number_ = DescriptorArray::kNotFound;
Next();
}
// static
-Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
- Handle<Object> receiver,
- uint32_t index) {
- if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
+ Isolate* isolate, Handle<Object> receiver, uint32_t index) {
// Strings are the only objects with properties (only elements) directly on
// the wrapper. Hence we can skip generating the wrapper for all other cases.
if (index != kMaxUInt32 && receiver->IsString() &&
@@ -102,7 +132,8 @@ Handle<JSObject> LookupIterator::GetStoreTarget() const {
bool LookupIterator::HasAccess() const {
DCHECK_EQ(ACCESS_CHECK, state_);
- return isolate_->MayAccess(GetHolder<JSObject>());
+ return isolate_->MayAccess(handle(isolate_->context()),
+ GetHolder<JSObject>());
}
@@ -212,11 +243,11 @@ void LookupIterator::PrepareTransitionToDataProperty(
state_ = TRANSITION;
transition_ = transition;
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalObject()) {
// Install a property cell.
InternalizeName();
- auto cell = GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject>::cast(receiver), name());
+ auto cell = JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject>::cast(receiver), name());
DCHECK(cell->value()->IsTheHole());
transition_ = cell;
} else if (!transition->is_dictionary_map()) {
@@ -230,7 +261,7 @@ void LookupIterator::ApplyTransitionToDataProperty() {
DCHECK_EQ(TRANSITION, state_);
Handle<JSObject> receiver = GetStoreTarget();
- if (receiver->IsGlobalObject()) return;
+ if (receiver->IsJSGlobalObject()) return;
holder_ = receiver;
holder_map_ = transition_map();
JSObject::MigrateToMap(receiver, holder_map_);
@@ -239,23 +270,27 @@ void LookupIterator::ApplyTransitionToDataProperty() {
void LookupIterator::Delete() {
- Handle<JSObject> holder = Handle<JSObject>::cast(holder_);
+ Handle<JSReceiver> holder = Handle<JSReceiver>::cast(holder_);
if (IsElement()) {
- ElementsAccessor* accessor = holder->GetElementsAccessor();
- accessor->Delete(holder, number_);
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Delete(object, number_);
} else {
PropertyNormalizationMode mode = holder->map()->is_prototype_map()
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
if (holder->HasFastProperties()) {
- JSObject::NormalizeProperties(holder, mode, 0, "DeletingProperty");
+ JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
+ "DeletingProperty");
holder_map_ = handle(holder->map(), isolate_);
ReloadPropertyInformation();
}
// TODO(verwaest): Get rid of the name_ argument.
- JSObject::DeleteNormalizedProperty(holder, name_, number_);
- JSObject::ReoptimizeIfPrototype(holder);
+ JSReceiver::DeleteNormalizedProperty(holder, name_, number_);
+ if (holder->IsJSObject()) {
+ JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
+ }
}
}
@@ -382,8 +417,8 @@ bool LookupIterator::InternalHolderIsReceiverOrHiddenPrototype() const {
Handle<Object> LookupIterator::FetchValue() const {
Object* result = NULL;
- Handle<JSObject> holder = GetHolder<JSObject>();
if (IsElement()) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
// TODO(verwaest): Optimize.
if (holder->IsStringObjectWithCharacterAt(index_)) {
Handle<JSValue> js_value = Handle<JSValue>::cast(holder);
@@ -394,13 +429,15 @@ Handle<Object> LookupIterator::FetchValue() const {
ElementsAccessor* accessor = holder->GetElementsAccessor();
return accessor->Get(handle(holder->elements()), number_);
- } else if (holder_map_->IsGlobalObjectMap()) {
+ } else if (holder_map_->IsJSGlobalObjectMap()) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
result = holder->global_dictionary()->ValueAt(number_);
DCHECK(result->IsPropertyCell());
result = PropertyCell::cast(result)->value();
} else if (holder_map_->is_dictionary_map()) {
- result = holder->property_dictionary()->ValueAt(number_);
+ result = holder_->property_dictionary()->ValueAt(number_);
} else if (property_details_.type() == v8::internal::DATA) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
@@ -453,7 +490,7 @@ Handle<HeapType> LookupIterator::GetFieldType() const {
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
Handle<JSObject> holder = GetHolder<JSObject>();
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(holder);
Object* value = global->global_dictionary()->ValueAt(dictionary_entry());
DCHECK(value->IsPropertyCell());
return handle(PropertyCell::cast(value));
@@ -475,20 +512,21 @@ Handle<Object> LookupIterator::GetDataValue() const {
void LookupIterator::WriteDataValue(Handle<Object> value) {
DCHECK_EQ(DATA, state_);
- Handle<JSObject> holder = GetHolder<JSObject>();
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
if (IsElement()) {
- ElementsAccessor* accessor = holder->GetElementsAccessor();
- accessor->Set(holder->elements(), number_, *value);
- } else if (holder->IsGlobalObject()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Set(object->elements(), number_, *value);
+ } else if (holder->IsJSGlobalObject()) {
Handle<GlobalDictionary> property_dictionary =
- handle(holder->global_dictionary());
+ handle(JSObject::cast(*holder)->global_dictionary());
PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
property_details_);
} else if (holder_map_->is_dictionary_map()) {
NameDictionary* property_dictionary = holder->property_dictionary();
property_dictionary->ValueAtPut(dictionary_entry(), *value);
} else if (property_details_.type() == v8::internal::DATA) {
- holder->WriteToField(descriptor_number(), *value);
+ JSObject::cast(*holder)->WriteToField(descriptor_number(), *value);
} else {
DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
}
@@ -497,8 +535,6 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
bool LookupIterator::IsIntegerIndexedExotic(JSReceiver* holder) {
DCHECK(exotic_index_state_ != ExoticIndexState::kNotExotic);
- // Currently typed arrays are the only such objects.
- if (!holder->IsJSTypedArray()) return false;
if (exotic_index_state_ == ExoticIndexState::kExotic) return true;
if (!InternalHolderIsReceiverOrHiddenPrototype()) {
exotic_index_state_ = ExoticIndexState::kNotExotic;
@@ -533,18 +569,6 @@ bool LookupIterator::HasInterceptor(Map* map) const {
}
-Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
- DCHECK_EQ(INTERCEPTOR, state_);
- return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
-}
-
-
-InterceptorInfo* LookupIterator::GetInterceptor(JSObject* holder) const {
- if (IsElement()) return holder->GetIndexedInterceptor();
- return holder->GetNamedInterceptor();
-}
-
-
bool LookupIterator::SkipInterceptor(JSObject* holder) {
auto info = GetInterceptor(holder);
// TODO(dcarney): check for symbol/can_intercept_symbols here as well.
@@ -561,5 +585,136 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
}
return interceptor_state_ == InterceptorState::kProcessNonMasking;
}
+
+
+JSReceiver* LookupIterator::NextHolder(Map* map) {
+ DisallowHeapAllocation no_gc;
+ if (!map->prototype()->IsJSReceiver()) return NULL;
+
+ JSReceiver* next = JSReceiver::cast(map->prototype());
+ DCHECK(!next->map()->IsJSGlobalObjectMap() ||
+ next->map()->is_hidden_prototype());
+
+ if (!check_prototype_chain() &&
+ !(check_hidden() && next->map()->is_hidden_prototype()) &&
+ // Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
+ // when not checking other hidden prototypes.
+ !map->IsJSGlobalProxyMap()) {
+ return NULL;
+ }
+
+ return next;
+}
+
+
+LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
+ JSReceiver* const holder) {
+ STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
+ DisallowHeapAllocation no_gc;
+ if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
+ return LookupNonMaskingInterceptorInHolder(map, holder);
+ }
+ switch (state_) {
+ case NOT_FOUND:
+ if (map->IsJSProxyMap()) {
+ // Do not leak private property names.
+ if (IsElement() || !name_->IsPrivate()) return JSPROXY;
+ }
+ if (map->is_access_check_needed() &&
+ (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
+ return ACCESS_CHECK;
+ }
+ // Fall through.
+ case ACCESS_CHECK:
+ if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
+ holder->IsJSTypedArray() && IsIntegerIndexedExotic(holder)) {
+ return INTEGER_INDEXED_EXOTIC;
+ }
+ if (check_interceptor() && HasInterceptor(map) &&
+ !SkipInterceptor(JSObject::cast(holder))) {
+ // Do not leak private property names.
+ if (!name_.is_null() && name_->IsPrivate()) return NOT_FOUND;
+ return INTERCEPTOR;
+ }
+ // Fall through.
+ case INTERCEPTOR:
+ if (IsElement()) {
+ // TODO(verwaest): Optimize.
+ if (holder->IsStringObjectWithCharacterAt(index_)) {
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kNoCell);
+ } else {
+ JSObject* js_object = JSObject::cast(holder);
+ if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
+ return NOT_FOUND;
+ }
+
+ ElementsAccessor* accessor = js_object->GetElementsAccessor();
+ FixedArrayBase* backing_store = js_object->elements();
+ number_ =
+ accessor->GetEntryForIndex(js_object, backing_store, index_);
+ if (number_ == kMaxUInt32) return NOT_FOUND;
+ property_details_ = accessor->GetDetails(backing_store, number_);
+ }
+ } else if (!map->is_dictionary_map()) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number = descriptors->SearchWithCache(*name_, map);
+ if (number == DescriptorArray::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = descriptors->GetDetails(number_);
+ } else if (map->IsJSGlobalObjectMap()) {
+ GlobalDictionary* dict = JSObject::cast(holder)->global_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ DCHECK(dict->ValueAt(number_)->IsPropertyCell());
+ PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
+ if (cell->value()->IsTheHole()) return NOT_FOUND;
+ property_details_ = cell->property_details();
+ } else {
+ NameDictionary* dict = holder->property_dictionary();
+ int number = dict->FindEntry(name_);
+ if (number == NameDictionary::kNotFound) return NOT_FOUND;
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = dict->DetailsAt(number_);
+ }
+ has_property_ = true;
+ switch (property_details_.kind()) {
+ case v8::internal::kData:
+ return DATA;
+ case v8::internal::kAccessor:
+ return ACCESSOR;
+ }
+ case ACCESSOR:
+ case DATA:
+ return NOT_FOUND;
+ case INTEGER_INDEXED_EXOTIC:
+ case JSPROXY:
+ case TRANSITION:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return state_;
+}
+
+
+LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
+ Map* const map, JSReceiver* const holder) {
+ switch (state_) {
+ case NOT_FOUND:
+ if (check_interceptor() && HasInterceptor(map) &&
+ !SkipInterceptor(JSObject::cast(holder))) {
+ return INTERCEPTOR;
+ }
+ // Fall through.
+ default:
+ return NOT_FOUND;
+ }
+ UNREACHABLE();
+ return state_;
+}
+
} // namespace internal
} // namespace v8
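
The new PropertyOrElement entry point hides the array-index fast path: a key
that parses as a canonical array index takes the element lookup, and anything
else is coerced to a name. The index rules (canonical decimal, no leading
zeros, strictly below 2**32 - 1) can be shown in isolation; a standalone
sketch of the same dispatch over plain strings, with hypothetical names:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Sketch of PropertyOrElement's key dispatch: a key that parses as a
    // valid array index takes the element path; anything else takes the
    // named-property path.
    bool ToArrayIndex(const std::string& key, uint32_t* index) {
      if (key.empty() || key.size() > 10) return false;
      if (key.size() > 1 && key[0] == '0') return false;  // No leading zeros.
      uint64_t value = 0;
      for (char c : key) {
        if (c < '0' || c > '9') return false;
        value = value * 10 + (c - '0');
      }
      if (value >= 0xFFFFFFFFu) return false;  // kMaxUInt32 is not an index.
      *index = static_cast<uint32_t>(value);
      return true;
    }

    int main() {
      for (const std::string key : {"42", "042", "length", "4294967295"}) {
        uint32_t index = 0;
        if (ToArrayIndex(key, &index)) {
          std::cout << key << " -> element " << index << "\n";
        } else {
          std::cout << key << " -> named property\n";
        }
      }
    }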
diff --git a/chromium/v8/src/lookup.h b/chromium/v8/src/lookup.h
index 3888ed62405..7d689560b8f 100644
--- a/chromium/v8/src/lookup.h
+++ b/chromium/v8/src/lookup.h
@@ -158,6 +158,12 @@ class LookupIterator final BASE_EMBEDDED {
return it;
}
+ static LookupIterator PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
+ bool* success, Configuration configuration = DEFAULT);
+
+ void Restart() { RestartInternal(InterceptorState::kUninitialized); }
+
Isolate* isolate() const { return isolate_; }
State state() const { return state_; }
@@ -197,10 +203,13 @@ class LookupIterator final BASE_EMBEDDED {
DCHECK(IsFound());
return Handle<T>::cast(holder_);
}
- static Handle<JSReceiver> GetRoot(Isolate* isolate, Handle<Object> receiver,
- uint32_t index = kMaxUInt32);
+
bool HolderIsReceiverOrHiddenPrototype() const;
+ bool check_prototype_chain() const {
+ return (configuration_ & kPrototypeChain) != 0;
+ }
+
/* ACCESS_CHECK */
bool HasAccess() const;
@@ -239,7 +248,10 @@ class LookupIterator final BASE_EMBEDDED {
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
Handle<Object> GetAccessors() const;
- Handle<InterceptorInfo> GetInterceptor() const;
+ inline Handle<InterceptorInfo> GetInterceptor() const {
+ DCHECK_EQ(INTERCEPTOR, state_);
+ return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
+ }
Handle<Object> GetDataValue() const;
void WriteDataValue(Handle<Object> value);
void InternalizeName();
@@ -256,22 +268,25 @@ class LookupIterator final BASE_EMBEDDED {
MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
inline State LookupInHolder(Map* map, JSReceiver* holder);
- void RestartLookupForNonMaskingInterceptors();
+ void RestartLookupForNonMaskingInterceptors() {
+ RestartInternal(InterceptorState::kProcessNonMasking);
+ }
+ void RestartInternal(InterceptorState interceptor_state);
State LookupNonMaskingInterceptorInHolder(Map* map, JSReceiver* holder);
Handle<Object> FetchValue() const;
void ReloadPropertyInformation();
- bool SkipInterceptor(JSObject* holder);
+ inline bool SkipInterceptor(JSObject* holder);
bool HasInterceptor(Map* map) const;
bool InternalHolderIsReceiverOrHiddenPrototype() const;
- InterceptorInfo* GetInterceptor(JSObject* holder) const;
+ inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
+ if (IsElement()) return holder->GetIndexedInterceptor();
+ return holder->GetNamedInterceptor();
+ }
bool check_hidden() const { return (configuration_ & kHidden) != 0; }
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
}
- bool check_prototype_chain() const {
- return (configuration_ & kPrototypeChain) != 0;
- }
int descriptor_number() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
@@ -293,8 +308,17 @@ class LookupIterator final BASE_EMBEDDED {
}
}
+ static Handle<JSReceiver> GetRootForNonJSReceiver(
+ Isolate* isolate, Handle<Object> receiver, uint32_t index = kMaxUInt32);
+ inline static Handle<JSReceiver> GetRoot(Isolate* isolate,
+ Handle<Object> receiver,
+ uint32_t index = kMaxUInt32) {
+ if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ return GetRootForNonJSReceiver(isolate, receiver, index);
+ }
+
enum class ExoticIndexState { kUninitialized, kNotExotic, kExotic };
- bool IsIntegerIndexedExotic(JSReceiver* holder);
+ inline bool IsIntegerIndexedExotic(JSReceiver* holder);
// If configuration_ becomes mutable, update
// HolderIsReceiverOrHiddenPrototype.
@@ -316,6 +340,7 @@ class LookupIterator final BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_LOOKUP_H_
diff --git a/chromium/v8/src/machine-type.cc b/chromium/v8/src/machine-type.cc
new file mode 100644
index 00000000000..1fb886ca525
--- /dev/null
+++ b/chromium/v8/src/machine-type.cc
@@ -0,0 +1,75 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/machine-type.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& os, MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kNone:
+ return os << "kMachNone";
+ case MachineRepresentation::kBit:
+ return os << "kRepBit";
+ case MachineRepresentation::kWord8:
+ return os << "kRepWord8";
+ case MachineRepresentation::kWord16:
+ return os << "kRepWord16";
+ case MachineRepresentation::kWord32:
+ return os << "kRepWord32";
+ case MachineRepresentation::kWord64:
+ return os << "kRepWord64";
+ case MachineRepresentation::kFloat32:
+ return os << "kRepFloat32";
+ case MachineRepresentation::kFloat64:
+ return os << "kRepFloat64";
+ case MachineRepresentation::kTagged:
+ return os << "kRepTagged";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, MachineSemantic type) {
+ switch (type) {
+ case MachineSemantic::kNone:
+ return os << "kMachNone";
+ case MachineSemantic::kBool:
+ return os << "kTypeBool";
+ case MachineSemantic::kInt32:
+ return os << "kTypeInt32";
+ case MachineSemantic::kUint32:
+ return os << "kTypeUint32";
+ case MachineSemantic::kInt64:
+ return os << "kTypeInt64";
+ case MachineSemantic::kUint64:
+ return os << "kTypeUint64";
+ case MachineSemantic::kNumber:
+ return os << "kTypeNumber";
+ case MachineSemantic::kAny:
+ return os << "kTypeAny";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, MachineType type) {
+ if (type == MachineType::None()) {
+ return os;
+ } else if (type.representation() == MachineRepresentation::kNone) {
+ return os << type.semantic();
+ } else if (type.semantic() == MachineSemantic::kNone) {
+ return os << type.representation();
+ } else {
+ return os << type.representation() << "|" << type.semantic();
+ }
+ return os;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/machine-type.h b/chromium/v8/src/machine-type.h
new file mode 100644
index 00000000000..97f6ae3bbdb
--- /dev/null
+++ b/chromium/v8/src/machine-type.h
@@ -0,0 +1,204 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MACHINE_TYPE_H_
+#define V8_MACHINE_TYPE_H_
+
+#include <iosfwd>
+
+#include "src/base/bits.h"
+#include "src/globals.h"
+#include "src/signature.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+enum class MachineRepresentation : uint8_t {
+ kNone,
+ kBit,
+ kWord8,
+ kWord16,
+ kWord32,
+ kWord64,
+ kFloat32,
+ kFloat64,
+ kTagged
+};
+
+enum class MachineSemantic : uint8_t {
+ kNone,
+ kBool,
+ kInt32,
+ kUint32,
+ kInt64,
+ kUint64,
+ kNumber,
+ kAny
+};
+
+class MachineType {
+ public:
+ MachineType()
+ : representation_(MachineRepresentation::kNone),
+ semantic_(MachineSemantic::kNone) {}
+ MachineType(MachineRepresentation representation, MachineSemantic semantic)
+ : representation_(representation), semantic_(semantic) {}
+
+ bool operator==(MachineType other) const {
+ return representation() == other.representation() &&
+ semantic() == other.semantic();
+ }
+
+ bool operator!=(MachineType other) const { return !(*this == other); }
+
+
+ MachineRepresentation representation() const { return representation_; }
+ MachineSemantic semantic() const { return semantic_; }
+
+ bool IsSigned() {
+ return semantic() == MachineSemantic::kInt32 ||
+ semantic() == MachineSemantic::kInt64;
+ }
+ bool IsUnsigned() {
+ return semantic() == MachineSemantic::kUint32 ||
+ semantic() == MachineSemantic::kUint64;
+ }
+
+ static MachineRepresentation PointerRepresentation() {
+ return (kPointerSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+ }
+ static MachineType Pointer() {
+ return MachineType(PointerRepresentation(), MachineSemantic::kNone);
+ }
+ static MachineType IntPtr() {
+ return (kPointerSize == 4) ? Int32() : Int64();
+ }
+ static MachineType Float32() {
+ return MachineType(MachineRepresentation::kFloat32,
+ MachineSemantic::kNumber);
+ }
+ static MachineType Float64() {
+ return MachineType(MachineRepresentation::kFloat64,
+ MachineSemantic::kNumber);
+ }
+ static MachineType Int8() {
+ return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
+ }
+ static MachineType Uint8() {
+ return MachineType(MachineRepresentation::kWord8, MachineSemantic::kUint32);
+ }
+ static MachineType Int16() {
+ return MachineType(MachineRepresentation::kWord16, MachineSemantic::kInt32);
+ }
+ static MachineType Uint16() {
+ return MachineType(MachineRepresentation::kWord16,
+ MachineSemantic::kUint32);
+ }
+ static MachineType Int32() {
+ return MachineType(MachineRepresentation::kWord32, MachineSemantic::kInt32);
+ }
+ static MachineType Uint32() {
+ return MachineType(MachineRepresentation::kWord32,
+ MachineSemantic::kUint32);
+ }
+ static MachineType Int64() {
+ return MachineType(MachineRepresentation::kWord64, MachineSemantic::kInt64);
+ }
+ static MachineType Uint64() {
+ return MachineType(MachineRepresentation::kWord64,
+ MachineSemantic::kUint64);
+ }
+ static MachineType AnyTagged() {
+ return MachineType(MachineRepresentation::kTagged, MachineSemantic::kAny);
+ }
+ static MachineType Bool() {
+ return MachineType(MachineRepresentation::kBit, MachineSemantic::kBool);
+ }
+ static MachineType TaggedBool() {
+ return MachineType(MachineRepresentation::kTagged, MachineSemantic::kBool);
+ }
+ static MachineType None() {
+ return MachineType(MachineRepresentation::kNone, MachineSemantic::kNone);
+ }
+
+ // These naked representations should eventually go away.
+ static MachineType RepWord8() {
+ return MachineType(MachineRepresentation::kWord8, MachineSemantic::kNone);
+ }
+ static MachineType RepWord16() {
+ return MachineType(MachineRepresentation::kWord16, MachineSemantic::kNone);
+ }
+ static MachineType RepWord32() {
+ return MachineType(MachineRepresentation::kWord32, MachineSemantic::kNone);
+ }
+ static MachineType RepWord64() {
+ return MachineType(MachineRepresentation::kWord64, MachineSemantic::kNone);
+ }
+ static MachineType RepFloat32() {
+ return MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone);
+ }
+ static MachineType RepFloat64() {
+ return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
+ }
+ static MachineType RepTagged() {
+ return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
+ }
+ static MachineType RepBit() {
+ return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
+ }
+
+ private:
+ MachineRepresentation representation_;
+ MachineSemantic semantic_;
+};
+
+V8_INLINE size_t hash_value(MachineRepresentation rep) {
+ return static_cast<size_t>(rep);
+}
+
+V8_INLINE size_t hash_value(MachineType type) {
+ return static_cast<size_t>(type.representation()) +
+ static_cast<size_t>(type.semantic()) * 16;
+}
+
+std::ostream& operator<<(std::ostream& os, MachineRepresentation rep);
+std::ostream& operator<<(std::ostream& os, MachineSemantic type);
+std::ostream& operator<<(std::ostream& os, MachineType type);
+
+inline bool IsFloatingPoint(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kFloat64;
+}
+
+// Gets the log2 of the element size in bytes of the machine type.
+inline int ElementSizeLog2Of(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ return 0;
+ case MachineRepresentation::kWord16:
+ return 1;
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kFloat32:
+ return 2;
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat64:
+ return 3;
+ case MachineRepresentation::kTagged:
+ return kPointerSizeLog2;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+typedef Signature<MachineType> MachineSignature;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MACHINE_TYPE_H_
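
The new MachineType splits what used to be a flat enum into a physical
representation paired with a value semantic, so Int32() and Uint32() share
kWord32 and differ only in semantic, and equality compares both halves. A
standalone sketch of that pairing, with deliberately simplified enums rather
than the full set declared above:

    #include <cstdint>
    #include <iostream>

    // The same machine word (kWord32) can carry either a signed or an
    // unsigned semantic; equality compares both halves of the pair.
    enum class Rep : uint8_t { kNone, kWord32, kWord64, kFloat64, kTagged };
    enum class Sem : uint8_t { kNone, kInt32, kUint32, kNumber, kAny };

    struct Type {
      Rep rep;
      Sem sem;
      bool operator==(const Type& other) const {
        return rep == other.rep && sem == other.sem;
      }
    };

    int main() {
      Type int32{Rep::kWord32, Sem::kInt32};
      Type uint32{Rep::kWord32, Sem::kUint32};
      std::cout << "same representation: " << (int32.rep == uint32.rep)
                << "\n";                                    // 1 (true).
      std::cout << "same type: " << (int32 == uint32) << "\n";  // 0 (false).
    }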
diff --git a/chromium/v8/src/macro-assembler.h b/chromium/v8/src/macro-assembler.h
index c865a5fb0c3..fd2aa7c3145 100644
--- a/chromium/v8/src/macro-assembler.h
+++ b/chromium/v8/src/macro-assembler.h
@@ -278,6 +278,7 @@ class AllocationUtils {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MACRO_ASSEMBLER_H_
diff --git a/chromium/v8/src/messages.cc b/chromium/v8/src/messages.cc
index 640c2dff4e8..23deb1afeba 100644
--- a/chromium/v8/src/messages.cc
+++ b/chromium/v8/src/messages.cc
@@ -82,11 +82,23 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
if (message->argument()->IsJSObject()) {
HandleScope scope(isolate);
Handle<Object> argument(message->argument(), isolate);
- Handle<Object> args[] = {argument};
- MaybeHandle<Object> maybe_stringified = Execution::TryCall(
- isolate->to_detail_string_fun(), isolate->factory()->undefined_value(),
- arraysize(args), args);
+
+ MaybeHandle<Object> maybe_stringified;
Handle<Object> stringified;
+ // Make sure we don't leak uncaught internally generated Error objects.
+ if (Object::IsErrorObject(isolate, argument)) {
+ Handle<Object> args[] = {argument};
+ maybe_stringified = Execution::TryCall(
+ isolate, isolate->no_side_effects_to_string_fun(),
+ isolate->factory()->undefined_value(), arraysize(args), args);
+ } else {
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
+
+ maybe_stringified = Object::ToString(isolate, argument);
+ }
+
if (!maybe_stringified.ToHandle(&stringified)) {
stringified = isolate->factory()->NewStringFromAsciiChecked("exception");
}
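The new two-path stringification routes internally generated Error objects through the side-effect-free stringifier, while anything else is converted under a muted TryCatch so a throwing user-defined toString cannot escape message reporting. A sketch of the same guard expressed with the public v8:: embedder API (the helper name and fallback literal are illustrative, mirroring the "exception" fallback above):

v8::Local<v8::String> SafeToString(v8::Isolate* isolate,
                                   v8::Local<v8::Context> context,
                                   v8::Local<v8::Value> value) {
  v8::TryCatch catcher(isolate);
  catcher.SetVerbose(false);         // Do not report through message handlers.
  catcher.SetCaptureMessage(false);  // Do not build a Message object either.
  v8::Local<v8::String> result;
  if (value->ToString(context).ToLocal(&result)) return result;
  return v8::String::NewFromUtf8(isolate, "exception",
                                 v8::NewStringType::kNormal)
      .ToLocalChecked();
}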
@@ -144,14 +156,16 @@ base::SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
: isolate_(isolate) {
+ Handle<Object> maybe_function = JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_function_symbol());
+ if (!maybe_function->IsJSFunction()) return;
+
+ fun_ = Handle<JSFunction>::cast(maybe_function);
receiver_ = JSObject::GetDataProperty(
call_site_obj, isolate->factory()->call_site_receiver_symbol());
- fun_ = Handle<JSFunction>::cast(JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_function_symbol()));
- pos_ = Handle<Smi>::cast(JSObject::GetDataProperty(
- call_site_obj,
- isolate->factory()->call_site_position_symbol()))
- ->value();
+ CHECK(JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_position_symbol())
+ ->ToInt32(&pos_));
}
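The constructor now degrades gracefully: when the function slot does not hold a JSFunction, fun_ stays null and the IsValid() accessor added to messages.h (further down) reports it, while pos_ is read through a CHECKed ToInt32 rather than an unchecked Smi cast. A hypothetical caller, sketched to show the intended guard (the surrounding runtime code is assumed, not quoted):

// Sketch: callers are expected to test IsValid() before using the site.
CallSite call_site(isolate, call_site_obj);
if (!call_site.IsValid()) {
  // Bail out instead of casting a non-function and crashing.
  return isolate->factory()->undefined_value();
}
Handle<Object> name = call_site.GetFunctionName();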
@@ -165,8 +179,9 @@ Handle<Object> CallSite::GetFileName() {
Handle<Object> CallSite::GetFunctionName() {
- Handle<String> result = JSFunction::GetDebugName(fun_);
+ Handle<String> result = JSFunction::GetName(fun_);
if (result->length() != 0) return result;
+
Handle<Object> script(fun_->shared()->script(), isolate_);
if (script->IsScript() &&
Handle<Script>::cast(script)->compilation_type() ==
@@ -313,10 +328,10 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
if (arg->IsString()) {
result_string = Handle<String>::cast(arg);
} else {
- Handle<JSFunction> fun = isolate->no_side_effect_to_string_fun();
+ Handle<JSFunction> fun = isolate->no_side_effects_to_string_fun();
MaybeHandle<Object> maybe_result =
- Execution::TryCall(fun, factory->undefined_value(), 1, &arg);
+ Execution::TryCall(isolate, fun, factory->undefined_value(), 1, &arg);
Handle<Object> result;
if (!maybe_result.ToHandle(&result) || !result->IsString()) {
return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
@@ -387,96 +402,5 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
}
-MaybeHandle<String> ErrorToStringHelper::Stringify(Isolate* isolate,
- Handle<JSObject> error) {
- VisitedScope scope(this, error);
- if (scope.has_visited()) return isolate->factory()->empty_string();
-
- Handle<String> name;
- Handle<String> message;
- Handle<Name> internal_key = isolate->factory()->internal_error_symbol();
- Handle<String> message_string =
- isolate->factory()->NewStringFromStaticChars("message");
- Handle<String> name_string = isolate->factory()->name_string();
- LookupIterator internal_error_lookup(
- error, internal_key, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- LookupIterator message_lookup(
- error, message_string, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- LookupIterator name_lookup(error, name_string,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-
- // Find out whether an internally created error object is on the prototype
- // chain. If the name property is found on a holder prior to the internally
- // created error object, use that name property. Otherwise just use the
- // constructor name to avoid triggering possible side effects.
- // Similar for the message property. If the message property shadows the
- // internally created error object, use that message property. Otherwise
- // use empty string as message.
- if (internal_error_lookup.IsFound()) {
- if (!ShadowsInternalError(isolate, &name_lookup, &internal_error_lookup)) {
- Handle<JSObject> holder = internal_error_lookup.GetHolder<JSObject>();
- name = Handle<String>(holder->constructor_name());
- }
- if (!ShadowsInternalError(isolate, &message_lookup,
- &internal_error_lookup)) {
- message = isolate->factory()->empty_string();
- }
- }
- if (name.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, name,
- GetStringifiedProperty(isolate, &name_lookup,
- isolate->factory()->Error_string()),
- String);
- }
- if (message.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, message,
- GetStringifiedProperty(isolate, &message_lookup,
- isolate->factory()->empty_string()),
- String);
- }
-
- if (name->length() == 0) return message;
- if (message->length() == 0) return name;
- IncrementalStringBuilder builder(isolate);
- builder.AppendString(name);
- builder.AppendCString(": ");
- builder.AppendString(message);
- return builder.Finish();
-}
-
-
-bool ErrorToStringHelper::ShadowsInternalError(
- Isolate* isolate, LookupIterator* property_lookup,
- LookupIterator* internal_error_lookup) {
- if (!property_lookup->IsFound()) return false;
- Handle<JSObject> holder = property_lookup->GetHolder<JSObject>();
- // It's fine if the property is defined on the error itself.
- if (holder.is_identical_to(property_lookup->GetReceiver())) return true;
- PrototypeIterator it(isolate, holder, PrototypeIterator::START_AT_RECEIVER);
- while (true) {
- if (it.IsAtEnd()) return false;
- if (it.IsAtEnd(internal_error_lookup->GetHolder<JSObject>())) return true;
- it.AdvanceIgnoringProxies();
- }
-}
-
-
-MaybeHandle<String> ErrorToStringHelper::GetStringifiedProperty(
- Isolate* isolate, LookupIterator* property_lookup,
- Handle<String> default_value) {
- if (!property_lookup->IsFound()) return default_value;
- Handle<Object> obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Object::GetProperty(property_lookup),
- String);
- if (obj->IsUndefined()) return default_value;
- if (!obj->IsString()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Object::ToString(isolate, obj),
- String);
- }
- return Handle<String>::cast(obj);
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/messages.h b/chromium/v8/src/messages.h
index bb78f3d0a58..8cd60b1c5c1 100644
--- a/chromium/v8/src/messages.h
+++ b/chromium/v8/src/messages.h
@@ -62,11 +62,13 @@ class CallSite {
bool IsEval();
bool IsConstructor();
+ bool IsValid() { return !fun_.is_null(); }
+
private:
Isolate* isolate_;
Handle<Object> receiver_;
Handle<JSFunction> fun_;
- int pos_;
+ int32_t pos_;
};
@@ -85,26 +87,36 @@ class CallSite {
T(ApplyNonFunction, \
"Function.prototype.apply was called on %, which is a % and not a " \
"function") \
+ T(ArrayBufferTooShort, \
+ "Derived ArrayBuffer constructor created a buffer which was too small") \
+ T(ArrayBufferSpeciesThis, \
+ "ArrayBuffer subclass returned this from species constructor") \
T(ArrayFunctionsOnFrozen, "Cannot modify frozen array elements") \
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.") \
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
+ T(CallSiteExpectsFunction, \
+ "CallSite expects function as second argument, got %") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
- T(CannotPreventExtExternalArray, \
- "Cannot prevent extension of an object with external array elements") \
+ T(CannotPreventExt, "Cannot prevent extensions") \
+ T(CannotFreezeArrayBufferView, \
+ "Cannot freeze array buffer views with elements") \
T(CircularStructure, "Converting circular structure to JSON") \
+ T(ConstructAbstractClass, "Abstract class % not directly constructable") \
T(ConstAssign, "Assignment to constant variable.") \
T(ConstructorNonCallable, \
- "Class constructors cannot be invoked without 'new'") \
+ "Class constructor % cannot be invoked without 'new'") \
T(ConstructorNotFunction, "Constructor % requires 'new'") \
+ T(ConstructorNotReceiver, "The .constructor property is not an object") \
T(CurrencyCode, "Currency code is required with currency style.") \
T(DataViewNotArrayBuffer, \
"First argument to DataView constructor must be an ArrayBuffer") \
T(DateType, "this is not a Date object.") \
T(DebuggerFrame, "Debugger: Invalid frame index.") \
T(DebuggerType, "Debugger: Parameters have wrong types.") \
+ T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
T(DefineDisallowed, "Cannot define property:%, object is not extensible.") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
T(ExtendsValueGenerator, \
@@ -113,8 +125,6 @@ class CallSite {
"Class extends value % is not a function or null") \
T(FirstArgumentNotRegExp, \
"First argument to % must not be a regular expression") \
- T(FlagsGetterNonObject, \
- "RegExp.prototype.flags getter called on non-object %") \
T(FunctionBind, "Bind must be called on a function") \
T(GeneratorRunning, "Generator is already running") \
T(IllegalInvocation, "Illegal invocation") \
@@ -125,6 +135,7 @@ class CallSite {
"Function has non-object prototype '%' in instanceof check") \
T(InvalidArgument, "invalid_argument") \
T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
+ T(InvalidSimdOperation, "% is not a valid type for this SIMD operation.") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
T(LanguageID, "Language ID should be string or object.") \
@@ -146,6 +157,7 @@ class CallSite {
T(NotIntlObject, "% is not an i18n object.") \
T(NotGeneric, "% is not generic") \
T(NotIterable, "% is not iterable") \
+ T(NotPropertyName, "% is not a valid property name") \
T(NotTypedArray, "this is not a typed array.") \
T(NotSharedTypedArray, "% is not a shared typed array.") \
T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
@@ -173,32 +185,108 @@ class CallSite {
T(OrdinaryFunctionCalledAsConstructor, \
"Function object that's not a constructor was created with new") \
T(PromiseCyclic, "Chaining cycle detected for promise %") \
+ T(PromiseExecutorAlreadyInvoked, \
+ "Promise executor has already been invoked with non-undefined arguments") \
T(PropertyDescObject, "Property description must be an object: %") \
- T(PropertyNotFunction, "Property '%' of object % is not a function") \
+ T(PropertyNotFunction, \
+ "'%' returned for property '%' of object '%' is not a function") \
T(ProtoObjectOrNull, "Object prototype may only be an Object or null: %") \
T(PrototypeParentNotAnObject, \
"Class extends value does not have valid prototype property %") \
- T(ProxyHandlerDeleteFailed, \
- "Proxy handler % did not return a boolean value from 'delete' trap") \
- T(ProxyHandlerNonObject, "Proxy.% called with non-object as handler") \
- T(ProxyHandlerReturned, "Proxy handler % returned % from '%' trap") \
- T(ProxyHandlerTrapMissing, "Proxy handler % has no '%' trap") \
- T(ProxyHandlerTrapMustBeCallable, \
- "Proxy handler %0 has non-callable '%' trap") \
- T(ProxyNonObjectPropNames, "Trap '%' returned non-object %") \
- T(ProxyProtoNonObject, "Proxy.create called with no-object as prototype") \
- T(ProxyPropNotConfigurable, \
- "Proxy handler % returned non-configurable descriptor for property '%' " \
- "from '%' trap") \
- T(ProxyRepeatedPropName, "Trap '%' returned repeated property name '%'") \
- T(ProxyTrapFunctionExpected, \
- "Proxy.createFunction called with non-function for '%' trap") \
+ T(ProxyConstructNonObject, \
+ "'construct' on proxy: trap returned non-object ('%')") \
+ T(ProxyDefinePropertyNonConfigurable, \
+ "'defineProperty' on proxy: trap returned truish for defining " \
+ "non-configurable property '%' which is either non-existant or " \
+ "configurable in the proxy target") \
+ T(ProxyDefinePropertyNonExtensible, \
+ "'defineProperty' on proxy: trap returned truish for adding property '%' " \
+ " to the non-extensible proxy target") \
+ T(ProxyDefinePropertyIncompatible, \
+ "'defineProperty' on proxy: trap returned truish for adding property '%' " \
+ " that is incompatible with the existing property in the proxy target") \
+ T(ProxyDeletePropertyNonConfigurable, \
+ "'deleteProperty' on proxy: trap returned truish for property '%' which " \
+ "is non-configurable in the proxy target") \
+ T(ProxyEnumerateNonObject, "'enumerate' on proxy: trap returned non-object") \
+ T(ProxyEnumerateNonString, \
+ "'enumerate' on proxy: trap result includes non-string") \
+ T(ProxyGetNonConfigurableData, \
+ "'get' on proxy: property '%' is a read-only and " \
+ "non-configurable data property on the proxy target but the proxy " \
+ "did not return its actual value (expected '%' but got '%')") \
+ T(ProxyGetNonConfigurableAccessor, \
+ "'get' on proxy: property '%' is a non-configurable accessor " \
+ "property on the proxy target and does not have a getter function, but " \
+ "the trap did not return 'undefined' (got '%')") \
+ T(ProxyGetOwnPropertyDescriptorIncompatible, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned descriptor for " \
+ "property '%' that is incompatible with the existing property in the " \
+ "proxy target") \
+ T(ProxyGetOwnPropertyDescriptorInvalid, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned neither object nor " \
+ "undefined for property '%'") \
+ T(ProxyGetOwnPropertyDescriptorNonConfigurable, \
+ "'getOwnPropertyDescriptor' on proxy: trap reported non-configurability " \
+ "for property '%' which is either non-existant or configurable in the " \
+ "proxy target") \
+ T(ProxyGetOwnPropertyDescriptorNonExtensible, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
+ "property '%' which exists in the non-extensible proxy target") \
+ T(ProxyGetOwnPropertyDescriptorUndefined, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
+ "property '%' which is non-configurable in the proxy target") \
+ T(ProxyGetPrototypeOfInvalid, \
+ "'getPrototypeOf' on proxy: trap returned neither object nor null") \
+ T(ProxyGetPrototypeOfNonExtensible, \
+ "'getPrototypeOf' on proxy: proxy target is non-extensible but the " \
+ "trap did not return its actual prototype") \
+ T(ProxyHandlerOrTargetRevoked, \
+ "Cannot create proxy with a revoked proxy as target or handler") \
+ T(ProxyHasNonConfigurable, \
+ "'has' on proxy: trap returned falsish for property '%' which exists in " \
+ "the proxy target as non-configurable") \
+ T(ProxyHasNonExtensible, \
+ "'has' on proxy: trap returned falsish for property '%' but the proxy " \
+ "target is not extensible") \
+ T(ProxyIsExtensibleInconsistent, \
+ "'isExtensible' on proxy: trap result does not reflect extensibility of " \
+ "proxy target (which is '%')") \
+ T(ProxyNonObject, \
+ "Cannot create proxy with a non-object as target or handler") \
+ T(ProxyOwnKeysMissing, \
+ "'ownKeys' on proxy: trap result did not include '%'") \
+ T(ProxyOwnKeysNonExtensible, \
+ "'ownKeys' on proxy: trap returned extra keys but proxy target is " \
+ "non-extensible") \
+ T(ProxyPreventExtensionsExtensible, \
+ "'preventExtensions' on proxy: trap returned truish but the proxy target " \
+ "is extensible") \
+ T(ProxyPrivate, "Cannot pass private property name to proxy trap") \
+ T(ProxyRevoked, "Cannot perform '%' on a proxy that has been revoked") \
+ T(ProxySetFrozenData, \
+ "'set' on proxy: trap returned truish for property '%' which exists in " \
+ "the proxy target as a non-configurable and non-writable data property " \
+ "with a different value") \
+ T(ProxySetFrozenAccessor, \
+ "'set' on proxy: trap returned truish for property '%' which exists in " \
+ "the proxy target as a non-configurable and non-writable accessor " \
+ "property without a setter") \
+ T(ProxySetPrototypeOfNonExtensible, \
+ "'setPrototypeOf' on proxy: trap returned truish for setting a new " \
+ "prototype on the non-extensible proxy target") \
+ T(ProxyTrapReturnedFalsish, "'%' on proxy: trap returned falsish") \
+ T(ProxyTrapReturnedFalsishFor, \
+ "'%' on proxy: trap returned falsish for property '%'") \
+ T(ReadGlobalReferenceThroughProxy, "Trying to access '%' through proxy") \
T(RedefineDisallowed, "Cannot redefine property: %") \
T(RedefineExternalArray, \
"Cannot redefine a property of an object with external array elements") \
T(ReduceNoInitial, "Reduce of empty array with no initial value") \
T(RegExpFlags, \
"Cannot supply flags when constructing one RegExp from another") \
+ T(RegExpNonObject, "% getter called on non-object %") \
+ T(RegExpNonRegExp, "% getter called on non-RegExp object") \
T(ReinitializeIntl, "Trying to re-initialize % object.") \
T(ResolvedOptionsCalledOnNonObject, \
"resolvedOptions method called on a non-object or on a object that is " \
@@ -208,12 +296,14 @@ class CallSite {
"'caller' and 'arguments' are restricted function properties and cannot " \
"be accessed in this context.") \
T(StaticPrototype, "Classes may not have static property named prototype") \
- T(StrictCannotAssign, "Cannot assign to read only '% in strict mode") \
+ T(StrictCannotAssign, "Cannot assign to read only '%' in strict mode") \
T(StrictDeleteProperty, "Cannot delete property '%' of %") \
T(StrictPoisonPill, \
"'caller', 'callee', and 'arguments' properties may not be accessed on " \
"strict mode functions or the arguments objects for calls to them") \
- T(StrictReadOnlyProperty, "Cannot assign to read only property '%' of %") \
+ T(StrictReadOnlyProperty, \
+ "Cannot assign to read only property '%' of % '%'") \
+ T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'") \
T(StrongArity, \
"In strong mode, calling a function with too few arguments is deprecated") \
T(StrongDeleteProperty, \
@@ -232,10 +322,9 @@ class CallSite {
T(SimdToNumber, "Cannot convert a SIMD value to a number") \
T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
T(ValueAndAccessor, \
- "Invalid property. A property cannot both have accessors and be " \
- "writable or have a value, %") \
+ "Invalid property descriptor. Cannot both specify accessors and a value " \
+ "or writable attribute, %") \
T(VarRedeclaration, "Identifier '%' has already been declared") \
- T(WithExpression, "% has no properties") \
T(WrongArgs, "%: Arguments list has wrong type") \
/* ReferenceError */ \
T(NonMethod, "'super' is referenced from non-method") \
@@ -248,8 +337,13 @@ class CallSite {
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(DateRange, "Provided date is not in valid range.") \
- T(ExpectedLocation, "Expected Area/Location for time zone, got %") \
+ T(ExpectedTimezoneID, \
+ "Expected Area/Location(/Location)* for time zone, got %") \
+ T(ExpectedLocation, \
+ "Expected letters optionally connected with underscores or hyphens for " \
+ "a location, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
+ T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
T(InvalidArrayLength, "Invalid array length") \
T(InvalidCodePoint, "Invalid code point %") \
T(InvalidCountValue, "Invalid count value") \
@@ -267,6 +361,7 @@ class CallSite {
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
T(InvalidTypedArrayLength, "Invalid typed array length") \
T(InvalidTypedArrayOffset, "Start offset is too large:") \
+ T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
T(LocaleMatcher, "Illegal value for localeMatcher:%") \
T(NormalizationForm, "The normalization form should be one of %.") \
T(NumberFormatRange, "% argument must be between 0 and 20") \
@@ -301,7 +396,10 @@ class CallSite {
T(IllegalLanguageModeDirective, \
"Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
+ T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
+ T(InvalidCoverInitializedName, "Invalid shorthand property initializer") \
+ T(InvalidDestructuringTarget, "Invalid destructuring assignment target") \
T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
T(InvalidLhsInPostfixOp, \
"Invalid left-hand side expression in postfix operation") \
@@ -319,6 +417,10 @@ class CallSite {
T(NoCatchOrFinally, "Missing catch or finally after try") \
T(NotIsvar, "builtin %%IS_VAR: not a variable") \
T(ParamAfterRest, "Rest parameter must be last formal parameter") \
+ T(PushPastSafeLength, \
+ "Pushing % elements on an array-like of length % " \
+ "is disallowed, as the total surpasses 2**53-1") \
+ T(ElementAfterRest, "Rest element must be last element in array") \
T(BadSetterRestParameter, \
"Setter function argument must not be a rest parameter") \
T(ParamDupe, "Duplicate parameter name not allowed in this context") \
@@ -327,6 +429,8 @@ class CallSite {
T(SloppyLexical, \
"Block-scoped declarations (let, const, function, class) not yet " \
"supported outside strict mode") \
+ T(SpeciesNotConstructor, \
+ "object.constructor[Symbol.species] is not a constructor") \
T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
T(StrictFunction, \
@@ -388,6 +492,8 @@ class CallSite {
T(TooManyParameters, \
"Too many parameters in function definition (only 65535 allowed)") \
T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
+ T(TypedArrayTooShort, \
+ "Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
@@ -452,44 +558,7 @@ class MessageHandler {
};
-class ErrorToStringHelper {
- public:
- ErrorToStringHelper() : visited_(0) {}
-
- MUST_USE_RESULT MaybeHandle<String> Stringify(Isolate* isolate,
- Handle<JSObject> error);
-
- private:
- class VisitedScope {
- public:
- VisitedScope(ErrorToStringHelper* helper, Handle<JSObject> error)
- : helper_(helper), has_visited_(false) {
- for (const Handle<JSObject>& visited : helper->visited_) {
- if (visited.is_identical_to(error)) {
- has_visited_ = true;
- break;
- }
- }
- helper->visited_.Add(error);
- }
- ~VisitedScope() { helper_->visited_.RemoveLast(); }
- bool has_visited() { return has_visited_; }
-
- private:
- ErrorToStringHelper* helper_;
- bool has_visited_;
- };
-
- static bool ShadowsInternalError(Isolate* isolate,
- LookupIterator* property_lookup,
- LookupIterator* internal_error_lookup);
-
- static MUST_USE_RESULT MaybeHandle<String> GetStringifiedProperty(
- Isolate* isolate, LookupIterator* property_lookup,
- Handle<String> default_value);
-
- List<Handle<JSObject> > visited_;
-};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MESSAGES_H_
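The T(...) table above is an X-macro list: every consumer re-defines T and expands the same rows into an enum, a string table, and so on. A self-contained sketch of the pattern with two illustrative rows (MSG_LIST and the identifiers here are not the V8 spellings):

#define MSG_LIST(T)                                  \
  T(ConstAssign, "Assignment to constant variable.") \
  T(IllegalInvocation, "Illegal invocation")

enum class MessageTemplateId {
#define DEFINE_ID(NAME, STRING) k##NAME,
  MSG_LIST(DEFINE_ID)
#undef DEFINE_ID
  kCount
};

const char* TemplateString(MessageTemplateId id) {
  switch (id) {
#define DEFINE_CASE(NAME, STRING)  \
  case MessageTemplateId::k##NAME: \
    return STRING;
    MSG_LIST(DEFINE_CASE)
#undef DEFINE_CASE
    default:
      return "<unknown>";
  }
}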
diff --git a/chromium/v8/src/mips/OWNERS b/chromium/v8/src/mips/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/mips/OWNERS
+++ b/chromium/v8/src/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/mips/assembler-mips-inl.h b/chromium/v8/src/mips/assembler-mips-inl.h
index f4bddf54613..27ec8e5bda9 100644
--- a/chromium/v8/src/mips/assembler-mips-inl.h
+++ b/chromium/v8/src/mips/assembler-mips-inl.h
@@ -84,53 +84,15 @@ bool Operand::is_reg() const {
}
-int Register::NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumRegisters() {
- return FPURegister::kMaxNumRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableRegisters() {
- return FPURegister::kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
-}
-
-
-int FPURegister::ToAllocationIndex(FPURegister reg) {
- DCHECK(reg.code() % 2 == 0);
- DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
- DCHECK(reg.is_valid());
- DCHECK(!reg.is(kDoubleRegZero));
- DCHECK(!reg.is(kLithiumScratchDouble));
- return (reg.code() / 2);
-}
-
-
// -----------------------------------------------------------------------------
// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
- if (IsCodeTarget(rmode_)) {
- uint32_t scope1 = (uint32_t) target_address() & ~kImm28Mask;
- uint32_t scope2 = reinterpret_cast<uint32_t>(pc_) & ~kImm28Mask;
-
- if (scope1 != scope2) {
- Assembler::JumpToJumpRegister(pc_);
- }
- }
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
int count = Assembler::RelocateInternalReference(rmode_, p, delta);
- CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+ Assembler::FlushICache(isolate_, p, count * sizeof(uint32_t));
}
}
@@ -180,7 +142,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -217,7 +180,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -245,7 +208,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -348,8 +311,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
- host_,
+ Assembler::set_target_address_at(isolate_, pc_ + Assembler::kInstrSize, host_,
stub->instruction_start());
}
@@ -366,7 +328,7 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// The pc_ offset of 0 assumes patched debug break slot or return
// sequence.
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -384,7 +346,7 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -475,16 +437,57 @@ void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
}
-void Assembler::emit(Instr x) {
+void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
+ if (IsPrevInstrCompactBranch()) {
+ // Nop instruction to preceed a CTI in forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+
+ ClearCompactBranchState();
+ }
+}
+
+
+void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
+ if (IsPrevInstrCompactBranch()) {
+ if (Instruction::IsForbiddenAfterBranchInstr(x)) {
+ // Nop instruction to precede a CTI in forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+ }
+ ClearCompactBranchState();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
+ EmittedCompactBranchInstruction();
+ }
+ CheckTrampolinePoolQuick();
+}
+
+
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
CheckTrampolinePoolQuick();
}
-} } // namespace v8::internal
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(x, is_compact_branch);
+}
+
+
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
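On MIPS r6 a compact branch has no delay slot, but the instruction that follows it (the forbidden slot) must not be another control-transfer instruction; EmitHelper therefore pads with a nop when the next instruction would be one. An illustrative instruction stream, not literal assembler output:

//   bc   label1   ; compact branch, no delay slot
//   nop           ; inserted by EmitHelper: the next instruction is a CTI
//                 ; and may not occupy the forbidden slot
//   bc   label2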
diff --git a/chromium/v8/src/mips/assembler-mips.cc b/chromium/v8/src/mips/assembler-mips.cc
index 7fa4d5d66a5..a8b6cc7c32d 100644
--- a/chromium/v8/src/mips/assembler-mips.cc
+++ b/chromium/v8/src/mips/assembler-mips.cc
@@ -64,28 +64,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
-const char* DoubleRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
-}
-
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
@@ -250,31 +228,31 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (kPointerSize & kImm16Mask); // NOLINT
+const Instr kPopInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (kPointerSize & kImm16Mask); // NOLINT
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (-kPointerSize & kImm16Mask); // NOLINT
+const Instr kPushInstruction = ADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (-kPointerSize & kImm16Mask); // NOLINT
// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern =
+ SW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern =
+ LW | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kLwRegFpOffsetPattern =
+ LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kSwRegFpOffsetPattern =
+ SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
-const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -307,6 +285,10 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ ClearCompactBranchState();
+ }
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -314,11 +296,16 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->constant_pool_size = 0;
}
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ ClearCompactBranchState();
+ }
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -334,21 +321,21 @@ void Assembler::CodeTargetAlign() {
Register Assembler::GetRtReg(Instr instr) {
Register rt;
- rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
return rt;
}
Register Assembler::GetRsReg(Instr instr) {
Register rs;
- rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
return rs;
}
Register Assembler::GetRdReg(Instr instr) {
Register rd;
- rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
return rd;
}
@@ -475,19 +462,38 @@ bool Assembler::IsBranch(Instr instr) {
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
// Checks if the instruction is a branch.
- return opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL ||
+ bool isBranch =
+ opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
+ opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
(opcode == COP1 && rs_field == BC1NEZ);
+ if (!isBranch && IsMipsArchVariant(kMips32r6)) {
+ // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
+ // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
+ isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
+ opcode == BALC ||
+ (opcode == POP66 && rs_field != 0) || // BEQZC
+ (opcode == POP76 && rs_field != 0); // BNEZC
+ }
+ return isBranch;
+}
+
+
+bool Assembler::IsBc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a BC or BALC.
+ return opcode == BC || opcode == BALC;
+}
+
+
+bool Assembler::IsBzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is BEQZC or BNEZC.
+ return (opcode == POP66 && GetRsField(instr) != 0) ||
+ (opcode == POP76 && GetRsField(instr) != 0);
}
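The rs_field != 0 guards matter because POP66 and POP76 with a zero rs field encode JIC and JIALC rather than BEQZC and BNEZC (compare jic() further down, which emits POP66 with zero_reg). A decode sketch, with the field layout taken from the MIPS r6 manual and an illustrative helper name:

// Sketch: distinguishing BEQZC from JIC inside the POP66 opcode group.
bool DecodesAsBeqzc(uint32_t instr) {
  uint32_t opcode = instr >> 26;       // top 6 bits
  uint32_t rs = (instr >> 21) & 0x1F;  // bits 25..21
  // rs == 0 would make this JIC, not BEQZC.
  return opcode == 0x36 && rs != 0;    // POP66 == 0b110110
}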
@@ -507,6 +513,34 @@ bool Assembler::IsBne(Instr instr) {
}
+bool Assembler::IsBeqzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP66 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBnezc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP76 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBeqc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP10 && rs != 0 && rs < rt; // rt != 0 follows from rs < rt
+}
+
+
+bool Assembler::IsBnec(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP30 && rs != 0 && rs < rt; // rt != 0 follows from rs < rt
+}
+
+
bool Assembler::IsJump(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
@@ -592,7 +626,7 @@ int32_t Assembler::GetBranchOffset(Instr instr) {
bool Assembler::IsLw(Instr instr) {
- return ((instr & kOpcodeMask) == LW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
@@ -614,7 +648,7 @@ Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
bool Assembler::IsSw(Instr instr) {
- return ((instr & kOpcodeMask) == SW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
@@ -640,6 +674,36 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (Assembler::IsBc(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBzc(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ }
+ }
+ return Assembler::OffsetSize::kOffset16;
+}
+
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ int bits = OffsetSizeInBits(instr);
+ const int32_t mask = (1 << bits) - 1;
+ bits = 32 - bits;
+
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ int32_t imm = ((instr & mask) << bits) >> (bits - 2);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + Assembler::kBranchPCOffset + imm;
+ }
+}
+
+
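The shift pair sign-extends the offset field and rescales it from instruction units to bytes in one step. A worked sketch for the 16-bit case, relying on the same arithmetic-shift assumption the comment above spells out:

// For a 16-bit field the code shifts << 16 then >> 14:
//   field 0xFFFF (-1 as int16) -> 0xFFFF0000 after << 16 (sign bit set)
//   arithmetic >> 14           -> -4 bytes, i.e. one instruction backwards
int32_t SignExtendAndScale16(uint32_t field) {
  return (static_cast<int32_t>(field & 0xFFFF) << 16) >> 14;
}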
int Assembler::target_at(int pos, bool is_internal) {
Instr instr = instr_at(pos);
if (is_internal) {
@@ -663,18 +727,9 @@ int Assembler::target_at(int pos, bool is_internal) {
}
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsLui(instr));
- // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
- // the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
-
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- return pos + kBranchPCOffset + imm18;
- }
- } else if (IsLui(instr)) {
+ return AddBranchOffset(pos, instr);
+ } else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
@@ -690,10 +745,23 @@ int Assembler::target_at(int pos, bool is_internal) {
DCHECK(pos > delta);
return pos - delta;
}
- } else {
- UNREACHABLE();
- return 0;
}
+ return 0;
+}
+
+
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
+ DCHECK((imm & 3) == 0);
+ imm >>= 2;
+
+ const int32_t mask = (1 << bits) - 1;
+ instr &= ~mask;
+ DCHECK(is_intn(imm, bits));
+
+ return instr | (imm & mask);
}
@@ -716,15 +784,9 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
DCHECK(IsBranch(instr) || IsLui(instr));
if (IsBranch(instr)) {
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- DCHECK((imm18 & 3) == 0);
-
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- DCHECK(is_int16(imm16));
-
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
- } else if (IsLui(instr)) {
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+ } else {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
DCHECK(IsOri(instr_ori));
@@ -738,8 +800,6 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
- } else {
- UNREACHABLE();
}
}
@@ -788,20 +848,23 @@ void Assembler::bind_to(Label* L, int pos) {
Instr instr = instr_at(fixup_pos);
if (is_internal) {
target_at_put(fixup_pos, pos, is_internal);
- } else if (!is_internal && IsBranch(instr)) {
- if (dist > kMaxBranchOffset) {
- if (trampoline_pos == kInvalidSlotPos) {
- trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
}
- CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos, false);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ target_at_put(fixup_pos, pos, false);
}
- target_at_put(fixup_pos, pos, false);
- } else {
- target_at_put(fixup_pos, pos, false);
}
}
L->bind_to(pos);
@@ -832,10 +895,47 @@ void Assembler::next(Label* L, bool is_internal) {
bool Assembler::is_near(Label* L) {
- if (L->is_bound()) {
- return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+}
+
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ if (L == nullptr || !L->is_bound()) return true;
+ return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
+}
+
+
+bool Assembler::is_near_branch(Label* L) {
+ DCHECK(L->is_bound());
+ return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
+}
+
+
+int Assembler::BranchOffset(Instr instr) {
+ // At pre-R6 and for other R6 branches the offset is 16 bits.
+ int bits = OffsetSize::kOffset16;
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ uint32_t opcode = GetOpcodeField(instr);
+ switch (opcode) {
+ // Checks BC or BALC.
+ case BC:
+ case BALC:
+ bits = OffsetSize::kOffset26;
+ break;
+
+ // Checks BEQZC or BNEZC.
+ case POP66:
+ case POP76:
+ if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
+ break;
+ default:
+ break;
+ }
}
- return false;
+
+ return (1 << (bits + 2 - 1)) - 1;
}
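The returned bound is (1 << (bits + 2 - 1)) - 1: a signed bits-wide field scaled by four spans about 2^(bits+1) bytes in each direction. The exact encodable forward maximum is (2^(bits-1) - 1) * 4, so the bound is slightly loose, which is harmless since branch distances are 4-byte aligned. Worked values:

//   kOffset16: (1 << 17) - 1 == 131071     (~128 KB)
//   kOffset21: (1 << 22) - 1 == 4194303    (~4 MB,  BEQZC/BNEZC)
//   kOffset26: (1 << 27) - 1 == 134217727  (~128 MB, BC/BALC)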
@@ -926,49 +1026,56 @@ void Assembler::GenInstrRegister(Opcode opcode,
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- FPURegister ft,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch) {
+ DCHECK(rs.is_valid() && (is_int21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t j) {
- DCHECK(rs.is_valid() && (is_uint21(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (j & kImm21Mask);
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
+ uint32_t offset21) {
+ DCHECK(rs.is_valid() && (is_uint21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
emit(instr);
}
-void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26) {
+void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch) {
DCHECK(is_int26(offset26));
Instr instr = opcode | (offset26 & kImm26Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
@@ -1021,99 +1128,18 @@ uint32_t Assembler::jump_address(Label* L) {
}
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - pc_offset();
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21_compact(Label* L,
- bool jump_elimination_allowed) {
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
+ int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos();
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
} else {
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
@@ -1122,9 +1148,9 @@ int32_t Assembler::branch_offset21_compact(Label* L,
}
}
- int32_t offset = target_pos - pc_offset();
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
+ DCHECK(is_intn(offset, bits + 2));
DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFe00000) == 0); // Offset is 21bit width.
return offset;
}
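When the previous instruction is a compact branch, the next emit() first inserts a forbidden-slot nop, so the branch being assembled actually lands one instruction later; pad folds that into both the link position and the computed offset. A worked example, assuming kBranchPCOffset == kInstrSize == 4:

//   pc_offset() == 100, previous instruction is a compact branch (pad == 4),
//   label already bound at 140:
//     nop is emitted at 100, the branch itself at 104,
//     offset = 140 - (100 + 4 + 4) = 32 bytes (relative to the branch's pc+4)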
@@ -1171,14 +1197,14 @@ void Assembler::bal(int16_t offset) {
void Assembler::bc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrImmediate(BC, offset);
+ GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::balc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(BALC, offset);
+ GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1199,7 +1225,7 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, rt, rt, offset);
+ GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1208,7 +1234,7 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZ, rs, rt, offset);
+ GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1217,7 +1243,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZL, rs, rt, offset);
+ GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1240,7 +1266,8 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
@@ -1254,14 +1281,15 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, rt, rt, offset);
+ DCHECK(!rt.is(zero_reg));
+ GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1270,16 +1298,16 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZ, rs, rt, offset);
+ GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!rs.is(zero_reg));
+ DCHECK(!rt.is(zero_reg));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZL, rs, rt, offset);
+ GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1308,96 +1336,121 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ DCHECK(!rs.is(zero_reg));
+ if (rs.code() >= rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ DCHECK(!rs.is(zero_reg));
+ if (rs.code() >= rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezall(Register rs, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(ADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(ADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(DADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(DADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP66 | (rs.code() << kRsShift) | (offset & kImm21Mask);
- emit(instr);
+ GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP76 | (rs.code() << kRsShift) | offset;
- emit(instr);
+ GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1409,7 +1462,9 @@ void Assembler::j(int32_t target) {
(kImm26Bits + kImmFieldShift)) == 0;
DCHECK(in_range && ((target & 3) == 0));
#endif
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrJump(J, (target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1435,8 +1490,10 @@ void Assembler::jal(int32_t target) {
(kImm26Bits + kImmFieldShift)) == 0;
DCHECK(in_range && ((target & 3) == 0));
#endif
+ BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, (target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1451,9 +1508,7 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::jic(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- Instr instr = POP66 | (JIC << kRsShift) | (rt.code() << kRtShift) |
- (offset & kImm16Mask);
- emit(instr);
+ GenInstrImmediate(POP66, zero_reg, rt, offset);
}
@@ -1604,7 +1659,7 @@ void Assembler::sll(Register rd,
// nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
// instructions.
DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
@@ -1614,7 +1669,7 @@ void Assembler::sllv(Register rd, Register rt, Register rs) {
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
@@ -1624,7 +1679,7 @@ void Assembler::srlv(Register rd, Register rt, Register rs) {
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
@@ -1645,7 +1700,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
@@ -1653,6 +1708,16 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
}
+void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ DCHECK(sa < 5 && sa > 0);
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+ emit(instr);
+}
+
+
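
The new lsa helper implements the r6 "load scaled address" operation rd = (rs << sa) + rt for sa in 1..4; the hardware field holds sa - 1, which is why the encoding above writes (sa - 1) << kSaShift. A scalar model of the semantics:

    #include <cassert>
    #include <cstdint>

    // Sketch of LSA semantics; the 1..4 range mirrors the DCHECK above.
    uint32_t LsaModel(uint32_t rs, uint32_t rt, unsigned sa) {
      assert(sa > 0 && sa < 5);
      return (rs << sa) + rt;
    }
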
// ------------Memory-instructions-------------
// Helper for base-reg + offset, when offset is larger than int16.
@@ -1770,7 +1835,7 @@ void Assembler::lui(Register rd, int32_t j) {
}
-void Assembler::aui(Register rs, Register rt, int32_t j) {
+void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses the same opcode as 'lui'. The difference in
// encoding is that 'lui' has the zero register in its rs field.
DCHECK(!(rs.is(zero_reg)));
@@ -1784,7 +1849,7 @@ void Assembler::aui(Register rs, Register rt, int32_t j) {
void Assembler::addiupc(Register rs, int32_t imm19) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid() && is_int19(imm19));
- int32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
+ uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -1792,23 +1857,23 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
void Assembler::lwpc(Register rs, int32_t offset19) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid() && is_int19(offset19));
- int32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
+ uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::auipc(Register rs, int16_t imm16) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::aluipc(Register rs, int16_t imm16) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -1942,14 +2007,14 @@ void Assembler::movn(Register rd, Register rs, Register rt) {
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 1;
+ rt.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 0;
+ rt.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
@@ -2146,13 +2211,13 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
@@ -2219,45 +2284,45 @@ void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -2441,55 +2506,71 @@ void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
@@ -2584,7 +2665,8 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -2600,7 +2682,8 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -2693,7 +2776,6 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
}
-// Debugging.
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta) {
Instr instr = instr_at(pc);
@@ -2749,6 +2831,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -2782,54 +2865,42 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(uint32_t data) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dq(uint64_t data) {
- CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) = data;
- pc_ += sizeof(uint64_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
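
db/dd/dq now share a single templated emit path instead of three hand-written pointer stores. A self-contained sketch of that pattern, assuming a raw byte buffer (the real EmitHelper also advances pc_ inside the Assembler and participates in the forbidden-slot check):

    #include <cstdint>
    #include <cstring>

    // One helper covers 1-, 4- and 8-byte data; memcpy avoids the
    // unaligned reinterpret_cast stores of the old code.
    template <typename T>
    void EmitData(uint8_t** pc, T value) {
      std::memcpy(*pc, &value, sizeof(T));
      *pc += sizeof(T);
    }
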
void Assembler::dd(Label* label) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
uint32_t data;
+ CheckForEmitInForbiddenSlot();
if (label->is_bound()) {
data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
} else {
data = jump_address(label);
internal_reference_positions_.insert(label->pos());
}
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit_code_stub_address(Code* stub) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) =
- reinterpret_cast<uint32_t>(stub->instruction_start());
- pc_ += sizeof(uint32_t);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
if (rmode >= RelocInfo::COMMENT &&
- rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsComment(rmode)
@@ -2844,10 +2915,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
+ RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2887,8 +2956,12 @@ void Assembler::CheckTrampolinePool() {
// First we emit jump (2 instructions), then we emit trampoline pool.
{ BlockTrampolinePoolScope block_trampoline_pool(this);
Label after_pool;
- b(&after_pool);
- nop();
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc(&after_pool);
+ } else {
+ b(&after_pool);
+ nop();
+ }
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
@@ -2955,7 +3028,7 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below which emits a nop when the address
// fits in just 16 bits. It is unlikely to help; it should be benchmarked
// and possibly removed.
-void Assembler::set_target_address_at(Address pc,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
Instr instr2 = instr_at(pc + kInstrSize);
@@ -2975,130 +3048,12 @@ void Assembler::set_target_address_at(Address pc,
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
*(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
- // The following code is an optimization for the common case of Call()
- // or Jump() which is load to register, and jump through register:
- // li(t9, address); jalr(t9) (or jr(t9)).
- // If the destination address is in the same 256 MB page as the call, it
- // is faster to do a direct jal, or j, rather than jump thru register, since
- // that lets the cpu pipeline prefetch the target address. However each
- // time the address above is patched, we have to patch the direct jal/j
- // instruction, as well as possibly revert to jalr/jr if we now cross a
- // 256 MB page. Note that with the jal/j instructions, we do not need to
- // load the register, but that code is left, since it makes it easy to
- // revert this process. A further optimization could try replacing the
- // li sequence with nops.
- // This optimization can only be applied if the rt-code from instr2 is the
- // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
- // mips return. Occasionally this lands after an li().
-
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field =
- static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
- bool patched_jump = false;
-
-#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
- // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
- // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
- // apply this workaround for all cores so we don't have to identify the core.
- if (in_range) {
- // The 24k core E156 bug has some very specific requirements, we only check
- // the most simple one: if the address of the delay slot instruction is in
- // the first or last 32 KB of the 256 MB segment.
- uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
- uint32_t ipc_segment_addr = ipc & segment_mask;
- if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
- in_range = false;
- }
-#endif
-
- if (IsJalr(instr3)) {
- // Try to convert JALR to JAL.
- if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p + 2) = JAL | target_field;
- patched_jump = true;
- }
- } else if (IsJr(instr3)) {
- // Try to convert JR to J, skip returns (jr ra).
- bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
- if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p + 2) = J | target_field;
- patched_jump = true;
- }
- } else if (IsJal(instr3)) {
- if (in_range) {
- // We are patching an already converted JAL.
- *(p + 2) = JAL | target_field;
- } else {
- // Patch JAL, but out of range, revert to JALR.
- // JALR rs reg is the rt reg specified in the ORI instruction.
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
- }
- patched_jump = true;
- } else if (IsJ(instr3)) {
- if (in_range) {
- // We are patching an already converted J (jump).
- *(p + 2) = J | target_field;
- } else {
- // Trying patch J, but out of range, just go back to JR.
- // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- if (IsMipsArchVariant(kMips32r6)) {
- *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
- } else {
- *(p + 2) = SPECIAL | rs_field | JR;
- }
- }
- patched_jump = true;
- }
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
}
}
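
With the jal/j patching removed, set_target_address_at now only rewrites the two-instruction lui/ori materialization and flushes exactly those eight bytes. The hi/lo split it performs, written out with the usual constant values (kLuiShift = 16, kImm16Mask = 0xFFFF):

    #include <cstdint>

    // Sketch of splitting a 32-bit target across a lui/ori pair.
    void SplitForLuiOri(uint32_t target, uint32_t* lui_imm,
                        uint32_t* ori_imm) {
      *lui_imm = target >> 16;      // upper half-word, loaded by lui
      *ori_imm = target & 0xFFFFu;  // lower half-word, or'ed in by ori
    }
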
-
-void Assembler::JumpToJumpRegister(Address pc) {
- // Address pc points to lui/ori instructions.
- // Jump to label may follow at pc + 2 * kInstrSize.
- uint32_t* p = reinterpret_cast<uint32_t*>(pc);
-#ifdef DEBUG
- Instr instr1 = instr_at(pc);
-#endif
- Instr instr2 = instr_at(pc + 1 * kInstrSize);
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- bool patched = false;
-
- if (IsJal(instr3)) {
- DCHECK(GetOpcodeField(instr1) == LUI);
- DCHECK(GetOpcodeField(instr2) == ORI);
-
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
- patched = true;
- } else if (IsJ(instr3)) {
- DCHECK(GetOpcodeField(instr1) == LUI);
- DCHECK(GetOpcodeField(instr2) == ORI);
-
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- if (IsMipsArchVariant(kMips32r6)) {
- *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
- } else {
- *(p + 2) = SPECIAL | rs_field | JR;
- }
- patched = true;
- }
-
- if (patched) {
- CpuFeatures::FlushICache(pc + 2, sizeof(Address));
- }
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/mips/assembler-mips.h b/chromium/v8/src/mips/assembler-mips.h
index c47f6d3abe2..054695483f7 100644
--- a/chromium/v8/src/mips/assembler-mips.h
+++ b/chromium/v8/src/mips/assembler-mips.h
@@ -41,12 +41,33 @@
#include <set>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/mips/constants-mips.h"
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
+ V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
+ V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+// clang-format on
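
These V(...) lists are X-macros: each consumer defines V to stamp out one item per register, then undefines it, so the register set is maintained in a single place. The Register::Code enum below uses exactly this pattern; a sketch of the expansion (enumerator names here are illustrative to avoid clashing with the real ones):

    #define SKETCH_CODE(R) kSketch_##R,
    enum SketchCode {
      GENERAL_REGISTERS(SKETCH_CODE)  // kSketch_zero_reg, kSketch_at, ...
      kSketchAfterLast                // register count falls out of the list
    };
    #undef SKETCH_CODE
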
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -72,13 +93,19 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
-// Core register.
struct Register {
- static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp.
- static const int kSizeInBytes = 4;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kNumRegisters = Code::kAfterLast;
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kMantissaOffset = 0;
static const int kExponentOffset = 4;
@@ -89,117 +116,37 @@ struct Register {
#error Unknown endianness
#endif
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
- reg.is(from_code(kCpRegister)));
- return reg.is(from_code(kCpRegister)) ?
- kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
- reg.code() - 2; // zero_reg and 'at' are skipped.
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return index == kMaxNumAllocatableRegisters - 1 ?
- from_code(kCpRegister) : // Last index is always the 'cp' register.
- from_code(index + 2); // zero_reg and 'at' are skipped.
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "v0",
- "v1",
- "a0",
- "a1",
- "a2",
- "a3",
- "t0",
- "t1",
- "t2",
- "t3",
- "t4",
- "t5",
- "t6",
- "s7",
- };
- return names[index];
- }
static Register from_code(int code) {
- Register r = { code };
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
return r;
}
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-#define REGISTER(N, C) \
- const int kRegister_ ## N ## _Code = C; \
- const Register N = { C }
-
-REGISTER(no_reg, -1);
-// Always zero.
-REGISTER(zero_reg, 0);
-// at: Reserved for synthetic instructions.
-REGISTER(at, 1);
-// v0, v1: Used when returning multiple values from subroutines.
-REGISTER(v0, 2);
-REGISTER(v1, 3);
-// a0 - a4: Used to pass non-FP parameters.
-REGISTER(a0, 4);
-REGISTER(a1, 5);
-REGISTER(a2, 6);
-REGISTER(a3, 7);
-// t0 - t9: Can be used without reservation, act as temporary registers and are
-// allowed to be destroyed by subroutines.
-REGISTER(t0, 8);
-REGISTER(t1, 9);
-REGISTER(t2, 10);
-REGISTER(t3, 11);
-REGISTER(t4, 12);
-REGISTER(t5, 13);
-REGISTER(t6, 14);
-REGISTER(t7, 15);
-// s0 - s7: Subroutine register variables. Subroutines that write to these
-// registers must restore their values before exiting so that the caller can
-// expect the values to be preserved.
-REGISTER(s0, 16);
-REGISTER(s1, 17);
-REGISTER(s2, 18);
-REGISTER(s3, 19);
-REGISTER(s4, 20);
-REGISTER(s5, 21);
-REGISTER(s6, 22);
-REGISTER(s7, 23);
-REGISTER(t8, 24);
-REGISTER(t9, 25);
-// k0, k1: Reserved for system calls and interrupt handlers.
-REGISTER(k0, 26);
-REGISTER(k1, 27);
-// gp: Reserved.
-REGISTER(gp, 28);
-// sp: Stack pointer.
-REGISTER(sp, 29);
-// fp: Frame pointer.
-REGISTER(fp, 30);
-// ra: Return address pointer.
-REGISTER(ra, 31);
-
-#undef REGISTER
+// s7: context register
+// s3: lithium scratch
+// s4: lithium scratch2
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
int ToNumber(Register reg);
@@ -207,75 +154,70 @@ int ToNumber(Register reg);
Register ToRegister(int num);
// Coprocessor register.
-struct FPURegister {
- static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
-
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0.
- // f28: 0.0
- // f30: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
- kNumReservedRegisters;
+ static const int kMaxNumRegisters = Code::kAfterLast;
inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
-
- // TODO(turbofan): Proper support for float32.
- inline static int NumAllocatableAliasedRegisters();
-
- inline static int ToAllocationIndex(FPURegister reg);
- static const char* AllocationIndexToString(int index);
- static FPURegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index * 2);
- }
-
- static FPURegister from_code(int code) {
- FPURegister r = { code };
- return r;
- }
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- FPURegister low() const {
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ DoubleRegister low() const {
// Find the low reg of a Double-reg pair, which is the reg itself.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code;
DCHECK(reg.is_valid());
return reg;
}
- FPURegister high() const {
+ DoubleRegister high() const {
// Find the high reg of a Double-reg pair, which is reg + 1.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_ + 1;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code + 1;
DCHECK(reg.is_valid());
return reg;
}
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// f28: 0.0
+// f30: scratch register.
+
// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
@@ -285,43 +227,43 @@ struct FPURegister {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
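
In FR=0 mode a 64-bit double therefore occupies an even/odd register pair, which is what DoubleRegister::low()/high() above encode. A scalar model, assuming even register codes as the DCHECKs require:

    #include <cassert>

    // The even code names the pair; low() is the code itself, high() is
    // its odd partner.
    int LowHalfCode(int code) { assert(code % 2 == 0); return code; }
    int HighHalfCode(int code) { assert(code % 2 == 0); return code + 1; }
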
-typedef FPURegister DoubleRegister;
-typedef FPURegister FloatRegister;
-
-const FPURegister no_freg = { -1 };
-
-const FPURegister f0 = { 0 }; // Return value in hard float mode.
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
+typedef DoubleRegister FPURegister;
+typedef DoubleRegister FloatRegister;
+
+const DoubleRegister no_freg = {-1};
+
+const DoubleRegister f0 = {0}; // Return value in hard float mode.
+const DoubleRegister f1 = {1};
+const DoubleRegister f2 = {2};
+const DoubleRegister f3 = {3};
+const DoubleRegister f4 = {4};
+const DoubleRegister f5 = {5};
+const DoubleRegister f6 = {6};
+const DoubleRegister f7 = {7};
+const DoubleRegister f8 = {8};
+const DoubleRegister f9 = {9};
+const DoubleRegister f10 = {10};
+const DoubleRegister f11 = {11};
+const DoubleRegister f12 = {12}; // Arg 0 in hard float mode.
+const DoubleRegister f13 = {13};
+const DoubleRegister f14 = {14}; // Arg 1 in hard float mode.
+const DoubleRegister f15 = {15};
+const DoubleRegister f16 = {16};
+const DoubleRegister f17 = {17};
+const DoubleRegister f18 = {18};
+const DoubleRegister f19 = {19};
+const DoubleRegister f20 = {20};
+const DoubleRegister f21 = {21};
+const DoubleRegister f22 = {22};
+const DoubleRegister f23 = {23};
+const DoubleRegister f24 = {24};
+const DoubleRegister f25 = {25};
+const DoubleRegister f26 = {26};
+const DoubleRegister f27 = {27};
+const DoubleRegister f28 = {28};
+const DoubleRegister f29 = {29};
+const DoubleRegister f30 = {30};
+const DoubleRegister f31 = {31};
// Register aliases.
// cp is assumed to be a callee-saved register.
@@ -341,22 +283,22 @@ const FPURegister f31 = { 31 };
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
- bool is_valid() const { return code_ == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return reg_code == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
@@ -466,27 +408,46 @@ class Assembler : public AssemblerBase {
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
// Determines if the Label is bound and near enough that a branch
// instruction can be used to reach it instead of a jump instruction.
bool is_near(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_branch(Label* L);
+ inline bool is_near_pre_r6(Label* L) {
+ DCHECK(!IsMipsArchVariant(kMips32r6));
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+ }
+ inline bool is_near_r6(Label* L) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
+ }
+
+ int BranchOffset(Instr instr);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
// The width of the offset field is selected by the OffsetSize argument.
- int32_t branch_offset(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
- int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t o = branch_offset(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
- }
- int32_t shifted_branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t o = branch_offset_compact(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
}
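
All MIPS instructions are word-aligned, so branch fields store offsets in instruction units; the shifted_* wrappers above simply drop the two always-zero bits. Sketch:

    #include <cassert>
    #include <cstdint>

    int32_t ToInstructionUnits(int32_t byte_offset) {
      assert((byte_offset & 3) == 0);  // branch targets are word aligned
      return byte_offset >> 2;         // as stored in the offset field
    }
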
uint32_t jump_address(Label* L);
@@ -496,54 +457,51 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ static void set_target_address_at(
+ Isolate* isolate, Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// On MIPS there is no constant pool, so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(pc, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, target, icache_flush_mode);
}
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- INLINE(static void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
- static void JumpToJumpRegister(Address pc);
-
static void QuietNaN(HeapObject* nan);
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
set_target_address_at(
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
- code,
+ isolate,
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -629,111 +587,111 @@ class Assembler : public AssemblerBase {
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
- void b(Label* L) { b(branch_offset(L, false)>>2); }
+ inline void b(Label* L) { b(shifted_branch_offset(L)); }
void bal(int16_t offset);
- void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+ inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
void bc(int32_t offset);
- void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+ inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
void balc(int32_t offset);
- void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
+ inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
void beq(Register rs, Register rt, int16_t offset);
- void beq(Register rs, Register rt, Label* L) {
- beq(rs, rt, branch_offset(L, false) >> 2);
+ inline void beq(Register rs, Register rt, Label* L) {
+ beq(rs, rt, shifted_branch_offset(L));
}
void bgez(Register rs, int16_t offset);
void bgezc(Register rt, int16_t offset);
- void bgezc(Register rt, Label* L) {
- bgezc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezc(Register rt, Label* L) {
+ bgezc(rt, shifted_branch_offset(L));
}
void bgeuc(Register rs, Register rt, int16_t offset);
- void bgeuc(Register rs, Register rt, Label* L) {
- bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, shifted_branch_offset(L));
}
void bgec(Register rs, Register rt, int16_t offset);
- void bgec(Register rs, Register rt, Label* L) {
- bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, shifted_branch_offset(L));
}
void bgezal(Register rs, int16_t offset);
void bgezalc(Register rt, int16_t offset);
- void bgezalc(Register rt, Label* L) {
- bgezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, shifted_branch_offset(L));
}
void bgezall(Register rs, int16_t offset);
- void bgezall(Register rs, Label* L) {
- bgezall(rs, branch_offset(L, false)>>2);
+ inline void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L) >> 2);
}
void bgtz(Register rs, int16_t offset);
void bgtzc(Register rt, int16_t offset);
- void bgtzc(Register rt, Label* L) {
- bgtzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, shifted_branch_offset(L));
}
void blez(Register rs, int16_t offset);
void blezc(Register rt, int16_t offset);
- void blezc(Register rt, Label* L) {
- blezc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezc(Register rt, Label* L) {
+ blezc(rt, shifted_branch_offset(L));
}
void bltz(Register rs, int16_t offset);
void bltzc(Register rt, int16_t offset);
- void bltzc(Register rt, Label* L) {
- bltzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzc(Register rt, Label* L) {
+ bltzc(rt, shifted_branch_offset(L));
}
void bltuc(Register rs, Register rt, int16_t offset);
- void bltuc(Register rs, Register rt, Label* L) {
- bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, shifted_branch_offset(L));
}
void bltc(Register rs, Register rt, int16_t offset);
- void bltc(Register rs, Register rt, Label* L) {
- bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, shifted_branch_offset(L));
}
void bltzal(Register rs, int16_t offset);
void blezalc(Register rt, int16_t offset);
- void blezalc(Register rt, Label* L) {
- blezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezalc(Register rt, Label* L) {
+ blezalc(rt, shifted_branch_offset(L));
}
void bltzalc(Register rt, int16_t offset);
- void bltzalc(Register rt, Label* L) {
- bltzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, shifted_branch_offset(L));
}
void bgtzalc(Register rt, int16_t offset);
- void bgtzalc(Register rt, Label* L) {
- bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, shifted_branch_offset(L));
}
void beqzalc(Register rt, int16_t offset);
- void beqzalc(Register rt, Label* L) {
- beqzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, shifted_branch_offset(L));
}
void beqc(Register rs, Register rt, int16_t offset);
- void beqc(Register rs, Register rt, Label* L) {
- beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, shifted_branch_offset(L));
}
void beqzc(Register rs, int32_t offset);
- void beqzc(Register rs, Label* L) {
- beqzc(rs, branch_offset21_compact(L, false)>>2);
+ inline void beqzc(Register rs, Label* L) {
+ beqzc(rs, shifted_branch_offset21(L));
}
void bnezalc(Register rt, int16_t offset);
- void bnezalc(Register rt, Label* L) {
- bnezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, shifted_branch_offset(L));
}
void bnec(Register rs, Register rt, int16_t offset);
- void bnec(Register rs, Register rt, Label* L) {
- bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, shifted_branch_offset(L));
}
void bnezc(Register rt, int32_t offset);
- void bnezc(Register rt, Label* L) {
- bnezc(rt, branch_offset21_compact(L, false)>>2);
+ inline void bnezc(Register rt, Label* L) {
+ bnezc(rt, shifted_branch_offset21(L));
}
void bne(Register rs, Register rt, int16_t offset);
- void bne(Register rs, Register rt, Label* L) {
- bne(rs, rt, branch_offset(L, false)>>2);
+ inline void bne(Register rs, Register rt, Label* L) {
+ bne(rs, rt, shifted_branch_offset(L));
}
void bovc(Register rs, Register rt, int16_t offset);
- void bovc(Register rs, Register rt, Label* L) {
- bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, shifted_branch_offset(L));
}
void bnvc(Register rs, Register rt, int16_t offset);
- void bnvc(Register rs, Register rt, Label* L) {
- bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, shifted_branch_offset(L));
}
// Never use the int16_t b(l)cond version with a branch offset
@@ -793,6 +751,8 @@ class Assembler : public AssemblerBase {
void rotr(Register rd, Register rt, uint16_t sa);
void rotrv(Register rd, Register rt, Register rs);
+ // Address computing instructions with shift.
+ void lsa(Register rd, Register rt, Register rs, uint8_t sa);
// ------------Memory-instructions-------------
@@ -867,10 +827,10 @@ class Assembler : public AssemblerBase {
void movz_s(FPURegister fd, FPURegister fs, Register rt);
void movz_d(FPURegister fd, FPURegister fs, Register rt);
- void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movn_s(FPURegister fd, FPURegister fs, Register rt);
void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
@@ -978,12 +938,12 @@ class Assembler : public AssemblerBase {
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
- void bc1eqz(Label* L, FPURegister ft) {
- bc1eqz(branch_offset(L, false)>>2, ft);
+ inline void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(shifted_branch_offset(L), ft);
}
void bc1nez(int16_t offset, FPURegister ft);
- void bc1nez(Label* L, FPURegister ft) {
- bc1nez(branch_offset(L, false)>>2, ft);
+ inline void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(shifted_branch_offset(L), ft);
}
// Conditions and branches for non MIPSr6.
@@ -993,9 +953,13 @@ class Assembler : public AssemblerBase {
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+ inline void bc1f(Label* L, uint16_t cc = 0) {
+ bc1f(shifted_branch_offset(L), cc);
+ }
void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+ inline void bc1t(Label* L, uint16_t cc = 0) {
+ bc1t(shifted_branch_offset(L), cc);
+ }
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
@@ -1049,7 +1013,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1085,9 +1049,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
- // Emits the address of the code stub's first instruction.
- void emit_code_stub_address(Code* stub);
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1114,8 +1075,14 @@ class Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsBc(Instr instr);
+ static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
+ static bool IsBeqzc(Instr instr);
+ static bool IsBnezc(Instr instr);
+ static bool IsBeqc(Instr instr);
+ static bool IsBnec(Instr instr);
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
@@ -1174,6 +1141,8 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
+ bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1237,6 +1206,8 @@ class Assembler : public AssemblerBase {
return block_buffer_growth_;
}
+ inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+
private:
inline static void set_target_internal_reference_encoded_at(Address pc,
Address target);
@@ -1279,11 +1250,19 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Readable constants for compact branch handling in emit().
+ enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
+
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
- inline void emit(Instr x);
- inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ inline void emit(Instr x,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ inline void emit(uint64_t x);
+ inline void CheckForEmitInForbiddenSlot();
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -1334,21 +1313,22 @@ class Assembler : public AssemblerBase {
FPUControlRegister fs,
SecondaryField func = NULLSF);
-
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register r1,
- FPURegister r2,
- int32_t j);
- void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
- void GenInstrImmediate(Opcode opcode, int32_t offset26);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, Register rt, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, SecondaryField SF, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register r1, FPURegister r2, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
+ void GenInstrImmediate(
+ Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
void GenInstrJump(Opcode opcode,
@@ -1423,12 +1403,17 @@ class Assembler : public AssemblerBase {
bool trampoline_emitted_;
static const int kTrampolineSlotsSize = 4 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
// Internal reference positions, required for unbound internal reference
// labels.
std::set<int> internal_reference_positions_;
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
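
r6 compact branches have a "forbidden slot" instead of a delay slot: the following instruction executes only on fall-through and must not itself be a control-transfer instruction. The prev_instr_compact_branch_ flag above lets CheckForEmitInForbiddenSlot() pad with a nop when that rule would be violated; a simplified model of the decision (only the flag name is from this patch):

    // Returns true when a nop must be inserted before emitting the next
    // instruction.
    bool NeedsForbiddenSlotPadding(bool prev_instr_compact_branch,
                                   bool next_is_branch_or_jump) {
      return prev_instr_compact_branch && next_is_branch_or_jump;
    }
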
Trampoline trampoline_;
bool internal_trampoline_exception_;
@@ -1450,6 +1435,7 @@ class EnsureSpace BASE_EMBEDDED {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ARM_ASSEMBLER_MIPS_H_
diff --git a/chromium/v8/src/mips/builtins-mips.cc b/chromium/v8/src/mips/builtins-mips.cc
index f4da1945795..f6c1dfbaaf2 100644
--- a/chromium/v8/src/mips/builtins-mips.cc
+++ b/chromium/v8/src/mips/builtins-mips.cc
@@ -23,8 +23,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
- // -- a1 : called function (only guaranteed when
- // -- extra_args requires it)
+ // -- a1 : target
+ // -- a3 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -36,21 +36,31 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(a1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(a1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(a3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(a1, a3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
__ Addu(a0, a0, num_extra_args + 1);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -58,32 +68,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ lw(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ lw(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ lw(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
- __ lw(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -149,6 +142,108 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ __ Drop(2);
+ }
+
+ // 2a. Convert first argument to number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ Move(v0, Smi::FromInt(0));
+ __ DropAndRet(1);
+}
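
The Subu/sll/Addu sequence above converts the argument count into the address of the first argument: sp moves up by (argc - 1) * 4 so it points at arg[0] in the layout from the state comment, and Drop(2) then discards that argument plus the receiver. As plain arithmetic:

    #include <cstdint>

    // Address of arg[0] given the stack layout in the state comment.
    uint32_t FirstArgSlot(uint32_t sp, uint32_t argc) {
      const uint32_t kPointerSize = 4;  // MIPS32
      return sp + (argc - 1) * kPointerSize;
    }
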
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ __ Drop(2);
+ __ jmp(&done);
+ __ bind(&no_arguments);
+ __ Move(a0, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure a0 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(a0, &done_convert);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(a0, v0);
+ __ Pop(a1, a3);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
+
+ // 6. Fall back to the runtime to create a new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -202,7 +297,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(a0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -212,12 +307,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a3 : new target
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into a0 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -234,7 +333,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure a0 is a string.
+ // 3. Make sure a0 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
@@ -245,60 +344,50 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(a1);
+ __ Push(a1, a3);
__ CallStub(&stub);
__ Move(a0, v0);
- __ Pop(a1);
+ __ Pop(a1, a3);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- a0 : the first argument
- // -- a1 : constructor function
- // -- ra : return address
- // -----------------------------------
-
- Label allocate, done_allocate;
- __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(a1, a2, a3);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ Ret(USE_DELAY_SLOT);
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Move(a2, Smi::FromInt(JSValue::kSize));
- __ Push(a0, a1, a2);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(a0, a1);
- }
- __ jmp(&done_allocate);
+ // 6. Fall back to the runtime to create a new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push call kind information and function as parameter to the runtime call.
- __ Push(a1, a1);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
- // Restore call kind information and receiver.
- __ Pop(a1);
+ // Restore target function and new target.
+ __ Pop(a1, a3);
}
@@ -335,12 +424,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : allocation site or undefined
- // -- a3 : original constructor
+ // -- a3 : new target
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -354,164 +444,162 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(a2, a0, a1, a3);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
-
- // Fall back to runtime if the original constructor and function differ.
- __ Branch(&rt_call, ne, a1, Operand(a3));
-
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t5, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lw(t0, bit_field3);
- __ DecodeField<Map::Counter>(t2, t0);
- __ Branch(&allocate, lt, t2, Operand(Map::kSlackTrackingCounterEnd));
- // Decrease generous allocation count.
- __ Subu(t0, t0, Operand(1 << Map::Counter::kShift));
- __ Branch(USE_DELAY_SLOT, &allocate, ne, t2,
- Operand(Map::kSlackTrackingCounterEnd));
- __ sw(t0, bit_field3); // In delay slot.
-
- __ Push(a1, a2, a1); // a1 = Constructor.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(a1, a2);
- __ li(t2, Operand(Map::kSlackTrackingCounterEnd - 1));
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- Label rt_call_reload_new_target;
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
- __ Allocate(a3, t4, t5, t6, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t4: JSObject (not tagged)
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3*kPointerSize));
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- // t2: slack tracking counter (non-API function case)
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ Branch(&no_inobject_slack_tracking, lt, t2,
- Operand(Map::kSlackTrackingCounterEnd));
-
- // Allocate object with a slack.
- __ lbu(
- a0,
- FieldMemOperand(
- a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ subu(a0, a0, a2);
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a0, t5, at);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ sll(at, a3, kPointerSizeLog2);
- __ Addu(t6, t4, Operand(at)); // End of object.
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
- a0, Operand(t6));
+ __ Push(a2, a0);
+
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ GetObjectType(a3, t1, t0);
+ __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it is in fact a map.
+ // a3: new target
+ __ lw(a2,
+ FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &rt_call);
+ __ GetObjectType(a2, t5, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&rt_call, ne, a1, Operand(t1));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ // a3: new target
+ __ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+
+ __ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: new target
+ // t4: JSObject (not yet tagged as a HeapObject - the raw address).
+ // t3: start of next object
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+ __ Addu(t5, t5, Operand(3 * kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with appropriate filler.
+ // t4: JSObject (tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lw(t0, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(t2, t0);
+ // t2: slack tracking counter
+ __ Branch(&no_inobject_slack_tracking, lt, t2,
+ Operand(Map::kSlackTrackingCounterEnd));
+ // Decrease generous allocation count.
+ __ Subu(t0, t0, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(t0, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ subu(a0, t3, a0);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t5,
+ Operand(a0));
+ }
+ __ InitializeFieldsWithFiller(t5, a0, t7);
+
+ // To allow truncation, fill the remaining fields with the one-pointer
+ // filler map.
+ __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(t5, t3, t7);
+
+ // t2: slack tracking counter value before decreasing.
+ __ Branch(&allocated, ne, t2, Operand(Map::kSlackTrackingCounterEnd));
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(a1, a3, t4, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(a1, a3, t4);
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t4: JSObject
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(t5, a0, t7);
- // To allow for truncation.
- __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
+ __ InitializeFieldsWithFiller(t5, t3, t7);
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t4: JSObject
+ __ jmp(&allocated);
}
- __ sll(at, a3, kPointerSizeLog2);
- __ Addu(a0, t4, Operand(at)); // End of object.
- __ InitializeFieldsWithFiller(t5, a0, t7);
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ // a3: new target
+ __ bind(&rt_call);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
+ // Push the constructor and new_target twice; the second pair is passed
+ // as arguments to the runtime call.
+ __ Push(a1, a3, a1, a3); // constructor function, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(t4, v0);
+ __ Pop(a1, a3);
- // Continue with JSObject being successfully allocated.
+ // Receiver for constructor call allocated.
+ // a1: constructor function
+ // a3: new target
// t4: JSObject
- __ jmp(&allocated);
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ lw(a3, MemOperand(sp, 0 * kPointerSize));
+ // Retrieve smi-tagged arguments count from the stack.
+ __ lw(a0, MemOperand(sp));
}
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: original constructor
- __ bind(&rt_call);
-
- __ Push(a1, a3); // arguments 2-3 / 1-2
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(t4, v0);
-
- // Receiver for constructor call allocated.
- // t4: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(a3); // new.target
- __ Pop(a1);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ lw(a0, MemOperand(sp));
__ SmiUntag(a0);
- __ Push(a3, t4, t4);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one, and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(t4, t4);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -520,26 +608,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a0: number of arguments
// a1: constructor function
// a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
+ // a3: new target
+ // t4: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(a3, a0);
+ __ SmiTag(t4, a0);
__ jmp(&entry);
__ bind(&loop);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, a2, Operand(t0));
__ lw(t1, MemOperand(t0));
__ push(t1);
__ bind(&entry);
- __ Addu(a3, a3, Operand(-2));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ __ Addu(t4, t4, Operand(-2));
+ __ Branch(&loop, greater_equal, t4, Operand(zero_reg));
// Call the function.
// a0: number of arguments
// a1: constructor function
+ // a3: new target
if (is_api_function) {
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
@@ -547,47 +636,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(v0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a1, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ lw(a1, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -595,104 +687,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sll(t0, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, t0);
__ Addu(sp, sp, kPointerSize);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ }
__ Ret();
}
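+// A JavaScript-level sketch of the ECMA receiver rule implemented above: a
+// constructor's return value replaces the implicit receiver only if it is
+// an object, e.g.
+//
+//   function C() { return 42; }      // primitive ignored: new C() is a C
+//   function D() { return {a: 1}; }  // object wins: new D().a === 1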
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a2 : allocation site or undefined
- // -- a3 : original constructor
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(a2, t0);
- __ push(a2);
-
- __ mov(t0, a0);
- __ SmiTag(t0);
- __ push(t0); // Smi-tagged arguments count.
-
- // Push new.target.
- __ push(a3);
-
- // receiver is the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ push(at);
-
- // Set up pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // t0: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, entry;
- __ Branch(&entry);
- __ bind(&loop);
- __ sll(at, t0, kPointerSizeLog2 - 1);
- __ Addu(at, a2, Operand(at));
- __ lw(at, MemOperand(at));
- __ push(at);
- __ bind(&entry);
- __ Subu(t0, t0, Operand(2));
- __ Branch(&loop, ge, t0, Operand(zero_reg));
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
-
- __ Push(a0, a1, a1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(a0, a1);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // v0: result
- // sp[0]: new.target
- // sp[1]: number of arguments (smi-tagged)
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ lw(a1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame.
- }
- __ sll(at, a1, kPointerSizeLog2 - 1);
- __ Addu(sp, sp, Operand(at));
- __ Addu(sp, sp, Operand(kPointerSize));
- __ Jump(ra);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
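+// For reference: this is the path taken when [[Construct]] is applied to a
+// callable that is not a constructor, e.g. `new (() => {})()`, which throws
+// a "not a constructor" TypeError.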
@@ -708,7 +728,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
Label okay;
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
// Make a2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
+ // here which will cause a2 to become negative.
__ Subu(a2, sp, a2);
// Check if the arguments will overflow the stack.
if (argc_is_tagged == kArgcIsSmiTagged) {
@@ -721,7 +741,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Branch(&okay, gt, a2, Operand(t3));
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -825,6 +845,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o a1: the JS function object being called.
+// o a3: the new target
// o cp: our context
// o fp: the caller's frame pointer
// o sp: stack pointer
@@ -842,6 +863,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Push(a3);
+
+ // Push zero for bytecode array offset.
+ __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -870,7 +895,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t0));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t1, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -892,36 +917,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Subu(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Addu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -963,41 +975,171 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ Addu(a3, a0, Operand(1)); // Add one for receiver.
+ __ sll(a3, a3, kPointerSizeLog2);
+ __ Subu(a3, a2, Operand(a3));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ lw(t0, MemOperand(a2));
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ push(t0);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(a3));
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
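+// Net effect (sketch): argc + 1 values (the receiver plus the arguments)
+// are pushed onto the stack; e.g. for argc == 2 the loop pushes [a2],
+// [a2 - 4] and [a2 - 8] before tail-calling the Call builtin.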
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(a1, a1);
- // Whether to compile in a background thread.
- __ LoadRoot(
- at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(at);
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : new target
+ // -- a1 : constructor to call
+ // -- a2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ Subu(t0, a2, Operand(t0));
+
+ // Push a slot for the receiver.
+ __ push(zero_reg);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ lw(t1, MemOperand(a2));
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ push(t1);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(t0));
+
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
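+// Net effect (sketch): a zero slot for the receiver plus argc arguments
+// are pushed before tail-calling the Construct builtin, with a0, a1 and a3
+// arriving unchanged.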
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ Addu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ lw(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ lw(a1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(a1);
+ // Get the target bytecode offset from the frame.
+ __ lw(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ Addu(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a1, MemOperand(a1));
+ __ sll(a1, a1, kPointerSizeLog2);
+ __ Addu(a1, kInterpreterDispatchTableRegister, a1);
+ __ lw(a1, MemOperand(a1));
+ __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a1);
+}
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
-
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -1013,8 +1155,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1052,8 +1195,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1093,7 +1237,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1119,7 +1263,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> t2.
@@ -1161,6 +1305,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers {t2, t3, t4, t5}.
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = t2;
+ Register map = t3;
+ Register constructor = t4;
+ Register scratch = t5;
+
+ // If there is no signature, return the holder.
+ __ lw(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ Label next_prototype;
+ __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ Register type = constructor;
+ __ lw(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ Branch(&receiver_check_passed, eq, signature, Operand(type),
+ USE_DELAY_SLOT);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ GetObjectType(type, scratch, scratch);
+ __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+
+ // Otherwise load the parent function template and iterate.
+ __ lw(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ Branch(&function_template_loop);
+
+ // Load the next prototype and iterate.
+ __ bind(&next_prototype);
+ __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+
+ __ Branch(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
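+// Roughly, in C++-flavoured pseudocode (a sketch only; FunctionDataOf and
+// ParentTemplateOf are illustrative names, not actual V8 API):
+//
+//   if (signature->IsUndefined()) return;            // check passed
+//   for (Map* map = receiver->map();;) {
+//     Object* type = FunctionDataOf(map->GetConstructor());
+//     while (type->IsFunctionTemplateInfo()) {
+//       if (type == signature) return;               // check passed
+//       type = ParentTemplateOf(type);
+//     }
+//     receiver = map->prototype();
+//     if (receiver->IsNull()) goto failed;
+//     map = receiver->map();
+//     if (!map->is_hidden_prototype()) goto failed;
+//   }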
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : callee
+ // -- ra : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Addu(t8, sp, at);
+ __ lw(t0, MemOperand(t8));
+ CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ lw(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset));
+ __ lw(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset));
+ __ Addu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t2);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver).
+ __ Addu(t8, t8, Operand(kPointerSize));
+ __ addu(sp, t8, zero_reg);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1168,7 +1415,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1201,7 +1448,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1212,7 +1459,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into a0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(a0);
+ __ JumpIfSmi(a0, &receiver_not_date);
+ __ GetObjectType(a0, t0, t0);
+ __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot.
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ lw(a1, MemOperand(a1));
+ __ lw(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
+ __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(
+ a0, JSDate::kValueOffset +
+ field_index * kPointerSize)); // In delay slot.
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, t0);
+ __ li(a1, Operand(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
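+// A JavaScript-level sketch of the getters this backs:
+//
+//   var d = new Date(0);
+//   d.getTime();      // field_index == JSDate::kDateValue, no cache check
+//   d.getFullYear();  // cached field, validated against the cache stamp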
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into a1, argArray into a0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ mov(a3, a2);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ Addu(a0, sp, Operand(scratch));
+ __ lw(a1, MemOperand(a0)); // receiver
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // thisArg
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // argArray
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ sw(a2, MemOperand(sp));
+ __ mov(a0, a3);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a0 : argArray
+ // -- a1 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(a1, &receiver_not_callable);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsCallable));
+ __ Branch(&receiver_not_callable, eq, t0, Operand(zero_reg));
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
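+// For reference, the JavaScript behaviour implemented above:
+//
+//   function f(x, y) { return this.z + x + y; }
+//   f.apply({z: 1}, [2, 3]);  // 6
+//   f.apply({z: 1});          // NaN: null/undefined argArray means no args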
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
{
@@ -1256,189 +1623,145 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ lw(key, MemOperand(fp, indexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array.
- __ bind(&loop);
- __ lw(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- __ lw(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ lw(key, MemOperand(fp, indexOffset));
- __ Addu(key, key, Operand(1 << kSmiTagSize));
- __ sw(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, limitOffset));
- __ Branch(&loop, ne, key, Operand(a1));
-
- // On exit, the pushed arguments count is in a0, untagged
- __ mov(a0, key);
- __ SmiUntag(a0);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ lw(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array.
- __ Push(a0, a1);
- // Returns (in v0) number of arguments to copy to stack as Smi.
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- // Returns the result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ mov(a1, zero_reg);
- __ lw(a2, MemOperand(fp, kReceiverOffset));
- __ Push(v0, a1, a2); // limit, initial index and receiver.
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-
- // Tear down the internal frame and remove function, receiver and args.
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ mov(a3, a1);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ mov(a0, scratch);
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(zero_reg));
+ __ Addu(a0, sp, Operand(a0));
+ __ lw(a1, MemOperand(a0)); // target
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // thisArgument
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // argumentsList
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ sw(a2, MemOperand(sp));
+ __ mov(a0, a3);
}
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
-}
-
-
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(a1, &target_not_callable);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsCallable));
+ __ Branch(&target_not_callable, eq, t0, Operand(zero_reg));
+
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ lw(a0, MemOperand(fp, kNewTargetOffset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&validate_arguments, ne, a0, Operand(at));
- __ lw(a0, MemOperand(fp, kFunctionOffset));
- __ sw(a0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(a0);
- __ lw(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(a0);
- // Returns argument count in v0.
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- // Returns result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(v0); // limit
- __ mov(a1, zero_reg); // initial index
- __ push(a1);
- // Push the constructor function as callee.
- __ lw(a0, MemOperand(fp, kFunctionOffset));
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ lw(t0, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ jr(ra);
- __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
}
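+// For reference (JavaScript level):
+//
+//   Reflect.apply(Math.max, undefined, [1, 2, 3]);  // 3
+//   Reflect.apply(42, undefined, []);               // TypeError: not callable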
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push undefined as
+ // the receiver instead.
+ {
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ Addu(a0, sp, Operand(scratch));
+ __ sw(a2, MemOperand(a0)); // receiver
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a1, MemOperand(a0)); // target
+ __ mov(a3, a1); // new.target defaults to target
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // argumentsList
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // new.target
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ mov(a0, a2);
+ }
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a3 : new.target
+ // -- a1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(a1, &target_not_constructor);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+ __ Branch(&target_not_constructor, eq, t0, Operand(zero_reg));
+
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+ __ Branch(&new_target_not_constructor, eq, t0, Operand(zero_reg));
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ sw(a3, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
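+// For reference (JavaScript level):
+//
+//   class A { constructor(x) { this.x = x; } }
+//   class B extends A {}
+//   Reflect.construct(A, [1], B) instanceof B;  // true: new.target is B
+//   Reflect.construct(Math.abs, []);            // TypeError: not a constructor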
@@ -1448,6 +1771,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- a0 : actual number of arguments
// -- a1 : function (passed through to callee)
// -- a2 : expected number of arguments
+ // -- a3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1490,72 +1814,208 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(a0, &create_runtime);
+
+ // Load the map of argumentsList into a2.
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+
+ // Load native context into t0.
+ __ lw(t0, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ lw(at, ContextMemOperand(t0, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+ __ lw(at, ContextMemOperand(t0, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+
+ // Check if argumentsList is a fast JSArray.
+ __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3, a0);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ mov(a0, v0);
+ __ Pop(a1, a3);
+ __ lw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
+ }
+ __ Branch(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ lw(a2,
+ FieldMemOperand(a0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
+ __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ Branch(&create_runtime, ne, a2, Operand(at));
+ __ SmiUntag(a2);
+ __ mov(a0, t0);
+ __ Branch(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(a2);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
+ __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+ __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ SmiUntag(a2);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(t0, Heap::kRealStackLimitRootIndex);
+ // Make t0 the space we have left. The stack might already be overflowed
+ // here, which will cause t0 to become negative.
+ __ Subu(t0, sp, t0);
+ // Check if the arguments will overflow the stack.
+ __ sll(at, a2, kPointerSizeLog2);
+ __ Branch(&done, gt, t0, Operand(at)); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : args (a FixedArray built from argumentsList)
+ // -- a2 : len (number of elements to push from args)
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ mov(t0, zero_reg);
+ Label done, loop;
+ __ bind(&loop);
+ __ Branch(&done, eq, t0, Operand(a2));
+ __ sll(at, t0, kPointerSizeLog2);
+ __ Addu(at, a0, at);
+ __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+ __ Push(at);
+ __ Addu(t0, t0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done);
+ __ Move(a0, t0);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ Label construct;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&construct, ne, a3, Operand(at));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ bind(&construct);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
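
For reference, a minimal C++ sketch (illustrative helpers, not V8's API) of two pieces of the Apply builtin above: the signed stack-slack test, and the final new.target dispatch. The slack test still works when the stack is already overflowed, because the subtraction then yields a negative value and the signed comparison fails.

    #include <cstdint>

    // Mirrors: Subu(t0, sp, limit); sll(at, len, log2); branch if t0 > at.
    bool ArgumentsFitOnStack(std::intptr_t sp, std::intptr_t real_stack_limit,
                             std::intptr_t len, int pointer_size_log2) {
      std::intptr_t slack = sp - real_stack_limit;  // may be negative
      return slack > (len << pointer_size_log2);
    }

    // new.target was pre-checked to be a constructor or undefined, so
    // undefined selects [[Call]] and anything else selects [[Construct]].
    enum class ApplyPath { kCall, kConstruct };
    ApplyPath DispatchApply(bool new_target_is_undefined) {
      return new_target_is_undefined ? ApplyPath::kCall : ApplyPath::kConstruct;
    }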
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(a1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
__ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
- __ lw(a3, MemOperand(at));
-
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -- a2 : the shared function info.
- // -- a3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(a3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ GetObjectType(a3, t0, t0);
- __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
- __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a3, MemOperand(at));
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a3, v0);
+ __ Pop(a0, a1);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ }
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ Branch(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ Push(a0, a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a3, v0);
- __ Pop(a0, a1);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- }
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ sw(a3, MemOperand(at));
@@ -1572,15 +2032,118 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize); // Un-tag.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
- __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
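
A hedged sketch of the receiver rule Generate_CallFunction specializes on; the mode names follow the ConvertReceiverMode argument above, but the helper itself is illustrative. Only sloppy-mode, non-native functions reach the conversion code at all; within it, kNullOrUndefined can patch the receiver to the global proxy without loading it, while the other modes must inspect the receiver on the stack.

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    enum class ReceiverFixup { kUseGlobalProxy, kCheckAtRuntime };

    ReceiverFixup FixupFor(ConvertReceiverMode mode) {
      return mode == ConvertReceiverMode::kNullOrUndefined
                 ? ReceiverFixup::kUseGlobalProxy
                 : ReceiverFixup::kCheckAtRuntime;
    }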
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ addu(t0, t0, sp);
+ __ sw(at, MemOperand(t0));
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into t0.
+ __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- t0 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ Subu(sp, sp, Operand(t1));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Addu(sp, sp, Operand(t1));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(t1, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, gt, t1, Operand(a0));
+ __ sll(t2, t0, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ lw(at, MemOperand(t2));
+ __ sll(t2, t1, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ sw(at, MemOperand(t2));
+ __ Addu(t0, t0, Operand(1));
+ __ Addu(t1, t1, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+ __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Subu(t0, t0, Operand(1));
+ __ Branch(&done_loop, lt, t0, Operand(zero_reg));
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ addu(t1, t1, a2);
+ __ lw(at, MemOperand(t1));
+ __ sll(t1, a0, kPointerSizeLog2);
+ __ addu(t1, t1, sp);
+ __ sw(at, MemOperand(t1));
+ __ Addu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ lw(at, MemOperand(at));
+ __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -1590,15 +2153,20 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(a1);
- __ Branch(&non_smi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(a1);
+ // Increase the argument count to include the pushed function and the
+ // existing receiver on the stack.
+ __ Addu(a0, a0, 2);
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1612,15 +2180,17 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ addu(at, sp, at);
__ sw(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
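
The dispatch order in Generate_Call, summarized as an illustrative C++ switch (the type and path names are stand-ins, not V8's):

    enum class TargetType { kJSFunction, kJSBoundFunction, kJSProxy, kOther };
    enum class CallPath { kCallFunction, kCallBoundFunction, kProxyRuntime,
                          kDelegate, kThrowNonCallable };

    CallPath DispatchCall(TargetType type, bool is_callable) {
      switch (type) {
        case TargetType::kJSFunction:      return CallPath::kCallFunction;
        case TargetType::kJSBoundFunction: return CallPath::kCallBoundFunction;
        case TargetType::kJSProxy:         return CallPath::kProxyRuntime;
        default:
          return is_callable ? CallPath::kDelegate
                             : CallPath::kThrowNonCallable;
      }
    }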
@@ -1630,10 +2200,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (checked to be a JSFunction)
- // -- a3 : the original constructor (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(a1);
- __ AssertFunction(a3);
// The calling convention for function-specific ConstructStubs requires
// a2 to contain either an AllocationSite or undefined.
@@ -1649,17 +2218,117 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into t0.
+ __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- t0 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ Subu(sp, sp, Operand(t1));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Addu(sp, sp, Operand(t1));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(t1, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, ge, t1, Operand(a0));
+ __ sll(t2, t0, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ lw(at, MemOperand(t2));
+ __ sll(t2, t1, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ sw(at, MemOperand(t2));
+ __ Addu(t0, t0, Operand(1));
+ __ Addu(t1, t1, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+ __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Subu(t0, t0, Operand(1));
+ __ Branch(&done_loop, lt, t0, Operand(zero_reg));
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ addu(t1, t1, a2);
+ __ lw(at, MemOperand(t1));
+ __ sll(t1, a0, kPointerSizeLog2);
+ __ addu(t1, t1, sp);
+ __ sw(at, MemOperand(t1));
+ __ Addu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ lw(at, MemOperand(at));
+ __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
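
The only construct-specific twist relative to the bound-function call path is the new.target fix-up, sketched here with an illustrative helper:

    // When the bound function itself is the new.target, construction
    // proceeds as if its [[BoundTargetFunction]] had been used.
    template <typename F>
    F PatchNewTarget(F target, F bound_target, F new_target) {
      return (new_target == target) ? bound_target : new_target;
    }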
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a1 : the constructor to call (checked to be a JSProxy)
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(a1, a3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ Addu(a0, a0, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
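
The argument-count bookkeeping for the two proxy runtime tail calls, as a sketch: the runtime entry counts every stack slot it consumes, so the pushed values are added to a0 on top of the receiver that is already on the stack.

    // Runtime::kJSProxyConstruct sees receiver + args + constructor + new.target.
    int RuntimeArgcForProxyConstruct(int argc /* without receiver */) {
      return argc + 3;
    }

    // Runtime::kJSProxyCall (above) sees receiver + args + function.
    int RuntimeArgcForProxyCall(int argc /* without receiver */) {
      return argc + 2;
    }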
@@ -1668,24 +2337,33 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (can be any Object)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(a1, &non_constructor);
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t2, t2, Operand(1 << Map::kIsCallable));
- __ Branch(&non_constructor, eq, t2, Operand(zero_reg));
// Dispatch based on instance type.
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
+ // Check if target has a [[Construct]] internal method.
+ __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+ eq, t2, Operand(JS_PROXY_TYPE));
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
@@ -1694,7 +2372,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ addu(at, sp, at);
__ sw(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1702,40 +2380,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
-}
-
-
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- a1 : the target to call (can be any Object).
-
- // Find the address of the last argument.
- __ Addu(a3, a0, Operand(1)); // Add one for receiver.
- __ sll(a3, a3, kPointerSizeLog2);
- __ Subu(a3, a2, Operand(a3));
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ lw(t0, MemOperand(a2));
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ push(t0);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(a3));
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
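
Generate_Construct's dispatch order matters: JSFunction is routed before the Map::kIsConstructor bit is read, and every other target must pass that check first. An illustrative summary (names are stand-ins):

    enum class Kind { kJSFunction, kJSBoundFunction, kJSProxy, kOther };
    enum class ConstructPath { kConstructFunction, kConstructBoundFunction,
                               kConstructProxy, kDelegate, kNonConstructable };

    ConstructPath DispatchConstruct(Kind kind, bool is_constructor_bit) {
      if (kind == Kind::kJSFunction) return ConstructPath::kConstructFunction;
      if (!is_constructor_bit) return ConstructPath::kNonConstructable;
      switch (kind) {
        case Kind::kJSBoundFunction:
          return ConstructPath::kConstructBoundFunction;
        case Kind::kJSProxy:
          return ConstructPath::kConstructProxy;
        default:
          return ConstructPath::kDelegate;
      }
    }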
@@ -1745,14 +2391,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
+ // -- a3: new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless as the number of arguments should always be greater than 0.
@@ -1762,9 +2406,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
__ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
@@ -1779,7 +2424,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// t1: copy end address
Label copy;
@@ -1811,17 +2456,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t3.
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a0, fp, a0);
// Adjust for return address and receiver.
@@ -1833,7 +2479,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// t3: copy end address
Label copy;
__ bind(&copy);
@@ -1846,7 +2492,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(t1, fp, Operand(t2));
@@ -1866,7 +2512,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(a0, a2);
// a0 : expected number of arguments
// a1 : function (passed through to callee)
- __ Call(a3);
+ // a3 : new target (passed through to callee)
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(t0);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1880,13 +2528,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(a3);
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(t0);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ break_(0xCC);
}
}
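
The adaptation rule the trampoline implements, in sketch form: when too few arguments are passed, the missing slots are filled with undefined; when too many are passed, the extras stay on the stack but the callee reads only its expected count.

    #include <cstddef>
    #include <vector>

    template <typename V>
    std::vector<V> AdaptArguments(std::vector<V> actual, std::size_t expected,
                                  const V& undefined) {
      while (actual.size() < expected) actual.push_back(undefined);
      actual.resize(expected);  // extras are invisible to the callee
      return actual;
    }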
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index b12cb718ab9..f88d3bd5b44 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -291,7 +291,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ GetObjectType(a0, t4, t4);
if (cc == less || cc == greater) {
// Call runtime on identical JSObjects.
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -307,7 +307,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -459,12 +459,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
+ // FIRST_JS_RECEIVER_TYPE.
__ GetObjectType(lhs, a2, a2);
- __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));
// Return non-zero.
Label return_not_equal;
@@ -477,7 +477,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
__ GetObjectType(rhs, a3, a3);
- __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
@@ -539,9 +539,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ li(v0, Operand(1)); // Non-zero indicates not equal.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -728,8 +728,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
@@ -743,9 +742,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -979,7 +977,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1066,13 +1064,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ //
+ // If argv_in_register():
+ // a2: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Compute the argv pointer in a callee-saved register.
- __ sll(s1, a0, kPointerSizeLog2);
- __ Addu(s1, sp, s1);
- __ Subu(s1, s1, kPointerSize);
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
+ __ Addu(s1, sp, s1);
+ __ Subu(s1, s1, kPointerSize);
+ }
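
The argv selection above reduces to the following pointer arithmetic (a sketch; the register names refer to the code above):

    #include <cstdint>

    // Either the caller passed argv in a2, or s1 is derived from sp and
    // argc and ends up pointing at the last argument slot.
    std::uintptr_t ComputeArgv(bool argv_in_register, std::uintptr_t a2,
                               std::uintptr_t sp, std::uintptr_t argc,
                               std::uintptr_t pointer_size) {
      return argv_in_register ? a2 : sp + argc * pointer_size - pointer_size;
    }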
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1153,8 +1159,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- // s0: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments, so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // s0: still holds argc (callee-saved).
+ argc = s0;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -1457,15 +1470,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ lw(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(scratch,
- FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
- __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
// Get the "prototype" (or initial map) of the {function}.
__ lw(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1490,25 +1494,49 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ Register const result = v0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Branch(&done, eq, object_prototype, Operand(function_prototype));
- __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
- __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+
+ // Check if the object needs to be access checked.
+ __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
+ // Check if the current object is a Proxy.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ __ Branch(&fast_runtime_fallback, eq, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
+ __ lw(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
+ __ lw(object_map,
+ FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot.
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
__ Ret(USE_DELAY_SLOT);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // Slow-case: Call the runtime function.
+ __ StoreRoot(result,
+ Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // Found a Proxy, or an access check is needed: call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ DCHECK(Smi::FromInt(0) == 0);
+ __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
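
The rewritten fast path walks the prototype chain directly and only falls back to the runtime for proxies and access-checked objects. A minimal sketch with an illustrative object model (not V8's heap layout):

    struct Obj {
      const Obj* prototype;  // nullptr stands in for null
      bool is_proxy;
      bool needs_access_check;
    };

    enum class Answer { kTrue, kFalse, kRuntime };

    Answer HasInPrototypeChain(const Obj* object, const Obj* proto) {
      for (const Obj* p = object;;) {
        if (p->needs_access_check || p->is_proxy) return Answer::kRuntime;
        p = p->prototype;
        if (p == proto) return Answer::kTrue;
        if (p == nullptr) return Answer::kFalse;
      }
    }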
@@ -1579,7 +1607,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1607,7 +1635,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1673,7 +1701,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, t0, t5, &runtime, TAG_OBJECT);
+ __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1683,8 +1711,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
+ __ lw(t0, NativeContextMemOperand());
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -1822,7 +1849,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// t1 = argument count (tagged)
__ bind(&runtime);
__ Push(a1, a3, t1);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1841,7 +1868,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1887,10 +1914,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
- __ lw(t0, MemOperand(
- t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
@@ -1938,7 +1962,32 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // a1 : rest parameter index (tagged)
+ // Check if the calling frame is an arguments adaptor frame.
+
+ Label runtime;
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, t1,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer.
+ __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, t0, Operand(t1));
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ Push(a2, a3, a1);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1947,7 +1996,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2232,7 +2281,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, eq, v0, Operand(a1));
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2328,7 +2377,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2374,19 +2423,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
- // t0 : original constructor (for IsSuperConstructorCall)
FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- BoolToInt(is_super) << 8; // t0
+ const RegList kSavedRegs = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -2399,7 +2445,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2407,7 +2453,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
- // t0 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2448,7 +2493,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&megamorphic, ne, a1, Operand(t2));
__ jmp(&done);
@@ -2470,123 +2515,28 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&not_array_function, ne, a1, Operand(t2));
// The target function is the Array constructor.
// Create an AllocationSite if we don't already have it, and store it in
// the slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
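
The feedback lattice GenerateRecordCallTarget maintains, as an illustrative state machine: an uninitialized slot becomes monomorphic on the first call (an AllocationSite for the Array function, a WeakCell otherwise) and decays to megamorphic on any mismatch.

    enum class FeedbackState { kUninitialized, kMonomorphic, kMegamorphic };

    FeedbackState Transition(FeedbackState s, bool same_target) {
      switch (s) {
        case FeedbackState::kUninitialized:
          return FeedbackState::kMonomorphic;
        case FeedbackState::kMonomorphic:
          return same_target ? FeedbackState::kMonomorphic
                             : FeedbackState::kMegamorphic;
        default:
          return FeedbackState::kMegamorphic;
      }
    }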
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
-
- // Do not transform the receiver for strict mode functions.
- int32_t strict_mode_function_mask =
- 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
- // Do not transform the receiver for native (Compilerhints already in a3).
- int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
- __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
- __ Branch(cont, ne, at, Operand(zero_reg));
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ li(a0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(a1);
- }
- __ Branch(USE_DELAY_SLOT, cont);
- __ sw(v0, MemOperand(sp, argc * kPointerSize));
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // a1 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
- }
-
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ lw(a3, MemOperand(sp, argc * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, t0, t0);
- __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // t0 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2595,29 +2545,23 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetObjectType(a1, t1, t1);
__ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, a2, at);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(a2, t1);
- }
+ __ AssertUndefinedOrAllocationSite(a2, t1);
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mov(a3, t0);
- } else {
- __ mov(a3, a1);
- }
+ // Pass function as new target.
+ __ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2637,7 +2581,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a3 - slot id
// a2 - vector
// t0 - loaded from vector[slot]
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
@@ -2660,13 +2604,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2703,34 +2641,17 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ lw(a3, MemOperand(sp, argc * kPointerSize));
-
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, t0, t0);
- __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call_function);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&slow_start, eq, t0, Operand(at));
+ __ Branch(&call, eq, t0, Operand(at));
// Verify that t0 contains an AllocationSite
__ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
@@ -2759,14 +2680,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(t0, a2, Operand(t0));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ lw(t0, FieldMemOperand(a2, with_types_offset));
- __ Subu(t0, t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(a2, with_types_offset));
- __ lw(t0, FieldMemOperand(a2, generic_offset));
- __ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &slow_start);
- __ sw(t0, FieldMemOperand(a2, generic_offset)); // In delay slot.
+
+ __ bind(&call);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&uninitialized);
@@ -2779,13 +2698,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
__ Branch(&miss, eq, a1, Operand(t0));
- // Update stats.
- __ lw(t0, FieldMemOperand(a2, with_types_offset));
- __ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(a2, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
+ __ lw(t1, NativeContextMemOperand());
+ __ Branch(&miss, ne, t0, Operand(t1));
// Initialize the call counter.
__ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -2805,23 +2725,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(a1);
}
- __ Branch(&have_js_function);
+ __ Branch(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, t0, t0);
- __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
- __ Branch(&have_js_function);
+ __ Branch(&call);
}
@@ -2832,7 +2743,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -2900,11 +2811,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
@@ -2932,7 +2843,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, v0);
@@ -2979,7 +2890,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, v0);
call_helper.AfterCall(masm);
@@ -3240,7 +3151,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// v0: original string
@@ -3285,7 +3196,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ mov(v0, a0);
__ bind(&slow_string);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3295,7 +3206,24 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in a0.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(a0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
+ __ mov(a0, zero_reg);
+ __ bind(&positive_smi);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_smi);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength);
}
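
The Smi fast path of the new ToLength stub clamps negative small integers to zero and returns non-negative ones unchanged; everything else falls through to the runtime. Its integer core, as a sketch:

    #include <cstdint>

    std::int32_t ToLengthSmiFastPath(std::int32_t smi_value) {
      return smi_value < 0 ? 0 : smi_value;
    }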
@@ -3327,7 +3255,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3465,7 +3393,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3504,7 +3432,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
@@ -3797,9 +3725,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3807,18 +3735,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
__ GetObjectType(a0, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(a1, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
@@ -3827,7 +3756,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ And(a2, a1, a0);
@@ -3842,7 +3771,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3850,7 +3779,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(LESS)));
}
__ Push(a1, a0, a2);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -4338,11 +4267,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4365,73 +4294,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : element value to store
- // -- a3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers a1, a2, t0
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ lw(t0, MemOperand(sp, 0 * kPointerSize));
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
-
- __ CheckFastElements(a2, t1, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiElements(a2, t1, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(a1, a3, a0);
- __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
- __ Push(t1, t0);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, t1, t2);
- __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sw(a0, MemOperand(t2, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, t1, t2);
- __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5163,6 +5025,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
+ // Enter the context of the Array function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -5182,26 +5047,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ Push(a1);
- __ Push(a3);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ li(at, Operand(2));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ sw(a1, MemOperand(at));
+ __ li(at, Operand(3));
__ addu(a0, a0, at);
break;
case NONE:
- __ li(a0, Operand(2));
+ __ sw(a1, MemOperand(sp, 0 * kPointerSize));
+ __ li(a0, Operand(3));
break;
case ONE:
- __ li(a0, Operand(3));
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ li(a0, Operand(4));
break;
}
-
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(a3, a2);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
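
In the rewritten subclassing path, the constructor in a1 is stored into the stack slot just past the existing arguments, the new target and allocation site are pushed, and argc grows by three before jumping to Runtime::kNewArray. A hypothetical model of the repacking (plain ints stand in for tagged values; the exact stack layout is the stub's, not this sketch's):

    #include <vector>

    // Models the argc adjustment: three extra values ride along with the
    // original arguments, matching the li(a0, argc + 3) updates above.
    std::vector<int> RepackForNewArray(std::vector<int> args, int target,
                                       int new_target, int site) {
      args.push_back(target);      // sw a1 into the slot past the arguments
      args.push_back(new_target);  // Push(a3, a2)
      args.push_back(site);
      return args;                 // the runtime sees argc + 3 values
    }
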
@@ -5287,14 +5152,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = result_reg;
}
// Load the PropertyCell value at the specified slot.
__ sll(at, slot_reg, kPointerSizeLog2);
__ Addu(at, at, Operand(context_reg));
- __ lw(result_reg, ContextOperand(at, 0));
+ __ lw(result_reg, ContextMemOperand(at, 0));
__ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
// Check that value is not the_hole.
@@ -5306,7 +5171,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ bind(&slow_case);
__ SmiTag(slot_reg);
__ Push(slot_reg);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
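
The stub walks depth() PREVIOUS links to reach the script context, then indexes the slot holding the PropertyCell. A simplified sketch of the walk (the struct is a stand-in for V8's Context layout):

    // Follow the previous-context link depth times, then index the slot.
    struct Context {
      Context* previous;
      void* slots[16];
    };

    void* LoadGlobalViaContext(Context* ctx, int depth, int slot) {
      for (int i = 0; i < depth; ++i) ctx = ctx->previous;  // script context
      return ctx->slots[slot];  // the PropertyCell lives in this slot
    }
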
@@ -5326,14 +5191,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = cell_reg;
}
// Load the PropertyCell at the specified slot.
__ sll(at, slot_reg, kPointerSizeLog2);
__ Addu(at, at, Operand(context_reg));
- __ lw(cell_reg, ContextOperand(at, 0));
+ __ lw(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ lw(cell_details_reg,
@@ -5420,8 +5285,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot_reg, value_reg);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5545,7 +5409,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/chromium/v8/src/mips/code-stubs-mips.h b/chromium/v8/src/mips/code-stubs-mips.h
index 67228e01704..751095d8d82 100644
--- a/chromium/v8/src/mips/code-stubs-mips.h
+++ b/chromium/v8/src/mips/code-stubs-mips.h
@@ -140,9 +140,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
@@ -160,8 +159,8 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(),
- 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -343,6 +342,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/chromium/v8/src/mips/codegen-mips.cc b/chromium/v8/src/mips/codegen-mips.cc
index 4a1255e1b49..2a144d990c8 100644
--- a/chromium/v8/src/mips/codegen-mips.cc
+++ b/chromium/v8/src/mips/codegen-mips.cc
@@ -18,23 +18,22 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
+byte* fast_exp_mips_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = f12;
@@ -59,11 +58,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_mips_machine_code = buffer;
return &fast_exp_simulator;
@@ -72,7 +71,8 @@ UnaryMathFunction CreateExpFunction() {
#if defined(V8_HOST_ARCH_MIPS)
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
@@ -80,11 +80,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
@@ -597,23 +598,24 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
__ sqrt_d(f0, f12);
@@ -624,9 +626,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
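
With this change the generators return nullptr instead of a libm function pointer when they cannot emit code (simulator builds, allocation failure), so the fallback moves to the caller. A sketch of the implied caller-side contract, assuming the UnaryMathFunctionWithIsolate signature shown above:

    #include <cmath>

    struct Isolate;  // opaque, used only via pointer in this sketch
    using UnaryMathFunctionWithIsolate = double (*)(double, Isolate*);

    // Callers check for nullptr and fall back to the C library themselves.
    double Sqrt(UnaryMathFunctionWithIsolate fast, double x,
                Isolate* isolate) {
      return fast != nullptr ? fast(x, isolate) : std::sqrt(x);
    }
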
@@ -1187,15 +1189,17 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->Push(ra, fp, cp, a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
@@ -1239,10 +1243,11 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
diff --git a/chromium/v8/src/mips/codegen-mips.h b/chromium/v8/src/mips/codegen-mips.h
index f79ad4e41ca..ad7abb30c55 100644
--- a/chromium/v8/src/mips/codegen-mips.h
+++ b/chromium/v8/src/mips/codegen-mips.h
@@ -7,7 +7,7 @@
#define V8_MIPS_CODEGEN_MIPS_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -46,6 +46,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/chromium/v8/src/mips/constants-mips.cc b/chromium/v8/src/mips/constants-mips.cc
index ff8a79f1b22..6ca430a1578 100644
--- a/chromium/v8/src/mips/constants-mips.cc
+++ b/chromium/v8/src/mips/constants-mips.cc
@@ -126,24 +126,28 @@ int FPURegisters::Number(const char* name) {
// -----------------------------------------------------------------------------
// Instructions.
-bool Instruction::IsForbiddenInBranchDelay() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
+ Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+ switch (opcode) {
case J:
case JAL:
case BEQ:
case BNE:
- case BLEZ:
- case BGTZ:
+ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
+ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
case BEQL:
case BNEL:
- case BLEZL:
- case BGTZL:
+ case BLEZL: // POP26 bgezc, blezc, bgec/blec
+ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
case BC:
case BALC:
+ case POP10: // beqzalc, bovc, beqc
+ case POP30: // bnezalc, bvnc, bnec
+ case POP66: // beqzc, jic
+ case POP76: // bnezc, jialc
return true;
case REGIMM:
- switch (RtFieldRaw()) {
+ switch (instr & kRtFieldMask) {
case BLTZ:
case BGEZ:
case BLTZAL:
@@ -154,7 +158,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
}
break;
case SPECIAL:
- switch (FunctionFieldRaw()) {
+ switch (instr & kFunctionFieldMask) {
case JR:
case JALR:
return true;
@@ -162,6 +166,17 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return false;
}
break;
+ case COP1:
+ switch (instr & kRsFieldMask) {
+ case BC1:
+ case BC1EQZ:
+ case BC1NEZ:
+ return true;
+ break;
+ default:
+ return false;
+ }
+ break;
default:
return false;
}
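
Making the predicate a static function over raw instruction bits lets it be consulted before an Instruction object exists, for example when deciding whether the next instruction to assemble may follow a compact branch. A sketch of classifying from raw bits (masks and opcode values simplified from constants-mips.h):

    #include <cstdint>

    using Instr = uint32_t;
    constexpr Instr kOpcodeMask = 0x3Fu << 26;
    constexpr Instr J = 2u << 26, JAL = 3u << 26;

    // Decide from the bits alone, no decoded object required.
    bool IsBranchLike(Instr instr) {
      switch (instr & kOpcodeMask) {
        case J:
        case JAL:
          return true;  // forbidden in a delay slot / after a compact branch
        default:
          return false;
      }
    }
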
@@ -169,8 +184,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
bool Instruction::IsLinkingInstruction() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+ switch (OpcodeFieldRaw()) {
case JAL:
return true;
case POP76:
diff --git a/chromium/v8/src/mips/constants-mips.h b/chromium/v8/src/mips/constants-mips.h
index fcbda801919..8327501b6f8 100644
--- a/chromium/v8/src/mips/constants-mips.h
+++ b/chromium/v8/src/mips/constants-mips.h
@@ -143,8 +143,11 @@ const int kInvalidFPURegister = -1;
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1 << 31);
const uint64_t kFPU64InvalidResult =
static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -152,12 +155,14 @@ const uint32_t kFCSRUnderflowFlagBit = 3;
const uint32_t kFCSROverflowFlagBit = 4;
const uint32_t kFCSRDivideByZeroFlagBit = 5;
const uint32_t kFCSRInvalidOpFlagBit = 6;
+const uint32_t kFCSRNaN2008FlagBit = 18;
const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
@@ -256,6 +261,7 @@ const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
+const int kLsaSaBits = 2;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
@@ -298,311 +304,320 @@ const int kFBtrueBits = 1;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
-const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift;
const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift;
const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift;
-const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
-const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
-const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
-const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
-const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
-const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
-const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
+const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
+const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-const int kHiMask = 0xffff << 16;
-const int kLoMask = 0xffff;
-const int kSignMask = 0x80000000;
-const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int kHiMask = 0xffff << 16;
+const int kLoMask = 0xffff;
+const int kSignMask = 0x80000000;
+const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
-
- BEQC = ((2 << 3) + 0) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- BC = ((6 << 3) + 2) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
- POP66 = ((6 << 3) + 6) << kOpcodeShift,
-
- PREF = ((6 << 3) + 3) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- BALC = ((7 << 3) + 2) << kOpcodeShift,
- PCREL = ((7 << 3) + 3) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
- POP76 = ((7 << 3) + 6) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
+enum Opcode : uint32_t {
+ SPECIAL = 0U << kOpcodeShift,
+ REGIMM = 1U << kOpcodeShift,
+
+ J = ((0U << 3) + 2) << kOpcodeShift,
+ JAL = ((0U << 3) + 3) << kOpcodeShift,
+ BEQ = ((0U << 3) + 4) << kOpcodeShift,
+ BNE = ((0U << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0U << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0U << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1U << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1U << 3) + 1) << kOpcodeShift,
+ SLTI = ((1U << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1U << 3) + 3) << kOpcodeShift,
+ ANDI = ((1U << 3) + 4) << kOpcodeShift,
+ ORI = ((1U << 3) + 5) << kOpcodeShift,
+ XORI = ((1U << 3) + 6) << kOpcodeShift,
+ LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+
+ BEQC = ((2U << 3) + 0) << kOpcodeShift,
+ COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2U << 3) + 4) << kOpcodeShift,
+ BNEL = ((2U << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2U << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2U << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC.
+ SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift,
+
+ LB = ((4U << 3) + 0) << kOpcodeShift,
+ LH = ((4U << 3) + 1) << kOpcodeShift,
+ LWL = ((4U << 3) + 2) << kOpcodeShift,
+ LW = ((4U << 3) + 3) << kOpcodeShift,
+ LBU = ((4U << 3) + 4) << kOpcodeShift,
+ LHU = ((4U << 3) + 5) << kOpcodeShift,
+ LWR = ((4U << 3) + 6) << kOpcodeShift,
+ SB = ((5U << 3) + 0) << kOpcodeShift,
+ SH = ((5U << 3) + 1) << kOpcodeShift,
+ SWL = ((5U << 3) + 2) << kOpcodeShift,
+ SW = ((5U << 3) + 3) << kOpcodeShift,
+ SWR = ((5U << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6U << 3) + 1) << kOpcodeShift,
+ BC = ((6U << 3) + 2) << kOpcodeShift,
+ LDC1 = ((6U << 3) + 5) << kOpcodeShift,
+ POP66 = ((6U << 3) + 6) << kOpcodeShift, // beqzc, jic
+
+ PREF = ((6U << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7U << 3) + 1) << kOpcodeShift,
+ BALC = ((7U << 3) + 2) << kOpcodeShift,
+ PCREL = ((7U << 3) + 3) << kOpcodeShift,
+ SDC1 = ((7U << 3) + 5) << kOpcodeShift,
+ POP76 = ((7U << 3) + 6) << kOpcodeShift, // bnezc, jialc
+
+ COP1X = ((1U << 4) + 3) << kOpcodeShift,
+
+ // New r6 instructions.
+ POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc
+ POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc
+ POP10 = ADDI, // beqzalc, bovc, beqc
+ POP26 = BLEZL, // bgezc, blezc, bgec/blec
+ POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc
+ POP30 = DADDI, // bnezalc, bvnc, bnec
};
-enum SecondaryField {
+enum SecondaryField : uint32_t {
// SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- CLZ_R6 = ((2 << 3) + 0),
- CLO_R6 = ((2 << 3) + 1),
- MFLO = ((2 << 3) + 2),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- SELEQZ_S = ((6 << 3) + 5),
- TNE = ((6 << 3) + 6),
- SELNEZ_S = ((6 << 3) + 7),
+ SLL = ((0U << 3) + 0),
+ MOVCI = ((0U << 3) + 1),
+ SRL = ((0U << 3) + 2),
+ SRA = ((0U << 3) + 3),
+ SLLV = ((0U << 3) + 4),
+ LSA = ((0U << 3) + 5),
+ SRLV = ((0U << 3) + 6),
+ SRAV = ((0U << 3) + 7),
+
+ JR = ((1U << 3) + 0),
+ JALR = ((1U << 3) + 1),
+ MOVZ = ((1U << 3) + 2),
+ MOVN = ((1U << 3) + 3),
+ BREAK = ((1U << 3) + 5),
+
+ MFHI = ((2U << 3) + 0),
+ CLZ_R6 = ((2U << 3) + 0),
+ CLO_R6 = ((2U << 3) + 1),
+ MFLO = ((2U << 3) + 2),
+
+ MULT = ((3U << 3) + 0),
+ MULTU = ((3U << 3) + 1),
+ DIV = ((3U << 3) + 2),
+ DIVU = ((3U << 3) + 3),
+
+ ADD = ((4U << 3) + 0),
+ ADDU = ((4U << 3) + 1),
+ SUB = ((4U << 3) + 2),
+ SUBU = ((4U << 3) + 3),
+ AND = ((4U << 3) + 4),
+ OR = ((4U << 3) + 5),
+ XOR = ((4U << 3) + 6),
+ NOR = ((4U << 3) + 7),
+
+ SLT = ((5U << 3) + 2),
+ SLTU = ((5U << 3) + 3),
+
+ TGE = ((6U << 3) + 0),
+ TGEU = ((6U << 3) + 1),
+ TLT = ((6U << 3) + 2),
+ TLTU = ((6U << 3) + 3),
+ TEQ = ((6U << 3) + 4),
+ SELEQZ_S = ((6U << 3) + 5),
+ TNE = ((6U << 3) + 6),
+ SELNEZ_S = ((6U << 3) + 7),
// Multiply integers in r6.
- MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
- MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
- RINT = ((3 << 3) + 2),
+ MUL_MUH = ((3U << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U.
+ RINT = ((3U << 3) + 2),
- MUL_OP = ((0 << 3) + 2),
- MUH_OP = ((0 << 3) + 3),
- DIV_OP = ((0 << 3) + 2),
- MOD_OP = ((0 << 3) + 3),
+ MUL_OP = ((0U << 3) + 2),
+ MUH_OP = ((0U << 3) + 3),
+ DIV_OP = ((0U << 3) + 2),
+ MOD_OP = ((0U << 3) + 3),
- DIV_MOD = ((3 << 3) + 2),
- DIV_MOD_U = ((3 << 3) + 3),
+ DIV_MOD = ((3U << 3) + 2),
+ DIV_MOD_U = ((3U << 3) + 3),
// SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
+ MUL = ((0U << 3) + 2),
+ CLZ = ((4U << 3) + 0),
+ CLO = ((4U << 3) + 1),
// SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- INS = ((0 << 3) + 4),
- BSHFL = ((4 << 3) + 0),
+ EXT = ((0U << 3) + 0),
+ INS = ((0U << 3) + 4),
+ BSHFL = ((4U << 3) + 0),
// SPECIAL3 Encoding of sa Field.
- BITSWAP = ((0 << 3) + 0),
- ALIGN = ((0 << 3) + 2),
- WSBH = ((0 << 3) + 2),
- SEB = ((2 << 3) + 0),
- SEH = ((3 << 3) + 0),
+ BITSWAP = ((0U << 3) + 0),
+ ALIGN = ((0U << 3) + 2),
+ WSBH = ((0U << 3) + 2),
+ SEB = ((2U << 3) + 0),
+ SEH = ((3U << 3) + 0),
// REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
- BGEZALL = ((2 << 3) + 3) << 16,
+ BLTZ = ((0U << 3) + 0) << 16,
+ BGEZ = ((0U << 3) + 1) << 16,
+ BLTZAL = ((2U << 3) + 0) << 16,
+ BGEZAL = ((2U << 3) + 1) << 16,
+ BGEZALL = ((2U << 3) + 3) << 16,
// COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
+ MFC1 = ((0U << 3) + 0) << 21,
+ CFC1 = ((0U << 3) + 2) << 21,
+ MFHC1 = ((0U << 3) + 3) << 21,
+ MTC1 = ((0U << 3) + 4) << 21,
+ CTC1 = ((0U << 3) + 6) << 21,
+ MTHC1 = ((0U << 3) + 7) << 21,
+ BC1 = ((1U << 3) + 0) << 21,
+ S = ((2U << 3) + 0) << 21,
+ D = ((2U << 3) + 1) << 21,
+ W = ((2U << 3) + 4) << 21,
+ L = ((2U << 3) + 5) << 21,
+ PS = ((2U << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
- ADD_S = ((0 << 3) + 0),
- SUB_S = ((0 << 3) + 1),
- MUL_S = ((0 << 3) + 2),
- DIV_S = ((0 << 3) + 3),
- ABS_S = ((0 << 3) + 5),
- SQRT_S = ((0 << 3) + 4),
- MOV_S = ((0 << 3) + 6),
- NEG_S = ((0 << 3) + 7),
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- RECIP_S = ((2 << 3) + 5),
- RSQRT_S = ((2 << 3) + 6),
- CLASS_S = ((3 << 3) + 3),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
+ ADD_S = ((0U << 3) + 0),
+ SUB_S = ((0U << 3) + 1),
+ MUL_S = ((0U << 3) + 2),
+ DIV_S = ((0U << 3) + 3),
+ ABS_S = ((0U << 3) + 5),
+ SQRT_S = ((0U << 3) + 4),
+ MOV_S = ((0U << 3) + 6),
+ NEG_S = ((0U << 3) + 7),
+ ROUND_L_S = ((1U << 3) + 0),
+ TRUNC_L_S = ((1U << 3) + 1),
+ CEIL_L_S = ((1U << 3) + 2),
+ FLOOR_L_S = ((1U << 3) + 3),
+ ROUND_W_S = ((1U << 3) + 4),
+ TRUNC_W_S = ((1U << 3) + 5),
+ CEIL_W_S = ((1U << 3) + 6),
+ FLOOR_W_S = ((1U << 3) + 7),
+ RECIP_S = ((2U << 3) + 5),
+ RSQRT_S = ((2U << 3) + 6),
+ CLASS_S = ((3U << 3) + 3),
+ CVT_D_S = ((4U << 3) + 1),
+ CVT_W_S = ((4U << 3) + 4),
+ CVT_L_S = ((4U << 3) + 5),
+ CVT_PS_S = ((4U << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- RECIP_D = ((2 << 3) + 5),
- RSQRT_D = ((2 << 3) + 6),
- CLASS_D = ((3 << 3) + 3),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
+ ADD_D = ((0U << 3) + 0),
+ SUB_D = ((0U << 3) + 1),
+ MUL_D = ((0U << 3) + 2),
+ DIV_D = ((0U << 3) + 3),
+ SQRT_D = ((0U << 3) + 4),
+ ABS_D = ((0U << 3) + 5),
+ MOV_D = ((0U << 3) + 6),
+ NEG_D = ((0U << 3) + 7),
+ ROUND_L_D = ((1U << 3) + 0),
+ TRUNC_L_D = ((1U << 3) + 1),
+ CEIL_L_D = ((1U << 3) + 2),
+ FLOOR_L_D = ((1U << 3) + 3),
+ ROUND_W_D = ((1U << 3) + 4),
+ TRUNC_W_D = ((1U << 3) + 5),
+ CEIL_W_D = ((1U << 3) + 6),
+ FLOOR_W_D = ((1U << 3) + 7),
+ RECIP_D = ((2U << 3) + 5),
+ RSQRT_D = ((2U << 3) + 6),
+ CLASS_D = ((3U << 3) + 3),
+ MIN = ((3U << 3) + 4),
+ MINA = ((3U << 3) + 5),
+ MAX = ((3U << 3) + 6),
+ MAXA = ((3U << 3) + 7),
+ CVT_S_D = ((4U << 3) + 0),
+ CVT_W_D = ((4U << 3) + 4),
+ CVT_L_D = ((4U << 3) + 5),
+ C_F_D = ((6U << 3) + 0),
+ C_UN_D = ((6U << 3) + 1),
+ C_EQ_D = ((6U << 3) + 2),
+ C_UEQ_D = ((6U << 3) + 3),
+ C_OLT_D = ((6U << 3) + 4),
+ C_ULT_D = ((6U << 3) + 5),
+ C_OLE_D = ((6U << 3) + 6),
+ C_ULE_D = ((6U << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- BC1EQZ = ((2 << 2) + 1) << 21,
- BC1NEZ = ((3 << 2) + 1) << 21,
+ CVT_S_W = ((4U << 3) + 0),
+ CVT_D_W = ((4U << 3) + 1),
+ CVT_S_L = ((4U << 3) + 0),
+ CVT_D_L = ((4U << 3) + 1),
+ BC1EQZ = ((2U << 2) + 1) << 21,
+ BC1NEZ = ((3U << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
- CMP_AF = ((0 << 3) + 0),
- CMP_UN = ((0 << 3) + 1),
- CMP_EQ = ((0 << 3) + 2),
- CMP_UEQ = ((0 << 3) + 3),
- CMP_LT = ((0 << 3) + 4),
- CMP_ULT = ((0 << 3) + 5),
- CMP_LE = ((0 << 3) + 6),
- CMP_ULE = ((0 << 3) + 7),
- CMP_SAF = ((1 << 3) + 0),
- CMP_SUN = ((1 << 3) + 1),
- CMP_SEQ = ((1 << 3) + 2),
- CMP_SUEQ = ((1 << 3) + 3),
- CMP_SSLT = ((1 << 3) + 4),
- CMP_SSULT = ((1 << 3) + 5),
- CMP_SLE = ((1 << 3) + 6),
- CMP_SULE = ((1 << 3) + 7),
+ CMP_AF = ((0U << 3) + 0),
+ CMP_UN = ((0U << 3) + 1),
+ CMP_EQ = ((0U << 3) + 2),
+ CMP_UEQ = ((0U << 3) + 3),
+ CMP_LT = ((0U << 3) + 4),
+ CMP_ULT = ((0U << 3) + 5),
+ CMP_LE = ((0U << 3) + 6),
+ CMP_ULE = ((0U << 3) + 7),
+ CMP_SAF = ((1U << 3) + 0),
+ CMP_SUN = ((1U << 3) + 1),
+ CMP_SEQ = ((1U << 3) + 2),
+ CMP_SUEQ = ((1U << 3) + 3),
+ CMP_SSLT = ((1U << 3) + 4),
+ CMP_SSULT = ((1U << 3) + 5),
+ CMP_SLE = ((1U << 3) + 6),
+ CMP_SULE = ((1U << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
- CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
- CMP_OR = ((2 << 3) + 1),
- CMP_UNE = ((2 << 3) + 2),
- CMP_NE = ((2 << 3) + 3),
- CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
- CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
- CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
- CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
- CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
- CMP_SOR = ((3 << 3) + 1),
- CMP_SUNE = ((3 << 3) + 2),
- CMP_SNE = ((3 << 3) + 3),
- CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
- CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
- CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
- CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
-
- SEL = ((2 << 3) + 0),
- MOVZ_C = ((2 << 3) + 2),
- MOVN_C = ((2 << 3) + 3),
- SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
- MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
- SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+ CMP_AT = ((2U << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2U << 3) + 1),
+ CMP_UNE = ((2U << 3) + 2),
+ CMP_NE = ((2U << 3) + 3),
+ CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3U << 3) + 1),
+ CMP_SUNE = ((3U << 3) + 2),
+ CMP_SNE = ((3U << 3) + 3),
+ CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2U << 3) + 0),
+ MOVZ_C = ((2U << 3) + 2),
+ MOVN_C = ((2U << 3) + 3),
+ SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers.
+ MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
+ SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
+ MADD_D = ((4U << 3) + 1),
// PCREL Encoding of rt Field.
- ADDIUPC = ((0 << 2) + 0),
- LWPC = ((0 << 2) + 1),
- AUIPC = ((3 << 3) + 6),
- ALUIPC = ((3 << 3) + 7),
+ ADDIUPC = ((0U << 2) + 0),
+ LWPC = ((0U << 2) + 1),
+ AUIPC = ((3U << 3) + 6),
+ ALUIPC = ((3U << 3) + 7),
// POP66 Encoding of rs Field.
- JIC = ((0 << 5) + 0),
+ JIC = ((0U << 5) + 0),
// POP76 Encoding of rs Field.
- JIALC = ((0 << 5) + 0),
+ JIALC = ((0U << 5) + 0),
- NULLSF = 0
+ NULLSF = 0U
};
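
The U suffixes added throughout these enums are not cosmetic: opcodes from LB upward shift a value into bit 31, and left-shifting a signed int into the sign bit is undefined behavior in C++, while unsigned shifts are fully defined. That is also why the enums gain an explicit uint32_t underlying type. A minimal illustration:

    #include <cstdint>

    constexpr uint32_t kOpcodeShift = 26;
    // constexpr int bad = 32 << kOpcodeShift;   // overflows int: ill-formed
    constexpr uint32_t lb = ((4U << 3) + 0) << kOpcodeShift;     // 0x80000000
    constexpr uint32_t pop76 = ((7U << 3) + 6) << kOpcodeShift;  // bnezc/jialc
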
@@ -764,7 +779,12 @@ enum FPURoundingMode {
kRoundToNearest = RN,
kRoundToZero = RZ,
kRoundToPlusInf = RP,
- kRoundToMinusInf = RM
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
};
const uint32_t kFPURoundingModeMask = 3 << 0;
@@ -820,6 +840,10 @@ const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
+static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
+ return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
+}
+
class Instruction {
public:
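
Turning the OpcodeToBitNumber macro into a constexpr function keeps kOpcodeImmediateTypeMask a compile-time constant while gaining type checking. The underlying trick: each 6-bit opcode maps to one bit of a 64-bit mask, so class-membership tests reduce to a single AND. A self-contained sketch:

    #include <cstdint>

    constexpr uint32_t kOpcodeShift = 26;
    constexpr uint64_t OpcodeToBitNumber(uint32_t opcode) {
      return 1ULL << (opcode >> kOpcodeShift);  // one bit per opcode
    }

    constexpr uint32_t BEQ = 4u << kOpcodeShift, BNE = 5u << kOpcodeShift;
    constexpr uint64_t kBranchMask =
        OpcodeToBitNumber(BEQ) | OpcodeToBitNumber(BNE);

    constexpr bool IsBranch(uint32_t opcode) {
      return (kBranchMask & OpcodeToBitNumber(opcode)) != 0;
    }
    static_assert(IsBranch(BEQ), "BEQ is in the set");
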
@@ -848,7 +872,7 @@ class Instruction {
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
// Instruction type.
@@ -862,10 +886,7 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
-#define OpcodeToBitNumber(opcode) \
- (1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift))
-
- static const uint64_t kOpcodeImmediateTypeMask =
+ static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) |
@@ -892,20 +913,21 @@ class Instruction {
FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) |
FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(SRA) |
FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(SRLV) |
- FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(MFHI) |
- FunctionFieldToBitNumber(MFLO) | FunctionFieldToBitNumber(MULT) |
- FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DIV) |
- FunctionFieldToBitNumber(DIVU) | FunctionFieldToBitNumber(ADD) |
- FunctionFieldToBitNumber(ADDU) | FunctionFieldToBitNumber(SUB) |
- FunctionFieldToBitNumber(SUBU) | FunctionFieldToBitNumber(AND) |
- FunctionFieldToBitNumber(OR) | FunctionFieldToBitNumber(XOR) |
- FunctionFieldToBitNumber(NOR) | FunctionFieldToBitNumber(SLT) |
- FunctionFieldToBitNumber(SLTU) | FunctionFieldToBitNumber(TGE) |
- FunctionFieldToBitNumber(TGEU) | FunctionFieldToBitNumber(TLT) |
- FunctionFieldToBitNumber(TLTU) | FunctionFieldToBitNumber(TEQ) |
- FunctionFieldToBitNumber(TNE) | FunctionFieldToBitNumber(MOVZ) |
- FunctionFieldToBitNumber(MOVN) | FunctionFieldToBitNumber(MOVCI) |
- FunctionFieldToBitNumber(SELEQZ_S) | FunctionFieldToBitNumber(SELNEZ_S);
+ FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(LSA) |
+ FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) |
+ FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(MULTU) |
+ FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DIVU) |
+ FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(ADDU) |
+ FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(SUBU) |
+ FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) |
+ FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) |
+ FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) |
+ FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) |
+ FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) |
+ FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
+ FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
+ FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
+ FunctionFieldToBitNumber(SELNEZ_S);
// Get the encoding type of the instruction.
@@ -939,6 +961,11 @@ class Instruction {
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
+ inline int LsaSaValue() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+ }
+
inline int FunctionValue() const {
DCHECK(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@@ -1032,6 +1059,11 @@ class Instruction {
}
}
+ inline int32_t ImmValue(int bits) const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(bits - 1, 0);
+ }
+
inline int32_t Imm16Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
@@ -1058,8 +1090,18 @@ class Instruction {
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
- // Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay() const;
+ static bool IsForbiddenAfterBranchInstr(Instr instr);
+
+ // Say if the instruction should not be used in a branch delay slot or
+ // immediately after a compact branch.
+ inline bool IsForbiddenAfterBranch() const {
+ return IsForbiddenAfterBranchInstr(InstructionBits());
+ }
+
+ inline bool IsForbiddenInBranchDelay() const {
+ return IsForbiddenAfterBranch();
+ }
+
// Say if the instruction 'links'. e.g. jal, bal.
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
@@ -1178,6 +1220,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
#undef OpcodeToBitNumber
#undef FunctionFieldToBitNumber
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/chromium/v8/src/mips/cpu-mips.cc b/chromium/v8/src/mips/cpu-mips.cc
index dff1d304024..1199365b7d2 100644
--- a/chromium/v8/src/mips/cpu-mips.cc
+++ b/chromium/v8/src/mips/cpu-mips.cc
@@ -23,12 +23,12 @@ namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
+#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
-#if !defined (USE_SIMULATOR)
#if defined(ANDROID)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
@@ -42,14 +42,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // ANDROID
-#else // USE_SIMULATOR.
- // Not generating mips instructions for C-code. This means that we are
- // building a mips emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#endif // USE_SIMULATOR.
+#endif // !USE_SIMULATOR.
}
} // namespace internal
diff --git a/chromium/v8/src/mips/deoptimizer-mips.cc b/chromium/v8/src/mips/deoptimizer-mips.cc
index 974692495a2..a9e30de44d6 100644
--- a/chromium/v8/src/mips/deoptimizer-mips.cc
+++ b/chromium/v8/src/mips/deoptimizer-mips.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -37,14 +38,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->break_(0xCC);
}
}
@@ -65,7 +67,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
@@ -88,7 +90,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -139,14 +141,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
- const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
@@ -215,9 +219,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
@@ -284,9 +289,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
__ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
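
The deoptimizer hunks replace the old assumption that allocation index equals register code with explicit RegisterConfiguration queries, and frame offsets are now keyed by register code. A simplified model of that iteration pattern (the struct is a stand-in, not V8's class):

    #include <vector>

    struct RegisterConfiguration {
      std::vector<int> allocatable_double_codes;
      int num_allocatable_double_registers() const {
        return static_cast<int>(allocatable_double_codes.size());
      }
      int GetAllocatableDoubleCode(int i) const {
        return allocatable_double_codes[i];
      }
    };

    void SaveDoubles(const RegisterConfiguration& config,
                     double* frame_slots, const double* regs) {
      for (int i = 0; i < config.num_allocatable_double_registers(); ++i) {
        int code = config.GetAllocatableDoubleCode(i);
        frame_slots[code] = regs[code];  // offset keyed by code, not index
      }
    }
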
diff --git a/chromium/v8/src/mips/disasm-mips.cc b/chromium/v8/src/mips/disasm-mips.cc
index f24ec436f00..936514aab26 100644
--- a/chromium/v8/src/mips/disasm-mips.cc
+++ b/chromium/v8/src/mips/disasm-mips.cc
@@ -66,6 +66,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -73,6 +74,7 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintLsaSa(Instruction* instr);
void PrintSd(Instruction* instr);
void PrintSs1(Instruction* instr);
void PrintSs2(Instruction* instr);
@@ -182,6 +184,17 @@ void Decoder::PrintFPURegister(int freg) {
}
+void Decoder::PrintFPUStatusRegister(int freg) {
+ switch (freg) {
+ case kFCSRRegister:
+ Print("FCSR");
+ break;
+ default:
+ Print(converter_.NameOfXMMRegister(freg));
+ }
+}
+
+
void Decoder::PrintFs(Instruction* instr) {
int freg = instr->RsValue();
PrintFPURegister(freg);
@@ -207,6 +220,13 @@ void Decoder::PrintSa(Instruction* instr) {
}
+// Print the integer value of the sa field of a lsa instruction.
+void Decoder::PrintLsaSa(Instruction* instr) {
+ int sa = instr->LsaSaValue() + 1;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
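
PrintLsaSa prints sa + 1 because the r6 LSA instruction stores shift-minus-one in its 2-bit sa field; the operation itself computes (rs << (sa + 1)) + rt. A one-liner capturing the semantics:

    #include <cstdint>

    // r6 lsa: the 2-bit field encodes shifts of 1..4.
    uint32_t Lsa(uint32_t rs, uint32_t rt, uint32_t sa_field /* 0..3 */) {
      return (rs << (sa_field + 1)) + rt;
    }
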
@@ -476,22 +496,42 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register.
- int reg = instr->FsValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'ft: ft register.
- int reg = instr->FtValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'fd: fd register.
- int reg = instr->FdValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'r') { // 'fr: fr register.
- int reg = instr->FrValue();
- PrintFPURegister(reg);
- return 2;
+ if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ }
+ } else {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
}
UNREACHABLE();
return -1;
@@ -651,11 +691,17 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': { // 'sa.
switch (format[1]) {
- case 'a': {
- DCHECK(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
- return 2;
- }
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2
+ PrintLsaSa(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1026,6 +1072,9 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
case SRAV:
Format(instr, "srav 'rd, 'rt, 'rs");
break;
+ case LSA:
+ Format(instr, "lsa 'rd, 'rt, 'rs, 'sa2");
+ break;
case MFHI:
if (instr->Bits(25, 16) == 0) {
Format(instr, "mfhi 'rd");
@@ -1377,12 +1426,12 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() == instr->RsValue()) &&
(instr->RtValue() != 0)) {
- Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
- Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1419,7 +1468,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2");
} else {
@@ -1435,9 +1484,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
case POP76:
if (instr->RsValue() == JIALC) {
- Format(instr, "jialc 'rt, 'imm16x");
+ Format(instr, "jialc 'rt, 'imm16s");
} else {
- Format(instr, "bnezc 'rs, 'imm21x -> 'imm21p4s2");
+ Format(instr, "bnezc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
// ------------- Arithmetic instructions.
@@ -1445,25 +1494,33 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (!IsMipsArchVariant(kMips32r6)) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
- // Check if BOVC or BEQC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BOVC, BEQZALC or BEQC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ if (rs_reg == 0) {
+ Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
case DADDI:
if (IsMipsArchVariant(kMips32r6)) {
- // Check if BNVC or BNEC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BNVC, BNEZALC or BNEC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ if (rs_reg == 0) {
+ Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
@@ -1490,7 +1547,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "lui 'rt, 'imm16x");
} else {
if (instr->RsValue() != 0) {
- Format(instr, "aui 'rt, 'imm16x");
+ Format(instr, "aui 'rt, 'rs, 'imm16x");
} else {
Format(instr, "lui 'rt, 'imm16x");
}
diff --git a/chromium/v8/src/mips/frames-mips.h b/chromium/v8/src/mips/frames-mips.h
index 0452ece222b..849dea28419 100644
--- a/chromium/v8/src/mips/frames-mips.h
+++ b/chromium/v8/src/mips/frames-mips.h
@@ -169,6 +169,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/chromium/v8/src/mips/interface-descriptors-mips.cc b/chromium/v8/src/mips/interface-descriptors-mips.cc
index b85b1cbf4d7..3f4fb380282 100644
--- a/chromium/v8/src/mips/interface-descriptors-mips.cc
+++ b/chromium/v8/src/mips/interface-descriptors-mips.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::parameter_count() { return a2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return a1; }
+
+
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -78,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2};
@@ -108,6 +105,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return a0; }
@@ -129,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3, a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3, a2, a1};
@@ -191,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // t0 : original constructor (for IsSuperConstructorCall)
+ // t0 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, t0, a2};
@@ -208,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ Register registers[] = {a1, a3, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a1, a0};
@@ -229,6 +258,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -339,6 +375,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // JSFunction
+ a3, // the new target
a0, // actual number of arguments
a2, // expected number of arguments
};
@@ -371,33 +408,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
+ a0, // argument count (not including receiver)
+ a2, // address of first argument
+ a1 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
- a2, // type vector
+ a0, // argument count (not including receiver)
+ a3, // new target
+ a1, // constructor to call
+ a2 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a0, // argument count (including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
+ a0, // argument count (argc)
+ a2, // address of first argument (argv)
+ a1 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/chromium/v8/src/mips/macro-assembler-mips.cc b/chromium/v8/src/mips/macro-assembler-mips.cc
index e4cf09798b1..3c866ac4538 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/macro-assembler-mips.cc
@@ -13,19 +13,21 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips/macro-assembler-mips.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
has_double_zero_reg_set_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -146,7 +148,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
+ int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -432,10 +434,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1053,6 +1052,19 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
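+ // Computes rd = rt + (rs << sa). On R6 this is a single lsa instruction;
+ // otherwise it expands to a shift plus an add, routing through 'scratch'
+ // when rd aliases rt so the shift does not clobber the addend.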
+ if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
+ lsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ sll(tmp, rs, sa);
+ Addu(rd, rt, tmp);
+ }
+}
+
+
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
@@ -1266,50 +1278,40 @@ void MacroAssembler::Ins(Register rt,
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
FPURegister scratch) {
- // Move the data from fs to t8.
- mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
+ // In FP64 mode we do the conversion from a 64-bit (long) integer.
+ if (IsFp64Mode()) {
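+ // Zero-extend rs into the 64-bit FPU register (low word = rs, high word
+ // = 0) and convert from a 64-bit integer; the zero-extended value is
+ // non-negative, so the conversion is exact.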
+ mtc1(rs, scratch);
+ Mthc1(zero_reg, scratch);
+ cvt_d_l(fd, scratch);
+ } else {
+ // Convert rs to an FP value in fd.
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
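+ // In effect: fd = (double)(int32_t)rs + ((int32_t)rs < 0 ? 2^32 : 0),
+ // which reproduces the unsigned value of rs exactly.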
+ Label msb_clear, conversion_done;
+ // For a value that is < 2^31, regard it as a signed positive word.
+ Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
+ mtc1(rs, fd);
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
+ li(at, 0x41F00000); // FP value: 2^32.
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
+ // For unsigned inputs >= 2^31, we convert to double as a signed int32,
+ // then add 2^32 to move the result back to the unsigned range 2^31..2^32-1.
+ mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
- Label conversion_done;
+ cvt_d_w(fd, fd);
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_d(fd, fd, scratch);
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(zero_reg, scratch);
- Mthc1(at, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
+ bind(&msb_clear);
+ cvt_d_w(fd, fd);
- bind(&conversion_done);
+ bind(&conversion_done);
+ }
}
@@ -1437,13 +1439,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (!IsMipsArchVariant(kMips32r6)) {
if (long_branch) {
Label skip;
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1f(&skip);
nop();
- Jr(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
@@ -1455,13 +1457,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
- Jr(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
@@ -1477,7 +1479,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- Jr(target, bd);
+ BranchLong(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
@@ -1937,28 +1939,30 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+ DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchShort(L, bdslot);
} else {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
} else {
BranchShort(L, bdslot);
}
@@ -1970,17 +1974,15 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
}
}
} else {
@@ -1989,10 +1991,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- Jr(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
@@ -2011,7 +2013,10 @@ void MacroAssembler::Branch(Label* L,
}
-void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset);
// Emit a nop in the branch delay slot if required.
@@ -2020,549 +2025,543 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
}
-void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- DCHECK(!rs.is(zero_reg));
- Register r2 = no_reg;
- Register scratch = at;
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ bc(offset);
+}
- if (rt.is_reg()) {
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
- // rt.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- r2 = rt.rm_;
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison.
- case greater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- bne(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
- } else {
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- beq(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchShortHelperR6(offset, nullptr);
} else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- if (rt.imm32_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
- }
- break;
- case ne:
- if (rt.imm32_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
- }
- break;
- // Signed comparison.
- case greater:
- if (rt.imm32_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm32_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm32_ == 0) {
- bltz(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm32_ == 0) {
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm32_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm32_ == 0) {
- b(offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm32_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm32_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(is_int16(offset));
+ BranchShortHelper(offset, nullptr, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BranchShortHelper(0, L, bdslot);
+ }
+}
- b(shifted_branch_offset(L, false));
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm().is(zero_reg);
+ } else {
+ return rt.immediate() == 0;
+ }
}
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
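+ // Returns the branch offset in units of instructions (hence the >> 2):
+ // the encoded distance to label L when one is supplied, otherwise the
+ // caller-provided offset, which must already fit in 'bits'.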
+ if (L) {
+ offset = branch_offset_helper(L, bits) >> 2;
+ } else {
+ DCHECK(is_intn(offset, bits));
+ }
+ return offset;
+}
- int32_t offset = 0;
+
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+ Register scratch) {
Register r2 = no_reg;
- Register scratch = at;
if (rt.is_reg()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ return r2;
+}
+
+
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
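+ // Each case below picks the offset width its instruction needs and returns
+ // false when the target is out of range, letting the caller fall back to a
+ // long branch. 'at' is the default scratch; t8 is used when rs is at.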
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
break;
case eq:
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // The pre-R6 beq is used here to keep the code patchable; otherwise
+ // bc would be used, which has no condition field and cannot be patched.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beq(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqc(rs, scratch, offset);
+ }
break;
case ne:
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // The pre-R6 bne is used here to keep the code patchable; otherwise no
+ // instruction would need to be emitted at all.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bne(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnec(rs, scratch, offset);
+ }
break;
+
// Signed comparison.
case greater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(scratch, rs, offset);
}
break;
case greater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(rs, scratch, offset);
}
break;
case less:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(rs, scratch, offset);
}
break;
case less_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(scratch, rs, offset);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(rs, scratch, offset);
}
break;
case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ break; // No code needs to be emitted.
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(scratch, rs, offset);
}
break;
default:
UNREACHABLE();
}
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ }
+ CheckTrampolinePoolQuick(1);
+ return true;
+}
+
+
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
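+ // Pre-R6 short branches always use a 16-bit offset; as in the R6 helper,
+ // returning false lets the caller emit a long branch instead.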
+
+ Register scratch = at;
+ int32_t offset32;
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
break;
case eq:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, scratch, offset32);
}
break;
case ne:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, scratch, offset32);
}
break;
+
// Signed comparison.
case greater:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgtz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case greater_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
case less:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else if (is_int16(rt.imm32_)) {
- slti(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case less_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ blez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Ugreater_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
- case Uless:
- if (rt.imm32_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm32_)) {
- sltiu(scratch, rs, rt.imm32_);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ case Uless:
+ if (IsZero(rt)) {
+ return true; // No code needs to be emitted.
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Uless_equal:
- if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
default:
UNREACHABLE();
}
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
}
-void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
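+ // R6 compact branches have no delay slot, so they are only used when the
+ // caller did not explicitly ask for one (bdslot == PROTECT).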
+ if (!L) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ return BranchShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
+}
+
+
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(0, L, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
} else {
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
@@ -2574,13 +2573,11 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
}
} else {
@@ -2588,20 +2585,19 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
}
}
}
-// We need to use a bgezal or bltzal, but they can't be used directly with the
-// slt instructions. We could use sub or add instead but we would miss overflow
-// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLinkShort(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
// Emit a nop in the branch delay slot if required.
@@ -2610,371 +2606,306 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset,
}
-void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
-
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
-
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ balc(offset);
+}
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(int32_t offset,
+ BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchAndLinkShortHelperR6(offset, nullptr);
} else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
-
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
+ DCHECK(is_int16(offset));
+ BranchAndLinkShortHelper(offset, nullptr, bdslot);
+ }
+}
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BranchAndLinkShortHelper(0, L, bdslot);
}
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
- bal(shifted_branch_offset(L, false));
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
+ switch (cond) {
+ case cc_always:
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ break;
+ case eq:
+ if (!is_near(L, bits)) return false;
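+ // rs == rt iff rs - rt == 0, so the compare-and-link reduces to a
+ // zero test on the difference.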
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case ne:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case greater_equal:
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
+ case less:
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case less_equal:
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
-void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset = 0;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case Uless:
+ // rs < rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Uless_equal:
+ // rs <= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ default:
+ UNREACHABLE();
}
+ return true;
+}
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
-
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
+// Pre-R6 we need to use a bgezal or bltzal, but they can't be used directly
+// with the slt instructions. We could use sub or add instead but we would miss
+// overflow cases, so we keep slt and add an intermediate third instruction.
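+// After 'addiu scratch, scratch, -1' the 0/1 result of slt becomes -1/0, so
+// bgezal (taken when scratch >= 0) links exactly when slt produced 1 and
+// bltzal (taken when scratch < 0) exactly when it produced 0.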
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
- default:
- UNREACHABLE();
- }
- } else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ Register scratch = t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ switch (cond) {
+ case cc_always:
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ // Signed comparison.
+ case greater:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ // Unsigned comparison.
+ case Ugreater:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
- default:
- UNREACHABLE();
- }
+ default:
+ UNREACHABLE();
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
-
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
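+ // Mirrors BranchShortCheck: use the compact R6 branch-and-link forms when
+ // no delay slot was requested, and the classic bal sequences otherwise.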
+ if (!L) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
+ return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
}
@@ -3064,6 +2995,10 @@ void MacroAssembler::Call(Register target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+#ifdef DEBUG
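+ // If the previous instruction is a compact branch, the assembler inserts a
+ // padding nop before the call (a jump must not occupy the forbidden slot),
+ // so allow for one extra instruction in the size check below.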
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -3078,8 +3013,10 @@ void MacroAssembler::Call(Register target,
if (bd == PROTECT)
nop();
- DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
}
@@ -3157,43 +3094,51 @@ void MacroAssembler::Ret(Condition cond,
}
-void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
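+ // Use a compact R6 branch when the label is still unbound or known to be
+ // within range; otherwise materialize the absolute address in 'at' and
+ // jump through it.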
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
- uint32_t imm32;
- imm32 = jump_address(L);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- jr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+ if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jalr(at);
- uint32_t imm32;
- imm32 = jump_address(L);
- { BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- jalr(at);
-
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
@@ -3326,12 +3271,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(t9));
- DCHECK(!scratch2.is(t9));
- DCHECK(!result.is(t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3347,54 +3287,52 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ lw(result, MemOperand(top_address));
+ lw(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ lw(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ lw(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
+ And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
}
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(object_size));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sw(scratch2, MemOperand(topaddr));
+ Addu(result_end, result, Operand(object_size));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
+ sw(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3403,28 +3341,25 @@ void MacroAssembler::Allocate(int object_size,
}
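
Both Allocate overloads follow the same bump-pointer protocol: load top (and limit), optionally pad to double alignment by storing a one-word filler, compute the candidate end, branch to gc_required if it exceeds the limit, and only then publish the new top. A host-side model of that sequence, assuming a 32-bit heap with 8-byte double alignment (the Space struct and function name are illustrative):

    #include <cstddef>
    #include <cstdint>

    struct Space { uintptr_t top, limit; };  // allocation_top / allocation_limit

    // Returns the object start, or 0 where the emitted code jumps to gc_required.
    uintptr_t BumpAllocate(Space* s, size_t size_bytes, bool double_align) {
      uintptr_t result = s->top;
      if (double_align && (result & 7u) != 0) {
        // The real code stores the one-pointer filler map at this word.
        result += 4;                          // kDoubleSize / 2 on 32-bit MIPS
      }
      uintptr_t result_end = result + size_bytes;
      if (result_end > s->limit) return 0;    // Branch(gc_required, Ugreater, ...)
      s->top = result_end;                    // sw(result_end, top_address)
      return result;
    }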
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
+ li(scratch, 0x7191);
+ li(result_end, 0x7291);
}
jmp(gc_required);
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(t9));
- DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, t9));
+ DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3433,45 +3368,42 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ lw(result, MemOperand(top_address));
+ lw(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ lw(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ lw(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
+ And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
}
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
@@ -3480,19 +3412,19 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- sll(scratch2, object_size, kPointerSizeLog2);
- Addu(scratch2, result, scratch2);
+ sll(result_end, object_size, kPointerSizeLog2);
+ Addu(result_end, result, result_end);
} else {
- Addu(scratch2, result, Operand(object_size));
+ Addu(result_end, result, Operand(object_size));
}
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
}
- sw(scratch2, MemOperand(topaddr));
+ sw(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3668,29 +3600,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.code_ = i;
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- lw(tmp, FieldMemOperand(src, i * kPointerSize));
- sw(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ sw(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
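
AllocateJSValue writes the four slots in a fixed order: map from the constructor's initial map, then properties and elements both pointing at the empty fixed array, then the wrapped primitive; the STATIC_ASSERT pins the object to exactly four pointer-sized fields. An illustrative mirror of that layout (field order taken from the stores above, offsets assumed rather than quoted from V8 headers):

    #include <cstdint>

    struct JSValueLayout {             // in the order the stores above run
      uintptr_t map;                   // HeapObject::kMapOffset
      uintptr_t properties;            // JSObject::kPropertiesOffset
      uintptr_t elements;              // JSObject::kElementsOffset
      uintptr_t value;                 // JSValue::kValueOffset
    };
    static_assert(sizeof(JSValueLayout) == 4 * sizeof(uintptr_t),
                  "mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize)");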
@@ -3762,16 +3690,16 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
Branch(&entry);
bind(&loop);
- sw(filler, MemOperand(start_offset));
- Addu(start_offset, start_offset, kPointerSize);
+ sw(filler, MemOperand(current_address));
+ Addu(current_address, current_address, kPointerSize);
bind(&entry);
- Branch(&loop, ult, start_offset, Operand(end_offset));
+ Branch(&loop, ult, current_address, Operand(end_address));
}
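
The filler loop branches to the bottom test first, so an empty range performs no store at all. Its C++ equivalent is a plain word loop (signature illustrative):

    #include <cstdint>

    void FillFields(uintptr_t* current, uintptr_t* end, uintptr_t filler) {
      while (current < end) {  // Branch(&loop, ult, current_address, end_address)
        *current++ = filler;   // sw(filler, ...) + Addu(..., kPointerSize)
      }
    }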
@@ -3822,6 +3750,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch3,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
+ scratch3));
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3876,7 +3806,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- Register untagged_value = elements_reg;
+ Register untagged_value = scratch2;
SmiUntag(untagged_value, value_reg);
mtc1(untagged_value, f2);
cvt_d_w(f0, f2);
@@ -4041,8 +3971,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4062,7 +3990,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(a0));
DCHECK(expected.is_immediate() || expected.reg().is(a2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4090,11 +4017,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -4112,21 +4034,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ li(t0, Operand(step_in_enabled));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(a1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- Label done;
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ }
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = t0;
+ lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4143,6 +4122,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -4152,18 +4132,18 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function.is(a1));
Register expected_reg = a2;
- Register code_reg = a3;
+ Register temp_reg = t0;
- lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
sra(expected_reg, expected_reg, kSmiTagSize);
- lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, new_target, expected, actual, flag,
+ call_wrapper);
}
@@ -4181,11 +4161,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4357,108 +4333,161 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::AddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
if (right.is_reg()) {
- AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- Addu(dst, left, right.immediate()); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- // Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(overflow_dst, dst, t9);
- and_(overflow_dst, overflow_dst, scratch);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Register right_reg = t9;
+ DCHECK(!left.is(right_reg));
+ li(right_reg, Operand(right));
+ AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
} else {
- Addu(dst, left, right.immediate());
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, dst, t9);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Addu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Addu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, dst, scratch);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- if (left.is(right) && dst.is(left)) {
- DCHECK(!dst.is(t9));
- DCHECK(!scratch.is(t9));
- DCHECK(!left.is(t9));
- DCHECK(!right.is(t9));
- DCHECK(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
+void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ DCHECK(!dst.is(scratch));
+ Register left_reg = left.is(dst) ? scratch : left;
+ Register right_reg = right.is(dst) ? t9 : right;
+ DCHECK(!dst.is(left_reg));
+ DCHECK(!dst.is(right_reg));
+ Move(left_reg, left);
+ Move(right_reg, right);
+ addu(dst, left, right);
+ bnvc(left_reg, right_reg, no_overflow_label);
+ } else {
+ bovc(left, right, overflow_label);
+ addu(dst, left, right);
+ if (no_overflow_label) bc(no_overflow_label);
+ }
} else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
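
Every pre-r6 path above evaluates the classic two's-complement rule: a signed add overflows exactly when both operands share a sign and the result's sign differs, i.e. when (dst ^ left) & (dst ^ right) is negative, which is what BranchOvfHelper's lt/ge tests against zero consume. A checkable model (function name illustrative):

    #include <cassert>
    #include <cstdint>

    bool AddOverflows(int32_t left, int32_t right) {
      // Wrapping add via unsigned arithmetic, like addu.
      int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                         static_cast<uint32_t>(right));
      int32_t overflow = (dst ^ left) & (dst ^ right);  // sign bit marks overflow
      return overflow < 0;                              // lt vs. zero_reg
    }

    int main() {
      assert(AddOverflows(INT32_MAX, 1));
      assert(!AddOverflows(INT32_MAX, -1));
      assert(AddOverflows(INT32_MIN, -1));
      return 0;
    }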
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+void MacroAssembler::SubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
if (right.is_reg()) {
- SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
- Subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
+ Subu(dst, left, right.immediate()); // Left is overwritten.
// Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, scratch, t9); // scratch is original left.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
+      xor_(scratch, dst, scratch);  // scratch = dst ^ original left.
and_(overflow_dst, scratch, overflow_dst);
} else {
Subu(dst, left, right);
xor_(overflow_dst, dst, left);
// Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, left, t9);
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, left, scratch);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
+void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
DCHECK(!overflow_dst.is(left));
DCHECK(!overflow_dst.is(right));
DCHECK(!scratch.is(left));
@@ -4468,8 +4497,9 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
// left == right, let's not make that restriction here.
if (left.is(right)) {
mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
}
if (dst.is(left)) {
@@ -4490,6 +4520,7 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
xor_(scratch, left, right);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
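
Subtraction uses the companion rule: left - right overflows exactly when the operands' signs differ and the result's sign differs from left's, i.e. when (dst ^ left) & (left ^ right) is negative, matching the xor_/and_ sequences above. A sketch:

    #include <cassert>
    #include <cstdint>

    bool SubOverflows(int32_t left, int32_t right) {
      int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                         static_cast<uint32_t>(right));
      return ((dst ^ left) & (left ^ right)) < 0;
    }

    int main() {
      assert(SubOverflows(INT32_MIN, 1));
      assert(SubOverflows(INT32_MAX, -1));
      assert(!SubOverflows(0, 0));  // the left == right fast path above
      return 0;
    }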
@@ -4525,24 +4556,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
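
The new TailCallRuntime derives the argument count from the runtime function table instead of a caller-supplied pair; a negative nargs marks a variadic runtime entry, in which case the caller has already set up the argument count register. That is why PrepareCEntryArgs is guarded rather than unconditional.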
@@ -4564,34 +4584,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- lw(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, native_context_index);
- // Load the code entry point from the builtins object.
- lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, a1);
+ InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -4728,48 +4724,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- lw(dst, GlobalObjectOperand());
- lw(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- lw(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- lw(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(at, FieldMemOperand(scratch, offset));
+ lw(scratch, NativeContextMemOperand());
+ lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(map_in_out, FieldMemOperand(scratch, offset));
+ lw(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- lw(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- lw(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ lw(dst, NativeContextMemOperand());
+ lw(dst, ContextMemOperand(dst, index));
}
@@ -5172,6 +5149,17 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5442,8 +5430,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -5477,28 +5465,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- DCHECK(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5514,112 +5480,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
  // not have a 1 there, we only need to check one bit.
lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- sll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, FieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- srl(t9, t9, 1);
- bind(&skip);
- }
- Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ Branch(value_is_white, eq, t8, Operand(zero_reg));
}
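
This commit also flips the mark-bit encoding (black is now "11", grey "10"), so JumpIfWhite only has to test the first bit of the pair: black and grey both set it, white ("00") does not. A sketch of the two-bit cell tests, assuming the mask selects the first bit and the second bit is the next one up (as the sll-by-1 in the removed debug check implies):

    #include <cstdint>

    // White iff the first mark bit is clear.
    bool IsWhite(uint32_t bitmap_word, uint32_t mask) {
      return (bitmap_word & mask) == 0;  // Branch(value_is_white, eq, ...)
    }

    // Black iff both bits of the pair are set (HasColor(..., 1, 1)).
    bool IsBlack(uint32_t bitmap_word, uint32_t mask) {
      uint32_t pair = mask | (mask << 1);
      return (bitmap_word & pair) == pair;
    }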
@@ -5778,8 +5655,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
@@ -5821,17 +5701,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -5842,18 +5718,19 @@ bool AreAliased(Register reg1,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
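
AreAliased is a counting argument: OR each valid register's bit into a RegList, then compare the number of valid registers with the number of distinct bits; any duplicate collapses two entries onto one bit. A standalone model (the Reg stub stands in for Register):

    #include <bitset>
    #include <cstdint>
    #include <initializer_list>

    struct Reg { int code; };      // illustrative stand-in for Register
    const Reg no_reg = {-1};

    bool AreAliased(std::initializer_list<Reg> regs) {
      int n_valid = 0;
      uint32_t bits = 0;
      for (Reg r : regs) {
        if (r.code < 0) continue;  // is_valid(): skip no_reg defaults
        ++n_valid;
        bits |= uint32_t{1} << r.code;
      }
      // A repeated register sets the same bit twice, so the popcount lags.
      return n_valid != static_cast<int>(std::bitset<32>(bits).count());
    }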
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
@@ -5865,7 +5742,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
@@ -5884,25 +5761,10 @@ void CodePatcher::Emit(Address addr) {
}
-void CodePatcher::ChangeBranchCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- DCHECK(Assembler::IsBranch(instr));
- uint32_t opcode = Assembler::GetOpcodeField(instr);
- // Currently only the 'eq' and 'ne' cond values are supported and the simple
- // branch instructions (with opcode being the branch type).
- // There are some special cases (see Assembler::IsBranch()) so extending this
- // would be tricky.
- DCHECK(opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL);
- opcode = (cond == eq) ? BEQ : BNE;
- instr = (instr & ~kOpcodeMask) | opcode;
- masm_.emit(instr);
+void CodePatcher::ChangeBranchCondition(Instr current_instr,
+ uint32_t new_opcode) {
+ current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
+ masm_.emit(current_instr);
}
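
The rewritten ChangeBranchCondition is a pure bit splice: on MIPS the primary opcode occupies the top six bits of the instruction word, so clearing kOpcodeMask and OR-ing in the caller's opcode turns, say, a BEQ into a BNE while leaving rs, rt and the 16-bit offset untouched. A sketch with the architectural mask spelled out:

    #include <cstdint>

    const uint32_t kOpcodeMask = 0xFC000000u;  // bits 31..26: primary opcode

    uint32_t ChangeBranchCondition(uint32_t instr, uint32_t new_opcode) {
      // Keep registers and offset; replace only the opcode field.
      return (instr & ~kOpcodeMask) | (new_opcode & kOpcodeMask);
    }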
diff --git a/chromium/v8/src/mips/macro-assembler-mips.h b/chromium/v8/src/mips/macro-assembler-mips.h
index 1608c951b65..4f6a3c868b7 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/mips/macro-assembler-mips.h
@@ -13,17 +13,19 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_v0_Code};
-const Register kReturnRegister1 = {kRegister_v1_Code};
-const Register kJSFunctionRegister = {kRegister_a1_Code};
+const Register kReturnRegister0 = {Register::kCode_v0};
+const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
-const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_t3_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_t4_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_t5_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_t6_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_t3};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
// Forward declaration.
class JumpTarget;
@@ -97,26 +99,23 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
- Register reg8 = no_reg);
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -139,11 +138,8 @@ inline MemOperand CFunctionArgumentOperand(int index) {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
@@ -164,9 +160,9 @@ class MacroAssembler: public Assembler {
Name(target, COND_ARGS, bd); \
}
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+ DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
@@ -203,6 +199,8 @@ class MacroAssembler: public Assembler {
Ret(cond, rs, rt, bd);
}
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
void Branch(Label* L,
Condition cond,
Register rs,
@@ -383,22 +381,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -535,12 +521,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+  void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -584,6 +566,12 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -645,7 +633,10 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
+ void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
void Pref(int32_t hint, const MemOperand& rs);
@@ -778,7 +769,6 @@ class MacroAssembler: public Assembler {
// FPU macros. These do not handle special cases like NaN or +- inf.
// Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
// Convert double to unsigned word.
@@ -938,8 +928,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -952,7 +949,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -970,15 +967,20 @@ class MacroAssembler: public Assembler {
// JavaScript invokes.
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1018,9 +1020,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
@@ -1029,12 +1028,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+  // not including |end_address| are overwritten with the value in |filler|.
+  // At the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// -------------------------------------------------------------------------
// Support functions.
@@ -1184,45 +1182,42 @@ class MacroAssembler: public Assembler {
// Usage: first call the appropriate arithmetic function, then call one of the
// jump functions with the overflow_dst register as the second parameter.
- void AdduAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
+ inline void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
- void AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch = at);
+ inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
- void SubuAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
+ void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
- void SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch = at);
+ void AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
- void BranchOnOverflow(Label* label,
- Register overflow_check,
- BranchDelaySlot bd = PROTECT) {
- Branch(label, lt, overflow_check, Operand(zero_reg), bd);
- }
- void BranchOnNoOverflow(Label* label,
- Register overflow_check,
- BranchDelaySlot bd = PROTECT) {
- Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ inline void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
}
- void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
- Ret(lt, overflow_check, Operand(zero_reg), bd);
+ inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
}
- void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
- Ret(ge, overflow_check, Operand(zero_reg), bd);
- }
+ void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1259,6 +1254,14 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
}
// Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles, bd);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT) {
@@ -1270,17 +1273,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
int num_arguments,
BranchDelaySlot bd = PROTECT);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1336,13 +1331,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in a1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1408,14 +1396,23 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Addu(reg, reg, reg);
}
+ void SmiTag(Register dst, Register src) { Addu(dst, src, src); }
+
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
void SmiTagCheckOverflow(Register reg, Register overflow);
void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
- void SmiTag(Register dst, Register src) {
- Addu(dst, src, src);
+ void BranchOnOverflow(Label* label, Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void BranchOnNoOverflow(Label* label, Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, ge, overflow_check, Operand(zero_reg), bd);
}
+
  // Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
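SmiTag's Addu(dst, src, src) above encodes a smi as value << 1 with a zero tag bit, so tagging overflows exactly when the value needs more than 31 signed bits. A standalone sketch of the encoding and of the condition SmiTagCheckOverflow detects (illustrative names, not V8's):

#include <cstdint>

// value << 1 with tag bit 0; wraps the same way Addu(dst, src, src) does.
int32_t SmiTag(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // Arithmetic shift.

// Tagging loses information iff value lies outside [-2^30, 2^30 - 1];
// this is what the overflow register from SmiTagCheckOverflow signals.
bool SmiTagOverflows(int32_t value) {
  return value < -(1 << 30) || value > (1 << 30) - 1;
}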
@@ -1485,6 +1482,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1625,21 +1626,39 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments);
- void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void Jr(Label* L, BranchDelaySlot bdslot);
- void Jalr(Label* L, BranchDelaySlot bdslot);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
// Common implementation of BranchF functions for the different formats.
void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
@@ -1653,8 +1672,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1708,8 +1725,7 @@ class CodePatcher {
DONT_FLUSH
};
- CodePatcher(byte* address,
- int instructions,
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
@@ -1724,7 +1740,7 @@ class CodePatcher {
// Change the condition part of an instruction leaving the rest of the current
// instruction unchanged.
- void ChangeBranchCondition(Condition cond);
+ void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);
private:
byte* address_; // The address of the code being patched.
@@ -1744,6 +1760,7 @@ class CodePatcher {
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/chromium/v8/src/mips/simulator-mips.cc b/chromium/v8/src/mips/simulator-mips.cc
index 4ef61abe3db..aa4224a54c2 100644
--- a/chromium/v8/src/mips/simulator-mips.cc
+++ b/chromium/v8/src/mips/simulator-mips.cc
@@ -129,7 +129,7 @@ void MipsDebugger::Stop(Instruction* instr) {
#else // GENERATED_CODE_COVERAGE
-#define UNSUPPORTED() printf("Unsupported instruction.\n");
+#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
static void InitializeCoverage() {}
@@ -589,7 +589,7 @@ void MipsDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -968,7 +968,12 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumFPURegisters; i++) {
FPUregisters_[i] = 0;
}
- FCSR_ = 0;
+ if (IsMipsArchVariant(kMips32r6)) {
+ FCSR_ = kFCSRNaN2008FlagMask;
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
+ FCSR_ = 0;
+ }
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
@@ -995,12 +1000,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -1016,14 +1021,13 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -1068,9 +1072,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
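The Redirection changes above thread the Isolate through explicitly rather than fetching Isolate::Current(). The underlying pattern is a per-isolate linked list that maps each external C function pointer to a trap instruction, reusing entries on repeat lookups. A simplified sketch of just that list discipline (not V8's actual class):

struct Redirection {
  void* external_function;
  Redirection* next;
};

// Walk the per-isolate list; reuse an existing entry for the same C
// function, otherwise prepend a new one.
Redirection* GetRedirection(Redirection*& head, void* external_function) {
  for (Redirection* r = head; r != nullptr; r = r->next) {
    if (r->external_function == external_function) return r;
  }
  head = new Redirection{external_function, head};
  return head;
}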
@@ -1296,6 +1301,129 @@ unsigned int Simulator::get_fcsr_rounding_mode() {
}
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // INT64_MAX (2^63 - 1) cannot be represented exactly as a double, so
+ // max_int64 holds the closest representable value, which is 2^63.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // INT64_MAX (2^63 - 1) cannot be represented exactly as a double, so
+ // max_int64 holds the closest representable value, which is 2^63.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
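The set_fpu_register_*invalid_result* family above all follow one rule: in r6 NaN2008 mode the stored value depends on the input (0 for NaN, the maximum for positive overflow, the minimum for negative overflow), while legacy mode always stores kFPUInvalidResult. A compact sketch of the word-sized rule, with standard-library constants standing in for kFPUInvalidResult / kFPUInvalidResultNegative:

#include <cmath>
#include <cstdint>
#include <limits>

// NaN2008 result of an invalid double -> int32 conversion.
int32_t InvalidResultNaN2008(double original, double rounded) {
  if (std::isnan(original)) return 0;
  if (rounded > std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();  // kFPUInvalidResult.
  return std::numeric_limits<int32_t>::min();    // kFPUInvalidResultNegative.
}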
@@ -1332,6 +1460,8 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(double original, double rounded) {
bool ret = false;
+ // INT64_MAX (2^63 - 1) cannot be represented exactly as a double, so
+ // max_int64 holds the closest representable value, which is 2^63.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1349,7 +1479,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The architecture reference is not entirely clear, but this appears to be required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1396,6 +1526,8 @@ bool Simulator::set_fcsr_round_error(float original, float rounded) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(float original, float rounded) {
bool ret = false;
+ // INT64_MAX (2^63 - 1) cannot be represented exactly as a double, so
+ // max_int64 holds the closest representable value, which is 2^63.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1413,7 +1545,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The architecture reference is not entirely clear, but this appears to be required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
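The change from > to >= above matters because converting INT64_MAX to double rounds up to 2^63, so a rounded value equal to max_int64 is already out of int64 range. A quick standalone check:

#include <cstdint>
#include <cstdio>

int main() {
  double max_int64 = static_cast<double>(INT64_MAX);  // Rounds up to 2^63.
  std::printf("%.1f\n", max_int64);  // Prints 9223372036854775808.0.
  // 2^63 is not representable as int64_t, so "rounded >= max_int64" must
  // flag overflow; the old "rounded > max_int64" missed this boundary.
  return 0;
}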
@@ -2310,7 +2442,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_double(fd_reg(), result);
}
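The operator flip above fixes the |fs| == |ft| tie in what appears to be the minimum-by-magnitude (MINA-style) path: when magnitudes are equal the smaller value must win, and for fs = -3.0, ft = 3.0 the old "fs > ft ? fs : ft" returned 3.0 instead of -3.0. The corrected tie-break in isolation:

// Tie-break when |fs| == |ft|: fall back to the ordinary minimum.
double MinaTieBreak(double fs, double ft) { return fs < ft ? fs : ft; }
// MinaTieBreak(-3.0, 3.0) == -3.0; the old ">" variant gave 3.0.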
@@ -2372,11 +2504,13 @@ void Simulator::DecodeTypeRegisterDRsType() {
set_fpu_register_double(fd_reg(), -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_D: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- double result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ double result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_double(fd_reg(), result);
break;
}
@@ -2413,7 +2547,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case ROUND_W_D: // Round double to word (round half to even).
@@ -2427,7 +2561,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
@@ -2436,7 +2570,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case FLOOR_W_D: // Round double to word towards negative infinity.
@@ -2445,7 +2579,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CEIL_W_D: // Round double to word towards positive infinity.
@@ -2454,7 +2588,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CVT_S_D: // Convert double to float (single).
@@ -2467,7 +2601,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2482,7 +2616,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2502,7 +2636,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2516,7 +2650,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2530,7 +2664,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2775,11 +2909,13 @@ void Simulator::DecodeTypeRegisterSRsType() {
set_fpu_register_float(fd_reg(), -fs);
break;
case SQRT_S:
- set_fpu_register_float(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_S: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- float result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ float result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_float(fd_reg(), result);
break;
}
@@ -2931,7 +3067,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case TRUNC_L_S: { // Mips32r2 instruction.
@@ -2941,7 +3077,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2954,7 +3090,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case FLOOR_L_S: { // Mips32r2 instruction.
@@ -2964,7 +3100,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2981,7 +3117,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2998,7 +3134,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -3011,7 +3147,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CEIL_L_S: { // Mips32r2 instruction.
@@ -3021,7 +3157,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -3070,7 +3206,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_float(fd_reg(), result);
}
@@ -3103,7 +3239,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -3116,7 +3252,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -3245,11 +3381,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
case MFHC1:
set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
- case CTC1:
+ case CTC1: {
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
- FCSR_ = registers_[rt_reg()];
+ int32_t reg = registers_[rt_reg()];
+ if (IsMipsArchVariant(kMips32r6)) {
+ FCSR_ = reg | kFCSRNaN2008FlagMask;
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
+ FCSR_ = reg & ~kFCSRNaN2008FlagMask;
+ }
break;
+ }
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg(), 0);
@@ -3371,9 +3514,19 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
SetResult(rd_reg(), static_cast<int32_t>(alu_out));
break;
case SRAV:
- alu_out = rt() >> rs();
- SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ SetResult(rd_reg(), rt() >> rs());
+ break;
+ case LSA: {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ int8_t sa = lsa_sa() + 1;
+ int32_t res = (rs() << sa) + rt();
+ SetResult(rd_reg(), res);
break;
+ }
case MFHI: // MFHI == CLZ on R6.
if (!IsMipsArchVariant(kMips32r6)) {
DCHECK(sa() == 0);
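The LSA case above computes rd = (rs << (sa + 1)) + rt, r6's shift-scaled add. An equivalent standalone form that sidesteps signed-shift pitfalls (illustrative, not the simulator's code):

#include <cstdint>

// r6 LSA: shift rs left by sa_field + 1 (the 2-bit field gives shifts of
// 1..4) and add rt; int32 wrap-around matches the simulator.
int32_t Lsa(int32_t rs, int32_t rt, int sa_field) {
  int shift = sa_field + 1;
  return static_cast<int32_t>((static_cast<uint32_t>(rs) << shift) +
                              static_cast<uint32_t>(rt));
}

For example, Lsa(index, base, 1) yields base + index * 4, the classic scaled-index address.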
@@ -3736,26 +3889,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
-// Branch instructions common part.
-#define BranchAndLinkHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- set_register(31, current_pc + 2 * Instruction::kInstrSize); \
- } else { \
- next_pc = current_pc + 2 * Instruction::kInstrSize; \
- }
-
-#define BranchHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- } else { \
- next_pc = current_pc + 2 * Instruction::kInstrSize; \
- }
-
-
-// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq).
+// Type 2: instructions using a 16-, 21- or 26-bit immediate (e.g. beq, beqc).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
@@ -3765,20 +3899,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int32_t rt_reg = instr->RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
- int32_t imm21 = instr->Imm21Value();
- int32_t imm26 = instr->Imm26Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
- int64_t ft;
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
int32_t se_imm16 = imm16;
- int32_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfc000000 : 0);
- // Get current pc.
- int32_t current_pc = get_pc();
// Next pc.
int32_t next_pc = bad_ra;
@@ -3791,7 +3919,58 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Used for memory instructions.
int32_t addr = 0x0;
- // ---------- Configuration (and execution for REGIMM).
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](
+ bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int32_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int32_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
+ int bits) {
+ int32_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + Instruction::kInstrSize);
+ }
+ };
+
+ auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+ int32_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
+ }
+ };
+
+
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
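The compact-branch lambdas above sign-extend an n-bit immediate with the shift pair imm <<= 32 - bits; imm >>= 32 - bits, relying on arithmetic right shift of signed values as this file does elsewhere. A standalone equivalent that keeps the left shift well-defined:

#include <cstdint>

// Sign-extend the low "bits" bits of x to a full int32_t. The unsigned
// left shift avoids signed-overflow UB; the final right shift is
// arithmetic on every relevant compiler, as the simulator assumes.
int32_t SignExtend(int32_t x, int bits) {
  int shift = 32 - bits;
  return static_cast<int32_t>(static_cast<uint32_t>(x) << shift) >> shift;
}
// SignExtend(0x1FFFFF, 21) == -1; SignExtend(0x0FFFFF, 21) == 0x0FFFFF.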
@@ -3802,34 +3981,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
uint32_t cc_value = test_fcsr_bit(fcsr_cc);
bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(do_branch);
break;
}
case BC1EQZ:
- ft = get_fpu_register(ft_reg);
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (!(ft & 0x1)) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
break;
case BC1NEZ:
- ft = get_fpu_register(ft_reg);
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (ft & 0x1) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
default:
UNREACHABLE();
@@ -3863,54 +4022,155 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BNE:
BranchHelper(rs != rt);
break;
- case BLEZ:
- BranchHelper(rs <= 0);
- break;
- case BGTZ:
- BranchHelper(rs > 0);
- break;
- case POP66: {
- if (rs_reg) { // BEQZC
- int32_t se_imm21 =
- static_cast<int32_t>(imm21 << (kOpcodeBits + kRsBits));
- se_imm21 = se_imm21 >> (kOpcodeBits + kRsBits);
- if (rs == 0)
- next_pc = current_pc + 4 + (se_imm21 << 2);
- else
- next_pc = current_pc + 4;
+ case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZALC
+ BranchAndLinkCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZALC
+ BranchAndLinkCompactHelper(rt >= 0, 16);
+ } else { // BGEUC
+ BranchCompactHelper(
+ static_cast<uint32_t>(rs) >= static_cast<uint32_t>(rt), 16);
+ }
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ break;
+ case POP07: // BGTZALC, BLTZALC, BLTUC, BGTZ (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZALC
+ BranchAndLinkCompactHelper(rt > 0, 16);
+ } else {
+ if (rt_reg == rs_reg) { // BLTZALC
+ BranchAndLinkCompactHelper(rt < 0, 16);
+ } else { // BLTUC
+ BranchCompactHelper(
+ static_cast<uint32_t>(rs) < static_cast<uint32_t>(rt), 16);
+ }
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ break;
+ case POP26: // BLEZC, BGEZC, BGEC/BLEC / BLEZL (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZC
+ BranchCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZC
+ BranchCompactHelper(rt >= 0, 16);
+ } else { // BGEC/BLEC
+ BranchCompactHelper(rs >= rt, 16);
+ }
+ }
+ }
+ } else { // BLEZL
+ BranchAndLinkHelper(rs <= 0);
+ }
+ break;
+ case POP27: // BGTZC, BLTZC, BLTC/BGTC / BGTZL (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZC
+ BranchCompactHelper(rt > 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BLTZC
+ BranchCompactHelper(rt < 0, 16);
+ } else { // BLTC/BGTC
+ BranchCompactHelper(rs < rt, 16);
+ }
+ }
+ }
+ } else { // BGTZL
+ BranchAndLinkHelper(rs > 0);
+ }
+ break;
+ case POP66: // BEQZC, JIC
+ if (rs_reg != 0) { // BEQZC
+ BranchCompactHelper(rs == 0, 21);
} else { // JIC
next_pc = rt + imm16;
}
break;
- }
- case BC: {
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case POP76: // BNEZC, JIALC
+ if (rs_reg != 0) { // BNEZC
+ BranchCompactHelper(rs != 0, 21);
+ } else { // JIALC
+ set_register(31, get_pc() + Instruction::kInstrSize);
+ next_pc = rt + imm16;
+ }
break;
- }
- case BALC: {
- set_register(31, current_pc + 4);
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case BC:
+ BranchCompactHelper(true, 26);
break;
- }
- // ------------- Arithmetic instructions.
- case ADDI:
- if (HaveSameSign(rs, se_imm16)) {
- if (rs > 0) {
- if (rs <= (Registers::kMaxValue - se_imm16)) {
- SignalException(kIntegerOverflow);
+ case BALC:
+ BranchAndLinkCompactHelper(true, 26);
+ break;
+ case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rs_reg >= rt_reg) { // BOVC
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ BranchCompactHelper(rs > Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs < Registers::kMinValue - rt, 16);
+ }
}
- } else if (rs < 0) {
- if (rs >= (Registers::kMinValue - se_imm16)) {
- SignalException(kIntegerUnderflow);
+ } else {
+ if (rs_reg == 0) { // BEQZALC
+ BranchAndLinkCompactHelper(rt == 0, 16);
+ } else { // BEQC
+ BranchCompactHelper(rt == rs, 16);
}
}
+ } else { // ADDI
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ if (rs <= Registers::kMaxValue - se_imm16) {
+ SignalException(kIntegerOverflow);
+ }
+ } else if (rs < 0) {
+ if (rs >= Registers::kMinValue - se_imm16) {
+ SignalException(kIntegerUnderflow);
+ }
+ }
+ }
+ SetResult(rt_reg, rs + se_imm16);
}
- SetResult(rt_reg, rs + se_imm16);
break;
+ case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6)
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (rs_reg >= rt_reg) { // BNVC
+ if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) {
+ BranchCompactHelper(true, 16);
+ } else {
+ if (rs > 0) {
+ BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs >= Registers::kMinValue - rt, 16);
+ }
+ }
+ } else {
+ if (rs_reg == 0) { // BNEZALC
+ BranchAndLinkCompactHelper(rt != 0, 16);
+ } else { // BNEC
+ BranchCompactHelper(rt != rs, 16);
+ }
+ }
+ }
+ break;
+ // ------------- Arithmetic instructions.
case ADDIU:
SetResult(rt_reg, rs + se_imm16);
break;
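BOVC above branches iff rs + rt overflows signed 32-bit addition, and the code tests this without performing the overflowing add by comparing against Registers::kMaxValue / kMinValue. The same predicate in standalone form (BNVC is its negation):

#include <cstdint>
#include <limits>

// True iff rs + rt overflows int32 (the BOVC branch condition).
// Rearranged comparisons avoid computing the overflowing sum itself.
bool AddOverflows32(int32_t rs, int32_t rt) {
  if ((rs >= 0) != (rt >= 0)) return false;  // Mixed signs never overflow.
  if (rs > 0) return rs > std::numeric_limits<int32_t>::max() - rt;
  if (rs < 0) return rs < std::numeric_limits<int32_t>::min() - rt;
  return false;  // rs == 0 never overflows.
}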
@@ -3930,7 +4190,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
SetResult(rt_reg, rs ^ oe_imm16);
break;
case LUI:
- SetResult(rt_reg, oe_imm16 << 16);
+ if (rs_reg != 0) {
+ // AUI
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ SetResult(rt_reg, rs + (se_imm16 << 16));
+ } else {
+ // LUI
+ SetResult(rt_reg, oe_imm16 << 16);
+ }
break;
// ------------- Memory instructions.
case LB:
@@ -4014,22 +4281,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
break;
- // ------------- JIALC and BNEZC instructions.
- case POP76: {
- // Next pc.
- next_pc = rt + se_imm16;
- // The instruction after the jump is NOT executed.
- int16_t pc_increment = Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + pc_increment);
- }
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
+ int32_t imm21 = instr->Imm21Value();
+ int32_t current_pc = get_pc();
uint8_t rt = (imm21 >> kImm16Bits);
switch (rt) {
case ALUIPC:
@@ -4076,7 +4332,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -4086,9 +4342,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
-#undef BranchHelper
-#undef BranchAndLinkHelper
-
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
@@ -4174,7 +4427,7 @@ void Simulator::Execute() {
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+ if (icount_ == static_cast<uint64_t>(::v8::internal::FLAG_stop_sim_at)) {
MipsDebugger dbg(this);
dbg.Debug();
} else {
diff --git a/chromium/v8/src/mips/simulator-mips.h b/chromium/v8/src/mips/simulator-mips.h
index bd30172d5b9..8efe0bba9c7 100644
--- a/chromium/v8/src/mips/simulator-mips.h
+++ b/chromium/v8/src/mips/simulator-mips.h
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
@@ -34,9 +34,10 @@ typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+ p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -48,14 +49,17 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
// Calculated the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
@@ -167,6 +171,12 @@ class Simulator {
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
@@ -314,6 +324,7 @@ class Simulator {
void DecodeTypeRegisterLRsType();
Instruction* currentInstr_;
+
inline Instruction* get_instr() const { return currentInstr_; }
inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
@@ -333,6 +344,7 @@ class Simulator {
inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
inline int32_t sa() const { return currentInstr_->SaValue(); }
+ inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
inline void SetResult(int32_t rd_reg, int32_t alu_out) {
set_register(rd_reg, alu_out);
@@ -345,6 +357,18 @@ class Simulator {
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Compact branch guard.
+ void CheckForbiddenSlot(int32_t current_pc) {
+ Instruction* instr_after_compact_branch =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ if (instr_after_compact_branch->IsForbiddenInBranchDelay()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ }
+ }
+
// Stop helper functions.
bool IsWatchpoint(uint32_t code);
void PrintWatchpoint(uint32_t code);
@@ -394,7 +418,8 @@ class Simulator {
void SignalException(Exception e);
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -450,13 +475,14 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate) \
+ ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -470,17 +496,19 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
diff --git a/chromium/v8/src/mips64/OWNERS b/chromium/v8/src/mips64/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/mips64/OWNERS
+++ b/chromium/v8/src/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/mips64/assembler-mips64-inl.h b/chromium/v8/src/mips64/assembler-mips64-inl.h
index 16ca33a9f34..09436ed1d41 100644
--- a/chromium/v8/src/mips64/assembler-mips64-inl.h
+++ b/chromium/v8/src/mips64/assembler-mips64-inl.h
@@ -84,36 +84,6 @@ bool Operand::is_reg() const {
}
-int Register::NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumRegisters() {
- return FPURegister::kMaxNumRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableRegisters() {
- return FPURegister::kMaxNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
-}
-
-
-int FPURegister::ToAllocationIndex(FPURegister reg) {
- DCHECK(reg.code() % 2 == 0);
- DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
- DCHECK(reg.is_valid());
- DCHECK(!reg.is(kDoubleRegZero));
- DCHECK(!reg.is(kLithiumScratchDouble));
- return (reg.code() / 2);
-}
-
-
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -122,7 +92,7 @@ void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
int count = Assembler::RelocateInternalReference(rmode_, p, delta);
- CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+ Assembler::FlushICache(isolate_, p, count * sizeof(uint32_t));
}
}
@@ -174,7 +144,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -208,7 +179,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsJ(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -236,7 +207,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -338,8 +309,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
- host_,
+ Assembler::set_target_address_at(isolate_, pc_ + Assembler::kInstrSize, host_,
stub->instruction_start());
}
@@ -356,7 +326,7 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// The pc_ offset of 0 assumes patched debug break slot or return
// sequence.
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -374,7 +344,7 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -468,26 +438,63 @@ void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
}
-void Assembler::emit(Instr x) {
+void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
+ if (IsPrevInstrCompactBranch()) {
+ // Nop instruction to preceed a CTI in forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+
+ ClearCompactBranchState();
+ }
+}
+
+
+void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
+ if (IsPrevInstrCompactBranch()) {
+ if (Instruction::IsForbiddenAfterBranchInstr(x)) {
+ // Nop instruction to precede a CTI in the forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+ }
+ ClearCompactBranchState();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
+ EmittedCompactBranchInstruction();
+ }
CheckTrampolinePoolQuick();
}
-void Assembler::emit(uint64_t x) {
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+ CheckTrampolinePoolQuick();
+}
+
+
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
- *reinterpret_cast<uint64_t*>(pc_) = x;
- pc_ += kInstrSize * 2;
- CheckTrampolinePoolQuick();
+ EmitHelper(x, is_compact_branch);
+}
+
+
+void Assembler::emit(uint64_t data) {
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
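CheckForEmitInForbiddenSlot and EmitHelper above implement the r6 rule that a compact branch's forbidden slot must not hold another control-transfer instruction (CTI): when it would, the assembler pads with a nop, encoded as SPECIAL | SLL with all-zero operands. A minimal sketch of that emit-time decision, where is_compact_branch and is_cti stand in for V8's CompactBranchType and IsForbiddenAfterBranchInstr checks:

#include <cstdint>
#include <vector>

using Instr = uint32_t;
const Instr kNop = 0;  // sll zero_reg, zero_reg, 0, i.e. SPECIAL | SLL.

// Track whether the previous instruction was a compact branch; pad its
// forbidden slot with a nop before emitting a CTI.
struct MiniEmitter {
  std::vector<Instr> buf;
  bool prev_compact_branch = false;

  void Emit(Instr x, bool is_compact_branch, bool is_cti) {
    if (prev_compact_branch && is_cti) buf.push_back(kNop);
    buf.push_back(x);
    prev_compact_branch = is_compact_branch;
  }
};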
diff --git a/chromium/v8/src/mips64/assembler-mips64.cc b/chromium/v8/src/mips64/assembler-mips64.cc
index cb5e164ff9c..9c313a18d69 100644
--- a/chromium/v8/src/mips64/assembler-mips64.cc
+++ b/chromium/v8/src/mips64/assembler-mips64.cc
@@ -64,28 +64,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
-const char* DoubleRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
-}
-
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
@@ -229,31 +207,31 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (kPointerSize & kImm16Mask); // NOLINT
+const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (kPointerSize & kImm16Mask); // NOLINT
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
- | (kRegister_sp_Code << kRtShift)
- | (-kPointerSize & kImm16Mask); // NOLINT
+const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
+ (Register::kCode_sp << kRtShift) |
+ (-kPointerSize & kImm16Mask); // NOLINT
// sd(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern =
+ SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
// ld(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern =
+ LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kLwRegFpOffsetPattern =
+ LW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+const Instr kSwRegFpOffsetPattern =
+ SW | (Register::kCode_fp << kRsShift) | (0 & kImm16Mask); // NOLINT
-const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kLwRegFpNegOffsetPattern = LW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
-const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+const Instr kSwRegFpNegOffsetPattern = SW | (Register::kCode_fp << kRsShift) |
+ (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
@@ -286,6 +264,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
+ EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -294,11 +273,13 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -314,21 +295,21 @@ void Assembler::CodeTargetAlign() {
Register Assembler::GetRtReg(Instr instr) {
Register rt;
- rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ rt.reg_code = (instr & kRtFieldMask) >> kRtShift;
return rt;
}
Register Assembler::GetRsReg(Instr instr) {
Register rs;
- rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ rs.reg_code = (instr & kRsFieldMask) >> kRsShift;
return rs;
}
Register Assembler::GetRdReg(Instr instr) {
Register rd;
- rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ rd.reg_code = (instr & kRdFieldMask) >> kRdShift;
return rd;
}
@@ -455,19 +436,38 @@ bool Assembler::IsBranch(Instr instr) {
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
// Checks if the instruction is a branch.
- return opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL ||
+ bool isBranch =
+ opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
+ opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
(opcode == COP1 && rs_field == BC1NEZ);
+ if (!isBranch && kArchVariant == kMips64r6) {
+ // All three variants of POP10 (BOVC, BEQC, BEQZALC) and
+ // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
+ isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
+ opcode == BALC ||
+ (opcode == POP66 && rs_field != 0) || // BEQZC
+ (opcode == POP76 && rs_field != 0); // BNEZC
+ }
+ return isBranch;
+}
+
+
+bool Assembler::IsBc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a BC or BALC.
+ return opcode == BC || opcode == BALC;
+}
+
+
+bool Assembler::IsBzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is BEQZC or BNEZC.
+ return (opcode == POP66 && GetRsField(instr) != 0) ||
+ (opcode == POP76 && GetRsField(instr) != 0);
}
@@ -487,6 +487,34 @@ bool Assembler::IsBne(Instr instr) {
}
+bool Assembler::IsBeqzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP66 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBnezc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP76 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBeqc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP10 && rs != 0 && rs < rt; // rt != 0 is implied.
+}
+
+
+bool Assembler::IsBnec(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP30 && rs != 0 && rs < rt; // rt != 0 is implied.
+}
+
+
bool Assembler::IsJump(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
@@ -566,7 +594,7 @@ int32_t Assembler::GetBranchOffset(Instr instr) {
bool Assembler::IsLw(Instr instr) {
- return ((instr & kOpcodeMask) == LW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
@@ -588,7 +616,7 @@ Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
bool Assembler::IsSw(Instr instr) {
- return ((instr & kOpcodeMask) == SW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
@@ -614,6 +642,36 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (kArchVariant == kMips64r6) {
+ if (Assembler::IsBc(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBzc(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ }
+ }
+ return Assembler::OffsetSize::kOffset16;
+}
+
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ int bits = OffsetSizeInBits(instr);
+ const int32_t mask = (1 << bits) - 1;
+ bits = 32 - bits;
+
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ int32_t imm = ((instr & mask) << bits) >> (bits - 2);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + Assembler::kBranchPCOffset + imm;
+ }
+}
+
+
int Assembler::target_at(int pos, bool is_internal) {
if (is_internal) {
int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
@@ -643,13 +701,7 @@ int Assembler::target_at(int pos, bool is_internal) {
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- return pos + kBranchPCOffset + imm18;
- }
+ return AddBranchOffset(pos, instr);
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -689,6 +741,21 @@ int Assembler::target_at(int pos, bool is_internal) {
}
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
+ DCHECK((imm & 3) == 0);
+ imm >>= 2;
+
+ const int32_t mask = (1 << bits) - 1;
+ instr &= ~mask;
+ DCHECK(is_intn(imm, bits));
+
+ return instr | (imm & mask);
+}
+
+
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
if (is_internal) {
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
@@ -705,14 +772,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
}
if (IsBranch(instr)) {
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- DCHECK((imm18 & 3) == 0);
-
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- DCHECK(is_int16(imm16));
-
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -801,22 +862,25 @@ void Assembler::bind_to(Label* L, int pos) {
Instr instr = instr_at(fixup_pos);
if (is_internal) {
target_at_put(fixup_pos, pos, is_internal);
- } else if (IsBranch(instr)) {
- if (dist > kMaxBranchOffset) {
- if (trampoline_pos == kInvalidSlotPos) {
- trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
}
- CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos, false);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
+ IsEmittedConstant(instr));
+ target_at_put(fixup_pos, pos, false);
}
- target_at_put(fixup_pos, pos, false);
- } else {
- DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
- IsEmittedConstant(instr));
- target_at_put(fixup_pos, pos, false);
}
}
L->bind_to(pos);
@@ -847,10 +911,48 @@ void Assembler::next(Label* L, bool is_internal) {
bool Assembler::is_near(Label* L) {
- if (L->is_bound()) {
- return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+}
+
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ if (L == nullptr || !L->is_bound()) return true;
+ return ((pc_offset() - L->pos()) <
+ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
+}
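+
+// For illustration, with bits == kOffset16 the bound above is
+// (1 << 17) - 1 = 131071 bytes, less a five-instruction safety margin for
+// code that may still be emitted between this check and the branch itself.
+// Unbound labels (and nullptr) conservatively report true; if the final
+// distance is too far, the trampoline machinery in bind_to() takes over.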
+
+
+bool Assembler::is_near_branch(Label* L) {
+ DCHECK(L->is_bound());
+ return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
+}
+
+
+int Assembler::BranchOffset(Instr instr) {
+  // The offset is 16 bits for all pre-R6 branches and for R6 branches other
+  // than the compact forms handled below.
+ int bits = OffsetSize::kOffset16;
+
+ if (kArchVariant == kMips64r6) {
+ uint32_t opcode = GetOpcodeField(instr);
+ switch (opcode) {
+ // Checks BC or BALC.
+ case BC:
+ case BALC:
+ bits = OffsetSize::kOffset26;
+ break;
+
+ // Checks BEQZC or BNEZC.
+ case POP66:
+ case POP76:
+ if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
+ break;
+ default:
+ break;
+ }
}
- return false;
+
+ return (1 << (bits + 2 - 1)) - 1;
}
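
// For illustration, the reach returned here is (1 << 17) - 1 bytes for
// ordinary 16-bit branches, (1 << 22) - 1 for BEQZC/BNEZC and
// (1 << 27) - 1 for BC/BALC; bind_to() compares the label distance against
// this value when deciding whether to route the branch through a trampoline
// slot.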
@@ -941,49 +1043,56 @@ void Assembler::GenInstrRegister(Opcode opcode,
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- FPURegister ft,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t j) {
- DCHECK(rs.is_valid() && (is_uint21(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (j & kImm21Mask);
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch) {
+ DCHECK(rs.is_valid() && (is_int21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
+ emit(instr, is_compact_branch);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
+ uint32_t offset21) {
+ DCHECK(rs.is_valid() && (is_uint21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
emit(instr);
}
-void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26) {
+void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch) {
DCHECK(is_int26(offset26));
Instr instr = opcode | (offset26 & kImm26Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
@@ -1035,114 +1144,38 @@ uint64_t Assembler::jump_address(Label* L) {
uint64_t Assembler::jump_offset(Label* L) {
int64_t target_pos;
+ int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
} else {
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
return kEndOfJumpChain;
}
}
- int64_t imm = target_pos - pc_offset();
+ int64_t imm = target_pos - (pc_offset() + pad);
DCHECK((imm & 3) == 0);
return static_cast<uint64_t>(imm);
}
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - pc_offset();
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
+ int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos();
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
} else {
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
@@ -1151,9 +1184,9 @@ int32_t Assembler::branch_offset21_compact(Label* L,
}
}
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
+ DCHECK(is_intn(offset, bits + 2));
DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
return offset;
}
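
// For illustration: if the previous instruction was an R6 compact branch,
// the next emission sits in its forbidden slot and the assembler will first
// insert a nop. The one-instruction pad above anticipates that nop, so the
// recorded link position and the computed offset both refer to where this
// branch will actually be placed.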
@@ -1200,14 +1233,14 @@ void Assembler::bal(int16_t offset) {
void Assembler::bc(int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
- GenInstrImmediate(BC, offset);
+ GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::balc(int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(BALC, offset);
+ GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1228,7 +1261,7 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, rt, rt, offset);
+ GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1237,7 +1270,7 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZ, rs, rt, offset);
+ GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1246,7 +1279,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZL, rs, rt, offset);
+ GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1269,7 +1302,8 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
@@ -1283,14 +1317,15 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, rt, rt, offset);
+ DCHECK(!rt.is(zero_reg));
+ GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1299,16 +1334,16 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZ, rs, rt, offset);
+ GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!rs.is(zero_reg));
+ DCHECK(!rt.is(zero_reg));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZL, rs, rt, offset);
+ GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1339,7 +1374,7 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1347,99 +1382,122 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezall(Register rs, int16_t offset) {
- DCHECK(kArchVariant == kMips64r6);
+ DCHECK(kArchVariant != kMips64r6);
DCHECK(!(rs.is(zero_reg)));
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(ADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(ADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(DADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(DADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
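
// For illustration: BOVC, BEQZALC and BEQC share one opcode (still spelled
// ADDI here) and R6 distinguishes them purely by register numbering, hence
// the canonical rs < rt form above. Since equality is symmetric,
// beqc(a1, a0, L) and beqc(a0, a1, L) emit the same instruction.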
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP66 | (rs.code() << kRsShift) | (offset & kImm21Mask);
- emit(instr);
+ GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP76 | (rs.code() << kRsShift) | offset;
- emit(instr);
+ GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::j(int64_t target) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::j(Label* target) {
uint64_t imm = jump_offset(target);
if (target->is_bound()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrJump(static_cast<Opcode>(kJRawMark),
static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
j(imm);
}
@@ -1449,8 +1507,11 @@ void Assembler::j(Label* target) {
void Assembler::jal(Label* target) {
uint64_t imm = jump_offset(target);
if (target->is_bound()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrJump(static_cast<Opcode>(kJalRawMark),
static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
jal(imm);
}
@@ -1472,8 +1533,10 @@ void Assembler::jr(Register rs) {
void Assembler::jal(int64_t target) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1488,9 +1551,7 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::jic(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- Instr instr = POP66 | (JIC << kRsShift) | (rt.code() << kRtShift) |
- (offset & kImm16Mask);
- emit(instr);
+ GenInstrImmediate(POP66, zero_reg, rt, offset);
}
@@ -1726,7 +1787,7 @@ void Assembler::sll(Register rd,
// nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
// instructions.
DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
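
// For illustration: the shift amount occupies a 5-bit sa field, so sa & 0x1F
// makes the truncation explicit for callers passing values >= 32. Shifts of
// 32..63 on 64-bit values go through the dsll32/dsrl32/dsra32 variants
// below, whose hardware shift is the encoded amount plus 32.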
@@ -1736,7 +1797,7 @@ void Assembler::sllv(Register rd, Register rt, Register rs) {
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
@@ -1746,7 +1807,7 @@ void Assembler::srlv(Register rd, Register rt, Register rs) {
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
@@ -1767,7 +1828,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
@@ -1776,7 +1837,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}
@@ -1786,7 +1847,7 @@ void Assembler::dsllv(Register rd, Register rt, Register rs) {
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}
@@ -1812,7 +1873,7 @@ void Assembler::drotrv(Register rd, Register rt, Register rs) {
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}
@@ -1822,17 +1883,37 @@ void Assembler::dsrav(Register rd, Register rt, Register rs) {
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
+}
+
+
+void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  DCHECK(sa > 0 && sa < 5);
+ DCHECK(kArchVariant == kMips64r6);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+ emit(instr);
+}
+
+
+void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  DCHECK(sa > 0 && sa < 5);
+ DCHECK(kArchVariant == kMips64r6);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa - 1) << kSaShift | DLSA;
+ emit(instr);
}
@@ -1965,17 +2046,17 @@ void Assembler::lui(Register rd, int32_t j) {
}
-void Assembler::aui(Register rs, Register rt, int32_t j) {
+void Assembler::aui(Register rt, Register rs, int32_t j) {
  // This instruction uses the same opcode as 'lui'. The difference in
  // encoding is that 'lui' has the zero register in the rs field.
- DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
}
-void Assembler::daui(Register rs, Register rt, int32_t j) {
+void Assembler::daui(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
+ DCHECK(!rs.is(zero_reg));
GenInstrImmediate(DAUI, rs, rt, j);
}
@@ -2037,7 +2118,7 @@ void Assembler::sd(Register rd, const MemOperand& rs) {
void Assembler::addiupc(Register rs, int32_t imm19) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int19(imm19));
- int32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
+ uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2045,7 +2126,7 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
void Assembler::lwpc(Register rs, int32_t offset19) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
- int32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
+ uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2053,7 +2134,7 @@ void Assembler::lwpc(Register rs, int32_t offset19) {
void Assembler::lwupc(Register rs, int32_t offset19) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
- int32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
+ uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2061,23 +2142,23 @@ void Assembler::lwupc(Register rs, int32_t offset19) {
void Assembler::ldpc(Register rs, int32_t offset18) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int18(offset18));
- int32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
+ uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::auipc(Register rs, int16_t imm16) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::aluipc(Register rs, int16_t imm16) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2211,14 +2292,14 @@ void Assembler::movn(Register rd, Register rs, Register rt) {
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 1;
+ rt.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0007) << 2 | 0;
+ rt.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
@@ -2304,6 +2385,16 @@ void Assembler::clz(Register rd, Register rs) {
}
+void Assembler::dclz(Register rd, Register rs) {
+ if (kArchVariant != kMips64r6) {
+    // The pre-R6 dclz instruction requires the same GPR number in the 'rd'
+    // and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, DCLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, DCLZ_R6);
+ }
+}
+
+
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2312,6 +2403,14 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
+void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dins.
+  // Dins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
+}
+
+
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2321,13 +2420,29 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
- // Should be called via MacroAssembler::Ext.
+ // Should be called via MacroAssembler::Dext.
// Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}
+void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dextm.
+ // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
+}
+
+
+void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dextu.
+  // Dextu instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
+}
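+
+// For illustration, the three extract variants split the 64-bit range that
+// one pair of 5-bit msb/lsb fields cannot cover: dext_ handles pos 0..31
+// with size 1..32, dextm handles size 33..64 (msb encoded as size - 1 - 32),
+// and dextu handles pos 32..63 (lsb encoded as pos - 32).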
+
+
void Assembler::bitswap(Register rd, Register rt) {
DCHECK(kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
@@ -2520,7 +2635,7 @@ void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
@@ -2528,7 +2643,7 @@ void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 1;
+ ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -2536,7 +2651,7 @@ void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
@@ -2544,7 +2659,7 @@ void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(kArchVariant == kMips64r2);
FPURegister ft;
- ft.code_ = (cc & 0x0007) << 2 | 0;
+ ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
@@ -2939,7 +3054,6 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
}
-// Debugging.
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta) {
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2993,6 +3107,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
instr_at_put(pc, instr | (imm26 & kImm26Mask));
return 1; // Number of instructions patched.
} else {
+ DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
+ ((instr & kJumpRawMask) == kJalRawMark));
// Unbox raw offset and emit j/jal.
int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
// Sign extend 28-bit offset to 32-bit.
@@ -3024,6 +3140,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -3058,54 +3175,42 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(uint32_t data) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dq(uint64_t data) {
- CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) = data;
- pc_ += sizeof(uint64_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(Label* label) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
uint64_t data;
+ CheckForEmitInForbiddenSlot();
if (label->is_bound()) {
data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
} else {
data = jump_address(label);
internal_reference_positions_.insert(label->pos());
}
- *reinterpret_cast<uint64_t*>(pc_) = data;
- pc_ += sizeof(uint64_t);
-}
-
-
-void Assembler::emit_code_stub_address(Code* stub) {
- CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) =
- reinterpret_cast<uint64_t>(stub->instruction_start());
- pc_ += sizeof(uint64_t);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
if (rmode >= RelocInfo::COMMENT &&
- rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsComment(rmode)
@@ -3120,10 +3225,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
+ RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -3163,9 +3266,14 @@ void Assembler::CheckTrampolinePool() {
  // First we emit a jump (up to two instructions), then the trampoline pool.
{ BlockTrampolinePoolScope block_trampoline_pool(this);
Label after_pool;
- b(&after_pool);
- nop();
+ if (kArchVariant == kMips64r6) {
+ bc(&after_pool);
+ } else {
+ b(&after_pool);
+ nop();
+ }
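+      // On R6 the compact bc needs no delay-slot nop; the call below then
+      // pads with a nop only if the last emitted instruction was a compact
+      // branch, keeping the first pool entry out of its forbidden slot.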
+ EmitForbiddenSlotInstruction();
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{ BlockGrowBufferScope block_buf_growth(this);
@@ -3241,7 +3349,7 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Address pc,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
@@ -3274,7 +3382,7 @@ void Assembler::set_target_address_at(Address pc,
| (itarget & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
}
}
diff --git a/chromium/v8/src/mips64/assembler-mips64.h b/chromium/v8/src/mips64/assembler-mips64.h
index be57f298062..f8d315d835d 100644
--- a/chromium/v8/src/mips64/assembler-mips64.h
+++ b/chromium/v8/src/mips64/assembler-mips64.h
@@ -41,12 +41,33 @@
#include <set>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/mips64/constants-mips64.h"
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(t3) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
+ V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
+ V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
+ V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+// clang-format on
+
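+// For illustration, each list above is an X-macro: callers supply V to stamp
+// out one term per register. A hypothetical sketch:
+//
+//   #define COUNT_REGISTER(R) +1
+//   static const int kNumGeneralRegs = 0 GENERAL_REGISTERS(COUNT_REGISTER);
+//   #undef COUNT_REGISTER
+//
+// The same lists later generate the Register::Code enumerators and the
+// per-register constants, so adding a register in one place updates all of
+// them consistently.
+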
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -72,11 +93,7 @@ namespace internal {
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.
-// Core register.
struct Register {
- static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kMaxNumAllocatableRegisters = 14; // v0 through t2 and cp.
- static const int kSizeInBytes = 8;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -89,117 +106,47 @@ struct Register {
#error Unknown endianness
#endif
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
- reg.is(from_code(kCpRegister)));
- return reg.is(from_code(kCpRegister)) ?
- kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'.
- reg.code() - 2; // zero_reg and 'at' are skipped.
- }
-
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return index == kMaxNumAllocatableRegisters - 1 ?
- from_code(kCpRegister) : // Last index is always the 'cp' register.
- from_code(index + 2); // zero_reg and 'at' are skipped.
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "v0",
- "v1",
- "a0",
- "a1",
- "a2",
- "a3",
- "a4",
- "a5",
- "a6",
- "a7",
- "t0",
- "t1",
- "t2",
- "s7",
- };
- return names[index];
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
Register r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-#define REGISTER(N, C) \
- const int kRegister_ ## N ## _Code = C; \
- const Register N = { C }
-
-REGISTER(no_reg, -1);
-// Always zero.
-REGISTER(zero_reg, 0);
-// at: Reserved for synthetic instructions.
-REGISTER(at, 1);
-// v0, v1: Used when returning multiple values from subroutines.
-REGISTER(v0, 2);
-REGISTER(v1, 3);
-// a0 - a4: Used to pass non-FP parameters.
-REGISTER(a0, 4);
-REGISTER(a1, 5);
-REGISTER(a2, 6);
-REGISTER(a3, 7);
-// a4 - a7 t0 - t3: Can be used without reservation, act as temporary registers
-// and are allowed to be destroyed by subroutines.
-REGISTER(a4, 8);
-REGISTER(a5, 9);
-REGISTER(a6, 10);
-REGISTER(a7, 11);
-REGISTER(t0, 12);
-REGISTER(t1, 13);
-REGISTER(t2, 14);
-REGISTER(t3, 15);
-// s0 - s7: Subroutine register variables. Subroutines that write to these
-// registers must restore their values before exiting so that the caller can
-// expect the values to be preserved.
-REGISTER(s0, 16);
-REGISTER(s1, 17);
-REGISTER(s2, 18);
-REGISTER(s3, 19);
-REGISTER(s4, 20);
-REGISTER(s5, 21);
-REGISTER(s6, 22);
-REGISTER(s7, 23);
-REGISTER(t8, 24);
-REGISTER(t9, 25);
-// k0, k1: Reserved for system calls and interrupt handlers.
-REGISTER(k0, 26);
-REGISTER(k1, 27);
-// gp: Reserved.
-REGISTER(gp, 28);
-// sp: Stack pointer.
-REGISTER(sp, 29);
-// fp: Frame pointer.
-REGISTER(fp, 30);
-// ra: Return address pointer.
-REGISTER(ra, 31);
-
-#undef REGISTER
+// s7: context register
+// s3: lithium scratch
+// s4: lithium scratch2
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
int ToNumber(Register reg);
@@ -207,77 +154,72 @@ int ToNumber(Register reg);
Register ToRegister(int num);
// Coprocessor register.
-struct FPURegister {
- static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
-
- // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
- // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
- // number of Double regs (64-bit regs, or FPU-reg-pairs).
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- // A few double registers are reserved: one as a scratch register and one to
- // hold 0.0.
- // f28: 0.0
- // f30: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
- kNumReservedRegisters;
+ static const int kMaxNumRegisters = Code::kAfterLast;
inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
-
- // TODO(turbofan): Proper support for float32.
- inline static int NumAllocatableAliasedRegisters();
- inline static int ToAllocationIndex(FPURegister reg);
- static const char* AllocationIndexToString(int index);
-
- static FPURegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index * 2);
- }
-
- static FPURegister from_code(int code) {
- FPURegister r = { code };
- return r;
- }
+ // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+ // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+ // number of Double regs (64-bit regs, or FPU-reg-pairs).
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; }
- bool is(FPURegister creg) const { return code_ == creg.code_; }
- FPURegister low() const {
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ DoubleRegister low() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
// Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code;
DCHECK(reg.is_valid());
return reg;
}
- FPURegister high() const {
+ DoubleRegister high() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
    // Find high reg of a Double-reg pair, which is reg + 1.
- DCHECK(code_ % 2 == 0); // Specified Double reg must be even.
- FPURegister reg;
- reg.code_ = code_ + 1;
+ DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
+ DoubleRegister reg;
+ reg.reg_code = reg_code + 1;
DCHECK(reg.is_valid());
return reg;
}
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
+ }
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
+// A few double registers are reserved: one as a scratch register and one to
+// hold 0.0.
+// f28: 0.0
+// f30: scratch register.
+
// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
@@ -287,43 +229,43 @@ struct FPURegister {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister DoubleRegister;
-typedef FPURegister FloatRegister;
-
-const FPURegister no_freg = { -1 };
-
-const FPURegister f0 = { 0 }; // Return value in hard float mode.
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
+typedef DoubleRegister FPURegister;
+typedef DoubleRegister FloatRegister;
+
+const DoubleRegister no_freg = {-1};
+
+const DoubleRegister f0 = {0}; // Return value in hard float mode.
+const DoubleRegister f1 = {1};
+const DoubleRegister f2 = {2};
+const DoubleRegister f3 = {3};
+const DoubleRegister f4 = {4};
+const DoubleRegister f5 = {5};
+const DoubleRegister f6 = {6};
+const DoubleRegister f7 = {7};
+const DoubleRegister f8 = {8};
+const DoubleRegister f9 = {9};
+const DoubleRegister f10 = {10};
+const DoubleRegister f11 = {11};
+const DoubleRegister f12 = {12}; // Arg 0 in hard float mode.
+const DoubleRegister f13 = {13};
+const DoubleRegister f14 = {14}; // Arg 1 in hard float mode.
+const DoubleRegister f15 = {15};
+const DoubleRegister f16 = {16};
+const DoubleRegister f17 = {17};
+const DoubleRegister f18 = {18};
+const DoubleRegister f19 = {19};
+const DoubleRegister f20 = {20};
+const DoubleRegister f21 = {21};
+const DoubleRegister f22 = {22};
+const DoubleRegister f23 = {23};
+const DoubleRegister f24 = {24};
+const DoubleRegister f25 = {25};
+const DoubleRegister f26 = {26};
+const DoubleRegister f27 = {27};
+const DoubleRegister f28 = {28};
+const DoubleRegister f29 = {29};
+const DoubleRegister f30 = {30};
+const DoubleRegister f31 = {31};
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -343,22 +285,22 @@ const FPURegister f31 = { 31 };
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
- bool is_valid() const { return code_ == kFCSRRegister; }
- bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return reg_code == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
void setcode(int f) {
- code_ = f;
+ reg_code = f;
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
@@ -469,27 +411,46 @@ class Assembler : public AssemblerBase {
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
  // Determines whether the label is bound and near enough that a branch
  // instruction, rather than a jump, can reach it.
bool is_near(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_branch(Label* L);
+ inline bool is_near_pre_r6(Label* L) {
+ DCHECK(!(kArchVariant == kMips64r6));
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+ }
+ inline bool is_near_r6(Label* L) {
+ DCHECK(kArchVariant == kMips64r6);
+ return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
+ }
+
+ int BranchOffset(Instr instr);
  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
- int32_t branch_offset(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
- int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t o = branch_offset(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
- }
- int32_t shifted_branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t o = branch_offset_compact(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
}
uint64_t jump_address(Label* L);
uint64_t jump_offset(Label* L);
@@ -500,30 +461,28 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ static void set_target_address_at(
+ Isolate* isolate, Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(pc, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, target, icache_flush_mode);
}
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- INLINE(static void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -538,16 +497,17 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
set_target_address_at(
- instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
- code,
+ isolate,
+ instruction_payload - kInstructionsFor64BitConstant * kInstrSize, code,
target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -631,112 +591,111 @@ class Assembler : public AssemblerBase {
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
- void b(Label* L) { b(branch_offset(L, false)>>2); }
+ inline void b(Label* L) { b(shifted_branch_offset(L)); }
void bal(int16_t offset);
- void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+ inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
void bc(int32_t offset);
- void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+ inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
void balc(int32_t offset);
- void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
+ inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
void beq(Register rs, Register rt, int16_t offset);
- void beq(Register rs, Register rt, Label* L) {
- beq(rs, rt, branch_offset(L, false) >> 2);
+ inline void beq(Register rs, Register rt, Label* L) {
+ beq(rs, rt, shifted_branch_offset(L));
}
void bgez(Register rs, int16_t offset);
void bgezc(Register rt, int16_t offset);
- void bgezc(Register rt, Label* L) {
- bgezc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezc(Register rt, Label* L) {
+ bgezc(rt, shifted_branch_offset(L));
}
void bgeuc(Register rs, Register rt, int16_t offset);
- void bgeuc(Register rs, Register rt, Label* L) {
- bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, shifted_branch_offset(L));
}
void bgec(Register rs, Register rt, int16_t offset);
- void bgec(Register rs, Register rt, Label* L) {
- bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, shifted_branch_offset(L));
}
void bgezal(Register rs, int16_t offset);
void bgezalc(Register rt, int16_t offset);
- void bgezalc(Register rt, Label* L) {
- bgezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, shifted_branch_offset(L));
}
void bgezall(Register rs, int16_t offset);
- void bgezall(Register rs, Label* L) {
- bgezall(rs, branch_offset(L, false)>>2);
+ inline void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L) >> 2);
}
void bgtz(Register rs, int16_t offset);
void bgtzc(Register rt, int16_t offset);
- void bgtzc(Register rt, Label* L) {
- bgtzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, shifted_branch_offset(L));
}
void blez(Register rs, int16_t offset);
void blezc(Register rt, int16_t offset);
- void blezc(Register rt, Label* L) {
- blezc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezc(Register rt, Label* L) {
+ blezc(rt, shifted_branch_offset(L));
}
void bltz(Register rs, int16_t offset);
void bltzc(Register rt, int16_t offset);
- void bltzc(Register rt, Label* L) {
- bltzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzc(Register rt, Label* L) {
+ bltzc(rt, shifted_branch_offset(L));
}
void bltuc(Register rs, Register rt, int16_t offset);
- void bltuc(Register rs, Register rt, Label* L) {
- bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, shifted_branch_offset(L));
}
void bltc(Register rs, Register rt, int16_t offset);
- void bltc(Register rs, Register rt, Label* L) {
- bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, shifted_branch_offset(L));
}
-
void bltzal(Register rs, int16_t offset);
void blezalc(Register rt, int16_t offset);
- void blezalc(Register rt, Label* L) {
- blezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezalc(Register rt, Label* L) {
+ blezalc(rt, shifted_branch_offset(L));
}
void bltzalc(Register rt, int16_t offset);
- void bltzalc(Register rt, Label* L) {
- bltzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, shifted_branch_offset(L));
}
void bgtzalc(Register rt, int16_t offset);
- void bgtzalc(Register rt, Label* L) {
- bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, shifted_branch_offset(L));
}
void beqzalc(Register rt, int16_t offset);
- void beqzalc(Register rt, Label* L) {
- beqzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, shifted_branch_offset(L));
}
void beqc(Register rs, Register rt, int16_t offset);
- void beqc(Register rs, Register rt, Label* L) {
- beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, shifted_branch_offset(L));
}
void beqzc(Register rs, int32_t offset);
- void beqzc(Register rs, Label* L) {
- beqzc(rs, branch_offset21_compact(L, false)>>2);
+ inline void beqzc(Register rs, Label* L) {
+ beqzc(rs, shifted_branch_offset21(L));
}
void bnezalc(Register rt, int16_t offset);
- void bnezalc(Register rt, Label* L) {
- bnezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, shifted_branch_offset(L));
}
void bnec(Register rs, Register rt, int16_t offset);
- void bnec(Register rs, Register rt, Label* L) {
- bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, shifted_branch_offset(L));
}
void bnezc(Register rt, int32_t offset);
- void bnezc(Register rt, Label* L) {
- bnezc(rt, branch_offset21_compact(L, false)>>2);
+ inline void bnezc(Register rt, Label* L) {
+ bnezc(rt, shifted_branch_offset21(L));
}
void bne(Register rs, Register rt, int16_t offset);
- void bne(Register rs, Register rt, Label* L) {
- bne(rs, rt, branch_offset(L, false)>>2);
+ inline void bne(Register rs, Register rt, Label* L) {
+ bne(rs, rt, shifted_branch_offset(L));
}
void bovc(Register rs, Register rt, int16_t offset);
- void bovc(Register rs, Register rt, Label* L) {
- bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, shifted_branch_offset(L));
}
void bnvc(Register rs, Register rt, int16_t offset);
- void bnvc(Register rs, Register rt, Label* L) {
- bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, shifted_branch_offset(L));
}
// Never use the int16_t b(l)cond version with a branch offset
@@ -800,8 +759,8 @@ class Assembler : public AssemblerBase {
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
- void aui(Register rs, Register rt, int32_t j);
- void daui(Register rs, Register rt, int32_t j);
+ void aui(Register rt, Register rs, int32_t j);
+ void daui(Register rt, Register rs, int32_t j);
void dahi(Register rs, int32_t j);
void dati(Register rs, int32_t j);
@@ -829,6 +788,9 @@ class Assembler : public AssemblerBase {
void dsrl32(Register rt, Register rd, uint16_t sa);
void dsra32(Register rt, Register rd, uint16_t sa);
+ // Address computing instructions with shift.
+ void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+ void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
// ------------Memory-instructions-------------
@@ -912,17 +874,21 @@ class Assembler : public AssemblerBase {
void movz_s(FPURegister fd, FPURegister fs, Register rt);
void movz_d(FPURegister fd, FPURegister fs, Register rt);
- void movt_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movt_d(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_s(FPURegister fd, FPURegister fs, uint16_t cc);
- void movf_d(FPURegister fd, FPURegister fs, uint16_t cc);
+ void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
+ void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
void movn_s(FPURegister fd, FPURegister fs, Register rt);
void movn_d(FPURegister fd, FPURegister fs, Register rt);
// Bit twiddling.
void clz(Register rd, Register rs);
+ void dclz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void bitswap(Register rd, Register rt);
void dbitswap(Register rd, Register rt);
void align(Register rd, Register rs, Register rt, uint8_t bp);
@@ -1029,12 +995,12 @@ class Assembler : public AssemblerBase {
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
- void bc1eqz(Label* L, FPURegister ft) {
- bc1eqz(branch_offset(L, false)>>2, ft);
+ inline void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(shifted_branch_offset(L), ft);
}
void bc1nez(int16_t offset, FPURegister ft);
- void bc1nez(Label* L, FPURegister ft) {
- bc1nez(branch_offset(L, false)>>2, ft);
+ inline void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(shifted_branch_offset(L), ft);
}
// Conditions and branches for non MIPSr6.
@@ -1044,12 +1010,12 @@ class Assembler : public AssemblerBase {
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) {
- bc1f(branch_offset(L, false)>>2, cc);
+ inline void bc1f(Label* L, uint16_t cc = 0) {
+ bc1f(shifted_branch_offset(L), cc);
}
void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) {
- bc1t(branch_offset(L, false)>>2, cc);
+ inline void bc1t(Label* L, uint16_t cc = 0) {
+ bc1t(shifted_branch_offset(L), cc);
}
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
@@ -1104,7 +1070,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1139,9 +1105,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dd(Label* label);
- // Emits the address of the code stub's first instruction.
- void emit_code_stub_address(Code* stub);
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1170,8 +1133,16 @@ class Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsBc(Instr instr);
+ static bool IsBzc(Instr instr);
+
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
+ static bool IsBeqzc(Instr instr);
+ static bool IsBnezc(Instr instr);
+ static bool IsBeqc(Instr instr);
+ static bool IsBnec(Instr instr);
+
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
@@ -1230,6 +1201,8 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
+ bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1296,6 +1269,15 @@ class Assembler : public AssemblerBase {
return block_buffer_growth_;
}
+ void EmitForbiddenSlotInstruction() {
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ ClearCompactBranchState();
+ }
+ }
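EmitForbiddenSlotInstruction() exists because MIPSR6 compact branches have no delay slot; instead, the instruction slot after a compact branch (the "forbidden slot") must not contain another branch, so the assembler pads with a nop when needed. A hypothetical standalone model of the bookkeeping, not the real assembler state:

// Tracks whether the previously emitted instruction was a compact
// branch; if so, the next branch must be preceded by a nop.
struct CompactBranchTracker {
  bool prev_was_compact_branch = false;

  void OnEmit(bool is_compact_branch) {
    prev_was_compact_branch = is_compact_branch;
  }

  // Returns true when a nop must fill the forbidden slot first.
  bool NeedsForbiddenSlotNop() const { return prev_was_compact_branch; }
};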
+
+ inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+
private:
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
@@ -1335,12 +1317,19 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Readable constants for compact branch handling in emit()
+ enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
+
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
- inline void emit(Instr x);
+ inline void emit(Instr x,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
inline void emit(uint64_t x);
- inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ inline void CheckForEmitInForbiddenSlot();
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -1392,21 +1381,22 @@ class Assembler : public AssemblerBase {
SecondaryField func = NULLSF);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register r1,
- FPURegister r2,
- int32_t j);
- void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
- void GenInstrImmediate(Opcode opcode, int32_t offset26);
-
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, Register rt, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, SecondaryField SF, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register r1, FPURegister r2, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
+ void GenInstrImmediate(
+ Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
void GenInstrJump(Opcode opcode,
uint32_t address);
@@ -1480,12 +1470,17 @@ class Assembler : public AssemblerBase {
bool trampoline_emitted_;
static const int kTrampolineSlotsSize = 2 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
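The new constant records the much larger reach of R6 compact branches: bc/balc carry a 26-bit signed instruction offset, i.e. 28 significant bits once converted to bytes. A quick check of the arithmetic:

// (1 << (28 - 1)) - 1 == 2^27 - 1 bytes of forward reach for compact
// branches, versus 2^17 - 1 for the ordinary 16-bit branch offset above.
static_assert((1 << (28 - 1)) - 1 == 134217727, "compact branch reach");
static_assert((1 << (18 - 1)) - 1 == 131071, "regular branch reach");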
// Internal reference positions, required for unbounded internal reference
// labels.
std::set<int64_t> internal_reference_positions_;
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
Trampoline trampoline_;
bool internal_trampoline_exception_;
@@ -1507,6 +1502,7 @@ class EnsureSpace BASE_EMBEDDED {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS64_ASSEMBLER_MIPS64_H_
diff --git a/chromium/v8/src/mips64/builtins-mips64.cc b/chromium/v8/src/mips64/builtins-mips64.cc
index a736019da1c..3a9980beabd 100644
--- a/chromium/v8/src/mips64/builtins-mips64.cc
+++ b/chromium/v8/src/mips64/builtins-mips64.cc
@@ -22,8 +22,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
- // -- a1 : called function (only guaranteed when
- // -- extra_args requires it)
+ // -- a1 : target
+ // -- a3 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[8 * (argc - 1)] : first argument
@@ -35,21 +35,31 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(a1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(a1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(a3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(a1, a3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
__ Daddu(a0, a0, num_extra_args + 1);
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -57,32 +67,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ ld(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ ld(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ ld(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
- __ ld(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
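Both loaders collapse the old three-load chain (context, then global object, then native context, then slot) into the LoadNativeContextSlot macro. A conceptual sketch with assumed field names, not V8's real data layout:

struct Context {
  Context* native_context;  // Assumed direct link for illustration.
  void* slots[128];
};

// What the macro-instruction boils down to at call sites.
void* LoadNativeContextSlot(Context* cp, int index) {
  return cp->native_context->slots[index];
}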
@@ -148,6 +141,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ __ Drop(2);
+ }
+
+ // 2a. Convert first argument to number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ Move(v0, Smi::FromInt(0));
+ __ DropAndRet(1);
+}
+
+
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ __ Drop(2);
+ __ jmp(&done);
+ __ bind(&no_arguments);
+ __ Move(a0, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure a0 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(a0, &done_convert);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(a0, v0);
+ __ Pop(a1, a3);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
+}
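The two Number stubs mirror the spec's split between calling and constructing: Number(x) returns a primitive, new Number(x) allocates a JSValue wrapper on the fast path, and a differing new.target (a derived constructor) falls back to Runtime::kNewObject. A rough, purely expository C++ model of that dispatch (names hypothetical):

enum class CallKind { kCall, kConstructSame, kConstructDerived };

// Mirrors the three exits of the stubs above.
const char* NumberConstructorPath(CallKind kind) {
  switch (kind) {
    case CallKind::kCall:
      return "ToNumber; return the primitive";
    case CallKind::kConstructSame:
      return "AllocateJSValue; store the number at kValueOffset";
    case CallKind::kConstructDerived:
      return "Runtime::kNewObject; store the number afterwards";
  }
  return nullptr;
}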
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -201,7 +295,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(a0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -210,12 +304,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a3 : new target
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
- // 1. Load the first argument into a0 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -232,7 +330,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure a0 is a string.
+ // 3. Make sure a0 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
@@ -243,60 +341,50 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ Push(a1);
+ __ Push(a1, a3);
__ CallStub(&stub);
__ Move(a0, v0);
- __ Pop(a1);
+ __ Pop(a1, a3);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- a0 : the first argument
- // -- a1 : constructor function
- // -- ra : return address
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
- Label allocate, done_allocate;
- __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(a1, a2, a3);
- __ sd(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Move(a2, Smi::FromInt(JSValue::kSize));
- __ Push(a0, a1, a2);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(a0, a1);
- }
- __ jmp(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- // Push call kind information and function as parameter to the runtime call.
- __ Push(a1, a1);
+ // Push a copy of the target function and the new target.
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
- // Restore call kind information and receiver.
- __ Pop(a1);
+ // Restore target function and new target.
+ __ Pop(a1, a3);
}
@@ -333,12 +421,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : allocation site or undefined
- // -- a3 : original constructor
+ // -- a3 : new target
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -352,164 +441,158 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(a2, a0, a1, a3);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ ld(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
-
- // Fall back to runtime if the original constructor and function differ.
- __ Branch(&rt_call, ne, a1, Operand(a3));
-
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwu(a4, bit_field3);
- __ DecodeField<Map::Counter>(a6, a4);
- __ Branch(&allocate, lt, a6,
- Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
- // Decrease generous allocation count.
- __ Dsubu(a4, a4, Operand(1 << Map::Counter::kShift));
- __ Branch(USE_DELAY_SLOT, &allocate, ne, a6,
- Operand(Map::kSlackTrackingCounterEnd));
- __ sw(a4, bit_field3); // In delay slot.
-
- __ Push(a1, a2, a1); // a1 = Constructor.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(a1, a2);
- __ li(a6, Operand(Map::kSlackTrackingCounterEnd - 1));
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- Label rt_call_reload_new_target;
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
- __ Allocate(a3, t0, t1, t2, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t0: JSObject (not tagged)
- __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t1, t0);
- __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
- __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
- __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
- __ Daddu(t1, t1, Operand(3*kPointerSize));
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t0: JSObject (not tagged)
- // t1: First in-object property of JSObject (not tagged)
- // a6: slack tracking counter (non-API function case)
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-
- // Use t3 to hold undefined, which is used in several places below.
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ Branch(&no_inobject_slack_tracking, lt, a6,
- Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
-
- // Allocate object with a slack.
- __ lbu(
- a0,
- FieldMemOperand(
- a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ dsubu(a0, a0, a2);
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a0, t1, at);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ dsll(at, a3, kPointerSizeLog2);
- __ Daddu(t2, t0, Operand(at)); // End of object.
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
- a0, Operand(t2));
+ __ Push(a2, a0);
+
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ GetObjectType(a3, a5, a4);
+ __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it is in fact a map.
+ // a3: new target
+ __ ld(a2,
+ FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &rt_call);
+ __ GetObjectType(a2, t1, t0);
+ __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
+
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&rt_call, ne, a1, Operand(a5));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Allocate(a4, t0, a4, t2, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: new target
+ // t0: JSObject (not HeapObject tagged - the actual address).
+ // a4: start of next object
+ __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t1, t0);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+ __ Daddu(t1, t1, Operand(3 * kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ Daddu(t0, t0, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with appropriate filler.
+ // t0: JSObject (tagged)
+ // t1: First in-object property of JSObject (not tagged)
+ __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwu(t2, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(a6, t2);
+ // a6: slack tracking counter
+ __ Branch(&no_inobject_slack_tracking, lt, a6,
+ Operand(Map::kSlackTrackingCounterEnd));
+ // Decrease generous allocation count.
+ __ Dsubu(t2, t2, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(t2, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ dsubu(a0, a4, a0);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t1,
+ Operand(a0));
+ }
+ __ InitializeFieldsWithFiller(t1, a0, t3);
+
+ // To allow truncation, fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(t1, a4, t3);
+
+ // a6: slack tracking counter value before decreasing.
+ __ Branch(&allocated, ne, a6, Operand(Map::kSlackTrackingCounterEnd));
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(a1, a3, t0, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(a1, a3, t0);
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t0: JSObject
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(t1, a0, t3);
- // To allow for truncation.
- __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
+ __ InitializeFieldsWithFiller(t1, a4, t3);
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t0: JSObject
+ __ jmp(&allocated);
}
- __ dsll(at, a3, kPointerSizeLog2);
- __ Daddu(a0, t0, Operand(at)); // End of object.
- __ InitializeFieldsWithFiller(t1, a0, t3);
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ // a3: new target
+ __ bind(&rt_call);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Daddu(t0, t0, Operand(kHeapObjectTag));
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(a1, a3, a1, a3); // constructor function, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(t0, v0);
+ __ Pop(a1, a3);
- // Continue with JSObject being successfully allocated.
- // a4: JSObject
- __ jmp(&allocated);
+ // Receiver for constructor call allocated.
+ // a1: constructor function
+ // a3: new target
+ // t0: JSObject
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ ld(a3, MemOperand(sp, 0 * kPointerSize));
+ __ ld(a0, MemOperand(sp));
}
-
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: original constructor
- __ bind(&rt_call);
-
- __ Push(a1, a3); // arguments 2-3 / 1-2
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(t0, v0);
-
- // Receiver for constructor call allocated.
- // t0: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(a3); // new.target
- __ Pop(a1);
-
- __ ld(a0, MemOperand(sp));
__ SmiUntag(a0);
- __ Push(a3, t0, t0);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(t0, t0);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -518,26 +601,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a0: number of arguments
// a1: constructor function
// a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
+ // a3: new target
+ // t0: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ mov(a3, a0);
+ __ mov(t0, a0);
__ jmp(&entry);
__ bind(&loop);
- __ dsll(a4, a3, kPointerSizeLog2);
+ __ dsll(a4, t0, kPointerSizeLog2);
__ Daddu(a4, a2, Operand(a4));
__ ld(a5, MemOperand(a4));
__ push(a5);
__ bind(&entry);
- __ Daddu(a3, a3, Operand(-1));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ __ Daddu(t0, t0, Operand(-1));
+ __ Branch(&loop, greater_equal, t0, Operand(zero_reg));
// Call the function.
// a0: number of arguments
// a1: constructor function
+ // a3: new target
if (is_api_function) {
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
@@ -545,47 +629,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ld(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(v0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a1, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ld(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ ld(a1, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -593,106 +680,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiScale(a4, a1, kPointerSizeLog2);
__ Daddu(sp, sp, a4);
__ Daddu(sp, sp, kPointerSize);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ }
__ Ret();
}
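The use_receiver/exit logic above implements the ECMA-262 rule (section 13.2.2-7 in the edition cited) that a constructor's return value replaces the implicit receiver only when it is an object. Reduced to a scalar sketch, with a stand-in Value type:

struct Value {
  bool is_js_receiver;  // False for smis and non-receiver heap objects.
};

// Keep the constructor's result if it is an object; otherwise fall back
// to the freshly allocated receiver.
const Value& SelectConstructResult(const Value& result,
                                   const Value& receiver) {
  return result.is_js_receiver ? result : receiver;
}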
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a2 : allocation site or undefined
- // -- a3 : original constructor
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(a2, t0);
- __ push(a2);
-
- __ mov(a4, a0);
- __ SmiTag(a4);
- __ push(a4); // Smi-tagged arguments count.
-
- // Push new.target.
- __ push(a3);
-
- // receiver is the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ push(at);
-
- // Set up pointer to last argument.
- __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, entry;
- __ SmiUntag(a4);
- __ jmp(&entry);
- __ bind(&loop);
- __ dsll(at, a4, kPointerSizeLog2);
- __ Daddu(at, a2, Operand(at));
- __ ld(at, MemOperand(at));
- __ push(at);
- __ bind(&entry);
- __ Daddu(a4, a4, Operand(-1));
- __ Branch(&loop, ge, a4, Operand(zero_reg));
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ li(a2, Operand(debug_step_in_fp));
- __ ld(a2, MemOperand(a2));
- __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
-
- __ Push(a0, a1, a1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(a0, a1);
-
- __ bind(&skip_step_in);
-
-
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // v0: result
- // sp[0]: new.target
- // sp[1]: number of arguments (smi-tagged)
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ ld(a1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame.
- }
- __ SmiScale(at, a1, kPointerSizeLog2);
- __ Daddu(sp, sp, Operand(at));
- __ Daddu(sp, sp, Operand(kPointerSize));
- __ Jump(ra);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -720,7 +733,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -822,6 +835,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o a1: the JS function object being called.
+// o a3: the new target
// o cp: our context
// o fp: the caller's frame pointer
// o sp: stack pointer
@@ -839,6 +853,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Push(a3);
+
+ // Push zero for bytecode array offset.
+ __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -867,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Dsubu(a5, sp, Operand(a4));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, a5, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -889,36 +907,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Dsubu(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Daddu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -960,36 +965,167 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ Daddu(a3, a0, Operand(1)); // Add one for receiver.
+ __ dsll(a3, a3, kPointerSizeLog2);
+ __ Dsubu(a3, a2, Operand(a3));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ ld(t0, MemOperand(a2));
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ push(t0);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(a3));
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
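The loop bound a3 is computed as first_argument - (argc + 1) * 8, so the walk covers argc arguments plus the receiver slot. The same arithmetic as plain C++ (8-byte stack slots assumed):

#include <cstdint>

// Address just below the last value to push; the loop in the stub runs
// while the cursor is strictly above this boundary.
uint64_t PushLoopBoundary(uint64_t first_argument, uint64_t argc) {
  const uint64_t kPointerSize = 8;
  return first_argument - (argc + 1) * kPointerSize;
}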
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(a1, a1);
- // Whether to compile in a background thread.
- __ LoadRoot(
- at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(at);
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : new target
+ // -- a1 : constructor to call
+ // -- a2 : address of the first argument
+ // -----------------------------------
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(a1);
+ // Find the address of the last argument.
+ __ dsll(t0, a0, kPointerSizeLog2);
+ __ Dsubu(t0, a2, Operand(t0));
+
+ // Push a slot for the receiver.
+ __ push(zero_reg);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ ld(t1, MemOperand(a2));
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ push(t1);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(t0));
+
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ Daddu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ ld(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ ld(a1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ ld(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ Daddu(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a1, MemOperand(a1));
+ __ dsll(a1, a1, kPointerSizeLog2);
+ __ Daddu(a1, kInterpreterDispatchTableRegister, a1);
+ __ ld(a1, MemOperand(a1));
+ __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a1);
+}
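The tail of the deopt helper re-enters the dispatch loop: load the byte at the current bytecode offset, scale it by the pointer size, index the dispatch table, and jump past the Code header. The table lookup as a standalone sketch (types assumed; the Code-header adjustment is omitted):

#include <cstddef>
#include <cstdint>

using BytecodeHandler = void (*)();

// Equivalent of the lbu / dsll / ld sequence above.
BytecodeHandler SelectHandler(const uint8_t* bytecode_array, size_t offset,
                              BytecodeHandler* dispatch_table) {
  return dispatch_table[bytecode_array[offset]];
}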
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1009,8 +1145,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1048,8 +1185,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1089,7 +1227,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1115,7 +1253,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> a6.
@@ -1157,6 +1295,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers {t2, t3, a4, a5}.
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = t2;
+ Register map = t3;
+ Register constructor = a4;
+ Register scratch = a5;
+
+ // If there is no signature, return the holder.
+ __ ld(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ Label next_prototype;
+ __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ Register type = constructor;
+ __ ld(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ Branch(&receiver_check_passed, eq, signature, Operand(type),
+ USE_DELAY_SLOT);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ GetObjectType(type, scratch, scratch);
+ __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+
+ // Otherwise load the parent function template and iterate.
+ __ ld(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ Branch(&function_template_loop);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lwu(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+ // Iterate.
+ __ Branch(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
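CompatibleReceiverCheck walks two chains at once: the receiver's prototype chain, and at each step the constructor's chain of parent function templates, looking for the signature; it gives up once the next prototype is null or not hidden. A simplified model with hypothetical types, not V8's object layout:

struct TemplateInfo {
  const TemplateInfo* parent;
};

struct HeapObj {
  const HeapObj* prototype;
  bool hidden_prototype;
  const TemplateInfo* constructor_template;  // May be null.
};

bool IsCompatibleReceiver(const HeapObj* receiver,
                          const TemplateInfo* signature) {
  const HeapObj* o = receiver;
  while (true) {
    // Walk the chain of inheriting function templates on this object.
    for (const TemplateInfo* t = o->constructor_template; t != nullptr;
         t = t->parent) {
      if (t == signature) return true;
    }
    // Move to the next prototype; give up if it is null or not hidden.
    o = o->prototype;
    if (o == nullptr || !o->hidden_prototype) return false;
  }
}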
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : callee
+ // -- ra : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[8 * (argc - 1)] : first argument
+ // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ ld(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check
+ Label receiver_check_failed;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Daddu(t8, sp, at);
+ __ ld(t0, MemOperand(t8));
+ CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ ld(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset));
+ __ ld(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset));
+ __ Daddu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t2);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver).
+ __ Daddu(t8, t8, Operand(kPointerSize));
+ __ daddu(sp, t8, zero_reg);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1164,7 +1405,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1197,7 +1438,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1208,7 +1449,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into a0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(a0);
+ __ JumpIfSmi(a0, &receiver_not_date);
+ __ GetObjectType(a0, t0, t0);
+ __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot.
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ ld(a1, MemOperand(a1));
+ __ ld(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
+ __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(
+ a0, JSDate::kValueOffset +
+ field_index * kPointerSize)); // In delay slot.
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, t0);
+ __ li(a1, Operand(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
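The fast path above is valid only while the JSDate's cache stamp matches the isolate-wide stamp, which changes whenever cached date computations may have been invalidated; otherwise the stub calls the C++ date-field function. The check, reduced to plain C++ (names assumed):

#include <cstdint>

int64_t GetCachedDateField(int64_t object_stamp, int64_t isolate_stamp,
                           const int64_t* cached_fields, int field_index,
                           int64_t (*slow_path)(int)) {
  if (object_stamp == isolate_stamp) {
    return cached_fields[field_index];  // Cache still valid: fast path.
  }
  return slow_path(field_index);  // Recompute through the runtime.
}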
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into a1, argArray into a0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg;
+ Register scratch = a4;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ mov(a3, a2);
+ __ dsll(scratch, a0, kPointerSizeLog2);
+ __ Daddu(a0, sp, Operand(scratch));
+ __ ld(a1, MemOperand(a0)); // receiver
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a2, MemOperand(a0)); // thisArg
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a3, MemOperand(a0)); // argArray
+ __ bind(&no_arg);
+ __ Daddu(sp, sp, Operand(scratch));
+ __ sd(a2, MemOperand(sp));
+ __ mov(a0, a3);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a0 : argArray
+ // -- a1 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(a1, &receiver_not_callable);
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsCallable));
+ __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ sd(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
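Step 1 of Function.prototype.apply pops a variable number of stack slots: the receiver is always present, while thisArg and argArray default to undefined when absent. The same selection in scalar form (stack layout simplified to an array, values modeled as plain ints):

#include <cstddef>

struct ApplyOperands {
  int receiver;
  int this_arg;
  int arg_array;
};

// args[0] is the receiver; args[1] and args[2] are the optional
// operands, present only when argc is large enough.
ApplyOperands SelectApplyOperands(const int* args, size_t argc,
                                  int undefined) {
  ApplyOperands out{args[0], undefined, undefined};
  if (argc >= 1) out.this_arg = args[1];
  if (argc >= 2) out.arg_array = args[2];
  return out;
}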
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
{
@@ -1252,190 +1613,145 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ ld(key, MemOperand(fp, indexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array.
- __ bind(&loop);
- __ ld(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- __ ld(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ ld(key, MemOperand(fp, indexOffset));
- __ Daddu(key, key, Operand(Smi::FromInt(1)));
- __ sd(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ld(a1, MemOperand(fp, limitOffset));
- __ Branch(&loop, ne, key, Operand(a1));
-
- // On exit, the pushed arguments count is in a0, untagged
- __ mov(a0, key);
- __ SmiUntag(a0);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[8] : thisArgument
+ // -- sp[16] : target
+ // -- sp[24] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- __ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ ld(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array.
- __ Push(a0, a1);
-
- // Returns (in v0) number of arguments to copy to stack as Smi.
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- // Returns the result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ mov(a1, zero_reg);
- __ ld(a2, MemOperand(fp, kReceiverOffset));
- __ Push(v0, a1, a2); // limit, initial index and receiver.
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ ld(a1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-
- // Tear down the internal frame and remove function, receiver and args.
+ Label no_arg;
+ Register scratch = a4;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ mov(a3, a1);
+ __ dsll(scratch, a0, kPointerSizeLog2);
+ __ mov(a0, scratch);
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(zero_reg));
+ __ Daddu(a0, sp, Operand(a0));
+ __ ld(a1, MemOperand(a0)); // target
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a2, MemOperand(a0)); // thisArgument
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a3, MemOperand(a0)); // argumentsList
+ __ bind(&no_arg);
+ __ Daddu(sp, sp, Operand(scratch));
+ __ sd(a2, MemOperand(sp));
+ __ mov(a0, a3);
}
- __ Ret(USE_DELAY_SLOT);
- __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
-}
-
-
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(a1, &target_not_callable);
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsCallable));
+ __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
+
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ ld(a0, MemOperand(fp, kNewTargetOffset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&validate_arguments, ne, a0, Operand(at));
- __ ld(a0, MemOperand(fp, kFunctionOffset));
- __ sd(a0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ ld(a0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(a0);
- __ ld(a0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(a0);
- __ ld(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(a0);
- // Returns argument count in v0.
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- // Returns result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(v0); // limit
- __ mov(a1, zero_reg); // initial index
- __ push(a1);
- // Push the constructor function as callee.
- __ ld(a0, MemOperand(fp, kFunctionOffset));
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ ld(a1, MemOperand(fp, kFunctionOffset));
- __ ld(a4, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ __ sd(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ jr(ra);
- __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
}
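
The callable test used by this builtin and by Function.prototype.apply above compiles down to a map load, a byte load, and a mask. Schematically, with hypothetical field names standing in for the Map layout:

    #include <cstdint>

    struct MapModel {
      uint8_t bit_field;   // packed flags; one bit records "has [[Call]]"
    };
    struct HeapObjectModel {
      MapModel* map;
    };

    constexpr int kIsCallableBit = 4;  // illustrative bit position

    bool IsCallable(const HeapObjectModel* obj) {
      // Mirrors the sequence: ld map, lbu bit_field, And with the mask.
      return (obj->map->bit_field & (1 << kIsCallableBit)) != 0;
    }
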
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[8] : argumentsList
+ // -- sp[16] : target
+ // -- sp[24] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and leave undefined
+ // in the receiver slot on the stack instead.
+ {
+ Label no_arg;
+ Register scratch = a4;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ dsll(scratch, a0, kPointerSizeLog2);
+ __ Daddu(a0, sp, Operand(scratch));
+ __ sd(a2, MemOperand(a0)); // receiver
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a1, MemOperand(a0)); // target
+ __ mov(a3, a1); // new.target defaults to target
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a2, MemOperand(a0)); // argumentsList
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a3, MemOperand(a0)); // new.target
+ __ bind(&no_arg);
+ __ Daddu(sp, sp, Operand(scratch));
+ __ mov(a0, a2);
+ }
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a3 : new.target
+ // -- a1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(a1, &target_not_constructor);
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsConstructor));
+ __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
+
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsConstructor));
+ __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ sd(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ sd(a3, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
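
The mov(a3, a1) before the optional load is what gives Reflect.construct its new.target default. In C++ terms, with Construct and Value as illustrative stand-ins:

    struct Value;
    Value* Construct(Value* target, Value* arguments_list, Value* new_target);

    Value* ReflectConstruct(Value* target, Value* arguments_list,
                            Value* maybe_new_target /* null if absent */) {
      // new.target defaults to target when the third argument is missing,
      // exactly as the register shuffle above arranges.
      Value* new_target = maybe_new_target ? maybe_new_target : target;
      return Construct(target, arguments_list, new_target);
    }
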
@@ -1445,6 +1761,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- a0 : actual number of arguments
// -- a1 : function (passed through to callee)
// -- a2 : expected number of arguments
+ // -- a3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1488,72 +1805,208 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(a0, &create_runtime);
+
+ // Load the map of argumentsList into a2.
+ __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+
+ // Load native context into a4.
+ __ ld(a4, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ ld(at, ContextMemOperand(a4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+ __ ld(at, ContextMemOperand(a4, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+
+ // Check if argumentsList is a fast JSArray.
+ __ ld(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3, a0);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ mov(a0, v0);
+ __ Pop(a1, a3);
+ __ ld(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
+ }
+ __ Branch(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ ld(a2,
+ FieldMemOperand(a0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
+ __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ Branch(&create_runtime, ne, a2, Operand(at));
+ __ SmiUntag(a2);
+ __ mov(a0, a4);
+ __ Branch(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ ld(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(a2);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
+ __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ ld(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+ __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ SmiUntag(a2);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(a4, Heap::kRealStackLimitRootIndex);
+ // Make a4 the space we have left. The stack might already be overflowed
+ // here, which will cause a4 to become negative.
+ __ Dsubu(a4, sp, a4);
+ // Check if the arguments will overflow the stack.
+ __ dsll(at, a2, kPointerSizeLog2);
+ __ Branch(&done, gt, a4, Operand(at)); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : args (a FixedArray built from argumentsList)
+ // -- a2 : len (number of elements to push from args)
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ mov(a4, zero_reg);
+ Label done, loop;
+ __ bind(&loop);
+ __ Branch(&done, eq, a4, Operand(a2));
+ __ dsll(at, a4, kPointerSizeLog2);
+ __ Daddu(at, a0, at);
+ __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+ __ Push(at);
+ __ Daddu(a4, a4, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done);
+ __ Move(a0, a4);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ Label construct;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&construct, ne, a3, Operand(at));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ bind(&construct);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
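
Stripped of the element-kind fast paths and delay slots, Generate_Apply has a simple shape. A hedged C++ outline under the assumption that the helpers below exist (all names illustrative):

    #include <vector>

    struct Value { bool is_undefined; };

    std::vector<Value> CreateListFromArrayLike(const Value& arguments_list);
    void CallBuiltin(const Value& target, const std::vector<Value>& args);
    void ConstructBuiltin(const Value& target, const Value& new_target,
                          const std::vector<Value>& args);

    void Apply(const Value& target, const Value& arguments_list,
               const Value& new_target) {
      // Unmodified arguments objects and fast JSArrays are unpacked inline;
      // anything else takes the &create_runtime path shown above.
      std::vector<Value> args = CreateListFromArrayLike(arguments_list);
      // A real-stack-limit check guards the len * kPointerSize of pushes.
      if (new_target.is_undefined) {
        CallBuiltin(target, args);                   // tail call to Call
      } else {
        ConstructBuiltin(target, new_target, args);  // tail call to Construct
      }
    }
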
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(a1);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
__ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
{
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
- __ ld(a3, MemOperand(at));
-
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
// -- a2 : the shared function info.
- // -- a3 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(a3, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ GetObjectType(a3, a4, a4);
- __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
- __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ ld(a3, MemOperand(at));
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a3, v0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ Branch(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a0);
- __ Push(a0, a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a3, v0);
- __ Pop(a0, a1);
- __ SmiUntag(a0);
- }
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ dsll(at, a0, kPointerSizeLog2);
__ daddu(at, sp, at);
__ sd(a3, MemOperand(at));
@@ -1569,15 +2022,118 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
- __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
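
The three ConvertReceiverMode variants trade checks for speed: when the call site already knows the receiver is null or undefined, patching in the global proxy needs no type checks at all. A sketch of the decision tree, with helper names assumed rather than taken from V8:

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };

    struct Value;
    bool IsJSReceiver(Value* v);
    bool IsNullOrUndefined(Value* v);
    Value* ToObject(Value* v);  // may allocate; the builtin builds a frame for it

    Value* ConvertReceiver(ConvertReceiverMode mode, Value* receiver,
                           Value* global_proxy) {
      if (mode == ConvertReceiverMode::kNullOrUndefined) {
        return global_proxy;       // statically known: no checks emitted
      }
      if (IsJSReceiver(receiver)) return receiver;  // already an object
      if (mode != ConvertReceiverMode::kNotNullOrUndefined &&
          IsNullOrUndefined(receiver)) {
        return global_proxy;       // the convert_global_proxy patch above
      }
      return ToObject(receiver);   // the convert_to_object slow path
    }
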
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ dsll(a4, a0, kPointerSizeLog2);
+ __ daddu(a4, a4, sp);
+ __ sd(at, MemOperand(a4));
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ Dsubu(sp, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Daddu(sp, sp, Operand(a5));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(a5, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, gt, a5, Operand(a0));
+ __ dsll(a6, a4, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ ld(at, MemOperand(a6));
+ __ dsll(a6, a5, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ sd(at, MemOperand(a6));
+ __ Daddu(a4, a4, Operand(1));
+ __ Daddu(a5, a5, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+ __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Dsubu(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ daddu(a5, a5, a2);
+ __ ld(at, MemOperand(a5));
+ __ dsll(a5, a0, kPointerSizeLog2);
+ __ daddu(a5, a5, sp);
+ __ sd(at, MemOperand(a5));
+ __ Daddu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ ld(at, MemOperand(at));
+ __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -1587,15 +2143,20 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ ld(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(a1);
- __ Branch(&non_smi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(a1);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ Daddu(a0, a0, 2);
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1609,15 +2170,17 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ daddu(at, sp, at);
__ sd(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
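
Putting the pieces together, the dispatch in Generate_Call reads as a switch on instance type followed by a callable check. Roughly, with helper names assumed for illustration:

    enum class Mode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    enum InstanceType { JS_FUNCTION, JS_BOUND_FUNCTION, JS_PROXY, OTHER };

    struct Value;
    InstanceType TypeOf(Value* v);
    bool IsSmi(Value* v);
    bool IsCallable(Value* v);
    [[noreturn]] void ThrowCalledNonCallable(Value* v);
    void CallFunction(Value* v, Mode mode);
    void CallBoundFunction(Value* v);
    void RuntimeJSProxyCall(Value* v);
    Value* CallAsFunctionDelegate(Value* v);

    void Call(Value* target, Mode mode) {
      if (IsSmi(target)) ThrowCalledNonCallable(target);
      switch (TypeOf(target)) {
        case JS_FUNCTION:       return CallFunction(target, mode);
        case JS_BOUND_FUNCTION: return CallBoundFunction(target);
        case JS_PROXY:          return RuntimeJSProxyCall(target);  // runtime
        default:                break;
      }
      if (!IsCallable(target)) ThrowCalledNonCallable(target);
      // Exotic callables go through the delegate; since the object itself
      // becomes the receiver, it is known not to be null or undefined.
      CallFunction(CallAsFunctionDelegate(target), Mode::kNotNullOrUndefined);
    }
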
@@ -1626,10 +2189,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (checked to be a JSFunction)
- // -- a3 : the original constructor (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(a1);
- __ AssertFunction(a3);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
@@ -1645,17 +2207,117 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ Dsubu(sp, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Daddu(sp, sp, Operand(a5));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(a5, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, ge, a5, Operand(a0));
+ __ dsll(a6, a4, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ ld(at, MemOperand(a6));
+ __ dsll(a6, a5, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ sd(at, MemOperand(a6));
+ __ Daddu(a4, a4, Operand(1));
+ __ Daddu(a5, a5, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+ __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Dsubu(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ daddu(a5, a5, a2);
+ __ ld(at, MemOperand(a5));
+ __ dsll(a5, a0, kPointerSizeLog2);
+ __ daddu(a5, a5, sp);
+ __ sd(at, MemOperand(a5));
+ __ Daddu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ ld(at, MemOperand(at));
+ __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
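
The skip_load block is the only subtlety here: new.target is redirected exactly when it was the bound function itself. As a one-liner, with illustrative names:

    struct Value;

    Value* EffectiveNewTarget(Value* bound_function, Value* new_target,
                              Value* bound_target_function) {
      // If new.target == the bound function, construct as if new had been
      // applied to [[BoundTargetFunction]]; otherwise keep the caller's value.
      return new_target == bound_function ? bound_target_function : new_target;
    }
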
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a1 : the constructor to call (checked to be a JSProxy)
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ ld(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(a1, a3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ Daddu(a0, a0, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1664,24 +2326,33 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (can be any Object)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(a1, &non_constructor);
- __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t2, t2, Operand(1 << Map::kIsCallable));
- __ Branch(&non_constructor, eq, t2, Operand(zero_reg));
// Dispatch based on instance type.
+ __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
+ // Check if target has a [[Construct]] internal method.
+ __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+ eq, t2, Operand(JS_PROXY_TYPE));
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
@@ -1690,7 +2361,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ daddu(at, sp, at);
__ sd(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1698,40 +2369,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
-}
-
-
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : the number of arguments (not including the receiver)
- // -- a2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- a1 : the target to call (can be any Object).
-
- // Find the address of the last argument.
- __ Daddu(a3, a0, Operand(1)); // Add one for receiver.
- __ dsll(a3, a3, kPointerSizeLog2);
- __ Dsubu(a3, a2, Operand(a3));
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ Branch(&loop_check);
- __ bind(&loop_header);
- __ ld(a4, MemOperand(a2));
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ push(a4);
- __ bind(&loop_check);
- __ Branch(&loop_header, gt, a2, Operand(a3));
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
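
The reordering in this hunk matters: JSFunctions take the fast exit before the map's bit field is ever loaded, while bound functions and proxies are only reached after the [[Construct]] bit check. Schematically, with assumed helper names:

    enum InstanceType { JS_FUNCTION, JS_BOUND_FUNCTION, JS_PROXY, OTHER };

    struct Value;
    InstanceType TypeOf(Value* v);
    bool IsSmi(Value* v);
    bool HasConstructorBit(Value* v);  // Map::kIsConstructor, loaded once
    void ConstructFunction(Value* t, Value* nt);
    void ConstructBoundFunction(Value* t, Value* nt);
    void ConstructProxy(Value* t, Value* nt);
    void ConstructDelegate(Value* t, Value* nt);
    [[noreturn]] void ConstructedNonConstructable(Value* v);

    void Construct(Value* target, Value* new_target) {
      if (IsSmi(target)) ConstructedNonConstructable(target);
      if (TypeOf(target) == JS_FUNCTION)
        return ConstructFunction(target, new_target);
      // Everything else must carry the [[Construct]] bit on its map.
      if (!HasConstructorBit(target)) ConstructedNonConstructable(target);
      if (TypeOf(target) == JS_BOUND_FUNCTION)
        return ConstructBoundFunction(target, new_target);
      if (TypeOf(target) == JS_PROXY)
        return ConstructProxy(target, new_target);
      ConstructDelegate(target, new_target);  // exotic [[Construct]] objects
    }
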
@@ -1741,14 +2380,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
+ // -- a3: new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless as the number of argument should always be greater than 0.
@@ -1758,9 +2395,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a4.
__ SmiScale(a0, a0, kPointerSizeLog2);
@@ -1775,7 +2413,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// a4: copy end address
Label copy;
@@ -1807,17 +2445,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a7.
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ SmiScale(a0, a0, kPointerSizeLog2);
__ Daddu(a0, fp, a0);
// Adjust for return address and receiver.
@@ -1829,7 +2468,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// a7: copy end address
Label copy;
__ bind(&copy);
@@ -1842,7 +2481,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
__ dsll(a6, a2, kPointerSizeLog2);
__ Dsubu(a4, fp, Operand(a6));
@@ -1862,7 +2501,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(a0, a2);
// a0 : expected number of arguments
// a1 : function (passed through to callee)
- __ Call(a3);
+ // a3: new target (passed through to callee)
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(a4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1876,13 +2517,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(a3);
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(a4);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ break_(0xCC);
}
}
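
The adaptor's two paths (enough and too_few) amount to resizing the argument list to the expected count before jumping to the callee's code entry. A compact model, assuming a sentinel in the spirit of kDontAdaptArgumentsSentinel:

    #include <cstddef>
    #include <vector>

    struct Value;
    constexpr size_t kDontAdaptSentinel = static_cast<size_t>(-1);

    std::vector<Value*> AdaptArguments(std::vector<Value*> actual,
                                       size_t expected, Value* undefined) {
      if (expected == kDontAdaptSentinel) return actual;  // dont_adapt_arguments
      if (actual.size() < expected) {
        actual.resize(expected, undefined);  // too_few: pad with undefined
      } else {
        actual.resize(expected);             // enough: callee sees expected args
      }
      return actual;
    }
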
diff --git a/chromium/v8/src/mips64/code-stubs-mips64.cc b/chromium/v8/src/mips64/code-stubs-mips64.cc
index d0c05ad0ccb..2531d6b3f1f 100644
--- a/chromium/v8/src/mips64/code-stubs-mips64.cc
+++ b/chromium/v8/src/mips64/code-stubs-mips64.cc
@@ -289,7 +289,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ GetObjectType(a0, t0, t0);
if (cc == less || cc == greater) {
// Call runtime on identical JSObjects.
- __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -305,7 +305,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -455,12 +455,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
+ // FIRST_JS_RECEIVER_TYPE.
__ GetObjectType(lhs, a2, a2);
- __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));
// Return non-zero.
Label return_not_equal;
@@ -473,7 +473,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
__ GetObjectType(rhs, a3, a3);
- __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
@@ -535,9 +535,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ li(v0, Operand(1)); // Non-zero indicates not equal.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -725,8 +725,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
@@ -740,9 +739,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -977,7 +975,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1064,13 +1062,21 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ //
+ // If argv_in_register():
+ // a2: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Compute the argv pointer in a callee-saved register.
- __ dsll(s1, a0, kPointerSizeLog2);
- __ Daddu(s1, sp, s1);
- __ Dsubu(s1, s1, kPointerSize);
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ dsll(s1, a0, kPointerSizeLog2);
+ __ Daddu(s1, sp, s1);
+ __ Dsubu(s1, s1, kPointerSize);
+ }
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1150,8 +1156,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- // s0: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // s0: still holds argc (callee-saved).
+ argc = s0;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -1459,15 +1472,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ ld(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(scratch,
- FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
- __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
// Get the "prototype" (or initial map) of the {function}.
__ ld(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1492,25 +1496,49 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ Register const result = v0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Branch(&done, eq, object_prototype, Operand(function_prototype));
- __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
- __ ld(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+
+ // Check if the object needs to be access checked.
+ __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
+ // Check if the current object is a Proxy.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ __ Branch(&fast_runtime_fallback, eq, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
+ __ ld(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
+ __ ld(object_map,
+ FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot.
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
__ Ret(USE_DELAY_SLOT);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // Slow-case: Call the runtime function.
+ __ StoreRoot(result,
+ Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ DCHECK(Smi::FromInt(0) == 0);
+ __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
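
The rewritten loop walks prototypes directly off the maps, bailing to the runtime only for proxies and access-checked objects, whose prototypes cannot be read with a raw map load. Approximately, with helpers assumed:

    struct Value;
    bool NeedsAccessCheck(Value* v);
    bool IsProxy(Value* v);
    bool IsNull(Value* v);
    Value* PrototypeOf(Value* v);  // the raw map->prototype load in the loop
    bool RuntimeHasInPrototypeChain(Value* object, Value* prototype);

    bool HasInPrototypeChain(Value* object, Value* prototype) {
      for (;;) {
        if (NeedsAccessCheck(object) || IsProxy(object)) {
          // fast_runtime_fallback: also invalidates the instanceof cache.
          return RuntimeHasInPrototypeChain(object, prototype);
        }
        object = PrototypeOf(object);
        if (object == prototype) return true;  // &done with the true root
        if (IsNull(object)) return false;      // chain ended: answer is false
      }
    }
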
@@ -1581,7 +1609,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1609,7 +1637,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1675,7 +1703,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, a4, t1, &runtime, TAG_OBJECT);
+ __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1685,8 +1713,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+ __ ld(a4, NativeContextMemOperand());
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kNormalOffset));
@@ -1824,7 +1851,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// a5 = argument count (tagged)
__ bind(&runtime);
__ Push(a1, a3, a5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1843,7 +1870,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1889,10 +1916,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
- __ ld(a4, MemOperand(a4, Context::SlotOffset(
- Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
__ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
@@ -1940,7 +1964,33 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // a4 : rest parameter index (tagged)
+ // Check if the calling frame is an arguments adaptor frame.
+
+ Label runtime;
+ __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a5, MemOperand(a0, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a5,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ld(a2, MemOperand(a0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(at, a2, kPointerSizeLog2);
+
+ __ Daddu(a3, a0, Operand(at));
+ __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ Push(a2, a3, a4);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1949,7 +1999,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2266,7 +2316,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, eq, v0, Operand(a1));
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2362,7 +2412,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2408,19 +2458,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
- // a4 : original constructor (for IsSuperConstructorCall)
FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- BoolToInt(is_super) << 8; // a4
+ const RegList kSavedRegs = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
// Number-of-arguments register must be smi-tagged to call out.
@@ -2434,7 +2481,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2442,7 +2489,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
- // a4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2483,7 +2529,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&megamorphic, ne, a1, Operand(a5));
__ jmp(&done);
@@ -2505,127 +2551,29 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&not_array_function, ne, a1, Operand(a5));
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
- // Do not transform the receiver for strict mode functions.
- int32_t strict_mode_function_mask =
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
- // Do not transform the receiver for native (Compilerhints already in a3).
- int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
-
- __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
- __ And(at, a4, Operand(strict_mode_function_mask));
- __ Branch(cont, ne, at, Operand(zero_reg));
- __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
- __ And(at, a4, Operand(native_mask));
- __ Branch(cont, ne, at, Operand(zero_reg));
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ li(a0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ mov(a0, a3);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(a1);
- }
- __ Branch(USE_DELAY_SLOT, cont);
- __ sd(v0, MemOperand(sp, argc * kPointerSize));
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // a1 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, a4, a4);
- __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
- }
-
- // Fast-case: Invoke the function now.
- // a1: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ ld(a3, MemOperand(sp, argc * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, a4, a4);
- __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- // Wrap the receiver and patch it back onto the stack.
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// a0 : number of arguments
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // a4 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2634,29 +2582,23 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetObjectType(a1, a5, a5);
__ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ dsrl(at, a3, 32 - kPointerSizeLog2);
- __ Daddu(a5, a2, at);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ dsrl(at, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a5, a2, at);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(a2, a5);
- }
+ __ AssertUndefinedOrAllocationSite(a2, a5);
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mov(a3, a4);
- } else {
- __ mov(a3, a1);
- }
+ // Pass function as new target.
+ __ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2716,7 +2658,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a3 - slot id
// a2 - vector
// a4 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
@@ -2739,13 +2681,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2782,34 +2718,17 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ ld(a3, MemOperand(sp, argc * kPointerSize));
-
- __ JumpIfSmi(a3, &wrap);
- __ GetObjectType(a3, a4, a4);
- __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call_function);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&slow_start, eq, a4, Operand(at));
+ __ Branch(&call, eq, a4, Operand(at));
// Verify that a4 contains an AllocationSite
__ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
@@ -2838,14 +2757,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(a4, a2, Operand(a4));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ ld(a4, FieldMemOperand(a2, with_types_offset));
- __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a2, with_types_offset));
- __ ld(a4, FieldMemOperand(a2, generic_offset));
- __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &slow_start);
- __ sd(a4, FieldMemOperand(a2, generic_offset)); // In delay slot.
+
+ __ bind(&call);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&uninitialized);
@@ -2858,13 +2775,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
__ Branch(&miss, eq, a1, Operand(a4));
- // Update stats.
- __ ld(a4, FieldMemOperand(a2, with_types_offset));
- __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a2, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
+ __ ld(t1, NativeContextMemOperand());
+ __ Branch(&miss, ne, t0, Operand(t1));
// Initialize the call counter.
__ dsrl(at, a3, 32 - kPointerSizeLog2);
@@ -2884,23 +2802,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(a1);
}
- __ Branch(&have_js_function);
+ __ Branch(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
-
- // Goto slow case if we do not have a function.
- __ GetObjectType(a1, a4, a4);
- __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
- __ Branch(&have_js_function);
+ __ Branch(&call);
}
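
One idiom worth flagging in the rewritten call paths above: on MIPS the instruction after a branch (the delay slot) executes before control transfers, which is why the stubs pass USE_DELAY_SLOT and then emit the argument-count load marked "// In delay slot.". A toy C++ model of that ordering, purely illustrative:

#include <functional>

// Models a MIPS branch with an explicitly filled delay slot: the slot
// instruction architecturally executes first, then the jump takes effect.
void JumpWithDelaySlot(const std::function<void()>& delay_slot,
                       const std::function<void()>& target) {
  delay_slot();  // e.g. li a0, argc
  target();      // e.g. the Call/CallFunction builtin
}
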
@@ -2911,7 +2820,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -2940,11 +2849,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
@@ -2973,7 +2882,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, v0);
@@ -3012,7 +2921,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, v0);
call_helper.AfterCall(masm);
@@ -3274,7 +3183,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// v0: original string
@@ -3319,7 +3228,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ mov(v0, a0);
__ bind(&slow_string);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3329,7 +3238,24 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in a0.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(a0, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&positive_smi, ge, a0, Operand(zero_reg));
+ __ mov(a0, zero_reg);
+ __ bind(&positive_smi);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_smi);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength);
}
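
The new stub above is the fast path of the ES2015 ToLength conversion: a negative Smi clamps to zero, a non-negative Smi is returned unchanged, and anything else falls through to Runtime::kToLength. The same Smi-only logic as a hedged C++ sketch (the helper name is hypothetical):

#include <cstdint>

// Scalar model of the ToLengthStub fast path. Only small integers are
// handled; other inputs would defer to the runtime, as the stub does via
// TailCallRuntime(Runtime::kToLength).
int64_t ToLengthSmiFastPath(int64_t smi_value) {
  if (smi_value < 0) return 0;  // Clamp negatives to zero.
  return smi_value;             // Non-negative values pass through.
}
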
@@ -3361,7 +3287,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3499,7 +3425,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3538,7 +3464,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
@@ -3831,9 +3757,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3841,18 +3767,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
__ GetObjectType(a0, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(a1, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
@@ -3861,7 +3788,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ And(a2, a1, a0);
@@ -3876,7 +3803,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3884,7 +3811,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(LESS)));
}
__ Push(a1, a0, a2);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -4373,11 +4300,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4400,73 +4327,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : element value to store
- // -- a3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers a1, a2, a4
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ ld(a4, MemOperand(sp, 0 * kPointerSize));
- __ ld(a1, MemOperand(sp, 1 * kPointerSize));
- __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
-
- __ CheckFastElements(a2, a5, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiElements(a2, a5, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(a1, a3, a0);
- __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
- __ Push(a5, a4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ SmiScale(a6, a3, kPointerSizeLog2);
- __ Daddu(a6, a5, a6);
- __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sd(a0, MemOperand(a6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ SmiScale(a6, a3, kPointerSizeLog2);
- __ Daddu(a6, a5, a6);
- __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, &slow_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5169,7 +5029,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- a0 : argc (only if argument_count() == ANY)
// -- a1 : constructor
// -- a2 : AllocationSite or undefined
- // -- a3 : original constructor
+ // -- a3 : new target
// -- sp[0] : last argument
// -----------------------------------
@@ -5191,6 +5051,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, a4);
}
+ // Enter the context of the Array function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -5210,26 +5073,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ Push(a1);
- __ Push(a3);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ li(at, Operand(2));
- __ addu(a0, a0, at);
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ Daddu(at, sp, at);
+ __ sd(a1, MemOperand(at));
+ __ li(at, Operand(3));
+ __ Daddu(a0, a0, at);
break;
case NONE:
- __ li(a0, Operand(2));
+ __ sd(a1, MemOperand(sp, 0 * kPointerSize));
+ __ li(a0, Operand(3));
break;
case ONE:
- __ li(a0, Operand(3));
+ __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ li(a0, Operand(4));
break;
}
-
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(a3, a2);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5315,14 +5178,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ ld(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = result_reg;
}
// Load the PropertyCell value at the specified slot.
__ dsll(at, slot_reg, kPointerSizeLog2);
__ Daddu(at, at, Operand(context_reg));
- __ ld(result_reg, ContextOperand(at, 0));
+ __ ld(result_reg, ContextMemOperand(at, 0));
__ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
// Check that value is not the_hole.
@@ -5334,7 +5197,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ bind(&slow_case);
__ SmiTag(slot_reg);
__ Push(slot_reg);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5354,14 +5217,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ ld(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = cell_reg;
}
// Load the PropertyCell at the specified slot.
__ dsll(at, slot_reg, kPointerSizeLog2);
__ Daddu(at, at, Operand(context_reg));
- __ ld(cell_reg, ContextOperand(at, 0));
+ __ ld(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ ld(cell_details_reg,
@@ -5448,8 +5311,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot_reg, value_reg);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5574,7 +5436,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/chromium/v8/src/mips64/code-stubs-mips64.h b/chromium/v8/src/mips64/code-stubs-mips64.h
index c54a3d07c50..fdaf4c80df0 100644
--- a/chromium/v8/src/mips64/code-stubs-mips64.h
+++ b/chromium/v8/src/mips64/code-stubs-mips64.h
@@ -141,9 +141,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
@@ -161,8 +160,8 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(),
- 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -344,6 +343,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODE_STUBS_MIPS64_H_
diff --git a/chromium/v8/src/mips64/codegen-mips64.cc b/chromium/v8/src/mips64/codegen-mips64.cc
index d30bdbb294b..022426e7d78 100644
--- a/chromium/v8/src/mips64/codegen-mips64.cc
+++ b/chromium/v8/src/mips64/codegen-mips64.cc
@@ -18,23 +18,22 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
+byte* fast_exp_mips_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = f12;
@@ -59,11 +58,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_mips_machine_code = buffer;
return &fast_exp_simulator;
@@ -72,7 +71,8 @@ UnaryMathFunction CreateExpFunction() {
#if defined(V8_HOST_ARCH_MIPS)
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@@ -80,11 +80,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes; if the cache line is
// larger, this code will not work correctly.
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
@@ -598,23 +599,24 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
__ sqrt_d(f0, f12);
@@ -625,9 +627,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
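
Note the changed contract in the two generators above: under the simulator, and on allocation failure, CreateExpFunction and CreateSqrtFunction now return nullptr instead of a libm pointer, so the fallback moves to the caller. A hedged sketch of that caller-side pattern (the wrapper and function-pointer type are illustrative; the real type is UnaryMathFunctionWithIsolate and takes an Isolate*):

#include <cmath>

using UnaryFnDemo = double (*)(double);

double SqrtWithFallback(UnaryFnDemo generated, double x) {
  // nullptr now means "no generated code available"; fall back to libm.
  return generated != nullptr ? generated(x) : std::sqrt(x);
}
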
@@ -1182,15 +1184,17 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the MIPS simulator ICache is set up.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->Push(ra, fp, cp, a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
@@ -1236,10 +1240,11 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
diff --git a/chromium/v8/src/mips64/codegen-mips64.h b/chromium/v8/src/mips64/codegen-mips64.h
index f79ad4e41ca..ad7abb30c55 100644
--- a/chromium/v8/src/mips64/codegen-mips64.h
+++ b/chromium/v8/src/mips64/codegen-mips64.h
@@ -7,7 +7,7 @@
#define V8_MIPS_CODEGEN_MIPS_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -46,6 +46,7 @@ class MathExpGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/chromium/v8/src/mips64/constants-mips64.cc b/chromium/v8/src/mips64/constants-mips64.cc
index efabfe4f264..c0e98eb623c 100644
--- a/chromium/v8/src/mips64/constants-mips64.cc
+++ b/chromium/v8/src/mips64/constants-mips64.cc
@@ -126,24 +126,28 @@ int FPURegisters::Number(const char* name) {
// -----------------------------------------------------------------------------
// Instructions.
-bool Instruction::IsForbiddenInBranchDelay() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
+ Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+ switch (opcode) {
case J:
case JAL:
case BEQ:
case BNE:
- case BLEZ:
- case BGTZ:
+ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
+ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
case BEQL:
case BNEL:
- case BLEZL:
- case BGTZL:
+ case BLEZL: // POP26 bgezc, blezc, bgec/blec
+ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
case BC:
case BALC:
+ case POP10: // beqzalc, bovc, beqc
+ case POP30: // bnezalc, bnvc, bnec
+ case POP66: // beqzc, jic
+ case POP76: // bnezc, jialc
return true;
case REGIMM:
- switch (RtFieldRaw()) {
+ switch (instr & kRtFieldMask) {
case BLTZ:
case BGEZ:
case BLTZAL:
@@ -154,7 +158,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
}
break;
case SPECIAL:
- switch (FunctionFieldRaw()) {
+ switch (instr & kFunctionFieldMask) {
case JR:
case JALR:
return true;
@@ -162,6 +166,17 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return false;
}
break;
+ case COP1:
+ switch (instr & kRsFieldMask) {
+ case BC1:
+ case BC1EQZ:
+ case BC1NEZ:
+ return true;
+ break;
+ default:
+ return false;
+ }
+ break;
default:
return false;
}
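
The refactor above turns the delay-slot legality test into a static predicate over a raw instruction word, so an assembler can screen an instruction before emitting it after a branch, including the forbidden slots that follow the new r6 compact branches. A self-contained sketch of the shape of such a check; the BEQ encoding and opcode mask are real MIPS values, the rest is simplified:

#include <cstdint>

constexpr uint32_t kOpcodeMaskDemo = 0xFC000000u;  // Opcode is bits 31..26.
constexpr uint32_t kBeqOpcode = 4u << 26;          // BEQ.

// A branch may not sit in the slot following another branch. The real
// predicate enumerates many opcodes; this demo checks just one.
bool IsForbiddenAfterBranchDemo(uint32_t instr) {
  return (instr & kOpcodeMaskDemo) == kBeqOpcode;
}
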
@@ -169,8 +184,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
bool Instruction::IsLinkingInstruction() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+ switch (OpcodeFieldRaw()) {
case JAL:
return true;
case POP76:
diff --git a/chromium/v8/src/mips64/constants-mips64.h b/chromium/v8/src/mips64/constants-mips64.h
index f23f103ac39..226e3ed5baa 100644
--- a/chromium/v8/src/mips64/constants-mips64.h
+++ b/chromium/v8/src/mips64/constants-mips64.h
@@ -119,8 +119,11 @@ const int kInvalidFPURegister = -1;
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1 << 31);
const uint64_t kFPU64InvalidResult =
static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -128,12 +131,14 @@ const uint32_t kFCSRUnderflowFlagBit = 3;
const uint32_t kFCSROverflowFlagBit = 4;
const uint32_t kFCSRDivideByZeroFlagBit = 5;
const uint32_t kFCSRInvalidOpFlagBit = 6;
+const uint32_t kFCSRNaN2008FlagBit = 18;
const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
@@ -232,6 +237,7 @@ const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
+const int kLsaSaBits = 2;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
@@ -298,351 +304,366 @@ const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
const int32_t kJalRawMark = 0x00000000;
const int32_t kJRawMark = 0xf0000000;
+const int32_t kJumpRawMask = 0xf0000000;
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
- DAUI = ((3 << 3) + 5) << kOpcodeShift,
-
- BEQC = ((2 << 3) + 0) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
- DADDIU = ((3 << 3) + 1) << kOpcodeShift,
- LDL = ((3 << 3) + 2) << kOpcodeShift,
- LDR = ((3 << 3) + 3) << kOpcodeShift,
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- LWU = ((4 << 3) + 7) << kOpcodeShift,
-
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SDL = ((5 << 3) + 4) << kOpcodeShift,
- SDR = ((5 << 3) + 5) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- BC = ((6 << 3) + 2) << kOpcodeShift,
- LLD = ((6 << 3) + 4) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
- POP66 = ((6 << 3) + 6) << kOpcodeShift,
- LD = ((6 << 3) + 7) << kOpcodeShift,
-
- PREF = ((6 << 3) + 3) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- BALC = ((7 << 3) + 2) << kOpcodeShift,
- PCREL = ((7 << 3) + 3) << kOpcodeShift,
- SCD = ((7 << 3) + 4) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
- POP76 = ((7 << 3) + 6) << kOpcodeShift,
- SD = ((7 << 3) + 7) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
+enum Opcode : uint32_t {
+ SPECIAL = 0U << kOpcodeShift,
+ REGIMM = 1U << kOpcodeShift,
+
+ J = ((0U << 3) + 2) << kOpcodeShift,
+ JAL = ((0U << 3) + 3) << kOpcodeShift,
+ BEQ = ((0U << 3) + 4) << kOpcodeShift,
+ BNE = ((0U << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0U << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0U << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1U << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1U << 3) + 1) << kOpcodeShift,
+ SLTI = ((1U << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1U << 3) + 3) << kOpcodeShift,
+ ANDI = ((1U << 3) + 4) << kOpcodeShift,
+ ORI = ((1U << 3) + 5) << kOpcodeShift,
+ XORI = ((1U << 3) + 6) << kOpcodeShift,
+ LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+ DAUI = ((3U << 3) + 5) << kOpcodeShift,
+
+ BEQC = ((2U << 3) + 0) << kOpcodeShift,
+ COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2U << 3) + 4) << kOpcodeShift,
+ BNEL = ((2U << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2U << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2U << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC.
+ DADDIU = ((3U << 3) + 1) << kOpcodeShift,
+ LDL = ((3U << 3) + 2) << kOpcodeShift,
+ LDR = ((3U << 3) + 3) << kOpcodeShift,
+ SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift,
+
+ LB = ((4U << 3) + 0) << kOpcodeShift,
+ LH = ((4U << 3) + 1) << kOpcodeShift,
+ LWL = ((4U << 3) + 2) << kOpcodeShift,
+ LW = ((4U << 3) + 3) << kOpcodeShift,
+ LBU = ((4U << 3) + 4) << kOpcodeShift,
+ LHU = ((4U << 3) + 5) << kOpcodeShift,
+ LWR = ((4U << 3) + 6) << kOpcodeShift,
+ LWU = ((4U << 3) + 7) << kOpcodeShift,
+
+ SB = ((5U << 3) + 0) << kOpcodeShift,
+ SH = ((5U << 3) + 1) << kOpcodeShift,
+ SWL = ((5U << 3) + 2) << kOpcodeShift,
+ SW = ((5U << 3) + 3) << kOpcodeShift,
+ SDL = ((5U << 3) + 4) << kOpcodeShift,
+ SDR = ((5U << 3) + 5) << kOpcodeShift,
+ SWR = ((5U << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6U << 3) + 1) << kOpcodeShift,
+ BC = ((6U << 3) + 2) << kOpcodeShift,
+ LLD = ((6U << 3) + 4) << kOpcodeShift,
+ LDC1 = ((6U << 3) + 5) << kOpcodeShift,
+ POP66 = ((6U << 3) + 6) << kOpcodeShift,
+ LD = ((6U << 3) + 7) << kOpcodeShift,
+
+ PREF = ((6U << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7U << 3) + 1) << kOpcodeShift,
+ BALC = ((7U << 3) + 2) << kOpcodeShift,
+ PCREL = ((7U << 3) + 3) << kOpcodeShift,
+ SCD = ((7U << 3) + 4) << kOpcodeShift,
+ SDC1 = ((7U << 3) + 5) << kOpcodeShift,
+ POP76 = ((7U << 3) + 6) << kOpcodeShift,
+ SD = ((7U << 3) + 7) << kOpcodeShift,
+
+ COP1X = ((1U << 4) + 3) << kOpcodeShift,
+
+ // New r6 instructions.
+ POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc
+ POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc
+ POP10 = ADDI, // beqzalc, bovc, beqc
+ POP26 = BLEZL, // bgezc, blezc, bgec/blec
+ POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc
+ POP30 = DADDI, // bnezalc, bnvc, bnec
};
-enum SecondaryField {
+enum SecondaryField : uint32_t {
// SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- CLZ_R6 = ((2 << 3) + 0),
- CLO_R6 = ((2 << 3) + 1),
- MFLO = ((2 << 3) + 2),
- DSLLV = ((2 << 3) + 4),
- DSRLV = ((2 << 3) + 6),
- DSRAV = ((2 << 3) + 7),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
- DMULT = ((3 << 3) + 4),
- DMULTU = ((3 << 3) + 5),
- DDIV = ((3 << 3) + 6),
- DDIVU = ((3 << 3) + 7),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
- DADD = ((5 << 3) + 4),
- DADDU = ((5 << 3) + 5),
- DSUB = ((5 << 3) + 6),
- DSUBU = ((5 << 3) + 7),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- SELEQZ_S = ((6 << 3) + 5),
- TNE = ((6 << 3) + 6),
- SELNEZ_S = ((6 << 3) + 7),
-
- DSLL = ((7 << 3) + 0),
- DSRL = ((7 << 3) + 2),
- DSRA = ((7 << 3) + 3),
- DSLL32 = ((7 << 3) + 4),
- DSRL32 = ((7 << 3) + 6),
- DSRA32 = ((7 << 3) + 7),
+ SLL = ((0U << 3) + 0),
+ MOVCI = ((0U << 3) + 1),
+ SRL = ((0U << 3) + 2),
+ SRA = ((0U << 3) + 3),
+ SLLV = ((0U << 3) + 4),
+ LSA = ((0U << 3) + 5),
+ SRLV = ((0U << 3) + 6),
+ SRAV = ((0U << 3) + 7),
+
+ JR = ((1U << 3) + 0),
+ JALR = ((1U << 3) + 1),
+ MOVZ = ((1U << 3) + 2),
+ MOVN = ((1U << 3) + 3),
+ BREAK = ((1U << 3) + 5),
+
+ MFHI = ((2U << 3) + 0),
+ CLZ_R6 = ((2U << 3) + 0),
+ CLO_R6 = ((2U << 3) + 1),
+ MFLO = ((2U << 3) + 2),
+ DCLZ_R6 = ((2U << 3) + 2),
+ DCLO_R6 = ((2U << 3) + 3),
+ DSLLV = ((2U << 3) + 4),
+ DLSA = ((2U << 3) + 5),
+ DSRLV = ((2U << 3) + 6),
+ DSRAV = ((2U << 3) + 7),
+
+ MULT = ((3U << 3) + 0),
+ MULTU = ((3U << 3) + 1),
+ DIV = ((3U << 3) + 2),
+ DIVU = ((3U << 3) + 3),
+ DMULT = ((3U << 3) + 4),
+ DMULTU = ((3U << 3) + 5),
+ DDIV = ((3U << 3) + 6),
+ DDIVU = ((3U << 3) + 7),
+
+ ADD = ((4U << 3) + 0),
+ ADDU = ((4U << 3) + 1),
+ SUB = ((4U << 3) + 2),
+ SUBU = ((4U << 3) + 3),
+ AND = ((4U << 3) + 4),
+ OR = ((4U << 3) + 5),
+ XOR = ((4U << 3) + 6),
+ NOR = ((4U << 3) + 7),
+
+ SLT = ((5U << 3) + 2),
+ SLTU = ((5U << 3) + 3),
+ DADD = ((5U << 3) + 4),
+ DADDU = ((5U << 3) + 5),
+ DSUB = ((5U << 3) + 6),
+ DSUBU = ((5U << 3) + 7),
+
+ TGE = ((6U << 3) + 0),
+ TGEU = ((6U << 3) + 1),
+ TLT = ((6U << 3) + 2),
+ TLTU = ((6U << 3) + 3),
+ TEQ = ((6U << 3) + 4),
+ SELEQZ_S = ((6U << 3) + 5),
+ TNE = ((6U << 3) + 6),
+ SELNEZ_S = ((6U << 3) + 7),
+
+ DSLL = ((7U << 3) + 0),
+ DSRL = ((7U << 3) + 2),
+ DSRA = ((7U << 3) + 3),
+ DSLL32 = ((7U << 3) + 4),
+ DSRL32 = ((7U << 3) + 6),
+ DSRA32 = ((7U << 3) + 7),
// Multiply integers in r6.
- MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
- MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
- D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH.
- D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U.
- RINT = ((3 << 3) + 2),
-
- MUL_OP = ((0 << 3) + 2),
- MUH_OP = ((0 << 3) + 3),
- DIV_OP = ((0 << 3) + 2),
- MOD_OP = ((0 << 3) + 3),
-
- DIV_MOD = ((3 << 3) + 2),
- DIV_MOD_U = ((3 << 3) + 3),
- D_DIV_MOD = ((3 << 3) + 6),
- D_DIV_MOD_U = ((3 << 3) + 7),
+ MUL_MUH = ((3U << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U.
+ D_MUL_MUH = ((7U << 2) + 0), // DMUL, DMUH.
+ D_MUL_MUH_U = ((7U << 2) + 1), // DMUL_U, DMUH_U.
+ RINT = ((3U << 3) + 2),
+
+ MUL_OP = ((0U << 3) + 2),
+ MUH_OP = ((0U << 3) + 3),
+ DIV_OP = ((0U << 3) + 2),
+ MOD_OP = ((0U << 3) + 3),
+
+ DIV_MOD = ((3U << 3) + 2),
+ DIV_MOD_U = ((3U << 3) + 3),
+ D_DIV_MOD = ((3U << 3) + 6),
+ D_DIV_MOD_U = ((3U << 3) + 7),
// drotr in special4?
// SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
+ MUL = ((0U << 3) + 2),
+ CLZ = ((4U << 3) + 0),
+ CLO = ((4U << 3) + 1),
+ DCLZ = ((4U << 3) + 4),
+ DCLO = ((4U << 3) + 5),
// SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- DEXTM = ((0 << 3) + 1),
- DEXTU = ((0 << 3) + 2),
- DEXT = ((0 << 3) + 3),
- INS = ((0 << 3) + 4),
- DINSM = ((0 << 3) + 5),
- DINSU = ((0 << 3) + 6),
- DINS = ((0 << 3) + 7),
-
- BSHFL = ((4 << 3) + 0),
- DBSHFL = ((4 << 3) + 4),
+ EXT = ((0U << 3) + 0),
+ DEXTM = ((0U << 3) + 1),
+ DEXTU = ((0U << 3) + 2),
+ DEXT = ((0U << 3) + 3),
+ INS = ((0U << 3) + 4),
+ DINSM = ((0U << 3) + 5),
+ DINSU = ((0U << 3) + 6),
+ DINS = ((0U << 3) + 7),
+
+ BSHFL = ((4U << 3) + 0),
+ DBSHFL = ((4U << 3) + 4),
// SPECIAL3 Encoding of sa Field.
- BITSWAP = ((0 << 3) + 0),
- ALIGN = ((0 << 3) + 2),
- WSBH = ((0 << 3) + 2),
- SEB = ((2 << 3) + 0),
- SEH = ((3 << 3) + 0),
-
- DBITSWAP = ((0 << 3) + 0),
- DALIGN = ((0 << 3) + 1),
- DBITSWAP_SA = ((0 << 3) + 0) << kSaShift,
- DSBH = ((0 << 3) + 2),
- DSHD = ((0 << 3) + 5),
+ BITSWAP = ((0U << 3) + 0),
+ ALIGN = ((0U << 3) + 2),
+ WSBH = ((0U << 3) + 2),
+ SEB = ((2U << 3) + 0),
+ SEH = ((3U << 3) + 0),
+
+ DBITSWAP = ((0U << 3) + 0),
+ DALIGN = ((0U << 3) + 1),
+ DBITSWAP_SA = ((0U << 3) + 0) << kSaShift,
+ DSBH = ((0U << 3) + 2),
+ DSHD = ((0U << 3) + 5),
// REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
- BGEZALL = ((2 << 3) + 3) << 16,
- DAHI = ((0 << 3) + 6) << 16,
- DATI = ((3 << 3) + 6) << 16,
+ BLTZ = ((0U << 3) + 0) << 16,
+ BGEZ = ((0U << 3) + 1) << 16,
+ BLTZAL = ((2U << 3) + 0) << 16,
+ BGEZAL = ((2U << 3) + 1) << 16,
+ BGEZALL = ((2U << 3) + 3) << 16,
+ DAHI = ((0U << 3) + 6) << 16,
+ DATI = ((3U << 3) + 6) << 16,
// COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- DMFC1 = ((0 << 3) + 1) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- DMTC1 = ((0 << 3) + 5) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
+ MFC1 = ((0U << 3) + 0) << 21,
+ DMFC1 = ((0U << 3) + 1) << 21,
+ CFC1 = ((0U << 3) + 2) << 21,
+ MFHC1 = ((0U << 3) + 3) << 21,
+ MTC1 = ((0U << 3) + 4) << 21,
+ DMTC1 = ((0U << 3) + 5) << 21,
+ CTC1 = ((0U << 3) + 6) << 21,
+ MTHC1 = ((0U << 3) + 7) << 21,
+ BC1 = ((1U << 3) + 0) << 21,
+ S = ((2U << 3) + 0) << 21,
+ D = ((2U << 3) + 1) << 21,
+ W = ((2U << 3) + 4) << 21,
+ L = ((2U << 3) + 5) << 21,
+ PS = ((2U << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
- ADD_S = ((0 << 3) + 0),
- SUB_S = ((0 << 3) + 1),
- MUL_S = ((0 << 3) + 2),
- DIV_S = ((0 << 3) + 3),
- ABS_S = ((0 << 3) + 5),
- SQRT_S = ((0 << 3) + 4),
- MOV_S = ((0 << 3) + 6),
- NEG_S = ((0 << 3) + 7),
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- RECIP_S = ((2 << 3) + 5),
- RSQRT_S = ((2 << 3) + 6),
- CLASS_S = ((3 << 3) + 3),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
+ ADD_S = ((0U << 3) + 0),
+ SUB_S = ((0U << 3) + 1),
+ MUL_S = ((0U << 3) + 2),
+ DIV_S = ((0U << 3) + 3),
+ ABS_S = ((0U << 3) + 5),
+ SQRT_S = ((0U << 3) + 4),
+ MOV_S = ((0U << 3) + 6),
+ NEG_S = ((0U << 3) + 7),
+ ROUND_L_S = ((1U << 3) + 0),
+ TRUNC_L_S = ((1U << 3) + 1),
+ CEIL_L_S = ((1U << 3) + 2),
+ FLOOR_L_S = ((1U << 3) + 3),
+ ROUND_W_S = ((1U << 3) + 4),
+ TRUNC_W_S = ((1U << 3) + 5),
+ CEIL_W_S = ((1U << 3) + 6),
+ FLOOR_W_S = ((1U << 3) + 7),
+ RECIP_S = ((2U << 3) + 5),
+ RSQRT_S = ((2U << 3) + 6),
+ CLASS_S = ((3U << 3) + 3),
+ CVT_D_S = ((4U << 3) + 1),
+ CVT_W_S = ((4U << 3) + 4),
+ CVT_L_S = ((4U << 3) + 5),
+ CVT_PS_S = ((4U << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- RECIP_D = ((2 << 3) + 5),
- RSQRT_D = ((2 << 3) + 6),
- CLASS_D = ((3 << 3) + 3),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
+ ADD_D = ((0U << 3) + 0),
+ SUB_D = ((0U << 3) + 1),
+ MUL_D = ((0U << 3) + 2),
+ DIV_D = ((0U << 3) + 3),
+ SQRT_D = ((0U << 3) + 4),
+ ABS_D = ((0U << 3) + 5),
+ MOV_D = ((0U << 3) + 6),
+ NEG_D = ((0U << 3) + 7),
+ ROUND_L_D = ((1U << 3) + 0),
+ TRUNC_L_D = ((1U << 3) + 1),
+ CEIL_L_D = ((1U << 3) + 2),
+ FLOOR_L_D = ((1U << 3) + 3),
+ ROUND_W_D = ((1U << 3) + 4),
+ TRUNC_W_D = ((1U << 3) + 5),
+ CEIL_W_D = ((1U << 3) + 6),
+ FLOOR_W_D = ((1U << 3) + 7),
+ RECIP_D = ((2U << 3) + 5),
+ RSQRT_D = ((2U << 3) + 6),
+ CLASS_D = ((3U << 3) + 3),
+ MIN = ((3U << 3) + 4),
+ MINA = ((3U << 3) + 5),
+ MAX = ((3U << 3) + 6),
+ MAXA = ((3U << 3) + 7),
+ CVT_S_D = ((4U << 3) + 0),
+ CVT_W_D = ((4U << 3) + 4),
+ CVT_L_D = ((4U << 3) + 5),
+ C_F_D = ((6U << 3) + 0),
+ C_UN_D = ((6U << 3) + 1),
+ C_EQ_D = ((6U << 3) + 2),
+ C_UEQ_D = ((6U << 3) + 3),
+ C_OLT_D = ((6U << 3) + 4),
+ C_ULT_D = ((6U << 3) + 5),
+ C_OLE_D = ((6U << 3) + 6),
+ C_ULE_D = ((6U << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- BC1EQZ = ((2 << 2) + 1) << 21,
- BC1NEZ = ((3 << 2) + 1) << 21,
+ CVT_S_W = ((4U << 3) + 0),
+ CVT_D_W = ((4U << 3) + 1),
+ CVT_S_L = ((4U << 3) + 0),
+ CVT_D_L = ((4U << 3) + 1),
+ BC1EQZ = ((2U << 2) + 1) << 21,
+ BC1NEZ = ((3U << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
- CMP_AF = ((0 << 3) + 0),
- CMP_UN = ((0 << 3) + 1),
- CMP_EQ = ((0 << 3) + 2),
- CMP_UEQ = ((0 << 3) + 3),
- CMP_LT = ((0 << 3) + 4),
- CMP_ULT = ((0 << 3) + 5),
- CMP_LE = ((0 << 3) + 6),
- CMP_ULE = ((0 << 3) + 7),
- CMP_SAF = ((1 << 3) + 0),
- CMP_SUN = ((1 << 3) + 1),
- CMP_SEQ = ((1 << 3) + 2),
- CMP_SUEQ = ((1 << 3) + 3),
- CMP_SSLT = ((1 << 3) + 4),
- CMP_SSULT = ((1 << 3) + 5),
- CMP_SLE = ((1 << 3) + 6),
- CMP_SULE = ((1 << 3) + 7),
+ CMP_AF = ((0U << 3) + 0),
+ CMP_UN = ((0U << 3) + 1),
+ CMP_EQ = ((0U << 3) + 2),
+ CMP_UEQ = ((0U << 3) + 3),
+ CMP_LT = ((0U << 3) + 4),
+ CMP_ULT = ((0U << 3) + 5),
+ CMP_LE = ((0U << 3) + 6),
+ CMP_ULE = ((0U << 3) + 7),
+ CMP_SAF = ((1U << 3) + 0),
+ CMP_SUN = ((1U << 3) + 1),
+ CMP_SEQ = ((1U << 3) + 2),
+ CMP_SUEQ = ((1U << 3) + 3),
+ CMP_SSLT = ((1U << 3) + 4),
+ CMP_SSULT = ((1U << 3) + 5),
+ CMP_SLE = ((1U << 3) + 6),
+ CMP_SULE = ((1U << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
- CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
- CMP_OR = ((2 << 3) + 1),
- CMP_UNE = ((2 << 3) + 2),
- CMP_NE = ((2 << 3) + 3),
- CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
- CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
- CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
- CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
- CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
- CMP_SOR = ((3 << 3) + 1),
- CMP_SUNE = ((3 << 3) + 2),
- CMP_SNE = ((3 << 3) + 3),
- CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
- CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
- CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
- CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
-
- SEL = ((2 << 3) + 0),
- MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
- MOVZ_C = ((2 << 3) + 2), // COP1 on FPR registers.
- MOVN_C = ((2 << 3) + 3), // COP1 on FPR registers.
- SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
- SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+ CMP_AT = ((2U << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2U << 3) + 1),
+ CMP_UNE = ((2U << 3) + 2),
+ CMP_NE = ((2U << 3) + 3),
+ CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3U << 3) + 1),
+ CMP_SUNE = ((3U << 3) + 2),
+ CMP_SNE = ((3U << 3) + 3),
+ CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2U << 3) + 0),
+ MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
+ MOVZ_C = ((2U << 3) + 2), // COP1 on FPR registers.
+ MOVN_C = ((2U << 3) + 3), // COP1 on FPR registers.
+ SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
+ MADD_D = ((4U << 3) + 1),
// PCREL Encoding of rt Field.
- ADDIUPC = ((0 << 2) + 0),
- LWPC = ((0 << 2) + 1),
- LWUPC = ((0 << 2) + 2),
- LDPC = ((0 << 3) + 6),
- // reserved ((1 << 3) + 6),
- AUIPC = ((3 << 3) + 6),
- ALUIPC = ((3 << 3) + 7),
+ ADDIUPC = ((0U << 2) + 0),
+ LWPC = ((0U << 2) + 1),
+ LWUPC = ((0U << 2) + 2),
+ LDPC = ((0U << 3) + 6),
+ // reserved ((1U << 3) + 6),
+ AUIPC = ((3U << 3) + 6),
+ ALUIPC = ((3U << 3) + 7),
// POP66 Encoding of rs Field.
- JIC = ((0 << 5) + 0),
+ JIC = ((0U << 5) + 0),
// POP76 Encoding of rs Field.
- JIALC = ((0 << 5) + 0),
+ JIALC = ((0U << 5) + 0),
- NULLSF = 0
+ NULLSF = 0U
};
@@ -804,7 +825,12 @@ enum FPURoundingMode {
kRoundToNearest = RN,
kRoundToZero = RZ,
kRoundToPlusInf = RP,
- kRoundToMinusInf = RM
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
};
const uint32_t kFPURoundingModeMask = 3 << 0;
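
The new mode_round/mode_ceil/mode_floor/mode_trunc aliases above simply name the FCSR rounding modes after the JS-level operations that select them (the FCSR RS-field encoding is RN=0, RZ=1, RP=2, RM=3). A small illustrative mapping in C++ (demo names, not V8 code):

enum RoundingModeDemo { kRN = 0, kRZ = 1, kRP = 2, kRM = 3 };

RoundingModeDemo ModeForOperation(char op) {
  switch (op) {
    case 'r': return kRN;  // round: to nearest.
    case 'c': return kRP;  // ceil: toward +infinity.
    case 'f': return kRM;  // floor: toward -infinity.
    default:  return kRZ;  // trunc: toward zero.
  }
}
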
@@ -860,6 +886,11 @@ const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
+static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
+ return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
+}
+
+
class Instruction {
public:
enum {
@@ -887,7 +918,7 @@ class Instruction {
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
// Instruction type.
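
The 2U in the Bits() hunk above is not cosmetic: with a signed literal, extracting a full-width field (hi == 31, lo == 0) shifts 2 into the sign bit, which is undefined behavior, whereas the unsigned shift wraps to 0 and the mask correctly becomes all ones. A standalone sketch:

#include <cstdint>

// Extracts bits hi..lo (inclusive) of a 32-bit instruction word, mirroring
// Instruction::Bits(). With hi == 31 and lo == 0, 2U << 31 wraps to 0 and
// the mask (0u - 1) becomes 0xFFFFFFFF; a signed 2 << 31 would be UB.
uint32_t ExtractBitsDemo(uint32_t insn, int hi, int lo) {
  return (insn >> lo) & ((2U << (hi - lo)) - 1);
}
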
@@ -901,10 +932,7 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
-#define OpcodeToBitNumber(opcode) \
- (1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift))
-
- static const uint64_t kOpcodeImmediateTypeMask =
+ static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) |
@@ -923,10 +951,11 @@ class Instruction {
OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
- OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
+ OpcodeToBitNumber(DAUI) | OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
#define FunctionFieldToBitNumber(function) (1ULL << function)
+ // On r6, DCLZ_R6 aliases to existing MFLO.
static const uint64_t kFunctionFieldRegisterTypeMask =
FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) |
FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) |
@@ -937,6 +966,7 @@ class Instruction {
FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(DSLLV) |
FunctionFieldToBitNumber(SRLV) | FunctionFieldToBitNumber(DSRLV) |
FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(DSRAV) |
+ FunctionFieldToBitNumber(LSA) | FunctionFieldToBitNumber(DLSA) |
FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) |
FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(DMULT) |
FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DMULTU) |
@@ -989,6 +1019,11 @@ class Instruction {
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
+ inline int LsaSaValue() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+ }
+
inline int FunctionValue() const {
DCHECK(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@@ -1087,6 +1122,11 @@ class Instruction {
}
}
+ inline int32_t ImmValue(int bits) const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(bits - 1, 0);
+ }
+
inline int32_t Imm16Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
@@ -1113,8 +1153,14 @@ class Instruction {
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
- // Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay() const;
+ static bool IsForbiddenAfterBranchInstr(Instr instr);
+
+ // Say if the instruction should not be used in a branch delay slot or
+ // immediately after a compact branch.
+ inline bool IsForbiddenAfterBranch() const {
+ return IsForbiddenAfterBranchInstr(InstructionBits());
+ }
+
// Say if the instruction 'links'. e.g. jal, bal.
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
@@ -1171,6 +1217,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
switch (FunctionFieldRaw()) {
case MUL:
case CLZ:
+ case DCLZ:
return kRegisterType;
default:
return kUnsupported;
@@ -1181,6 +1228,8 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
case INS:
case EXT:
case DEXT:
+ case DEXTM:
+ case DEXTU:
return kRegisterType;
case BSHFL: {
int sa = SaFieldRaw() >> kSaShift;
@@ -1252,6 +1301,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
#undef OpcodeToBitNumber
#undef FunctionFieldToBitNumber
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/chromium/v8/src/mips64/cpu-mips64.cc b/chromium/v8/src/mips64/cpu-mips64.cc
index 6c24fd06a90..ab9cf69620e 100644
--- a/chromium/v8/src/mips64/cpu-mips64.cc
+++ b/chromium/v8/src/mips64/cpu-mips64.cc
@@ -23,33 +23,26 @@ namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
+#if !defined(USE_SIMULATOR)
// Nothing to do; no instructions to flush.
if (size == 0) {
return;
}
-#if !defined (USE_SIMULATOR)
#if defined(ANDROID) && !defined(__LP64__)
// Bionic cacheflush can typically run in userland, avoiding a kernel call.
char *end = reinterpret_cast<char *>(start) + size;
cacheflush(
reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
#else // ANDROID
- int res;
+ long res; // NOLINT(runtime/int)
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // ANDROID
-#else // USE_SIMULATOR.
- // Not generating mips instructions for C-code. This means that we are
- // building a mips emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#endif // USE_SIMULATOR.
+#endif // !USE_SIMULATOR.
}
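For reference, the non-Android path boils down to the MIPS cacheflush syscall; a minimal stand-alone sketch (Linux/MIPS only; asm/cachectl.h provides ICACHE, FlushRange is a hypothetical name, and error handling is reduced to a comment):

#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/cachectl.h>  // ICACHE

// Flush [start, start + size) from the instruction cache.
// syscall() returns long, which is why the result type was widened above.
static void FlushRange(void* start, size_t size) {
  long res = syscall(__NR_cacheflush, start, size, ICACHE);
  if (res != 0) {
    // The V8 code treats this as fatal.
  }
}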
} // namespace internal
diff --git a/chromium/v8/src/mips64/deoptimizer-mips64.cc b/chromium/v8/src/mips64/deoptimizer-mips64.cc
index 958951a948e..8daba04ac73 100644
--- a/chromium/v8/src/mips64/deoptimizer-mips64.cc
+++ b/chromium/v8/src/mips64/deoptimizer-mips64.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -37,14 +38,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->break_(0xCC);
}
}
@@ -65,7 +67,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
@@ -88,7 +90,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -139,14 +141,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
- const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
// Save all FPU registers before messing with them.
__ Dsubu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
@@ -221,9 +225,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
@@ -289,9 +294,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
__ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
diff --git a/chromium/v8/src/mips64/disasm-mips64.cc b/chromium/v8/src/mips64/disasm-mips64.cc
index ffab261cd11..3d0e10c20a5 100644
--- a/chromium/v8/src/mips64/disasm-mips64.cc
+++ b/chromium/v8/src/mips64/disasm-mips64.cc
@@ -67,6 +67,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -74,6 +75,7 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintLsaSa(Instruction* instr);
void PrintSd(Instruction* instr);
void PrintSs1(Instruction* instr);
void PrintSs2(Instruction* instr);
@@ -90,7 +92,7 @@ class Decoder {
void PrintXImm19(Instruction* instr);
void PrintSImm19(Instruction* instr);
void PrintXImm21(Instruction* instr);
-
+ void PrintSImm21(Instruction* instr);
void PrintPCImm21(Instruction* instr, int delta_pc, int n_bits);
void PrintXImm26(Instruction* instr);
void PrintSImm26(Instruction* instr);
@@ -190,6 +192,17 @@ void Decoder::PrintFPURegister(int freg) {
}
+void Decoder::PrintFPUStatusRegister(int freg) {
+ switch (freg) {
+ case kFCSRRegister:
+ Print("FCSR");
+ break;
+ default:
+ Print(converter_.NameOfXMMRegister(freg));
+ }
+}
+
+
void Decoder::PrintFs(Instruction* instr) {
int freg = instr->RsValue();
PrintFPURegister(freg);
@@ -215,6 +228,13 @@ void Decoder::PrintSa(Instruction* instr) {
}
+// Print the integer value of the sa field of an lsa instruction.
+void Decoder::PrintLsaSa(Instruction* instr) {
+ int sa = instr->LsaSaValue() + 1;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
@@ -325,6 +345,16 @@ void Decoder::PrintXImm21(Instruction* instr) {
}
+// Print 21-bit signed immediate value.
+void Decoder::PrintSImm21(Instruction* instr) {
+ int32_t imm21 = instr->Imm21Value();
+ // Sign-extend the 21-bit immediate.
+ imm21 <<= (32 - kImm21Bits);
+ imm21 >>= (32 - kImm21Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21);
+}
+
+
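The shift pair in PrintSImm21 is the standard sign-extension idiom for an n-bit field; an equivalent stand-alone C++ helper (SignExtend is illustrative only, mirroring the decoder's shifts):

#include <cstdint>

// Sign-extend the low `bits` bits of value to a full int32_t by shifting
// the field up to the sign bit and arithmetically back down. (Strictly,
// shifting set bits into the sign bit is not portable pre-C++20; this
// mirrors the source's own idiom.)
inline int32_t SignExtend(int32_t value, int bits) {
  value <<= (32 - bits);
  value >>= (32 - bits);
  return value;
}

// SignExtend(0x1FFFFF, 21) == -1, SignExtend(0x0FFFFF, 21) == 0x0FFFFF.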
// Print absolute address for 21-bit offset or immediate value.
// The absolute address is calculated according to the following expression:
// PC + delta_pc + (offset << n_bits)
@@ -481,22 +511,42 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register.
- int reg = instr->FsValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'ft: ft register.
- int reg = instr->FtValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'fd: fd register.
- int reg = instr->FdValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'r') { // 'fr: fr register.
- int reg = instr->FrValue();
- PrintFPURegister(reg);
- return 2;
+ if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ }
+ } else {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
}
UNREACHABLE();
return -1;
@@ -582,6 +632,10 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
} else if (format[3] == '2' && format[4] == '1') {
DCHECK(STRING_STARTS_WITH(format, "imm21"));
switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm21s"));
+ PrintSImm21(instr);
+ break;
case 'x':
DCHECK(STRING_STARTS_WITH(format, "imm21x"));
PrintXImm21(instr);
@@ -652,11 +706,17 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': { // 'sa.
switch (format[1]) {
- case 'a': {
- DCHECK(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
- return 2;
- }
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2
+ PrintLsaSa(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1166,6 +1226,12 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
case DSRAV:
Format(instr, "dsrav 'rd, 'rt, 'rs");
break;
+ case LSA:
+ Format(instr, "lsa 'rd, 'rt, 'rs, 'sa2");
+ break;
+ case DLSA:
+ Format(instr, "dlsa 'rd, 'rt, 'rs, 'sa2");
+ break;
case MFHI:
if (instr->Bits(25, 16) == 0) {
Format(instr, "mfhi 'rd");
@@ -1179,7 +1245,16 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
}
break;
case MFLO:
- Format(instr, "mflo 'rd");
+ if (instr->Bits(25, 16) == 0) {
+ Format(instr, "mflo 'rd");
+ } else {
+ if ((instr->FunctionFieldRaw() == DCLZ_R6) && (instr->FdValue() == 1)) {
+ Format(instr, "dclz 'rd, 'rs");
+ } else if ((instr->FunctionFieldRaw() == DCLO_R6) &&
+ (instr->FdValue() == 1)) {
+ Format(instr, "dclo 'rd, 'rs");
+ }
+ }
break;
case D_MUL_MUH_U: // Equals to DMULTU.
if (kArchVariant != kMips64r6) {
@@ -1360,6 +1435,11 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
Format(instr, "clz 'rd, 'rs");
}
break;
+ case DCLZ:
+ if (kArchVariant != kMips64r6) {
+ Format(instr, "dclz 'rd, 'rs");
+ }
+ break;
default:
UNREACHABLE();
}
@@ -1521,10 +1601,10 @@ void Decoder::DecodeTypeImmediateREGIMM(Instruction* instr) {
Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2");
break;
case DAHI:
- Format(instr, "dahi 'rs, 'imm16u");
+ Format(instr, "dahi 'rs, 'imm16x");
break;
case DATI:
- Format(instr, "dati 'rs, 'imm16u");
+ Format(instr, "dati 'rs, 'imm16x");
break;
default:
UNREACHABLE();
@@ -1559,12 +1639,12 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() == instr->RsValue()) &&
(instr->RtValue() != 0)) {
- Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
- Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1601,7 +1681,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2");
} else {
@@ -1612,14 +1692,14 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (instr->RsValue() == JIC) {
Format(instr, "jic 'rt, 'imm16s");
} else {
- Format(instr, "beqzc 'rs, 'imm21x -> 'imm21p4s2");
+ Format(instr, "beqzc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
case POP76:
if (instr->RsValue() == JIALC) {
- Format(instr, "jialc 'rt, 'imm16x");
+ Format(instr, "jialc 'rt, 'imm16s");
} else {
- Format(instr, "bnezc 'rs, 'imm21x -> 'imm21p4s2");
+ Format(instr, "bnezc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
// ------------- Arithmetic instructions.
@@ -1627,13 +1707,18 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (kArchVariant != kMips64r6) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
- // Check if BOVC or BEQC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BOVC, BEQZALC or BEQC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ DCHECK(rt_reg > 0);
+ if (rs_reg == 0) {
+ Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
@@ -1641,13 +1726,18 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (kArchVariant != kMips64r6) {
Format(instr, "daddi 'rt, 'rs, 'imm16s");
} else {
- // Check if BNVC or BNEC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BNVC, BNEZALC or BNEC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ DCHECK(rt_reg > 0);
+ if (rs_reg == 0) {
+ Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
@@ -1677,14 +1767,14 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "lui 'rt, 'imm16x");
} else {
if (instr->RsValue() != 0) {
- Format(instr, "aui 'rt, 'imm16x");
+ Format(instr, "aui 'rt, 'rs, 'imm16x");
} else {
Format(instr, "lui 'rt, 'imm16x");
}
}
break;
case DAUI:
- Format(instr, "daui 'rt, 'imm16x");
+ Format(instr, "daui 'rt, 'rs, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
diff --git a/chromium/v8/src/mips64/frames-mips64.h b/chromium/v8/src/mips64/frames-mips64.h
index 9b6d3262752..9c42d8d95c2 100644
--- a/chromium/v8/src/mips64/frames-mips64.h
+++ b/chromium/v8/src/mips64/frames-mips64.h
@@ -169,6 +169,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif
diff --git a/chromium/v8/src/mips64/interface-descriptors-mips64.cc b/chromium/v8/src/mips64/interface-descriptors-mips64.cc
index ab697812dee..c5c1311d94f 100644
--- a/chromium/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/chromium/v8/src/mips64/interface-descriptors-mips64.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::parameter_count() { return a2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return a4; }
+
+
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -78,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2};
@@ -108,6 +105,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return a0; }
@@ -129,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3, a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3, a2, a1};
@@ -191,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // a4 : original constructor (for IsSuperConstructorCall)
+ // a4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, a4, a2};
@@ -208,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ Register registers[] = {a1, a3, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a1, a0};
@@ -229,6 +258,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -339,6 +375,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // JSFunction
+ a3, // the new target
a0, // actual number of arguments
a2, // expected number of arguments
};
@@ -371,33 +408,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
+ a0, // argument count (not including receiver)
+ a2, // address of first argument
+ a1 // the target callable to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
- a2, // type vector
+ a0, // argument count (not including receiver)
+ a3, // new target
+ a1, // constructor to call
+ a2 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a0, // argument count (including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
+ a0, // argument count (argc)
+ a2, // address of first argument (argv)
+ a1 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/chromium/v8/src/mips64/macro-assembler-mips64.cc b/chromium/v8/src/mips64/macro-assembler-mips64.cc
index 26229c9d872..7b73ac74e4d 100644
--- a/chromium/v8/src/mips64/macro-assembler-mips64.cc
+++ b/chromium/v8/src/mips64/macro-assembler-mips64.cc
@@ -11,19 +11,21 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips64/macro-assembler-mips64.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
has_double_zero_reg_set_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -148,7 +150,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
+ int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -435,10 +437,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ld(scratch, FieldMemOperand(scratch, offset));
- ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1189,6 +1188,32 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (kArchVariant == kMips64r6 && sa <= 4) {
+ lsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ sll(tmp, rs, sa);
+ Addu(rd, rt, tmp);
+ }
+}
+
+
+void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (kArchVariant == kMips64r6 && sa <= 4) {
+ dlsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ dsll(tmp, rs, sa);
+ Daddu(rd, rt, tmp);
+ }
+}
+
+
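Both macros compute a scaled-index add, rd = rt + (rs << sa): on r6 a single lsa/dlsa instruction does it (for sa <= 4 here), otherwise the shift-then-add fallback shown above. A hedged C++ model of the 64-bit semantics (DlsaModel is a hypothetical name):

#include <cstdint>

// Model of Dlsa's effect: base plus index scaled by a power of two,
// the typical array-element address computation.
inline uint64_t DlsaModel(uint64_t rt_base, uint64_t rs_index, unsigned sa) {
  return rt_base + (rs_index << sa);
}

// Example: address of element i in an array of 8-byte slots:
//   uint64_t addr = DlsaModel(base, i, 3);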
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
@@ -1482,6 +1507,31 @@ void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
}
+void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(size <= 64);
+ dextm(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos >= 32 && pos < 64);
+ DCHECK(size < 33);
+ dextu(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(pos + size <= 32);
+ DCHECK(size != 0);
+ dins_(rt, rs, pos, size);
+}
+
+
void MacroAssembler::Ins(Register rt,
Register rs,
uint16_t pos,
@@ -1493,49 +1543,90 @@ void MacroAssembler::Ins(Register rt,
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
- FPURegister scratch) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
+ Cvt_d_uw(fd, t8);
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ // Zero extend int32 in rs.
+ Dext(t9, rs, 0, 32);
+ dmtc1(t9, fd);
+ cvt_d_l(fd, fd);
+}
+
+
+void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ dmfc1(t8, fs);
+ Cvt_d_ul(fd, t8);
+}
+
+
+void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
- DCHECK(!fd.is(scratch));
DCHECK(!rs.is(t9));
DCHECK(!rs.is(at));
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
- mthc1(zero_reg, fd);
+ Label msb_clear, conversion_done;
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
+ Branch(&msb_clear, ge, rs, Operand(zero_reg));
- Label conversion_done;
+ // Rs >= 2^63.
+ andi(t9, rs, 1);
+ dsrl(rs, rs, 1);
+ or_(t9, t9, rs);
+ dmtc1(t9, fd);
+ cvt_d_l(fd, fd);
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_d(fd, fd, fd); // In delay slot.
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
+ bind(&msb_clear);
+ // Rs < 2^63; we can do a simple conversion.
+ dmtc1(rs, fd);
+ cvt_d_l(fd, fd);
+
+ bind(&conversion_done);
+}
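The MSB-set path halves the value while folding the dropped bit back in (round to odd), converts via the signed path, then doubles; the OR keeps the final rounding identical to converting the exact value. A stand-alone C++ sketch of the same technique, assuming only a signed int64-to-double conversion is available (as on the FPU here; UnsignedToDouble is a hypothetical name):

#include <cstdint>

inline double UnsignedToDouble(uint64_t x) {
  if (static_cast<int64_t>(x) >= 0) {
    // x < 2^63: the signed conversion applies directly.
    return static_cast<double>(static_cast<int64_t>(x));
  }
  // x >= 2^63: halve, OR in the lost low bit (round to odd), convert,
  // and double. The sticky bit makes the final doubling round correctly.
  uint64_t half = (x >> 1) | (x & 1);
  double d = static_cast<double>(static_cast<int64_t>(half));
  return d + d;
}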
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(zero_reg, scratch);
- mthc1(at, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
+
+void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ dmfc1(t8, fs);
+ Cvt_s_ul(fd, t8);
+}
+
+
+void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ Label positive, conversion_done;
+
+ Branch(&positive, ge, rs, Operand(zero_reg));
+
+ // Rs >= 2^63.
+ andi(t9, rs, 1);
+ dsrl(rs, rs, 1);
+ or_(t9, t9, rs);
+ dmtc1(t9, fd);
+ cvt_s_l(fd, fd);
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_s(fd, fd, fd); // In delay slot.
+
+ bind(&positive);
+ // Rs < 2^63; we can do a simple conversion.
+ dmtc1(rs, fd);
+ cvt_s_l(fd, fd);
bind(&conversion_done);
}
@@ -1581,6 +1672,19 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
+ FPURegister scratch, Register result) {
+ Trunc_ul_d(fs, t8, scratch, result);
+ dmtc1(t8, fd);
+}
+
+
+void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch, Register result) {
+ Trunc_ul_s(fs, t8, scratch, result);
+ dmtc1(t8, fd);
+}
+
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
trunc_w_d(fd, fs);
@@ -1635,6 +1739,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
+ FPURegister scratch, Register result) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!AreAliased(rs, result, at));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0);
+ // If fd <= -1 or unordered, the conversion fails.
+ BranchF(&fail, &fail, le, fd, scratch);
+ }
+
+ // Load 2^63 into scratch as its double representation.
+ li(at, 0x43e0000000000000);
+ dmtc1(at, scratch);
+
+ // Test if scratch > fd.
+ // If fd < 2^63 we can convert it normally.
+ BranchF(&simple_convert, nullptr, lt, fd, scratch);
+
+ // First we subtract 2^63 from fd, then truncate it into rs
+ // and add 2^63 to rs.
+ sub_d(scratch, fd, scratch);
+ trunc_l_d(scratch, scratch);
+ dmfc1(rs, scratch);
+ Or(rs, rs, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_l_d(scratch, fd);
+ dmfc1(rs, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+ // The conversion fails if the truncated result is negative.
+ addiu(at, zero_reg, -1);
+ dsrl(at, at, 1); // Load 2^63 - 1.
+ dmfc1(result, scratch);
+ xor_(result, result, at);
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+
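Trunc_ul_d's rebasing trick also has a direct C++ analogue: values below 2^63 truncate through the signed path; larger ones are shifted down by 2^63 first and bit 63 is restored afterwards. A hedged sketch (in-range, non-NaN input assumed; the macro's failure signalling is omitted; DoubleToUnsigned is a hypothetical name):

#include <cstdint>

inline uint64_t DoubleToUnsigned(double d) {
  const double k2_63 = 9223372036854775808.0;  // 2^63
  if (d < k2_63) {
    return static_cast<uint64_t>(static_cast<int64_t>(d));  // Simple case.
  }
  // Subtract 2^63, truncate via the signed conversion, restore bit 63.
  return static_cast<uint64_t>(static_cast<int64_t>(d - k2_63)) |
         (1ULL << 63);
}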
+void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
+ FPURegister scratch, Register result) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!AreAliased(rs, result, at));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0f);
+ // If fd <= -1 or unordered, the conversion fails.
+ BranchF32(&fail, &fail, le, fd, scratch);
+ }
+
+ // Load 2^63 into scratch as its float representation.
+ li(at, 0x5f000000);
+ mtc1(at, scratch);
+
+ // Test if scratch > fd.
+ // If fd < 2^63 we can convert it normally.
+ BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+
+ // First we subtract 2^63 from fd, then truncate it into rs
+ // and add 2^63 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_l_s(scratch, scratch);
+ dmfc1(rs, scratch);
+ Or(rs, rs, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_l_s(scratch, fd);
+ dmfc1(rs, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+ // The conversion fails if the truncated result is negative or unordered.
+ addiu(at, zero_reg, -1);
+ dsrl(at, at, 1); // Load 2^63 - 1.
+ dmfc1(result, scratch);
+ xor_(result, result, at);
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (0) { // TODO(plind): find reasonable arch-variant symbol names.
@@ -1668,13 +1868,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (kArchVariant != kMips64r6) {
if (long_branch) {
Label skip;
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1f(&skip);
nop();
- J(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
@@ -1687,13 +1887,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
- J(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
@@ -1709,7 +1909,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- J(target, bd);
+ BranchLong(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
@@ -2128,28 +2328,30 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+ DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
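The DCHECK is the usual "fits in n signed bits" range test; a small C++ sketch of the predicate (the real is_int16/is_int26 helpers live in the assembler headers; IsIntN is a hypothetical stand-in):

#include <cstdint>

// True iff x is representable as an n-bit two's-complement value
// (valid for 0 < n < 64).
inline bool IsIntN(int64_t x, unsigned n) {
  int64_t limit = int64_t{1} << (n - 1);
  return -limit <= x && x < limit;
}

// r6 compact branches take 26-bit word offsets, earlier variants 16-bit:
//   IsIntN(offset, 26) vs. IsIntN(offset, 16).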
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchShort(L, bdslot);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- J(L, bdslot);
+ BranchLong(L, bdslot);
} else {
BranchShort(L, bdslot);
}
@@ -2161,17 +2363,15 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
}
} else {
@@ -2180,10 +2380,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
@@ -2202,7 +2402,10 @@ void MacroAssembler::Branch(Label* L,
}
-void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset);
// Emit a nop in the branch delay slot if required.
@@ -2211,549 +2414,544 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
}
-void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- DCHECK(!rs.is(zero_reg));
- Register r2 = no_reg;
- Register scratch = at;
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ bc(offset);
+}
- if (rt.is_reg()) {
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
- // rt.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- r2 = rt.rm_;
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison.
- case greater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- bne(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
- } else {
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- beq(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchShortHelperR6(offset, nullptr);
} else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- if (rt.imm64_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
- }
- break;
- case ne:
- if (rt.imm64_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
- }
- break;
- // Signed comparison.
- case greater:
- if (rt.imm64_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm64_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm64_ == 0) {
- bltz(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm64_ == 0) {
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm64_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm64_ == 0) {
- b(offset);
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm64_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm64_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(is_int16(offset));
+ BranchShortHelper(offset, nullptr, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BranchShortHelper(0, L, bdslot);
+ }
+}
- b(shifted_branch_offset(L, false));
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm().is(zero_reg);
+ } else {
+ return rt.immediate() == 0;
+ }
}
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+ if (L) {
+ offset = branch_offset_helper(L, bits) >> 2;
+ } else {
+ DCHECK(is_intn(offset, bits));
+ }
+ return offset;
+}
- int32_t offset = 0;
+
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+ Register scratch) {
Register r2 = no_reg;
- Register scratch = at;
if (rt.is_reg()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ return r2;
+}
+
+
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
break;
case eq:
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre-R6 beq is used here to make the code patchable. Otherwise bc
+ // would be used; it has no condition field, so it is not patchable.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beq(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
+ } else {
+ // We don't want any register other than scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqc(rs, scratch, offset);
+ }
break;
case ne:
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre-R6 bne is used here to make the code patchable. Otherwise no
+ // instruction would be emitted at all.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bne(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
+ } else {
+ // We don't want any register other than scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnec(rs, scratch, offset);
+ }
break;
+
// Signed comparison.
case greater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(scratch, rs, offset);
}
break;
case greater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(rs, scratch, offset);
}
break;
case less:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(rs, scratch, offset);
}
break;
case less_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(scratch, rs, offset);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(rs, scratch, offset);
}
break;
case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ break; // No code needs to be emitted.
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(scratch, rs, offset);
}
break;
default:
UNREACHABLE();
}
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ }
+ CheckTrampolinePoolQuick(1);
+ return true;
+}
+
+
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+
+ Register scratch = at;
+ int32_t offset32;
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching the
+ // target.
+ {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
break;
case eq:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ // We don't want any register other than scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, scratch, offset32);
}
break;
case ne:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ // We don't want any register other than scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, scratch, offset32);
}
break;
+
// Signed comparison.
case greater:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgtz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case greater_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
case less:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case less_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ blez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Ugreater_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
- case Uless:
- if (rt.imm64_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ case Uless:
+ if (IsZero(rt)) {
+ return true; // No code needs to be emitted.
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Uless_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
default:
UNREACHABLE();
}
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
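+// Exactly one of |offset| and |L| carries the branch target: a null label
+// means |offset| is used directly, a non-null label requires offset == 0.
+// MIPS64r6 with a protected delay slot takes the compact-branch helper; all
+// other configurations use the classic 16-bit-offset helper.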
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ return BranchShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
+}
+
+
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(0, L, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
} else {
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
@@ -2765,13 +2963,11 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
}
} else {
@@ -2779,20 +2975,19 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
}
}
}
-// We need to use a bgezal or bltzal, but they can't be used directly with the
-// slt instructions. We could use sub or add instead but we would miss overflow
-// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLinkShort(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
// Emit a nop in the branch delay slot if required.
@@ -2801,230 +2996,306 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset,
}
-void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ balc(offset);
+}
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
+void MacroAssembler::BranchAndLinkShort(int32_t offset,
+ BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchAndLinkShortHelperR6(offset, nullptr);
+ } else {
+ DCHECK(is_int16(offset));
+ BranchAndLinkShortHelper(offset, nullptr, bdslot);
+ }
+}
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BranchAndLinkShortHelper(0, L, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
- bal(shifted_branch_offset(L, false));
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
+ switch (cond) {
+ case cc_always:
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ break;
+ case eq:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case ne:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case greater_equal:
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
+ case less:
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case less_equal:
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
-void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset = 0;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case Uless:
+ // rs < rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Uless_equal:
+ // rs <= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ default:
+ UNREACHABLE();
}
+ return true;
+}
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
+// with the slt instructions. We could use sub or add instead but we would miss
+// overflow cases, so we keep slt and add an intermediate third instruction.
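+// The slt result {0, 1} is shifted to {-1, 0} by the addiu below, so bgezal
+// branches-and-links exactly when slt produced 1 and bltzal exactly when it
+// produced 0.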
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+ Register scratch = t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ switch (cond) {
+ case cc_always:
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
- default:
- UNREACHABLE();
- }
+ // Signed comparison.
+ case greater:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
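+// Linking counterpart of BranchShortCheck: exactly one of |offset| and |L|
+// carries the target, and MIPS64r6 with a protected delay slot takes the
+// compact-branch path.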
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
}
@@ -3114,6 +3385,10 @@ void MacroAssembler::Call(Register target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+#ifdef DEBUG
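+ // If the previous instruction is a compact branch, a forbidden-slot nop is
+ // emitted inside the call sequence, so the generated code exceeds CallSize
+ // by one instruction.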
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -3128,8 +3403,10 @@ void MacroAssembler::Call(Register target,
if (bd == PROTECT)
nop();
- DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
}
@@ -3207,31 +3484,43 @@ void MacroAssembler::Ret(Condition cond,
}
-void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(L);
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
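+ // An r6 compact branch can reach any unbound label or a bound label still
+ // in range; anything else takes the long jump through an internal
+ // reference.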
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchShortHelperR6(0, L);
+ } else {
+ EmitForbiddenSlotInstruction();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ j(L);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
-void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- jal(L);
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ EmitForbiddenSlotInstruction();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ jal(L);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
@@ -3425,12 +3714,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(t9));
- DCHECK(!scratch2.is(t9));
- DCHECK(!result.is(t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3446,34 +3730,35 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- ld(result, MemOperand(topaddr));
- ld(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ld(result, MemOperand(top_address));
+ ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- ld(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ ld(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
+ // Load allocation limit. Result already contains allocation top.
+ ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
- DCHECK(kPointerSize == kDoubleSize);
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
@@ -3481,9 +3766,9 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Daddu(scratch2, result, Operand(object_size));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sd(scratch2, MemOperand(topaddr));
+ Daddu(result_end, result, Operand(object_size));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
+ sd(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3492,28 +3777,23 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
+ li(scratch, 0x7191);
+ li(result_end, 0x7291);
}
jmp(gc_required);
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(t9));
- DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+ // |object_size| and |result_end| may overlap, other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, t9));
+ DCHECK(!AreAliased(result_end, result, scratch, t9));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3522,34 +3802,34 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- ld(result, MemOperand(topaddr));
- ld(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ld(result, MemOperand(top_address));
+ ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- ld(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ ld(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
+ // Load allocation limit. Result already contains allocation top.
+ ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
- DCHECK(kPointerSize == kDoubleSize);
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
@@ -3559,19 +3839,19 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- dsll(scratch2, object_size, kPointerSizeLog2);
- Daddu(scratch2, result, scratch2);
+ dsll(result_end, object_size, kPointerSizeLog2);
+ Daddu(result_end, result, result_end);
} else {
- Daddu(scratch2, result, Operand(object_size));
+ Daddu(result_end, result, Operand(object_size));
}
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ And(at, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
- sd(scratch2, MemOperand(topaddr));
+ sd(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3748,29 +4028,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.code_ = i;
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- ld(tmp, FieldMemOperand(src, i * kPointerSize));
- sd(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ sd(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -3857,16 +4133,16 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
Branch(&entry);
bind(&loop);
- sd(filler, MemOperand(start_offset));
- Daddu(start_offset, start_offset, kPointerSize);
+ sd(filler, MemOperand(current_address));
+ Daddu(current_address, current_address, kPointerSize);
bind(&entry);
- Branch(&loop, ult, start_offset, Operand(end_offset));
+ Branch(&loop, ult, current_address, Operand(end_address));
}
@@ -3916,6 +4192,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch2,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
Label smi_value, done;
// Handle smi values specially.
@@ -3937,10 +4214,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
FPUCanonicalizeNaN(double_result, double_result);
bind(&smi_value);
- // scratch1 is now effective address of the double element.
// Untag and transfer.
- dsrl32(at, value_reg, 0);
- mtc1(at, double_scratch);
+ dsrl32(scratch1, value_reg, 0);
+ mtc1(scratch1, double_scratch);
cvt_d_w(double_result, double_scratch);
bind(&done);
@@ -3949,6 +4225,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
elements_offset));
dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
Daddu(scratch1, scratch1, scratch2);
+ // scratch1 is now effective address of the double element.
sdc1(double_result, MemOperand(scratch1, 0));
}
@@ -4114,8 +4391,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4135,7 +4410,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(a0));
DCHECK(expected.is_immediate() || expected.reg().is(a2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4163,11 +4437,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -4185,21 +4454,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ li(t0, Operand(step_in_enabled));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
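+ // Push |fun| twice: the top copy is consumed as the runtime call's single
+ // argument, the copy beneath it is restored by the Pop below.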
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(a1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- Label done;
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ }
+
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = t0;
+ ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4216,6 +4542,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -4225,17 +4552,16 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function.is(a1));
Register expected_reg = a2;
- Register code_reg = a3;
- ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Register temp_reg = t0;
+ ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
}
@@ -4253,11 +4579,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4562,6 +4884,89 @@ void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
}
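+// Shared tail for the *BranchOvf helpers below: callers compute overflow_dst
+// so that its sign bit is set exactly when signed overflow occurred, leaving
+// a single branch on its sign here.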
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DaddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (right.is_reg()) {
+ DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ li(overflow_dst, right); // Load right.
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Daddu(dst, left, overflow_dst); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Daddu(dst, left, overflow_dst);
+ xor_(scratch, dst, overflow_dst);
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ daddu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ daddu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ daddu(dst, left, right);
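+ // Signed addition overflows iff the result's sign differs from the signs
+ // of both operands; each xor captures one sign difference and the and
+ // keeps the sign bit only when both differ.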
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
+
+
void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
const Operand& right,
Register overflow_dst,
@@ -4693,6 +5098,83 @@ void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
}
}
+
+void MacroAssembler::DsubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ li(overflow_dst, right); // Load right.
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Dsubu(dst, left, overflow_dst); // Left is overwritten.
+ xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
+ xor_(scratch, dst, scratch); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ Dsubu(dst, left, overflow_dst);
+ xor_(scratch, left, overflow_dst);
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+ // This happens with some crankshaft code. Since Dsubu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ dsubu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ dsubu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ dsubu(dst, left, right);
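+ // Signed subtraction overflows iff the operands' signs differ and the
+ // result's sign differs from left's; the two xors capture exactly those
+ // conditions.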
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
+
+
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
@@ -4725,24 +5207,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
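+ // Runtime functions with a variable argument count have negative nargs;
+ // for those the caller has already placed the actual count in a0.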
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -4764,34 +5235,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- ld(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, native_context_index);
- // Load the code entry point from the builtins object.
- ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, a1);
+ InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -4928,47 +5375,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- ld(dst, GlobalObjectOperand());
- ld(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ld(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- ld(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- ld(at, FieldMemOperand(scratch, offset));
+ ld(scratch, NativeContextMemOperand());
+ ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ld(map_in_out, FieldMemOperand(scratch, offset));
+ ld(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ld(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ld(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ld(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ ld(dst, NativeContextMemOperand());
+ ld(dst, ContextMemOperand(dst, index));
}
@@ -5440,6 +5869,17 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5703,8 +6143,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -5733,28 +6173,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- DCHECK(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5774,116 +6192,29 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
// Note that we are using a 4-byte aligned 8-byte load.
- LoadWordPair(load_scratch,
- MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- dsll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(zero_reg));
- // Adjust length for UC16.
- dsll(t9, t9, 1);
- bind(&skip);
+ LoadWordPair(load_scratch,
+ MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ } else {
+ lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
}
- Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- DCHECK(!length.is(t8));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Daddu(t8, t8, Operand(length));
- StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ And(t8, mask_scratch, load_scratch);
+ Branch(value_is_white, eq, t8, Operand(zero_reg));
}
@@ -6042,8 +6373,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
@@ -6085,17 +6419,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -6106,18 +6436,19 @@ bool AreAliased(Register reg1,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
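The aliasing test is "count of valid registers vs. population count of their bit union": any duplicate collapses two bits into one. A standalone sketch, with a negative code standing in for no_reg and a GCC-style popcount builtin assumed:

    #include <cstdint>
    bool AnyAliased(const int* codes, int n) {
      uint32_t regs = 0;
      int valid = 0;
      for (int i = 0; i < n; ++i) {
        if (codes[i] < 0) continue;  // no_reg
        ++valid;
        regs |= 1u << codes[i];
      }
      return valid != __builtin_popcount(regs);  // duplicates collapse bits
    }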
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size

@@ -6129,7 +6460,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -6147,25 +6478,10 @@ void CodePatcher::Emit(Address addr) {
}
-void CodePatcher::ChangeBranchCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- DCHECK(Assembler::IsBranch(instr));
- uint32_t opcode = Assembler::GetOpcodeField(instr);
- // Currently only the 'eq' and 'ne' cond values are supported and the simple
- // branch instructions (with opcode being the branch type).
- // There are some special cases (see Assembler::IsBranch()) so extending this
- // would be tricky.
- DCHECK(opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL);
- opcode = (cond == eq) ? BEQ : BNE;
- instr = (instr & ~kOpcodeMask) | opcode;
- masm_.emit(instr);
+void CodePatcher::ChangeBranchCondition(Instr current_instr,
+ uint32_t new_opcode) {
+ current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
+ masm_.emit(current_instr);
}
diff --git a/chromium/v8/src/mips64/macro-assembler-mips64.h b/chromium/v8/src/mips64/macro-assembler-mips64.h
index 5dfee07ad96..31ed8a32e11 100644
--- a/chromium/v8/src/mips64/macro-assembler-mips64.h
+++ b/chromium/v8/src/mips64/macro-assembler-mips64.h
@@ -13,17 +13,19 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_v0_Code};
-const Register kReturnRegister1 = {kRegister_v1_Code};
-const Register kJSFunctionRegister = {kRegister_a1_Code};
-const Register kContextRegister = {kRegister_s7_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_v0_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_a7_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_t0_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_t1_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_t2_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_a1_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_a0_Code};
+const Register kReturnRegister0 = {Register::kCode_v0};
+const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kJSFunctionRegister = {Register::kCode_a1};
+const Register kContextRegister = {Register::kCpRegister};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_a7};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
// Forward declaration.
class JumpTarget;
@@ -103,14 +105,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
- Register reg8 = no_reg);
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
// -----------------------------------------------------------------------------
@@ -123,13 +122,13 @@ bool AreAliased(Register reg1,
#endif
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
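Both helpers bottom out in Context::SlotOffset. A sketch of that offset computation under assumed V8 layout constants (FixedArray-style header, 8-byte tagged pointers, heap-object tag of 1 folded into the operand):

    // Sketch only; the constants are assumptions, not quoted from the patch.
    int SlotOffsetSketch(int index) {
      const int kHeaderSize = 16;    // assumed: map + length words
      const int kPointerSize = 8;    // assumed: 64-bit tagged pointer
      const int kHeapObjectTag = 1;  // untagging folded into the offset
      return kHeaderSize + index * kPointerSize - kHeapObjectTag;
    }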
@@ -167,11 +166,8 @@ inline MemOperand CFunctionArgumentOperand(int index) {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
@@ -192,9 +188,9 @@ class MacroAssembler: public Assembler {
Name(target, COND_ARGS, bd); \
}
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+ DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
@@ -231,6 +227,8 @@ class MacroAssembler: public Assembler {
Ret(cond, rs, rt, bd);
}
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
void Branch(Label* L,
Condition cond,
Register rs,
@@ -411,22 +409,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does not
- // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -563,12 +549,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -613,6 +595,12 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -675,6 +663,12 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
+
+ void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
+ void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
void Pref(int32_t hint, const MemOperand& rs);
@@ -810,15 +804,26 @@ class MacroAssembler: public Assembler {
// MIPS64 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
// Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+ void Cvt_d_uw(FPURegister fd, FPURegister fs);
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert unsigned long to double.
+ void Cvt_d_ul(FPURegister fd, FPURegister fs);
+ void Cvt_d_ul(FPURegister fd, Register rs);
+
+ // Convert unsigned long to float.
+ void Cvt_s_ul(FPURegister fd, FPURegister fs);
+ void Cvt_s_ul(FPURegister fd, Register rs);
// Convert double to unsigned long.
void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
@@ -832,6 +837,18 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert double to unsigned long.
+ void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
+ Register result = no_reg);
+ void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
+ Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
+ Register result = no_reg);
+ void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
+ Register result = no_reg);
+
void Trunc_w_d(FPURegister fd, FPURegister fs);
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
@@ -978,8 +995,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -992,7 +1016,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1010,15 +1034,19 @@ class MacroAssembler: public Assembler {
// JavaScript invokes.
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1059,9 +1087,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
@@ -1070,12 +1095,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end of the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// -------------------------------------------------------------------------
// Support functions.
@@ -1255,6 +1279,24 @@ class MacroAssembler: public Assembler {
const Operand& right, Register overflow_dst,
Register scratch);
+ inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void DaddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void DaddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = at);
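The new Dadd/Dsub branch helpers wrap the classic two's-complement overflow predicate; as plain C++:

    #include <cstdint>
    bool DaddOverflows(int64_t left, int64_t right) {
      int64_t res = static_cast<int64_t>(static_cast<uint64_t>(left) +
                                         static_cast<uint64_t>(right));
      // Overflow iff both operands differ in sign from the result.
      return ((left ^ res) & (right ^ res)) < 0;
    }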
@@ -1262,6 +1304,24 @@ class MacroAssembler: public Assembler {
const Operand& right, Register overflow_dst,
Register scratch);
+ inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void DsubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void DsubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
@@ -1311,16 +1371,24 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles, bd);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles, bd);
}
// Convenience function: call an external reference.
@@ -1328,17 +1396,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
int num_arguments,
BranchDelaySlot bd = PROTECT);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
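Call sites shrink accordingly, since the argument count and result size now come from the Runtime::Function table; a hypothetical example:

    // Before: __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
    __ TailCallRuntime(Runtime::kStackGuard);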
@@ -1394,13 +1453,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // set up the function in a1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1588,6 +1640,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1719,21 +1775,39 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments);
- void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void J(Label* L, BranchDelaySlot bdslot);
- void Jal(Label* L, BranchDelaySlot bdslot);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
void Jr(Label* L, BranchDelaySlot bdslot);
void Jalr(Label* L, BranchDelaySlot bdslot);
@@ -1750,8 +1824,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1805,8 +1877,7 @@ class CodePatcher {
DONT_FLUSH
};
- CodePatcher(byte* address,
- int instructions,
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
@@ -1821,7 +1892,7 @@ class CodePatcher {
// Change the condition part of an instruction leaving the rest of the current
// instruction unchanged.
- void ChangeBranchCondition(Condition cond);
+ void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);
private:
byte* address_; // The address of the code being patched.
@@ -1841,6 +1912,7 @@ class CodePatcher {
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
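A hypothetical call site for the reworked CodePatcher API, matching the constructor and ChangeBranchCondition signatures above; validating that the target really is a patchable branch is now the caller's job:

    {
      CodePatcher patcher(isolate, branch_address, 1);  // one instruction
      Instr instr = Assembler::instr_at(branch_address);
      patcher.ChangeBranchCondition(instr, BNE);
    }  // ~CodePatcher flushes the i-cache (FLUSH is the default)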
diff --git a/chromium/v8/src/mips64/simulator-mips64.cc b/chromium/v8/src/mips64/simulator-mips64.cc
index b82b2d9b3c2..7fa96442f96 100644
--- a/chromium/v8/src/mips64/simulator-mips64.cc
+++ b/chromium/v8/src/mips64/simulator-mips64.cc
@@ -146,7 +146,7 @@ void MipsDebugger::Stop(Instruction* instr) {
#else // GENERATED_CODE_COVERAGE
-#define UNSUPPORTED() printf("Unsupported instruction.\n");
+#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
static void InitializeCoverage() {}
@@ -519,7 +519,7 @@ void MipsDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -898,7 +898,12 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumFPURegisters; i++) {
FPUregisters_[i] = 0;
}
- FCSR_ = 0;
+
+ if (kArchVariant == kMips64r6) {
+ FCSR_ = kFCSRNaN2008FlagMask;
+ } else {
+ FCSR_ = 0;
+ }
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
@@ -926,12 +931,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -947,14 +952,13 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -999,9 +1003,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -1256,6 +1261,8 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(double original, double rounded) {
bool ret = false;
+ // The value of INT64_MAX (2^63 - 1) cannot be represented exactly as a
+ // double, so the cast below loads the closest representable value, 2^63,
+ // into max_int64.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1273,7 +1280,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
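The switch from > to >= matters precisely because max_int64 holds 2^63, which is itself out of range for int64_t. A compile-time check of that premise (round-to-nearest conversion assumed):

    #include <cstdint>
    #include <limits>
    static_assert(static_cast<double>(std::numeric_limits<int64_t>::max()) ==
                      9223372036854775808.0,  // 2^63
                  "double(INT64_MAX) rounds up to 2^63");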
@@ -1315,11 +1322,135 @@ bool Simulator::set_fcsr_round_error(float original, float rounded) {
return ret;
}
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // The value of INT64_MAX (2^63 - 1) cannot be represented exactly as a
+ // double, so the cast below loads the closest representable value, 2^63,
+ // into max_int64.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // The value of INT64_MAX (2^63 - 1) cannot be represented exactly as a
+ // double, so the cast below loads the closest representable value, 2^63,
+ // into max_int64.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
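The six helpers above repeat one policy across float/double sources and word/long destinations; condensed into a single sketch with invented names:

    #include <cmath>
    #include <cstdint>
    int64_t InvalidResult(double original, double rounded, bool nan2008,
                          int64_t pos_result, int64_t neg_result) {
      if (!nan2008) return pos_result;     // legacy: one canonical value
      if (std::isnan(original)) return 0;  // NaN2008: NaN converts to 0
      return rounded > 0 ? pos_result : neg_result;  // saturate by sign
    }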
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(float original, float rounded) {
bool ret = false;
+ // The value of INT64_MAX (2^63 - 1) cannot be represented exactly as a
+ // double, so the cast below loads the closest representable value, 2^63,
+ // into max_int64.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1337,7 +1468,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -2259,10 +2390,12 @@ void Simulator::DecodeTypeRegisterSRsType() {
set_fpu_register_float(fd_reg(), -fs);
break;
case SQRT_S:
- set_fpu_register_float(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_S: {
- float result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ float result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_float(fd_reg(), result);
break;
}
@@ -2369,7 +2502,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2379,7 +2512,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2388,7 +2521,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case TRUNC_L_S: { // Mips64r2 instruction.
@@ -2396,7 +2529,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2410,7 +2543,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2425,7 +2558,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t i64 = static_cast<int64_t>(result);
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2434,7 +2567,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2444,7 +2577,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CEIL_W_S: // Round double to word towards positive infinity.
@@ -2453,7 +2586,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CEIL_L_S: { // Mips64r2 instruction.
@@ -2461,7 +2594,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2481,7 +2614,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_float(fd_reg(), result);
}
@@ -2690,7 +2823,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
} else if (fabs(fs) < fabs(ft)) {
result = fs;
} else {
- result = (fs > ft ? fs : ft);
+ result = (fs < ft ? fs : ft);
}
set_fpu_register_double(fd_reg(), result);
}
@@ -2764,10 +2897,12 @@ void Simulator::DecodeTypeRegisterDRsType() {
set_fpu_register_double(fd_reg(), -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_D: {
- double result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ double result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_double(fd_reg(), result);
break;
}
@@ -2803,7 +2938,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2818,7 +2953,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
@@ -2827,7 +2962,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case FLOOR_W_D: // Round double to word towards negative infinity.
@@ -2836,7 +2971,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CEIL_W_D: // Round double to word towards positive infinity.
@@ -2845,7 +2980,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CVT_S_D: // Convert double to float (single).
@@ -2857,7 +2992,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2872,7 +3007,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t i64 = static_cast<int64_t>(result);
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2881,7 +3016,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2890,7 +3025,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2899,7 +3034,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -3184,11 +3319,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
case MFHC1:
set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
- case CTC1:
+ case CTC1: {
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
- FCSR_ = static_cast<uint32_t>(rt());
+ uint32_t reg = static_cast<uint32_t>(rt());
+ if (kArchVariant == kMips64r6) {
+ FCSR_ = reg | kFCSRNaN2008FlagMask;
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ FCSR_ = reg & ~kFCSRNaN2008FlagMask;
+ }
break;
+ }
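Isolated, the FCSR write policy above is: r6 pins the NaN2008 bit high, r2 pins it low, regardless of what the guest writes:

    #include <cstdint>
    uint32_t NextFcsr(uint32_t written, bool is_r6, uint32_t nan2008_mask) {
      return is_r6 ? (written | nan2008_mask) : (written & ~nan2008_mask);
    }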
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg(), 0);
@@ -3352,6 +3494,20 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case DSRAV:
SetResult(rd_reg(), rt() >> rs());
break;
+ case LSA: {
+ DCHECK(kArchVariant == kMips64r6);
+ int8_t sa = lsa_sa() + 1;
+ int32_t _rt = static_cast<int32_t>(rt());
+ int32_t _rs = static_cast<int32_t>(rs());
+ int32_t res = _rs << sa;
+ res += _rt;
+ SetResult(rd_reg(), static_cast<int64_t>(res));
+ break;
+ }
+ case DLSA:
+ DCHECK(kArchVariant == kMips64r6);
+ SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt());
+ break;
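Per the decode above, the two-bit sa field encodes a shift of sa + 1, so the reachable scale factors are 2, 4, 8 and 16:

    #include <cstdint>
    // Sketch of the DLSA result computed above.
    int64_t DlsaSketch(int64_t rs, int64_t rt, int sa_field) {  // sa_field: 0..3
      return (rs << (sa_field + 1)) + rt;
    }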
case MFHI: // MFHI == CLZ on R6.
if (kArchVariant != kMips64r6) {
DCHECK(sa() == 0);
@@ -3364,8 +3520,17 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
SetResult(rd_reg(), alu_out);
break;
- case MFLO:
- SetResult(rd_reg(), get_register(LO));
+ case MFLO: // MFLO == DCLZ on R6.
+ if (kArchVariant != kMips64r6) {
+ DCHECK(sa() == 0);
+ alu_out = get_register(LO);
+ } else {
+ // MIPS spec: If no bits were set in GPR rs(), the result written to
+ // GPR rd() is 64.
+ DCHECK(sa() == 1);
+ alu_out = base::bits::CountLeadingZeros64(static_cast<int64_t>(rs_u()));
+ }
+ SetResult(rd_reg(), alu_out);
break;
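A reference loop for the DCLZ result computed above; an all-zero input yields 64, matching the spec note in the comment:

    #include <cstdint>
    int Dclz(uint64_t v) {
      int n = 0;
      for (uint64_t bit = 1ULL << 63; bit != 0 && (v & bit) == 0; bit >>= 1)
        ++n;
      return n;  // 64 when v == 0
    }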
// Instructions using HI and LO registers.
case MULT: { // MULT == D_MUL_MUH.
@@ -3393,8 +3558,22 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case MULTU:
u64hilo = static_cast<uint64_t>(rs_u() & 0xffffffff) *
static_cast<uint64_t>(rt_u() & 0xffffffff);
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ if (kArchVariant != kMips64r6) {
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ } else {
+ switch (sa()) {
+ case MUL_OP:
+ set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ }
break;
case DMULT: // DMULT == D_MUL_MUH.
if (kArchVariant != kMips64r6) {
@@ -3462,17 +3641,61 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
break;
}
case DIVU:
- if (rt_u() != 0) {
- uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
- uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
- set_register(LO, rs_u_32 / rt_u_32);
- set_register(HI, rs_u_32 % rt_u_32);
+ switch (kArchVariant) {
+ case kMips64r6: {
+ uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
+ uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
+ switch (get_instr()->SaValue()) {
+ case DIV_OP:
+ if (rt_u_32 != 0) {
+ set_register(rd_reg(), rs_u_32 / rt_u_32);
+ }
+ break;
+ case MOD_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u_32 % rt_u_32);
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } break;
+ default: {
+ if (rt_u() != 0) {
+ uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
+ uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
+ set_register(LO, rs_u_32 / rt_u_32);
+ set_register(HI, rs_u_32 % rt_u_32);
+ }
+ }
}
break;
case DDIVU:
- if (rt_u() != 0) {
- set_register(LO, rs_u() / rt_u());
- set_register(HI, rs_u() % rt_u());
+ switch (kArchVariant) {
+ case kMips64r6: {
+ switch (get_instr()->SaValue()) {
+ case DIV_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u() / rt_u());
+ }
+ break;
+ case MOD_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u() % rt_u());
+ }
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } break;
+ default: {
+ if (rt_u() != 0) {
+ set_register(LO, rs_u() / rt_u());
+ set_register(HI, rs_u() % rt_u());
+ }
+ }
}
break;
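The r6 divide dispatch reduces to: the sa field selects quotient vs. remainder, and a zero divisor leaves the destination untouched. As a sketch:

    #include <cstdint>
    bool DivuR6(uint64_t rs, uint64_t rt, bool is_mod, uint64_t* rd) {
      if (rt == 0) return false;  // rd not written
      *rd = is_mod ? rs % rt : rs / rt;
      return true;
    }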
case ADD:
@@ -3607,7 +3830,13 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
// MIPS32 spec: If no bits were set in GPR rs(), the result written to
// GPR rd is 32.
alu_out = base::bits::CountLeadingZeros32(static_cast<uint32_t>(rs_u()));
- set_register(rd_reg(), alu_out);
+ SetResult(rd_reg(), alu_out);
+ break;
+ case DCLZ:
+ // MIPS64 spec: If no bits were set in GPR rs(), the result written to
+ // GPR rd is 64.
+ alu_out = base::bits::CountLeadingZeros64(static_cast<uint64_t>(rs_u()));
+ SetResult(rd_reg(), alu_out);
break;
default:
alu_out = 0x12345678;
@@ -3619,7 +3848,19 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
void Simulator::DecodeTypeRegisterSPECIAL3() {
int64_t alu_out;
switch (get_instr()->FunctionFieldRaw()) {
- case INS: { // Mips32r2 instruction.
+ case INS: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int32_t>((rt_u() & ~(mask << lsb)) |
+ ((rs_u() & mask) << lsb));
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINS: { // Mips64r2 instruction.
// Interpret rd field as 5-bit msb of insert.
uint16_t msb = rd_reg();
// Interpret sa field as 5-bit lsb of insert.
@@ -3630,7 +3871,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rt_reg(), alu_out);
break;
}
- case EXT: { // Mips32r2 instruction.
+ case EXT: { // Mips64r2 instruction.
// Interpret rd field as 5-bit msb of extract.
uint16_t msb = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
@@ -3641,7 +3882,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rt_reg(), alu_out);
break;
}
- case DEXT: { // Mips32r2 instruction.
+ case DEXT: { // Mips64r2 instruction.
// Interpret rd field as 5-bit msb of extract.
uint16_t msb = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
@@ -3652,6 +3893,28 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rt_reg(), alu_out);
break;
}
+ case DEXTM: {
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa();
+ uint16_t size = msb + 33;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DEXTU: {
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa() + 32;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
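EXT/DEXT/DEXTM/DEXTU share one extract core and differ only in how lsb and size are decoded from the instruction fields. A sketch, with a guard for the size == 64 case that DEXTM can reach (where 1ULL << 64 would be undefined):

    #include <cstdint>
    uint64_t ExtractBits(uint64_t rs, unsigned lsb, unsigned size) {
      uint64_t mask = size >= 64 ? ~0ULL : (1ULL << size) - 1;
      return (rs >> lsb) & mask;
    }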
case BSHFL: {
int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
switch (sa) {
@@ -3832,27 +4095,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
-// Branch instructions common part.
-#define BranchAndLinkHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- set_register(31, current_pc + kBranchReturnOffset); \
- } else { \
- next_pc = current_pc + kBranchReturnOffset; \
- }
-
-
-#define BranchHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- } else { \
- next_pc = current_pc + kBranchReturnOffset; \
- }
-
-
-// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+// Type 2: instructions using a 16-, 21- or 26-bit immediate (e.g. beq, beqc).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
@@ -3863,21 +4106,15 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int64_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
int32_t imm18 = instr->Imm18Value();
- int32_t imm21 = instr->Imm21Value();
- int32_t imm26 = instr->Imm26Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
- int64_t ft = get_fpu_register(ft_reg);
// Zero extended immediate.
uint64_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
int64_t se_imm16 = imm16;
int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xfffffffffffc0000 : 0);
- int64_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfffffffffc000000 : 0);
- // Get current pc.
- int64_t current_pc = get_pc();
// Next pc.
int64_t next_pc = bad_ra;
@@ -3892,7 +4129,57 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Alignment for 32-bit integers used in LWL, LWR, etc.
const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
- // ---------- Configuration (and execution for REGIMM).
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](
+ bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int64_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int64_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
+ int bits) {
+ int64_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + Instruction::kInstrSize);
+ }
+ };
+
+ auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+ int64_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
+ }
+ };
+
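The shift pair inside the compact helpers is the standard sign-extension idiom for a bits-wide immediate (arithmetic right shift of signed values assumed, as on the toolchains V8 targets):

    #include <cstdint>
    int32_t SignExtend(uint32_t raw, int bits) {
      int32_t v = static_cast<int32_t>(raw << (32 - bits));
      return v >> (32 - bits);  // replicate the sign bit
    }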
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
@@ -3902,32 +4189,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
uint32_t cc_value = test_fcsr_bit(fcsr_cc);
bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(do_branch);
break;
}
case BC1EQZ:
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (!(ft & 0x1)) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
break;
case BC1NEZ:
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (ft & 0x1) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
default:
UNREACHABLE();
@@ -3948,6 +4217,12 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGEZAL:
BranchAndLinkHelper(rs >= 0);
break;
+ case DAHI:
+ SetResult(rs_reg, rs + (se_imm16 << 32));
+ break;
+ case DATI:
+ SetResult(rs_reg, rs + (se_imm16 << 48));
+ break;
default:
UNREACHABLE();
}
@@ -3961,55 +4236,156 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BNE:
BranchHelper(rs != rt);
break;
- case BLEZ:
- BranchHelper(rs <= 0);
- break;
- case BGTZ:
- BranchHelper(rs > 0);
- break;
- case POP66: {
- if (rs_reg) { // BEQZC
- int32_t se_imm21 =
- static_cast<int32_t>(imm21 << (kOpcodeBits + kRsBits));
- se_imm21 = se_imm21 >> (kOpcodeBits + kRsBits);
- if (rs == 0)
- next_pc = current_pc + 4 + (se_imm21 << 2);
- else
- next_pc = current_pc + 4;
+ case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZALC
+ BranchAndLinkCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZALC
+ BranchAndLinkCompactHelper(rt >= 0, 16);
+ } else { // BGEUC
+ BranchCompactHelper(
+ static_cast<uint64_t>(rs) >= static_cast<uint64_t>(rt), 16);
+ }
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ break;
+ case POP07: // BGTZALC, BLTZALC, BLTUC, BGTZ (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZALC
+ BranchAndLinkCompactHelper(rt > 0, 16);
+ } else {
+ if (rt_reg == rs_reg) { // BLTZALC
+ BranchAndLinkCompactHelper(rt < 0, 16);
+ } else { // BLTUC
+ BranchCompactHelper(
+ static_cast<uint64_t>(rs) < static_cast<uint64_t>(rt), 16);
+ }
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ break;
+ case POP26: // BLEZC, BGEZC, BGEC/BLEC / BLEZL (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZC
+ BranchCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZC
+ BranchCompactHelper(rt >= 0, 16);
+ } else { // BGEC/BLEC
+ BranchCompactHelper(rs >= rt, 16);
+ }
+ }
+ }
+ } else { // BLEZL
+ BranchAndLinkHelper(rs <= 0);
+ }
+ break;
+ case POP27: // BGTZC, BLTZC, BLTC/BGTC / BGTZL (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZC
+ BranchCompactHelper(rt > 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BLTZC
+ BranchCompactHelper(rt < 0, 16);
+ } else { // BLTC/BGTC
+ BranchCompactHelper(rs < rt, 16);
+ }
+ }
+ }
+ } else { // BGTZL
+ BranchAndLinkHelper(rs > 0);
+ }
+ break;
+ case POP66: // BEQZC, JIC
+ if (rs_reg != 0) { // BEQZC
+ BranchCompactHelper(rs == 0, 21);
} else { // JIC
next_pc = rt + imm16;
}
break;
- }
- case BC: {
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case POP76: // BNEZC, JIALC
+ if (rs_reg != 0) { // BNEZC
+ BranchCompactHelper(rs != 0, 21);
+ } else { // JIALC
+ int64_t current_pc = get_pc();
+ set_register(31, current_pc + Instruction::kInstrSize);
+ next_pc = rt + imm16;
+ }
break;
- }
- case BALC: {
- set_register(31, current_pc + 4);
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case BC:
+ BranchCompactHelper(true, 26);
break;
- }
- // ------------- Arithmetic instructions.
- case ADDI:
- case DADDI:
- if (HaveSameSign(rs, se_imm16)) {
- if (rs > 0) {
- if (rs > Registers::kMaxValue - se_imm16) {
- SignalException(kIntegerOverflow);
+ case BALC:
+ BranchAndLinkCompactHelper(true, 26);
+ break;
+ case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rs_reg >= rt_reg) { // BOVC
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ BranchCompactHelper(rs > Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs < Registers::kMinValue - rt, 16);
+ }
}
- } else if (rs < 0) {
- if (rs < Registers::kMinValue - se_imm16) {
- SignalException(kIntegerUnderflow);
+ } else {
+ if (rs_reg == 0) { // BEQZALC
+ BranchAndLinkCompactHelper(rt == 0, 16);
+ } else { // BEQC
+ BranchCompactHelper(rt == rs, 16);
+ }
+ }
+ } else { // ADDI
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ if (rs > Registers::kMaxValue - se_imm16) {
+ SignalException(kIntegerOverflow);
+ }
+ } else if (rs < 0) {
+ if (rs < Registers::kMinValue - se_imm16) {
+ SignalException(kIntegerUnderflow);
+ }
}
}
+ SetResult(rt_reg, rs + se_imm16);
}
- SetResult(rt_reg, rs + se_imm16);
break;
+ case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rs_reg >= rt_reg) { // BNVC
+ if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) {
+ BranchCompactHelper(true, 16);
+ } else {
+ if (rs > 0) {
+ BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs >= Registers::kMinValue - rt, 16);
+ }
+ }
+ } else {
+ if (rs_reg == 0) { // BNEZALC
+ BranchAndLinkCompactHelper(rt != 0, 16);
+ } else { // BNEC
+ BranchCompactHelper(rt != rs, 16);
+ }
+ }
+ }
+ break;
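BOVC/BNVC evaluate a signed-add overflow predicate over two registers; the branch conditions above reduce to:

    #include <cstdint>
    bool AddOverflows(int64_t rs, int64_t rt, int64_t max_v, int64_t min_v) {
      if (rs > 0 && rt > 0) return rs > max_v - rt;
      if (rs < 0 && rt < 0) return rs < min_v - rt;
      return false;  // mixed signs (or a zero) cannot overflow
    }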
+ // ------------- Arithmetic instructions.
case ADDIU: {
int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
// Sign-extend result of 32bit operation into 64bit register.
@@ -4034,12 +4410,24 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case XORI:
SetResult(rt_reg, rs ^ oe_imm16);
break;
- case LUI: {
- int32_t alu32_out = static_cast<int32_t>(oe_imm16 << 16);
- // Sign-extend result of 32bit operation into 64bit register.
- SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+ case LUI:
+ if (rs_reg != 0) {
+ // AUI instruction.
+ DCHECK(kArchVariant == kMips64r6);
+ int32_t alu32_out = static_cast<int32_t>(rs + (se_imm16 << 16));
+ SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+ } else {
+ // LUI instruction.
+ int32_t alu32_out = static_cast<int32_t>(oe_imm16 << 16);
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+ }
+ break;
+ case DAUI:
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs_reg != 0);
+ SetResult(rt_reg, rs + (se_imm16 << 16));
break;
- }
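
Three flavors now share this opcode row, so a worked example helps: with rs_reg == 0 the encoding is plain LUI; a nonzero rs selects the r6 AUI, a 32-bit add of the shifted immediate whose result is then sign-extended; DAUI performs the same add across the full 64-bit register. Illustrative values:

    // lui  rt, 0x1234      ->  rt = sign_ext32(0x1234 << 16) = 0x0000000012340000
    // aui  rt, rs, 0x1234  ->  rt = sign_ext32(int32(rs + 0x12340000))
    // daui rt, rs, 0x1234  ->  rt = rs + 0x0000000012340000  (no truncation)
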
// ------------- Memory instructions.
case LB:
set_register(rt_reg, ReadB(rs + se_imm16));
@@ -4132,22 +4520,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
break;
- // ------------- JIALC and BNEZC instructions.
- case POP76: {
- // Next pc.
- next_pc = rt + se_imm16;
- // The instruction after the jump is NOT executed.
- uint16_t pc_increment = Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + pc_increment);
- }
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
+ int32_t imm21 = instr->Imm21Value();
+ int64_t current_pc = get_pc();
uint8_t rt = (imm21 >> kImm16Bits);
switch (rt) {
case ALUIPC:
@@ -4217,7 +4594,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
  // We don't check for end_sim_pc. First, it should not be met, as the current
  // pc is valid. Second, a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -4227,9 +4604,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
-#undef BranchHelper
-#undef BranchAndLinkHelper
-
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
@@ -4320,7 +4694,7 @@ void Simulator::Execute() {
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
- if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ if (icount_ == static_cast<uint64_t>(::v8::internal::FLAG_stop_sim_at)) {
MipsDebugger dbg(this);
dbg.Debug();
} else {
diff --git a/chromium/v8/src/mips64/simulator-mips64.h b/chromium/v8/src/mips64/simulator-mips64.h
index e45cbd449e4..1d156d860f6 100644
--- a/chromium/v8/src/mips64/simulator-mips64.h
+++ b/chromium/v8/src/mips64/simulator-mips64.h
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
@@ -43,9 +43,10 @@ typedef int (*mips_regexp_matcher)(String* input,
void* return_address,
Isolate* isolate);
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ NULL, p8))
#else // O32 Abi.
@@ -60,9 +61,10 @@ typedef int (*mips_regexp_matcher)(String* input,
int32_t direct_call,
Isolate* isolate);
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+ p7, p8))
#endif // MIPS_ABI_N64
@@ -77,14 +79,17 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
@@ -197,6 +202,12 @@ class Simulator {
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
@@ -368,6 +379,7 @@ class Simulator {
inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
inline int32_t sa() const { return currentInstr_->SaValue(); }
+ inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
set_register(rd_reg, alu_out);
@@ -380,6 +392,18 @@ class Simulator {
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Compact branch guard.
+ void CheckForbiddenSlot(int64_t current_pc) {
+ Instruction* instr_after_compact_branch =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ }
+ }
+
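
CheckForbiddenSlot enforces the r6 rule that the slot after a compact branch (the forbidden slot) must not hold another control-transfer instruction. IsForbiddenAfterBranch itself is declared on Instruction outside this diff; a plausible shape, for orientation only (the helper predicates are assumptions):

    // Sketch; the real predicate lives with the instruction decoding code.
    bool Instruction::IsForbiddenAfterBranch() const {
      // Branches and jumps may occupy neither a branch delay slot
      // nor a compact branch's forbidden slot.
      return IsBranchInstr() || IsJumpInstr();  // assumed helpers
    }
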
// Stop helper functions.
bool IsWatchpoint(uint64_t code);
void PrintWatchpoint(uint64_t code);
@@ -402,7 +426,7 @@ class Simulator {
return;
}
- if (instr->IsForbiddenInBranchDelay()) {
+ if (instr->IsForbiddenAfterBranch()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
instr->OpcodeValue());
@@ -429,7 +453,8 @@ class Simulator {
void SignalException(Exception e);
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -484,24 +509,24 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, reinterpret_cast<int64_t*>(p0), \
- reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
+ FUNCTION_ADDR(entry), 5, reinterpret_cast<int64_t*>(p0), \
+ reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
#ifdef MIPS_ABI_N64
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>(Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, p0, p1, p2, p3, p4, \
- reinterpret_cast<int64_t*>(p5), p6, p7, NULL, \
- p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ static_cast<int>(Simulator::current(isolate)->Call( \
+ entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
+ NULL, p8))
#else // Must be O32 Abi.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>( \
- Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ static_cast<int>(Simulator::current(isolate)->Call( \
+ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#endif // MIPS_ABI_N64
@@ -516,17 +541,19 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
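
The recurring change in this header is threading the Isolate through explicitly instead of resolving it with Isolate::Current() inside the macros; the non-simulator variants simply USE(isolate) the extra parameter. Call sites change shape accordingly (sketch; entry and p0..p4 stand for whatever the caller already has in scope):

    // One TLS lookup less per call: the caller supplies the isolate.
    Object* result =
        CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4);
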
diff --git a/chromium/v8/src/objects-body-descriptors-inl.h b/chromium/v8/src/objects-body-descriptors-inl.h
new file mode 100644
index 00000000000..ba3c4be52fd
--- /dev/null
+++ b/chromium/v8/src/objects-body-descriptors-inl.h
@@ -0,0 +1,565 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#define V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+
+#include "src/objects-body-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+template <int start_offset>
+int FlexibleBodyDescriptor<start_offset>::SizeOf(Map* map, HeapObject* object) {
+ return object->SizeFromMap(map);
+}
+
+
+bool BodyDescriptorBase::IsValidSlotImpl(HeapObject* obj, int offset) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ return true;
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ return helper.IsTagged(offset);
+ }
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateBodyImpl(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ IteratePointers(obj, start_offset, end_offset, v);
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kPointerSize) &&
+ IsAligned(end_offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers(obj, offset, end_of_region_offset, v);
+ }
+ offset = end_of_region_offset;
+ }
+ }
+}
+
+
+template <typename StaticVisitor>
+void BodyDescriptorBase::IterateBodyImpl(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ IteratePointers<StaticVisitor>(heap, obj, start_offset, end_offset);
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kPointerSize) &&
+ IsAligned(end_offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers<StaticVisitor>(heap, obj, offset, end_of_region_offset);
+ }
+ offset = end_of_region_offset;
+ }
+ }
+}
+
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IteratePointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v) {
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
+}
+
+
+template <typename StaticVisitor>
+void BodyDescriptorBase::IteratePointers(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset) {
+ StaticVisitor::VisitPointers(heap, obj,
+ HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
+}
+
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IteratePointer(HeapObject* obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitPointer(HeapObject::RawField(obj, offset));
+}
+
+
+template <typename StaticVisitor>
+void BodyDescriptorBase::IteratePointer(Heap* heap, HeapObject* obj,
+ int offset) {
+ StaticVisitor::VisitPointer(heap, obj, HeapObject::RawField(obj, offset));
+}
+
+
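
IteratePointers is the primitive the descriptors below build on: it hands a [start_offset, end_offset) slice of the object to the visitor as a raw Object** range. A minimal stateful visitor makes the contract concrete (assuming the usual virtual ObjectVisitor::VisitPointers(Object**, Object**) hook from src/objects.h):

    // Sketch: counts the tagged slots a body descriptor reports.
    class SlotCountingVisitor : public ObjectVisitor {
     public:
      void VisitPointers(Object** start, Object** end) override {
        count_ += static_cast<int>(end - start);
      }
      int count() const { return count_; }

     private:
      int count_ = 0;
    };
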
+// Iterates the function object according to the visiting policy.
+template <JSFunction::BodyVisitingPolicy body_visiting_policy>
+class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kNonWeakFieldsEndOffset == kCodeEntryOffset);
+ STATIC_ASSERT(kCodeEntryOffset + kPointerSize == kNextFunctionLinkOffset);
+ STATIC_ASSERT(kNextFunctionLinkOffset + kPointerSize == kSize);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ if (offset < kSize) return true;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOffset, kNonWeakFieldsEndOffset, v);
+
+ if (body_visiting_policy & kVisitCodeEntry) {
+ v->VisitCodeEntry(obj->address() + kCodeEntryOffset);
+ }
+
+ if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers(obj, kNextFunctionLinkOffset, kSize, v);
+ }
+ IterateBodyImpl(obj, kSize, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kNonWeakFieldsEndOffset);
+
+ if (body_visiting_policy & kVisitCodeEntry) {
+ StaticVisitor::VisitCodeEntry(heap, obj,
+ obj->address() + kCodeEntryOffset);
+ }
+
+ if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers<StaticVisitor>(heap, obj, kNextFunctionLinkOffset, kSize);
+ }
+ IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
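
BodyDescriptorImpl is instantiated once per visiting policy; the policy bits tested above (kVisitCodeEntry, kVisitNextFunction) belong to JSFunction in objects.h. The combined enumerator names below are assumptions about how the bindings are spelled there:

    // Assumed bindings in objects.h, shown for orientation only:
    typedef BodyDescriptorImpl<kVisitCodeEntryAndNextFunction> BodyDescriptor;
    typedef BodyDescriptorImpl<kSkipCodeEntryAndNextFunction>
        BodyDescriptorWeakCode;  // GC variants that treat code/links as weak
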
+
+
+class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
+ STATIC_ASSERT(kBackingStoreOffset + kPointerSize == kBitFieldSlot);
+ STATIC_ASSERT(kBitFieldSlot + kPointerSize == kSize);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ if (offset < kBackingStoreOffset) return true;
+ if (offset < kSize) return false;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOffset, kBackingStoreOffset, v);
+ IterateBodyImpl(obj, kSize, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kBackingStoreOffset);
+ IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+
+class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return offset == kConstantPoolOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kConstantPoolOffset, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointer<StaticVisitor>(heap, obj, kConstantPoolOffset);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return reinterpret_cast<BytecodeArray*>(obj)->BytecodeArraySize();
+ }
+};
+
+
+class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return offset == kBasePointerOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kBasePointerOffset, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointer<StaticVisitor>(heap, obj, kBasePointerOffset);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ }
+};
+
+
+template <JSWeakCollection::BodyVisitingPolicy body_visiting_policy>
+class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kTableOffset + kPointerSize == kNextOffset);
+ STATIC_ASSERT(kNextOffset + kPointerSize == kSize);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ if (body_visiting_policy == kVisitStrong) {
+ IterateBodyImpl(obj, kPropertiesOffset, object_size, v);
+ } else {
+ IteratePointers(obj, kPropertiesOffset, kTableOffset, v);
+ IterateBodyImpl(obj, kSize, object_size, v);
+ }
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ if (body_visiting_policy == kVisitStrong) {
+ IterateBodyImpl<StaticVisitor>(heap, obj, kPropertiesOffset, object_size);
+ } else {
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kTableOffset);
+ IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+
+class Foreign::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ v->VisitExternalReference(reinterpret_cast<Address*>(
+ HeapObject::RawField(obj, kForeignAddressOffset)));
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ StaticVisitor::VisitExternalReference(reinterpret_cast<Address*>(
+ HeapObject::RawField(obj, kForeignAddressOffset)));
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+};
+
+
+class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ v->VisitExternalOneByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ StaticVisitor::VisitExternalOneByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+};
+
+
+class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ typedef v8::String::ExternalStringResource Resource;
+ v->VisitExternalTwoByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ typedef v8::String::ExternalStringResource Resource;
+ StaticVisitor::VisitExternalTwoByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+};
+
+
+class Code::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kRelocationInfoOffset + kPointerSize == kHandlerTableOffset);
+ STATIC_ASSERT(kHandlerTableOffset + kPointerSize ==
+ kDeoptimizationDataOffset);
+ STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
+ kTypeFeedbackInfoOffset);
+ STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize == kNextCodeLinkOffset);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ // Slots in code can't be invalid because we never trim code objects.
+ return true;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kDebugBreakSlotMask;
+
+ IteratePointers(obj, kRelocationInfoOffset, kNextCodeLinkOffset, v);
+ v->VisitNextCodeLink(HeapObject::RawField(obj, kNextCodeLinkOffset));
+
+ RelocIterator it(reinterpret_cast<Code*>(obj), mode_mask);
+ Isolate* isolate = obj->GetIsolate();
+ for (; !it.done(); it.next()) {
+ it.rinfo()->Visit(isolate, v);
+ }
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBody(obj, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kDebugBreakSlotMask;
+
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kRelocationInfoOffset,
+ kNextCodeLinkOffset);
+ StaticVisitor::VisitNextCodeLink(
+ heap, HeapObject::RawField(obj, kNextCodeLinkOffset));
+
+ RelocIterator it(reinterpret_cast<Code*>(obj), mode_mask);
+ for (; !it.done(); it.next()) {
+ it.rinfo()->template Visit<StaticVisitor>(heap);
+ }
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ IterateBody<StaticVisitor>(obj);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return reinterpret_cast<Code*>(object)->CodeSize();
+ }
+};
+
+
+template <typename Op, typename ReturnType, typename T1, typename T2,
+ typename T3>
+ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
+ if (type < FIRST_NONSTRING_TYPE) {
+ switch (type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ return ReturnType();
+ case kConsStringTag:
+ return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3);
+ case kSlicedStringTag:
+ return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3);
+ case kExternalStringTag:
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
+ return Op::template apply<ExternalOneByteString::BodyDescriptor>(
+ p1, p2, p3);
+ } else {
+ return Op::template apply<ExternalTwoByteString::BodyDescriptor>(
+ p1, p2, p3);
+ }
+ }
+ UNREACHABLE();
+ return ReturnType();
+ }
+
+ switch (type) {
+ case FIXED_ARRAY_TYPE:
+ return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3);
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return ReturnType();
+ case TRANSITION_ARRAY_TYPE:
+ return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
+ case JS_OBJECT_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3);
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3);
+ case JS_ARRAY_BUFFER_TYPE:
+ return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3);
+ case JS_FUNCTION_TYPE:
+ return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3);
+ case ODDBALL_TYPE:
+ return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3);
+ case JS_PROXY_TYPE:
+ return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3);
+ case FOREIGN_TYPE:
+ return Op::template apply<Foreign::BodyDescriptor>(p1, p2, p3);
+ case MAP_TYPE:
+ return Op::template apply<Map::BodyDescriptor>(p1, p2, p3);
+ case CODE_TYPE:
+ return Op::template apply<Code::BodyDescriptor>(p1, p2, p3);
+ case CELL_TYPE:
+ return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3);
+ case PROPERTY_CELL_TYPE:
+ return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3);
+ case WEAK_CELL_TYPE:
+ return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3);
+ case SYMBOL_TYPE:
+ return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3);
+ case BYTECODE_ARRAY_TYPE:
+ return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3);
+
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case FILLER_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
+ return ReturnType();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return Op::template apply<FixedTypedArrayBase::BodyDescriptor>(p1, p2, p3);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case SHARED_FUNCTION_INFO_TYPE: {
+ return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3);
+ }
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ if (type == ALLOCATION_SITE_TYPE) {
+ return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3);
+ } else {
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
+ }
+ default:
+ PrintF("Unknown type: %d\n", type);
+ UNREACHABLE();
+ return ReturnType();
+ }
+}
+
+
+template <typename ObjectVisitor>
+void HeapObject::IterateFast(ObjectVisitor* v) {
+ BodyDescriptorBase::IteratePointer(this, kMapOffset, v);
+ IterateBodyFast(v);
+}
+
+
+template <typename ObjectVisitor>
+void HeapObject::IterateBodyFast(ObjectVisitor* v) {
+ Map* m = map();
+ IterateBodyFast(m->instance_type(), SizeFromMap(m), v);
+}
+
+
+struct CallIterateBody {
+ template <typename BodyDescriptor, typename ObjectVisitor>
+ static void apply(HeapObject* obj, int object_size, ObjectVisitor* v) {
+ BodyDescriptor::IterateBody(obj, object_size, v);
+ }
+};
+
+template <typename ObjectVisitor>
+void HeapObject::IterateBodyFast(InstanceType type, int object_size,
+ ObjectVisitor* v) {
+ BodyDescriptorApply<CallIterateBody, void>(type, this, object_size, v);
+}
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
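
Taken together, the new header gives heap visitors one dispatch point: HeapObject::IterateFast visits the map slot, then routes through BodyDescriptorApply to the type-specific descriptor. Combined with the sketch visitor from earlier, usage is simply:

    // Sketch: count every tagged slot in an object, map word included.
    SlotCountingVisitor visitor;
    heap_object->IterateFast(&visitor);
    int tagged_slots = visitor.count();
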
diff --git a/chromium/v8/src/objects-body-descriptors.h b/chromium/v8/src/objects-body-descriptors.h
new file mode 100644
index 00000000000..91cb8883be8
--- /dev/null
+++ b/chromium/v8/src/objects-body-descriptors.h
@@ -0,0 +1,141 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BODY_DESCRIPTORS_H_
+#define V8_OBJECTS_BODY_DESCRIPTORS_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// This is the base class for objects' body descriptors.
+//
+// Each BodyDescriptor subclass must provide the following methods:
+//
+// 1) Returns true if the object contains a tagged value at the given offset.
+//    It is used for invalid-slot filtering. If the offset points outside
+//    of the object or to the map word, the result is UNDEFINED (!!!).
+//
+// static bool IsValidSlot(HeapObject* obj, int offset);
+//
+//
+// 2) Iterate object's body using stateful object visitor.
+//
+// template <typename ObjectVisitor>
+// static inline void IterateBody(HeapObject* obj, int object_size,
+// ObjectVisitor* v);
+//
+//
+// 3) Iterate object's body using stateless object visitor.
+//
+// template <typename StaticVisitor>
+// static inline void IterateBody(HeapObject* obj, int object_size);
+//
+class BodyDescriptorBase BASE_EMBEDDED {
+ public:
+ template <typename ObjectVisitor>
+ static inline void IteratePointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ static inline void IteratePointers(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset);
+
+ template <typename ObjectVisitor>
+ static inline void IteratePointer(HeapObject* obj, int offset,
+ ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ static inline void IteratePointer(Heap* heap, HeapObject* obj, int offset);
+
+ protected:
+ // Returns true for all header and internal fields.
+ static inline bool IsValidSlotImpl(HeapObject* obj, int offset);
+
+ // Treats all header and internal fields in the range as tagged.
+ template <typename ObjectVisitor>
+ static inline void IterateBodyImpl(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ // Treats all header and internal fields in the range as tagged.
+ template <typename StaticVisitor>
+ static inline void IterateBodyImpl(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset);
+};
+
+
+// This class describes a body of an object of a fixed size
+// in which all pointer fields are located in the [start_offset, end_offset)
+// interval.
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+ static const int kEndOffset = end_offset;
+ static const int kSize = size;
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return offset >= kStartOffset && offset < kEndOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ IterateBodyImpl(obj, start_offset, end_offset, v);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBody(obj, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj) {
+ Heap* heap = obj->GetHeap();
+ IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, end_offset);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ IterateBody(obj);
+ }
+};
+
+
+// This class describes a body of an object of a variable size
+// in which all pointer fields are located in the [start_offset, object_size)
+// interval.
+template <int start_offset>
+class FlexibleBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ if (offset < kStartOffset) return false;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBodyImpl(obj, start_offset, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object);
+};
+
+
+typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_BODY_DESCRIPTORS_H_
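
The two templates cover the common layouts: FixedBodyDescriptor for objects whose tagged region is a compile-time constant range, FlexibleBodyDescriptor for objects whose tagged region runs to a dynamic object size. A hypothetical object with two tagged fields directly after the header would describe itself like this (Example and its offsets are invented for illustration):

    class Example : public HeapObject {
     public:
      static const int kFirstOffset = HeapObject::kHeaderSize;
      static const int kSecondOffset = kFirstOffset + kPointerSize;
      static const int kSize = kSecondOffset + kPointerSize;

      // Both tagged fields lie in [kFirstOffset, kSize).
      typedef FixedBodyDescriptor<kFirstOffset, kSize, kSize> BodyDescriptor;
    };
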
diff --git a/chromium/v8/src/objects-debug.cc b/chromium/v8/src/objects-debug.cc
index 3ce7672c1cb..b6dd42553cf 100644
--- a/chromium/v8/src/objects-debug.cc
+++ b/chromium/v8/src/objects-debug.cc
@@ -76,6 +76,9 @@ void HeapObject::HeapObjectVerify() {
case BYTECODE_ARRAY_TYPE:
BytecodeArray::cast(this)->BytecodeArrayVerify();
break;
+ case TRANSITION_ARRAY_TYPE:
+ TransitionArray::cast(this)->TransitionArrayVerify();
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
@@ -96,6 +99,7 @@ void HeapObject::HeapObjectVerify() {
break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_PROMISE_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
case JS_GENERATOR_OBJECT_TYPE:
@@ -110,6 +114,9 @@ void HeapObject::HeapObjectVerify() {
case JS_DATE_TYPE:
JSDate::cast(this)->JSDateVerify();
break;
+ case JS_BOUND_FUNCTION_TYPE:
+ JSBoundFunction::cast(this)->JSBoundFunctionVerify();
+ break;
case JS_FUNCTION_TYPE:
JSFunction::cast(this)->JSFunctionVerify();
break;
@@ -119,9 +126,6 @@ void HeapObject::HeapObjectVerify() {
case JS_GLOBAL_OBJECT_TYPE:
JSGlobalObject::cast(this)->JSGlobalObjectVerify();
break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
- break;
case CELL_TYPE:
Cell::cast(this)->CellVerify();
break;
@@ -163,9 +167,6 @@ void HeapObject::HeapObjectVerify() {
case JS_PROXY_TYPE:
JSProxy::cast(this)->JSProxyVerify();
break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::cast(this)->JSFunctionProxyVerify();
- break;
case FOREIGN_TYPE:
Foreign::cast(this)->ForeignVerify();
break;
@@ -303,7 +304,11 @@ void JSObject::JSObjectVerify() {
if (r.IsNone()) {
CHECK(type_is_none);
} else if (!type_is_any && !(type_is_none && r.IsHeapObject())) {
- CHECK(!field_type->NowStable() || field_type->NowContains(value));
+      // If allocation folding is off, then a GC could happen during inner
+      // object literal creation and we will end up with an undefined
+      // value that does not match the field type.
+ CHECK(!field_type->NowStable() || field_type->NowContains(value) ||
+ (!FLAG_use_allocation_folding && value->IsUndefined()));
}
}
}
@@ -328,6 +333,8 @@ void Map::MapVerify() {
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
instance_size() < heap->Capacity()));
+ CHECK(GetBackPointer()->IsUndefined() ||
+ !Map::cast(GetBackPointer())->is_stable());
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
@@ -353,8 +360,7 @@ void Map::VerifyOmittedMapChecks() {
if (!is_stable() ||
is_deprecated() ||
is_dictionary_map()) {
- CHECK_EQ(0, dependent_code()->number_of_entries(
- DependentCode::kPrototypeCheckGroup));
+ CHECK(dependent_code()->IsEmpty(DependentCode::kPrototypeCheckGroup));
}
}
@@ -410,6 +416,17 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
+void TransitionArray::TransitionArrayVerify() {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ VerifyPointer(e);
+ }
+ CHECK_LE(LengthFor(number_of_transitions()), length());
+ CHECK(next_link()->IsUndefined() || next_link()->IsSmi() ||
+ next_link()->IsTransitionArray());
+}
+
+
void JSGeneratorObject::JSGeneratorObjectVerify() {
// In an expression like "new g()", there can be a point where a generator
// object is allocated but its fields are all undefined, as it hasn't yet been
@@ -530,6 +547,20 @@ void SlicedString::SlicedStringVerify() {
}
+void JSBoundFunction::JSBoundFunctionVerify() {
+ CHECK(IsJSBoundFunction());
+ JSObjectVerify();
+ VerifyObjectField(kBoundThisOffset);
+ VerifyObjectField(kBoundTargetFunctionOffset);
+ VerifyObjectField(kBoundArgumentsOffset);
+ VerifyObjectField(kCreationContextOffset);
+ CHECK(bound_target_function()->IsCallable());
+ CHECK(creation_context()->IsNativeContext());
+ CHECK(IsCallable());
+ CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
+}
+
+
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
VerifyObjectField(kPrototypeOrInitialMapOffset);
@@ -570,23 +601,12 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
void JSGlobalObject::JSGlobalObjectVerify() {
CHECK(IsJSGlobalObject());
- JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSGlobalObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
+ // Do not check the dummy global object for the builtins.
+ if (GlobalDictionary::cast(properties())->NumberOfElements() == 0 &&
+ elements()->length() == 0) {
+ return;
}
-}
-
-
-void JSBuiltinsObject::JSBuiltinsObjectVerify() {
- CHECK(IsJSBuiltinsObject());
JSObjectVerify();
- for (int i = GlobalObject::kBuiltinsOffset;
- i < JSBuiltinsObject::kSize;
- i += kPointerSize) {
- VerifyObjectField(i);
- }
}
@@ -815,17 +835,14 @@ void JSRegExp::JSRegExpVerify() {
void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
+ VerifyPointer(target());
VerifyPointer(handler());
+ CHECK_EQ(target()->IsCallable(), map()->is_callable());
+ CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
CHECK(hash()->IsSmi() || hash()->IsUndefined());
-}
-
-
-void JSFunctionProxy::JSFunctionProxyVerify() {
- CHECK(IsJSFunctionProxy());
- JSProxyVerify();
- VerifyPointer(call_trap());
- VerifyPointer(construct_trap());
- CHECK(map()->is_callable());
+ CHECK(map()->prototype()->IsNull());
+ // There should be no properties on a Proxy.
+ CHECK_EQ(0, map()->NumberOfOwnDescriptors());
}
@@ -891,7 +908,6 @@ void PrototypeInfo::PrototypeInfoVerify() {
CHECK(prototype_users()->IsSmi());
}
CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
- VerifyPointer(constructor_name());
}
@@ -929,6 +945,7 @@ void AccessCheckInfo::AccessCheckInfoVerify() {
CHECK(IsAccessCheckInfo());
VerifyPointer(named_callback());
VerifyPointer(indexed_callback());
+ VerifyPointer(callback());
VerifyPointer(data());
}
@@ -982,12 +999,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
}
-void TypeSwitchInfo::TypeSwitchInfoVerify() {
- CHECK(IsTypeSwitchInfo());
- VerifyPointer(types());
-}
-
-
void AllocationSite::AllocationSiteVerify() {
CHECK(IsAllocationSite());
}
@@ -1047,7 +1058,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_objects_with_fast_properties_++;
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
GlobalDictionary* dict = global_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
diff --git a/chromium/v8/src/objects-inl.h b/chromium/v8/src/objects-inl.h
index 3d39278cce9..0509a80b232 100644
--- a/chromium/v8/src/objects-inl.h
+++ b/chromium/v8/src/objects-inl.h
@@ -134,6 +134,14 @@ bool Object::IsFixedArrayBase() const {
}
+bool Object::IsFixedArray() const {
+ if (!IsHeapObject()) return false;
+ InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+ return instance_type == FIXED_ARRAY_TYPE ||
+ instance_type == TRANSITION_ARRAY_TYPE;
+}
+
+
// External objects are not extensible, so the map check is enough.
bool Object::IsExternal() const {
return Object::IsHeapObject() &&
@@ -179,6 +187,13 @@ bool Object::IsUniqueName() const {
}
+bool Object::IsFunction() const {
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() >= FIRST_FUNCTION_TYPE;
+}
+
+
bool Object::IsCallable() const {
return Object::IsHeapObject() && HeapObject::cast(this)->map()->is_callable();
}
@@ -190,12 +205,6 @@ bool Object::IsConstructor() const {
}
-bool Object::IsSpecObject() const {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
-}
-
-
bool Object::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
@@ -286,6 +295,17 @@ bool Object::KeyEquals(Object* second) {
}
+bool Object::FilterKey(PropertyFilter filter) {
+ if (IsSymbol()) {
+ if (filter & SKIP_SYMBOLS) return true;
+ if (Symbol::cast(this)->is_private()) return true;
+ } else {
+ if (filter & SKIP_STRINGS) return true;
+ }
+ return false;
+}
+
+
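
FilterKey centralizes the two reasons a key is dropped during enumeration: its kind is excluded by the filter, or it is a private symbol, which is skipped regardless of the filter. A sketch of the intended call pattern (keys as a FixedArray of names; the loop itself is illustrative):

    // Keep only the keys the filter admits.
    for (int i = 0; i < keys->length(); i++) {
      Object* key = keys->get(i);
      if (key->FilterKey(SKIP_SYMBOLS)) continue;  // drops symbols, incl. private
      // ... collect the surviving string-named key ...
    }
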
Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> object,
Representation representation) {
@@ -689,7 +709,6 @@ bool Object::IsJSProxy() const {
}
-TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
@@ -699,9 +718,9 @@ TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
bool Object::IsJSWeakCollection() const {
@@ -722,12 +741,10 @@ bool Object::IsLayoutDescriptor() const {
}
-bool Object::IsTransitionArray() const {
- return IsFixedArray();
-}
+bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
-bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+bool Object::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
bool Object::IsLiteralsArray() const { return IsFixedArray(); }
@@ -811,6 +828,7 @@ bool Object::IsScopeInfo() const {
}
+TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
@@ -988,14 +1006,7 @@ bool Object::IsJSGlobalProxy() const {
}
-bool Object::IsGlobalObject() const {
- if (!IsHeapObject()) return false;
- return HeapObject::cast(this)->map()->IsGlobalObjectMap();
-}
-
-
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
bool Object::IsUndetectableObject() const {
@@ -1008,7 +1019,7 @@ bool Object::IsAccessCheckNeeded() const {
if (!IsHeapObject()) return false;
if (IsJSGlobalProxy()) {
const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
- GlobalObject* global = proxy->GetIsolate()->context()->global_object();
+ JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
return HeapObject::cast(this)->map()->is_access_check_needed();
@@ -1173,19 +1184,21 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
uint32_t index, Handle<Object> value,
LanguageMode language_mode) {
LookupIterator it(isolate, object, index);
- return SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN_NULL(
+ SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED));
+ return value;
}
-Handle<Object> Object::GetPrototypeSkipHiddenPrototypes(
- Isolate* isolate, Handle<Object> receiver) {
- PrototypeIterator iter(isolate, receiver);
- while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return PrototypeIterator::GetCurrent(iter);
- }
- iter.Advance();
- }
+MaybeHandle<Object> Object::GetPrototype(Isolate* isolate,
+ Handle<Object> receiver) {
+ // We don't expect access checks to be needed on JSProxy objects.
+ DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
+ PrototypeIterator iter(isolate, receiver,
+ PrototypeIterator::START_AT_RECEIVER);
+ do {
+ if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
+ } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
return PrototypeIterator::GetCurrent(iter);
}
@@ -1466,62 +1479,6 @@ int HeapObject::Size() {
}
-HeapObjectContents HeapObject::ContentType() {
- InstanceType type = map()->instance_type();
- if (type <= LAST_NAME_TYPE) {
- if (type == SYMBOL_TYPE) {
- return HeapObjectContents::kTaggedValues;
- }
- DCHECK(type < FIRST_NONSTRING_TYPE);
- // There are four string representations: sequential strings, external
- // strings, cons strings, and sliced strings.
- // Only the former two contain raw values and no heap pointers (besides the
- // map-word).
- if (((type & kIsIndirectStringMask) != kIsIndirectStringTag))
- return HeapObjectContents::kRawValues;
- else
- return HeapObjectContents::kTaggedValues;
-#if 0
- // TODO(jochen): Enable eventually.
- } else if (type == JS_FUNCTION_TYPE) {
- return HeapObjectContents::kMixedValues;
-#endif
- } else if (type == BYTECODE_ARRAY_TYPE) {
- return HeapObjectContents::kMixedValues;
- } else if (type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
- type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
- return HeapObjectContents::kMixedValues;
- } else if (type == JS_ARRAY_BUFFER_TYPE) {
- return HeapObjectContents::kMixedValues;
- } else if (type <= LAST_DATA_TYPE) {
- // TODO(jochen): Why do we claim that Code and Map contain only raw values?
- return HeapObjectContents::kRawValues;
- } else {
- if (FLAG_unbox_double_fields) {
- LayoutDescriptorHelper helper(map());
- if (!helper.all_fields_tagged()) return HeapObjectContents::kMixedValues;
- }
- return HeapObjectContents::kTaggedValues;
- }
-}
-
-
-void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
- v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
- reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
-}
-
-
-void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
- v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
-}
-
-
-void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
- v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
-}
-
-
double HeapNumber::value() const {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1642,7 +1599,7 @@ SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
#undef SIMD128_WRITE_LANE
-ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
+ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)
Object** FixedArray::GetFirstElementAddress() {
@@ -1816,12 +1773,12 @@ void AllocationSite::set_memento_create_count(int count) {
}
-inline bool AllocationSite::IncrementMementoFoundCount() {
+bool AllocationSite::IncrementMementoFoundCount(int increment) {
if (IsZombie()) return false;
int value = memento_found_count();
- set_memento_found_count(value + 1);
- return memento_found_count() == kPretenureMinimumCreated;
+ set_memento_found_count(value + increment);
+ return memento_found_count() >= kPretenureMinimumCreated;
}
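
Two details of the new IncrementMementoFoundCount are easy to miss: the count now advances by a caller-supplied batch, and the threshold test is >=, so a batch that jumps past kPretenureMinimumCreated still reports it. The caller side collapses per-memento calls into one (sketch; the identifiers are assumed):

    // After a scavenge pass over one allocation site:
    if (site->IncrementMementoFoundCount(found_in_this_gc)) {
      // Threshold reached; the site is now a pretenuring candidate.
    }
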
@@ -1875,11 +1832,12 @@ inline bool AllocationSite::DigestPretenuringFeedback(
}
if (FLAG_trace_pretenuring_statistics) {
- PrintF(
- "AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
- static_cast<void*>(this), create_count, found_count, ratio,
- PretenureDecisionName(current_decision),
- PretenureDecisionName(pretenure_decision()));
+ PrintIsolate(GetIsolate(),
+ "pretenuring: AllocationSite(%p): (created, found, ratio) "
+ "(%d, %d, %f) %s => %s\n",
+ this, create_count, found_count, ratio,
+ PretenureDecisionName(current_decision),
+ PretenureDecisionName(pretenure_decision()));
}
// Clear feedback calculation fields until the next gc.
@@ -2011,18 +1969,22 @@ void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
}
-void JSObject::initialize_properties() {
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
-}
-
-
void JSObject::initialize_elements() {
FixedArrayBase* elements = map()->GetInitialElements();
WRITE_FIELD(this, kElementsOffset, elements);
}
+InterceptorInfo* JSObject::GetIndexedInterceptor() {
+ DCHECK(map()->has_indexed_interceptor());
+ JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
+ DCHECK(constructor->shared()->IsApiFunction());
+ Object* result =
+ constructor->shared()->get_api_func_data()->indexed_property_handler();
+ return InterceptorInfo::cast(result);
+}
+
+
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
@@ -2064,7 +2026,10 @@ Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
void WeakCell::clear() {
- DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT);
+ // Either the garbage collector is clearing the cell or we are simply
+ // initializing the root empty weak cell.
+ DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
+ this == GetHeap()->empty_weak_cell());
WRITE_FIELD(this, kValueOffset, Smi::FromInt(0));
}
@@ -2095,16 +2060,19 @@ void WeakCell::set_next(Object* val, WriteBarrierMode mode) {
}
-void WeakCell::clear_next(Heap* heap) {
- set_next(heap->the_hole_value(), SKIP_WRITE_BARRIER);
+void WeakCell::clear_next(Object* the_hole_value) {
+ DCHECK_EQ(GetHeap()->the_hole_value(), the_hole_value);
+ set_next(the_hole_value, SKIP_WRITE_BARRIER);
}
bool WeakCell::next_cleared() { return next()->IsTheHole(); }
-int JSObject::GetHeaderSize() {
- InstanceType type = map()->instance_type();
+int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }
+
+
+int JSObject::GetHeaderSize(InstanceType type) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
@@ -2118,8 +2086,8 @@ int JSObject::GetHeaderSize() {
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
return JSGlobalObject::kSize;
- case JS_BUILTINS_OBJECT_TYPE:
- return JSBuiltinsObject::kSize;
+ case JS_BOUND_FUNCTION_TYPE:
+ return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::kSize;
case JS_VALUE_TYPE:
@@ -2148,6 +2116,8 @@ int JSObject::GetHeaderSize() {
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
return JSWeakSet::kSize;
+ case JS_PROMISE_TYPE:
+ return JSObject::kHeaderSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -2161,15 +2131,18 @@ int JSObject::GetHeaderSize() {
}
-int JSObject::GetInternalFieldCount() {
- DCHECK(1 << kPointerSizeLog2 == kPointerSize);
- // Make sure to adjust for the number of in-object properties. These
- // properties do contribute to the size, but are not internal fields.
- return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
- map()->GetInObjectProperties();
+int JSObject::GetInternalFieldCount(Map* map) {
+ int instance_size = map->instance_size();
+ if (instance_size == kVariableSizeSentinel) return 0;
+ InstanceType instance_type = map->instance_type();
+ return ((instance_size - GetHeaderSize(instance_type)) >> kPointerSizeLog2) -
+ map->GetInObjectProperties();
}
+int JSObject::GetInternalFieldCount() { return GetInternalFieldCount(map()); }
+
+
int JSObject::GetInternalFieldOffset(int index) {
DCHECK(index < GetInternalFieldCount() && index >= 0);
return GetHeaderSize() + (kPointerSize * index);
@@ -2311,8 +2284,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
}
-
-void JSObject::InitializeBody(Map* map,
+void JSObject::InitializeBody(Map* map, int start_offset,
Object* pre_allocated_value,
Object* filler_value) {
DCHECK(!filler_value->IsHeapObject() ||
@@ -2320,12 +2292,12 @@ void JSObject::InitializeBody(Map* map,
DCHECK(!pre_allocated_value->IsHeapObject() ||
!GetHeap()->InNewSpace(pre_allocated_value));
int size = map->instance_size();
- int offset = kHeaderSize;
+ int offset = start_offset;
if (filler_value != pre_allocated_value) {
- int pre_allocated =
- map->GetInObjectProperties() - map->unused_property_fields();
- DCHECK(pre_allocated * kPointerSize + kHeaderSize <= size);
- for (int i = 0; i < pre_allocated; i++) {
+ int end_of_pre_allocated_offset =
+ size - (map->unused_property_fields() * kPointerSize);
+ DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
+ while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(this, offset, pre_allocated_value);
offset += kPointerSize;
}
@@ -2337,12 +2309,6 @@ void JSObject::InitializeBody(Map* map,
}
-bool JSObject::HasFastProperties() {
- DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
- return !properties()->IsDictionary();
-}
-
-
bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
@@ -2361,27 +2327,11 @@ void Struct::InitializeBody(int object_size) {
}
-bool Object::ToArrayLength(uint32_t* index) {
- if (IsSmi()) {
- int value = Smi::cast(this)->value();
- if (value < 0) return false;
- *index = value;
- return true;
- }
- if (IsHeapNumber()) {
- double value = HeapNumber::cast(this)->value();
- uint32_t uint_value = static_cast<uint32_t>(value);
- if (value == static_cast<double>(uint_value)) {
- *index = uint_value;
- return true;
- }
- }
- return false;
-}
+bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }
bool Object::ToArrayIndex(uint32_t* index) {
- return ToArrayLength(index) && *index != kMaxUInt32;
+ return Object::ToUint32(index) && *index != kMaxUInt32;
}
@@ -2400,7 +2350,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
void Object::VerifyApiCallResultType() {
#if DEBUG
- if (!(IsSmi() || IsString() || IsSymbol() || IsSpecObject() ||
+ if (!(IsSmi() || IsString() || IsSymbol() || IsJSReceiver() ||
IsHeapNumber() || IsSimd128Value() || IsUndefined() || IsTrue() ||
IsFalse() || IsNull())) {
FATAL("API call returned invalid object");
@@ -2436,7 +2386,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
- DCHECK_EQ(FIXED_ARRAY_TYPE, map()->instance_type());
+ DCHECK(IsFixedArray());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -2625,20 +2575,6 @@ void FixedArray::set(int index,
}
-void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
- int index,
- Object* value) {
- DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < array->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(array, offset, value);
- Heap* heap = array->GetHeap();
- if (heap->InNewSpace(value)) {
- heap->RecordWrite(array->address(), offset);
- }
-}
-
-
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
@@ -3077,20 +3013,12 @@ void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
}
-void DescriptorArray::Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&) {
+void DescriptorArray::SetDescriptor(int descriptor_number, Descriptor* desc) {
// Range check.
DCHECK(descriptor_number < number_of_descriptors());
-
- NoIncrementalWriteBarrierSet(this,
- ToKeyIndex(descriptor_number),
- *desc->GetKey());
- NoIncrementalWriteBarrierSet(this,
- ToValueIndex(descriptor_number),
- *desc->GetValue());
- NoIncrementalWriteBarrierSet(this, ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
+ set(ToKeyIndex(descriptor_number), *desc->GetKey());
+ set(ToValueIndex(descriptor_number), *desc->GetValue());
+ set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}
@@ -3131,19 +3059,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
}
-DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
- : marking_(array->GetHeap()->incremental_marking()) {
- marking_->EnterNoMarkingScope();
- DCHECK(!marking_->IsMarking() ||
- Marking::Color(array) == Marking::WHITE_OBJECT);
-}
-
-
-DescriptorArray::WhitenessWitness::~WhitenessWitness() {
- marking_->LeaveNoMarkingScope();
-}
-
-
PropertyType DescriptorArray::Entry::type() { return descs_->GetType(index_); }
@@ -3292,7 +3207,6 @@ CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(GlobalDictionary)
-CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(Int16x8)
@@ -3301,11 +3215,10 @@ CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSBuiltinsObject)
+CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
@@ -3585,14 +3498,6 @@ FreeSpace* FreeSpace::next() {
}
-FreeSpace** FreeSpace::next_address() {
- DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
- (!GetHeap()->deserialization_complete() && map() == NULL));
- DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
- return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
-}
-
-
void FreeSpace::set_next(FreeSpace* next) {
DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
@@ -3650,6 +3555,7 @@ bool Name::Equals(Handle<Name> one, Handle<Name> two) {
ACCESSORS(Symbol, name, Object, kNameOffset)
SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
+BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
bool String::Equals(String* other) {
@@ -4102,11 +4008,6 @@ Address ByteArray::GetDataStartAddress() {
}
-void BytecodeArray::BytecodeArrayIterateBody(ObjectVisitor* v) {
- IteratePointer(v, kConstantPoolOffset);
-}
-
-
byte BytecodeArray::get(int index) {
DCHECK(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -4453,11 +4354,10 @@ int Map::GetInObjectPropertyOffset(int index) {
}
-Handle<Map> Map::CopyInstallDescriptorsForTesting(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor) {
- return CopyInstallDescriptors(map, new_descriptor, descriptors,
- layout_descriptor);
+Handle<Map> Map::AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
}
@@ -4466,8 +4366,10 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
InstanceType instance_type = map->instance_type();
- if (instance_type == FIXED_ARRAY_TYPE) {
- return FixedArray::BodyDescriptor::SizeOf(map, this);
+ if (instance_type == FIXED_ARRAY_TYPE ||
+ instance_type == TRANSITION_ARRAY_TYPE) {
+ return FixedArray::SizeFor(
+ reinterpret_cast<FixedArray*>(this)->synchronized_length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
@@ -4568,12 +4470,8 @@ bool Map::has_non_instance_prototype() {
}
-void Map::set_is_constructor(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kIsConstructor));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsConstructor));
- }
+void Map::set_is_constructor() {
+ set_bit_field(bit_field() | (1 << kIsConstructor));
}
@@ -4772,12 +4670,22 @@ bool Map::is_strong() {
}
-void Map::set_counter(int value) {
- set_bit_field3(Counter::update(bit_field3(), value));
+void Map::set_new_target_is_base(bool value) {
+ set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
}
-int Map::counter() { return Counter::decode(bit_field3()); }
+bool Map::new_target_is_base() { return NewTargetIsBase::decode(bit_field3()); }
+
+
+void Map::set_construction_counter(int value) {
+ set_bit_field3(ConstructionCounter::update(bit_field3(), value));
+}
+
+
+int Map::construction_counter() {
+ return ConstructionCounter::decode(bit_field3());
+}
void Map::mark_unstable() {
@@ -4826,10 +4734,15 @@ bool Map::CanTransition() {
}
+bool Map::IsBooleanMap() { return this == GetHeap()->boolean_map(); }
bool Map::IsPrimitiveMap() {
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
+bool Map::IsJSReceiverMap() {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
bool Map::IsJSObjectMap() {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
@@ -4837,20 +4750,15 @@ bool Map::IsJSObjectMap() {
bool Map::IsJSArrayMap() { return instance_type() == JS_ARRAY_TYPE; }
bool Map::IsJSFunctionMap() { return instance_type() == JS_FUNCTION_TYPE; }
bool Map::IsStringMap() { return instance_type() < FIRST_NONSTRING_TYPE; }
-bool Map::IsJSProxyMap() {
- InstanceType type = instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
-}
+bool Map::IsJSProxyMap() { return instance_type() == JS_PROXY_TYPE; }
bool Map::IsJSGlobalProxyMap() {
return instance_type() == JS_GLOBAL_PROXY_TYPE;
}
bool Map::IsJSGlobalObjectMap() {
return instance_type() == JS_GLOBAL_OBJECT_TYPE;
}
-bool Map::IsGlobalObjectMap() {
- const InstanceType type = instance_type();
- return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE;
-}
+bool Map::IsJSTypedArrayMap() { return instance_type() == JS_TYPED_ARRAY_TYPE; }
+bool Map::IsJSDataViewMap() { return instance_type() == JS_DATA_VIEW_TYPE; }
bool Map::CanOmitMapChecks() {
@@ -4858,14 +4766,38 @@ bool Map::CanOmitMapChecks() {
}
-int DependentCode::number_of_entries(DependencyGroup group) {
- if (length() == 0) return 0;
- return Smi::cast(get(group))->value();
+DependentCode* DependentCode::next_link() {
+ return DependentCode::cast(get(kNextLinkIndex));
+}
+
+
+void DependentCode::set_next_link(DependentCode* next) {
+ set(kNextLinkIndex, next);
+}
+
+
+int DependentCode::flags() { return Smi::cast(get(kFlagsIndex))->value(); }
+
+
+void DependentCode::set_flags(int flags) {
+ set(kFlagsIndex, Smi::FromInt(flags));
+}
+
+
+int DependentCode::count() { return CountField::decode(flags()); }
+
+void DependentCode::set_count(int value) {
+ set_flags(CountField::update(flags(), value));
+}
+
+
+DependentCode::DependencyGroup DependentCode::group() {
+ return static_cast<DependencyGroup>(GroupField::decode(flags()));
}
-void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
- set(group, Smi::FromInt(value));
+void DependentCode::set_group(DependentCode::DependencyGroup group) {
+ set_flags(GroupField::update(flags(), static_cast<int>(group)));
}
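
[Editor's note] The hunk above reworks DependentCode from per-group entry counts into a linked list of per-group arrays: next_link chains the groups, and a single flags Smi packs the entry count and the dependency group via CountField and GroupField. As a rough sketch of that BitField-style packing (the field widths below are assumptions for illustration, not V8's actual layout):

    #include <cstdint>

    // Simplified stand-in for V8's BitField template: a value kBits wide,
    // stored at bit offset kShift inside a 32-bit flags word.
    template <int kShift, int kBits>
    struct BitField {
      static const uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static int decode(uint32_t flags) {
        return static_cast<int>((flags & kMask) >> kShift);
      }
      static uint32_t update(uint32_t flags, int value) {
        return (flags & ~kMask) | (static_cast<uint32_t>(value) << kShift);
      }
    };

    typedef BitField<0, 3> GroupField;   // assumed: 3 bits for the group
    typedef BitField<3, 27> CountField;  // assumed: remaining bits for count
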
@@ -4889,16 +4821,6 @@ void DependentCode::copy(int from, int to) {
}
-void DependentCode::ExtendGroup(DependencyGroup group) {
- GroupStartIndexes starts(this);
- for (int g = kGroupCount - 1; g > group; g--) {
- if (starts.at(g) < starts.at(g + 1)) {
- copy(starts.at(g), starts.at(g + 1));
- }
- }
-}
-
-
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
WRITE_INT_FIELD(this, kFlagsOffset, flags);
@@ -4920,12 +4842,8 @@ bool Code::IsCodeStubOrIC() {
bool Code::IsJavaScriptCode() {
- if (kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION) {
- return true;
- }
- Handle<Code> interpreter_entry =
- GetIsolate()->builtins()->InterpreterEntryTrampoline();
- return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+ return kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION ||
+ is_interpreter_entry_trampoline();
}
@@ -4974,6 +4892,12 @@ inline bool Code::is_hydrogen_stub() {
}
+inline bool Code::is_interpreter_entry_trampoline() {
+ Handle<Code> interpreter_entry =
+ GetIsolate()->builtins()->InterpreterEntryTrampoline();
+ return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+}
+
inline void Code::set_is_crankshafted(bool value) {
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = IsCrankshaftedField::update(previous, value);
@@ -5155,21 +5079,6 @@ bool Code::back_edges_patched_for_osr() {
uint16_t Code::to_boolean_state() { return extra_ic_state(); }
-bool Code::has_function_cache() {
- DCHECK(kind() == STUB);
- return HasFunctionCacheField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_has_function_cache(bool flag) {
- DCHECK(kind() == STUB);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = HasFunctionCacheField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
bool Code::marked_for_deoptimization() {
DCHECK(kind() == OPTIMIZED_FUNCTION);
return MarkedForDeoptimizationField::decode(
@@ -5318,8 +5227,6 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
bool Code::CanContainWeakObjects() {
- // is_turbofanned() implies !can_have_weak_objects().
- DCHECK(!is_optimized_code() || !is_turbofanned() || !can_have_weak_objects());
return is_optimized_code() && can_have_weak_objects();
}
@@ -5339,16 +5246,12 @@ bool Code::IsWeakObjectInOptimizedCode(Object* object) {
} else if (object->IsPropertyCell()) {
object = PropertyCell::cast(object)->value();
}
- if (object->IsJSObject() || object->IsJSProxy()) {
- // JSProxy is handled like JSObject because it can morph into one.
+ if (object->IsJSReceiver()) {
return FLAG_weak_embedded_objects_in_optimized_code;
}
- if (object->IsFixedArray()) {
+ if (object->IsContext()) {
// Contexts of inlined functions are embedded in optimized code.
- Map* map = HeapObject::cast(object)->map();
- Heap* heap = map->GetHeap();
- return FLAG_weak_embedded_objects_in_optimized_code &&
- map == heap->function_context_map();
+ return FLAG_weak_embedded_objects_in_optimized_code;
}
return false;
}
@@ -5511,8 +5414,7 @@ void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
+ DCHECK((value->IsMap() && GetBackPointer()->IsUndefined()));
DCHECK(!value->IsMap() ||
Map::cast(value)->GetConstructor() == constructor_or_backpointer());
set_constructor_or_backpointer(value, mode);
@@ -5544,13 +5446,26 @@ void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
}
+Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
+ return CopyInitialMap(map, map->instance_size(), map->GetInObjectProperties(),
+ map->unused_property_fields());
+}
+
+
+ACCESSORS(JSBoundFunction, length, Object, kLengthOffset)
+ACCESSORS(JSBoundFunction, name, Object, kNameOffset)
+ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
+ kBoundTargetFunctionOffset)
+ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
+ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
+ACCESSORS(JSBoundFunction, creation_context, Context, kCreationContextOffset)
+
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction, literals, LiteralsArray, kLiteralsOffset)
ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
-ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
-ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
-ACCESSORS(GlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
+ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
+ACCESSORS(JSGlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
@@ -5569,7 +5484,6 @@ ACCESSORS(Box, value, Object, kValueOffset)
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
-ACCESSORS(PrototypeInfo, constructor_name, Object, kConstructorNameOffset)
ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
kScopeInfoOffset)
@@ -5581,6 +5495,7 @@ ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
+ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
@@ -5597,6 +5512,7 @@ BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
+ACCESSORS(CallHandlerInfo, fast_handler, Object, kFastHandlerOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
@@ -5626,8 +5542,6 @@ ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
kInternalFieldCountOffset)
-ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
-
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
SMI_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
@@ -5696,8 +5610,8 @@ SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
- kOptimizedCodeMapOffset)
+ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
+ kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
kFeedbackVectorOffset)
@@ -5890,7 +5804,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
@@ -6144,17 +6057,28 @@ bool SharedFunctionInfo::IsBuiltin() {
bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
-bool JSFunction::IsBuiltin() { return shared()->IsBuiltin(); }
+bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
+ return optimized_code_map() == GetHeap()->cleared_optimized_code_map();
+}
-bool JSFunction::IsSubjectToDebugging() {
- return shared()->IsSubjectToDebugging();
+// static
+void SharedFunctionInfo::AddToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<Code> code, Handle<LiteralsArray> literals, BailoutId osr_ast_id) {
+ AddToOptimizedCodeMapInternal(shared, native_context, code, literals,
+ osr_ast_id);
}
-bool JSFunction::NeedsArgumentsAdaption() {
- return shared()->internal_formal_parameter_count() !=
- SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+// static
+void SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<LiteralsArray> literals) {
+ Isolate* isolate = shared->GetIsolate();
+ Handle<Oddball> undefined = isolate->factory()->undefined_value();
+ AddToOptimizedCodeMapInternal(shared, native_context, undefined, literals,
+ BailoutId::None());
}
@@ -6181,9 +6105,25 @@ bool JSFunction::IsInOptimizationQueue() {
}
-bool JSFunction::IsInobjectSlackTrackingInProgress() {
- return has_initial_map() &&
- initial_map()->counter() >= Map::kSlackTrackingCounterEnd;
+void JSFunction::CompleteInobjectSlackTrackingIfActive() {
+ if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
+ initial_map()->CompleteInobjectSlackTracking();
+ }
+}
+
+
+bool Map::IsInobjectSlackTrackingInProgress() {
+ return construction_counter() != Map::kNoSlackTracking;
+}
+
+
+void Map::InobjectSlackTrackingStep() {
+ if (!IsInobjectSlackTrackingInProgress()) return;
+ int counter = construction_counter();
+ set_construction_counter(counter - 1);
+ if (counter == kSlackTrackingCounterEnd) {
+ CompleteInobjectSlackTracking();
+ }
}
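
[Editor's note] The construction counter added above drives in-object slack tracking: roughly, each tracked construction calls InobjectSlackTrackingStep(), which decrements the counter, and when the pre-decrement value reaches kSlackTrackingCounterEnd the map finalizes (shrinks) its instance size. A toy trace of that countdown, using assumed constant values rather than V8's real ones:

    #include <cstdio>

    const int kNoSlackTracking = 0;            // assumed
    const int kSlackTrackingCounterStart = 7;  // assumed initial budget
    const int kSlackTrackingCounterEnd = 1;    // assumed

    int main() {
      int counter = kSlackTrackingCounterStart;
      for (int n = 1; counter != kNoSlackTracking; ++n) {
        int before = counter--;  // mirrors InobjectSlackTrackingStep()
        if (before == kSlackTrackingCounterEnd) {
          // CompleteInobjectSlackTracking() would shrink the map here.
          printf("construction %d: tracking complete\n", n);
        } else {
          printf("construction %d: counter now %d\n", n, counter);
        }
      }
      return 0;
    }
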
@@ -6244,6 +6184,9 @@ JSObject* JSFunction::global_proxy() {
}
+Context* JSFunction::native_context() { return context()->native_context(); }
+
+
void JSFunction::set_context(Object* value) {
DCHECK(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
@@ -6305,58 +6248,16 @@ bool JSFunction::is_compiled() {
}
-bool JSFunction::has_simple_parameters() {
- return shared()->has_simple_parameters();
-}
-
-
-LiteralsArray* JSFunction::literals() {
- DCHECK(!shared()->bound());
- return LiteralsArray::cast(literals_or_bindings());
-}
-
-
-void JSFunction::set_literals(LiteralsArray* literals) {
- DCHECK(!shared()->bound());
- set_literals_or_bindings(literals);
-}
-
-
-FixedArray* JSFunction::function_bindings() {
- DCHECK(shared()->bound());
- return literals_or_bindings();
-}
-
-
-void JSFunction::set_function_bindings(FixedArray* bindings) {
- DCHECK(shared()->bound());
- // Bound function literal may be initialized to the empty fixed array
- // before the bindings are set.
- DCHECK(bindings == GetHeap()->empty_fixed_array() ||
- bindings->map() == GetHeap()->fixed_array_map());
- set_literals_or_bindings(bindings);
-}
-
-
int JSFunction::NumberOfLiterals() {
- DCHECK(!shared()->bound());
return literals()->length();
}
+ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
ACCESSORS(JSProxy, hash, Object, kHashOffset)
-ACCESSORS(JSFunctionProxy, call_trap, JSReceiver, kCallTrapOffset)
-ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
-
-
-void JSProxy::InitializeBody(int object_size, Object* value) {
- DCHECK(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
+bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
ACCESSORS(JSCollection, table, Object, kTableOffset)
@@ -6626,32 +6527,6 @@ void JSArrayBuffer::set_is_shared(bool value) {
}
-// static
-template <typename StaticVisitor>
-void JSArrayBuffer::JSArrayBufferIterateBody(Heap* heap, HeapObject* obj) {
- StaticVisitor::VisitPointers(
- heap, obj,
- HeapObject::RawField(obj, JSArrayBuffer::BodyDescriptor::kStartOffset),
- HeapObject::RawField(obj,
- JSArrayBuffer::kByteLengthOffset + kPointerSize));
- StaticVisitor::VisitPointers(
- heap, obj, HeapObject::RawField(obj, JSArrayBuffer::kSize),
- HeapObject::RawField(obj, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
-void JSArrayBuffer::JSArrayBufferIterateBody(HeapObject* obj,
- ObjectVisitor* v) {
- v->VisitPointers(
- HeapObject::RawField(obj, JSArrayBuffer::BodyDescriptor::kStartOffset),
- HeapObject::RawField(obj,
- JSArrayBuffer::kByteLengthOffset + kPointerSize));
- v->VisitPointers(
- HeapObject::RawField(obj, JSArrayBuffer::kSize),
- HeapObject::RawField(obj, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
Object* JSArrayBufferView::byte_offset() const {
if (WasNeutered()) return Smi::FromInt(0);
return Object::cast(READ_FIELD(this, kByteOffsetOffset));
@@ -6714,6 +6589,8 @@ ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
+ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
+ACCESSORS(JSRegExp, source, Object, kSourceOffset)
JSRegExp::Type JSRegExp::TypeTag() {
@@ -6875,16 +6752,9 @@ bool JSObject::HasIndexedInterceptor() {
}
-NameDictionary* JSObject::property_dictionary() {
- DCHECK(!HasFastProperties());
- DCHECK(!IsGlobalObject());
- return NameDictionary::cast(properties());
-}
-
-
GlobalDictionary* JSObject::global_dictionary() {
DCHECK(!HasFastProperties());
- DCHECK(IsGlobalObject());
+ DCHECK(IsJSGlobalObject());
return GlobalDictionary::cast(properties());
}
@@ -7185,29 +7055,61 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
}
-Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<JSReceiver> holder,
+ Handle<Name> name,
+ Handle<Object> receiver,
+ LanguageMode language_mode) {
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ name->GetIsolate(), receiver, name, holder);
+ return GetProperty(&it, language_mode);
+}
+
+
+void JSReceiver::initialize_properties() {
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_properties_dictionary()));
+ if (map()->is_dictionary_map()) {
+ WRITE_FIELD(this, kPropertiesOffset,
+ GetHeap()->empty_properties_dictionary());
+ } else {
+ WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
}
+}
+
+
+bool JSReceiver::HasFastProperties() {
+ DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
+ return !properties()->IsDictionary();
+}
+
+
+NameDictionary* JSReceiver::property_dictionary() {
+ DCHECK(!HasFastProperties());
+ DCHECK(!IsJSGlobalObject());
+ return NameDictionary::cast(properties());
+}
+
- Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(object->GetIsolate(), object, name);
+ return HasProperty(&it);
}
Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Handle<Name> name) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
+ if (object->IsJSObject()) { // Shortcut
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ return HasProperty(&it);
}
- Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetOwnPropertyAttributes(object, name);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ return Just(attributes.FromJust() != ABSENT);
}
@@ -7228,31 +7130,8 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Isolate* isolate = object->GetIsolate();
- Handle<Name> name = isolate->factory()->Uint32ToString(index);
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
- }
-
- Maybe<PropertyAttributes> result = GetElementAttributes(object, index);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
-}
-
-
-Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
- uint32_t index) {
- // Call the "has" trap on proxies.
- if (object->IsJSProxy()) {
- Isolate* isolate = object->GetIsolate();
- Handle<Name> name = isolate->factory()->Uint32ToString(index);
- Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
- return JSProxy::HasPropertyWithHandler(proxy, name);
- }
-
- Maybe<PropertyAttributes> result = GetOwnElementAttributes(object, index);
- return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
+ LookupIterator it(object->GetIsolate(), object, index);
+ return HasProperty(&it);
}
@@ -7277,7 +7156,7 @@ bool JSGlobalObject::IsDetached() {
}
-bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) const {
+bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
const PrototypeIterator iter(this->GetIsolate(),
const_cast<JSGlobalProxy*>(this));
return iter.GetCurrent() != global;
@@ -7797,126 +7676,6 @@ Relocatable::~Relocatable() {
}
-// static
-int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
-}
-
-
-// static
-int FixedArray::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return SizeFor(reinterpret_cast<FixedArray*>(object)->synchronized_length());
-}
-
-
-// static
-int StructBodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
-}
-
-
-void Foreign::ForeignIterateBody(ObjectVisitor* v) {
- v->VisitExternalReference(
- reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
-}
-
-
-template<typename StaticVisitor>
-void Foreign::ForeignIterateBody() {
- StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
-}
-
-
-void FixedTypedArrayBase::FixedTypedArrayBaseIterateBody(ObjectVisitor* v) {
- v->VisitPointer(
- reinterpret_cast<Object**>(FIELD_ADDR(this, kBasePointerOffset)));
-}
-
-
-template <typename StaticVisitor>
-void FixedTypedArrayBase::FixedTypedArrayBaseIterateBody() {
- StaticVisitor::VisitPointer(
- reinterpret_cast<Object**>(FIELD_ADDR(this, kBasePointerOffset)));
-}
-
-
-void ExternalOneByteString::ExternalOneByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalOneByteStringResource Resource;
- v->VisitExternalOneByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template <typename StaticVisitor>
-void ExternalOneByteString::ExternalOneByteStringIterateBody() {
- typedef v8::String::ExternalOneByteStringResource Resource;
- StaticVisitor::VisitExternalOneByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalStringResource Resource;
- v->VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<typename StaticVisitor>
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
- typedef v8::String::ExternalStringResource Resource;
- StaticVisitor::VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-static inline void IterateBodyUsingLayoutDescriptor(HeapObject* object,
- int start_offset,
- int end_offset,
- ObjectVisitor* v) {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
-
- LayoutDescriptorHelper helper(object->map());
- DCHECK(!helper.all_fields_tagged());
-
- for (int offset = start_offset; offset < end_offset; offset += kPointerSize) {
- // Visit all tagged fields.
- if (helper.IsTagged(offset)) {
- v->VisitPointer(HeapObject::RawField(object, offset));
- }
- }
-}
-
-
-template<int start_offset, int end_offset, int size>
-void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
- HeapObject* obj,
- ObjectVisitor* v) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
- } else {
- IterateBodyUsingLayoutDescriptor(obj, start_offset, end_offset, v);
- }
-}
-
-
-template<int start_offset>
-void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, object_size));
- } else {
- IterateBodyUsingLayoutDescriptor(obj, start_offset, object_size, v);
- }
-}
-
-
template<class Derived, class TableType>
Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType* table(TableType::cast(this->table()));
@@ -8000,6 +7759,14 @@ String::SubStringRange::iterator String::SubStringRange::end() {
}
+// Predictably converts HeapObject* or Address to uint32 by calculating the
+// offset of the address within its MemoryChunk.
+static inline uint32_t ObjectAddressForHashing(void* object) {
+ uint32_t value = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object));
+ return value & MemoryChunk::kAlignmentMask;
+}
+
+
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
@@ -8046,6 +7813,7 @@ String::SubStringRange::iterator String::SubStringRange::end() {
#undef NOBARRIER_READ_BYTE_FIELD
#undef NOBARRIER_WRITE_BYTE_FIELD
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OBJECTS_INL_H_
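
[Editor's note] One addition in this file worth a concrete example is ObjectAddressForHashing: it reduces a pointer to its offset inside the power-of-two-aligned MemoryChunk that contains it, presumably so the hash does not depend on where the chunk itself was mapped. A worked example of the masking, assuming 1 MB chunk alignment (the real mask is MemoryChunk::kAlignmentMask):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kAlignmentMask = (1u << 20) - 1;  // assumed: 1 MB chunks
      uintptr_t address = 0x12345678;                  // hypothetical pointer
      uint32_t value = static_cast<uint32_t>(address);
      printf("0x%x\n", value & kAlignmentMask);        // prints 0x45678
      return 0;
    }
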
diff --git a/chromium/v8/src/objects-printer.cc b/chromium/v8/src/objects-printer.cc
index 8dfd0a17b09..db716505dea 100644
--- a/chromium/v8/src/objects-printer.cc
+++ b/chromium/v8/src/objects-printer.cc
@@ -33,7 +33,7 @@ void Object::Print(std::ostream& os) { // NOLINT
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
- os << "" << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
+ os << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
}
@@ -76,6 +76,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case BYTECODE_ARRAY_TYPE:
BytecodeArray::cast(this)->BytecodeArrayPrint(os);
break;
+ case TRANSITION_ARRAY_TYPE:
+ TransitionArray::cast(this)->TransitionArrayPrint(os);
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpacePrint(os);
break;
@@ -95,15 +98,21 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_ARRAY_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_REGEXP_TYPE:
+ case JS_PROMISE_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
+ case JS_REGEXP_TYPE:
+ JSRegExp::cast(this)->JSRegExpPrint(os);
+ break;
case ODDBALL_TYPE:
Oddball::cast(this)->to_string()->Print(os);
break;
case JS_MODULE_TYPE:
JSModule::cast(this)->JSModulePrint(os);
break;
+ case JS_BOUND_FUNCTION_TYPE:
+ JSBoundFunction::cast(this)->JSBoundFunctionPrint(os);
+ break;
case JS_FUNCTION_TYPE:
JSFunction::cast(this)->JSFunctionPrint(os);
break;
@@ -113,12 +122,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_GLOBAL_OBJECT_TYPE:
JSGlobalObject::cast(this)->JSGlobalObjectPrint(os);
break;
- case JS_BUILTINS_OBJECT_TYPE:
- JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(os);
- break;
case JS_VALUE_TYPE:
- os << "Value wrapper around:";
- JSValue::cast(this)->value()->Print(os);
+ JSValue::cast(this)->JSValuePrint(os);
break;
case JS_DATE_TYPE:
JSDate::cast(this)->JSDatePrint(os);
@@ -129,9 +134,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_PROXY_TYPE:
JSProxy::cast(this)->JSProxyPrint(os);
break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::cast(this)->JSFunctionProxyPrint(os);
- break;
case JS_SET_TYPE:
JSSet::cast(this)->JSSetPrint(os);
break;
@@ -273,7 +275,7 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- os << " ";
+ os << "\n ";
descs->GetKey(i)->NamePrint(os);
os << ": ";
switch (descs->GetType(i)) {
@@ -284,24 +286,23 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
} else {
os << Brief(RawFastPropertyAt(index));
}
- os << " (data field at offset " << index.property_index() << ")\n";
+ os << " (data field at offset " << index.property_index() << ")";
break;
}
case ACCESSOR: {
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
- os << " (accessor field at offset " << index.property_index()
- << ")\n";
+ os << " (accessor field at offset " << index.property_index() << ")";
break;
}
case DATA_CONSTANT:
- os << Brief(descs->GetConstant(i)) << " (data constant)\n";
+ os << Brief(descs->GetConstant(i)) << " (data constant)";
break;
case ACCESSOR_CONSTANT:
- os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)\n";
+ os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)";
break;
}
}
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
global_dictionary()->Print(os);
} else {
property_dictionary()->Print(os);
@@ -313,7 +314,7 @@ template <class T>
static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
T* p = T::cast(object);
for (int i = 0; i < p->length(); i++) {
- os << " " << i << ": " << p->get_scalar(i) << "\n";
+ os << "\n " << i << ": " << p->get_scalar(i);
}
}
@@ -329,7 +330,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- os << " " << i << ": " << Brief(p->get(i)) << "\n";
+ os << "\n " << i << ": " << Brief(p->get(i));
}
break;
}
@@ -339,13 +340,12 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
if (elements()->length() > 0) {
FixedDoubleArray* p = FixedDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- os << " " << i << ": ";
+ os << "\n " << i << ": ";
if (p->is_the_hole(i)) {
os << "<the hole>";
} else {
os << p->get_scalar(i);
}
- os << "\n";
}
}
break;
@@ -376,55 +376,58 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
- os << " parameter map:";
+ os << "\n parameter map:";
for (int i = 2; i < p->length(); i++) {
os << " " << (i - 2) << ":" << Brief(p->get(i));
}
os << "\n context: " << Brief(p->get(0))
- << "\n arguments: " << Brief(p->get(1)) << "\n";
+ << "\n arguments: " << Brief(p->get(1));
break;
}
}
}
-void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSObject");
+static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
+ const char* id) { // NOLINT
+ obj->PrintHeader(os, id);
// Don't call GetElementsKind; its validation code can cause the printer to
// fail when debugging.
- PrototypeIterator iter(GetIsolate(), this);
- os << " - map = " << reinterpret_cast<void*>(map()) << " ["
- << ElementsKindToString(this->map()->elements_kind())
- << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent())
- << "\n {\n";
- PrintProperties(os);
- PrintTransitions(os);
- PrintElements(os);
- os << " }\n";
+ PrototypeIterator iter(obj->GetIsolate(), obj);
+ os << " - map = " << reinterpret_cast<void*>(obj->map()) << " ["
+ << ElementsKindToString(obj->map()->elements_kind())
+ << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+}
+
+
+static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
+ bool print_elements = true) {
+ os << "\n {";
+ obj->PrintProperties(os);
+ obj->PrintTransitions(os);
+ if (print_elements) obj->PrintElements(os);
+ os << "\n }\n";
+}
+
+
+void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSObject");
+ JSObjectPrintBody(os, this);
+}
+
+
+void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSRegExp");
+ os << "\n - data = " << Brief(data());
+ JSObjectPrintBody(os, this);
}
void JSModule::JSModulePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSModule");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n"
- << " - context = ";
- context()->Print(os);
- os << " - scope_info = " << Brief(scope_info())
- << ElementsKindToString(this->map()->elements_kind()) << " {\n";
- PrintProperties(os);
- PrintElements(os);
- os << " }\n";
-}
-
-
-static const char* TypeToString(InstanceType type) {
- switch (type) {
-#define TYPE_TO_STRING(TYPE) case TYPE: return #TYPE;
- INSTANCE_TYPE_LIST(TYPE_TO_STRING)
-#undef TYPE_TO_STRING
- }
- UNREACHABLE();
- return "UNKNOWN"; // Keep the compiler happy.
+ JSObjectPrintHeader(os, this, "JSModule");
+ os << "\n - context = " << Brief(context());
+ os << " - scope_info = " << Brief(scope_info());
+ JSObjectPrintBody(os, this);
}
@@ -442,7 +445,7 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
void Map::MapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Map");
- os << " - type: " << TypeToString(instance_type()) << "\n";
+ os << " - type: " << instance_type() << "\n";
os << " - instance size: " << instance_size() << "\n";
if (IsJSObjectMap()) {
os << " - inobject properties: " << GetInObjectProperties() << "\n";
@@ -461,6 +464,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_access_check_needed()) os << " - access_check_needed\n";
if (!is_extensible()) os << " - non-extensible\n";
if (is_observed()) os << " - observed\n";
+ if (is_strong()) os << " - strong_map\n";
if (is_prototype_map()) {
os << " - prototype_map\n";
os << " - prototype info: " << Brief(prototype_info());
@@ -473,14 +477,17 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (FLAG_unbox_double_fields) {
os << "\n - layout descriptor: " << Brief(layout_descriptor());
}
- if (TransitionArray::NumberOfTransitions(raw_transitions()) > 0) {
- os << "\n - transitions: ";
- TransitionArray::PrintTransitions(os, raw_transitions());
+ int nof_transitions = TransitionArray::NumberOfTransitions(raw_transitions());
+ if (nof_transitions > 0) {
+ os << "\n - transitions #" << nof_transitions << ": "
+ << Brief(raw_transitions());
+ TransitionArray::PrintTransitions(os, raw_transitions(), false);
}
os << "\n - prototype: " << Brief(prototype());
os << "\n - constructor: " << Brief(GetConstructor());
os << "\n - code cache: " << Brief(code_cache());
os << "\n - dependent code: " << Brief(dependent_code());
+ os << "\n - construction counter: " << construction_counter();
os << "\n";
}
@@ -539,6 +546,45 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
}
+void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "TransitionArray");
+ os << " - capacity: " << length();
+ for (int i = 0; i < length(); i++) {
+ os << "\n [" << i << "]: " << Brief(get(i));
+ if (i == kNextLinkIndex) os << " (next link)";
+ if (i == kPrototypeTransitionsIndex) os << " (prototype transitions)";
+ if (i == kTransitionLengthIndex) os << " (number of transitions)";
+ }
+ os << "\n";
+}
+
+
+void TypeFeedbackMetadata::Print() {
+ OFStream os(stdout);
+ TypeFeedbackMetadataPrint(os);
+ os << std::flush;
+}
+
+
+void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "TypeFeedbackMetadata");
+ os << " - length: " << length();
+ if (length() == 0) {
+ os << " (empty)\n";
+ return;
+ }
+
+ TypeFeedbackMetadataIterator iter(this);
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+ os << "\n Slot " << slot << " " << kind;
+ }
+ os << "\n";
+}
+
+
void TypeFeedbackVector::Print() {
OFStream os(stdout);
TypeFeedbackVectorPrint(os);
@@ -554,59 +600,50 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
- os << "\n - ics with type info: " << ic_with_type_info_count();
- os << "\n - generic ics: " << ic_generic_count();
-
- if (Slots() > 0) {
- for (int i = 0; i < Slots(); i++) {
- FeedbackVectorSlot slot(i);
- os << "\n Slot " << i << " [" << GetIndex(slot)
- << "]: " << Brief(Get(slot));
- }
- }
-
- if (ICSlots() > 0) {
- DCHECK(elements_per_ic_slot() == 2);
-
- for (int i = 0; i < ICSlots(); i++) {
- FeedbackVectorICSlot slot(i);
- FeedbackVectorSlotKind kind = GetKind(slot);
- os << "\n ICSlot " << i << " " << kind << " ";
- switch (kind) {
- case FeedbackVectorSlotKind::LOAD_IC: {
- LoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
- KeyedLoadICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::CALL_IC: {
- CallICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::STORE_IC: {
- StoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::KEYED_STORE_IC: {
- KeyedStoreICNexus nexus(this, slot);
- os << Code::ICState2String(nexus.StateFromFeedback());
- break;
- }
- case FeedbackVectorSlotKind::UNUSED:
- case FeedbackVectorSlotKind::KINDS_NUMBER:
- UNREACHABLE();
- break;
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+
+ os << "\n Slot " << slot << " " << kind << " ";
+ switch (kind) {
+ case FeedbackVectorSlotKind::LOAD_IC: {
+ LoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+ KeyedLoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::CALL_IC: {
+ CallICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
}
+ case FeedbackVectorSlotKind::STORE_IC: {
+ StoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ KeyedStoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::GENERAL:
+ break;
+ case FeedbackVectorSlotKind::INVALID:
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ UNREACHABLE();
+ break;
+ }
- os << "\n [" << GetIndex(slot) << "]: " << Brief(Get(slot));
- os << "\n [" << (GetIndex(slot) + 1)
- << "]: " << Brief(get(GetIndex(slot) + 1));
+ int entry_size = iter.entry_size();
+ for (int i = 0; i < entry_size; i++) {
+ int index = GetIndex(slot) + i;
+ os << "\n [" << index << "]: " << Brief(get(index));
}
}
os << "\n";
@@ -614,20 +651,21 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ValueObject");
- value()->Print(os);
+ JSObjectPrintHeader(os, this, "JSValue");
+ os << "\n - value = " << Brief(value());
+ JSObjectPrintBody(os, this);
}
void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSMessageObject");
- os << " - type: " << type();
+ JSObjectPrintHeader(os, this, "JSMessageObject");
+ os << "\n - type: " << type();
os << "\n - arguments: " << Brief(argument());
os << "\n - start_position: " << start_position();
os << "\n - end_position: " << end_position();
os << "\n - script: " << Brief(script());
os << "\n - stack_frames: " << Brief(stack_frames());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
@@ -675,17 +713,15 @@ static const char* const weekdays[] = {
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSDate");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - value = ";
- value()->Print(os);
+ JSObjectPrintHeader(os, this, "JSDate");
+ os << "\n - value = " << Brief(value());
if (!year()->IsSmi()) {
- os << " - time = NaN\n";
+ os << "\n - time = NaN\n";
} else {
// TODO(svenpanne) Add some basic formatting to our streams.
ScopedVector<char> buf(100);
SNPrintF(
- buf, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
+ buf, "\n - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
year()->IsSmi() ? Smi::cast(year())->value() : -1,
month()->IsSmi() ? Smi::cast(month())->value() : -1,
@@ -695,46 +731,34 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
os << buf.start();
}
+ JSObjectPrintBody(os, this);
}
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSProxy");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - handler = ";
- handler()->Print(os);
+ os << " - map = " << reinterpret_cast<void*>(map());
+ os << "\n - target = ";
+ target()->ShortPrint(os);
+ os << "\n - handler = ";
+ handler()->ShortPrint(os);
os << "\n - hash = ";
- hash()->Print(os);
- os << "\n";
-}
-
-
-void JSFunctionProxy::JSFunctionProxyPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSFunctionProxy");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - handler = ";
- handler()->Print(os);
- os << "\n - call_trap = ";
- call_trap()->Print(os);
- os << "\n - construct_trap = ";
- construct_trap()->Print(os);
+ hash()->ShortPrint(os);
os << "\n";
}
void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSSet");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+ JSObjectPrintHeader(os, this, "JSSet");
os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSMap");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+ JSObjectPrintHeader(os, this, "JSMap");
os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
@@ -742,8 +766,7 @@ template <class Derived, class TableType>
void
OrderedHashTableIterator<Derived, TableType>::OrderedHashTableIteratorPrint(
std::ostream& os) { // NOLINT
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - table = " << Brief(table());
+ os << "\n - table = " << Brief(table());
os << "\n - index = " << Brief(index());
os << "\n - kind = " << Brief(kind());
os << "\n";
@@ -761,94 +784,91 @@ template void OrderedHashTableIterator<
void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSSetIterator");
+ JSObjectPrintHeader(os, this, "JSSetIterator");
OrderedHashTableIteratorPrint(os);
}
void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSMapIterator");
+ JSObjectPrintHeader(os, this, "JSMapIterator");
OrderedHashTableIteratorPrint(os);
}
void JSIteratorResult::JSIteratorResultPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSIteratorResult");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - done = " << Brief(done()) << "\n";
- os << " - value = " << Brief(value()) << "\n";
+ JSObjectPrintHeader(os, this, "JSIteratorResult");
+ os << "\n - done = " << Brief(done());
+ os << "\n - value = " << Brief(value());
os << "\n";
}
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSWeakMap");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintHeader(os, this, "JSWeakMap");
+ os << "\n - table = " << Brief(table());
+ JSObjectPrintBody(os, this);
}
void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSWeakSet");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - table = " << Brief(table());
- os << "\n";
+ JSObjectPrintHeader(os, this, "JSWeakSet");
+ os << "\n - table = " << Brief(table());
+ JSObjectPrintBody(os, this);
}
void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSArrayBuffer");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - backing_store = " << backing_store() << "\n";
- os << " - byte_length = " << Brief(byte_length());
+ JSObjectPrintHeader(os, this, "JSArrayBuffer");
+ os << "\n - backing_store = " << backing_store();
+ os << "\n - byte_length = " << Brief(byte_length());
if (was_neutered()) os << " - neutered\n";
- os << "\n";
+ JSObjectPrintBody(os, this, !was_neutered());
}
void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSTypedArray");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - buffer = " << Brief(buffer());
+ JSObjectPrintHeader(os, this, "JSTypedArray");
+ os << "\n - buffer = " << Brief(buffer());
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
os << "\n - length = " << Brief(length());
if (WasNeutered()) os << " - neutered\n";
- os << "\n";
- if (!WasNeutered()) PrintElements(os);
+ JSObjectPrintBody(os, this, !WasNeutered());
}
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSDataView");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - buffer =" << Brief(buffer());
+ JSObjectPrintHeader(os, this, "JSDataView");
+ os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset = " << Brief(byte_offset());
os << "\n - byte_length = " << Brief(byte_length());
if (WasNeutered()) os << " - neutered\n";
- os << "\n";
+ JSObjectPrintBody(os, this, !WasNeutered());
+}
+
+
+void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSBoundFunction");
+ os << "\n - bound_target_function = " << Brief(bound_target_function());
+ os << "\n - bound_this = " << Brief(bound_this());
+ os << "\n - bound_arguments = " << Brief(bound_arguments());
+ JSObjectPrintBody(os, this);
}
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "Function");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - initial_map = ";
+ JSObjectPrintHeader(os, this, "Function");
+ os << "\n - initial_map = ";
if (has_initial_map()) os << Brief(initial_map());
os << "\n - shared_info = " << Brief(shared());
- os << "\n - name = " << Brief(shared()->name());
- os << "\n - context = " << Brief(context());
- if (shared()->bound()) {
- os << "\n - bindings = " << Brief(function_bindings());
- } else {
- os << "\n - literals = " << Brief(literals());
+ os << "\n - name = " << Brief(shared()->name());
+ if (shared()->is_generator()) {
+ os << "\n - generator";
}
+ os << "\n - context = " << Brief(context());
+ os << "\n - literals = " << Brief(literals());
os << "\n - code = " << Brief(code());
- os << "\n";
- PrintProperties(os);
- PrintElements(os);
- os << "\n";
+ JSObjectPrintBody(os, this);
}
@@ -904,19 +924,18 @@ void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
}
-void JSBuiltinsObject::JSBuiltinsObjectPrint(std::ostream& os) { // NOLINT
- os << "builtins ";
- JSObjectPrint(os);
-}
-
-
void Cell::CellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Cell");
+ os << " - value: " << Brief(value());
+ os << "\n";
}
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
+ os << " - value: " << Brief(value());
+ os << "\n - details: " << property_details();
+ os << "\n";
}
@@ -927,6 +946,7 @@ void WeakCell::WeakCellPrint(std::ostream& os) { // NOLINT
} else {
os << "\n - value: " << Brief(value());
}
+ os << "\n";
}
@@ -942,6 +962,7 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
os << "foreign address : " << foreign_address();
+ os << "\n";
}
@@ -969,7 +990,6 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
os << "\n - validity cell: " << Brief(validity_cell());
- os << "\n - constructor name: " << Brief(constructor_name());
os << "\n";
}
@@ -995,6 +1015,7 @@ void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessCheckInfo");
os << "\n - named_callback: " << Brief(named_callback());
os << "\n - indexed_callback: " << Brief(indexed_callback());
+ os << "\n - callback: " << Brief(callback());
os << "\n - data: " << Brief(data());
os << "\n";
}
@@ -1055,13 +1076,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
}
-void TypeSwitchInfo::TypeSwitchInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "TypeSwitchInfo");
- os << "\n - types: " << Brief(types());
- os << "\n";
-}
-
-
void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AllocationSite");
os << " - weak_next: " << Brief(weak_next());
@@ -1237,11 +1251,11 @@ void DescriptorArray::Print() {
void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
HandleScope scope(GetIsolate());
- os << "Descriptor array " << number_of_descriptors() << "\n";
+ os << "Descriptor array #" << number_of_descriptors();
for (int i = 0; i < number_of_descriptors(); i++) {
Descriptor desc;
Get(i, &desc);
- os << " " << i << ": " << desc << "\n";
+ os << "\n " << i << ": " << desc;
}
os << "\n";
}
@@ -1250,7 +1264,7 @@ void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
void TransitionArray::Print() {
OFStream os(stdout);
TransitionArray::PrintTransitions(os, this);
- os << std::flush;
+ os << "\n" << std::flush;
}
@@ -1258,12 +1272,12 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
bool print_header) { // NOLINT
int num_transitions = NumberOfTransitions(transitions);
if (print_header) {
- os << "Transition array " << num_transitions << "\n";
+ os << "Transition array #" << num_transitions << ":";
}
for (int i = 0; i < num_transitions; i++) {
Name* key = GetKey(transitions, i);
Map* target = GetTarget(transitions, i);
- os << " ";
+ os << "\n ";
#ifdef OBJECT_PRINT
key->NamePrint(os);
#else
@@ -1272,19 +1286,23 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
os << ": ";
Heap* heap = key->GetHeap();
if (key == heap->nonextensible_symbol()) {
- os << " (transition to non-extensible)";
+ os << "(transition to non-extensible)";
} else if (key == heap->sealed_symbol()) {
- os << " (transition to sealed)";
+ os << "(transition to sealed)";
} else if (key == heap->frozen_symbol()) {
- os << " (transition to frozen)";
+ os << "(transition to frozen)";
} else if (key == heap->elements_transition_symbol()) {
- os << " (transition to " << ElementsKindToString(target->elements_kind())
+ os << "(transition to " << ElementsKindToString(target->elements_kind())
<< ")";
+ } else if (key == heap->strict_function_transition_symbol()) {
+ os << " (transition to strict function)";
+ } else if (key == heap->strong_function_transition_symbol()) {
+ os << " (transition to strong function)";
} else if (key == heap->observed_symbol()) {
os << " (transition to Object.observe)";
} else {
PropertyDetails details = GetTargetDetails(key, target);
- os << " (transition to ";
+ os << "(transition to ";
if (details.location() == kDescriptor) {
os << "immutable ";
}
@@ -1296,13 +1314,17 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
}
os << "), attrs: " << details.attributes();
}
- os << " -> " << Brief(target) << "\n";
+ os << " -> " << Brief(target);
}
}
void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
- TransitionArray::PrintTransitions(os, map()->raw_transitions());
+ Object* transitions = map()->raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ if (num_transitions == 0) return;
+ os << "\n - transitions";
+ TransitionArray::PrintTransitions(os, transitions, false);
}
#endif // defined(DEBUG) || defined(OBJECT_PRINT)
} // namespace internal
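
[Editor's note] A pattern running through the whole printer diff: field printers now emit "\n" before each field instead of after it, and the shared JSObjectPrintHeader/JSObjectPrintBody helpers own the header line and the single terminating newline, so composed printers no longer produce stray blank lines. A minimal sketch of the convention (names are illustrative, not V8's):

    #include <iostream>

    void PrintField(std::ostream& os, const char* name, int value) {
      os << "\n - " << name << " = " << value;  // newline first, none after
    }

    void PrintObject(std::ostream& os) {
      os << "MyObject";        // header: no trailing newline
      PrintField(os, "a", 1);  // each field starts its own line
      PrintField(os, "b", 2);
      os << "\n";              // exactly one terminating newline
    }
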
diff --git a/chromium/v8/src/objects.cc b/chromium/v8/src/objects.cc
index 08383030d8e..ef846d6c423 100644
--- a/chromium/v8/src/objects.cc
+++ b/chromium/v8/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -24,25 +24,31 @@
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/execution.h"
-#include "src/field-index-inl.h"
#include "src/field-index.h"
+#include "src/field-index-inl.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/hydrogen.h"
#include "src/ic/ic.h"
+#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
+#include "src/key-accumulator.h"
+#include "src/list.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/objects-body-descriptors-inl.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/property-descriptor.h"
#include "src/prototype.h"
+#include "src/regexp/jsregexp.h"
#include "src/safepoint-table.h"
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
#include "src/utils.h"
+#include "src/zone.h"
#ifdef ENABLE_DISASSEMBLER
#include "src/disasm.h"
@@ -52,6 +58,19 @@
namespace v8 {
namespace internal {
+std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
+ switch (instance_type) {
+#define WRITE_TYPE(TYPE) \
+ case TYPE: \
+ return os << #TYPE;
+ INSTANCE_TYPE_LIST(WRITE_TYPE)
+#undef WRITE_TYPE
+ }
+ UNREACHABLE();
+ return os << "UNKNOWN"; // Keep the compiler happy.
+}
+
+
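// A standalone sketch (not part of this patch) of the X-macro technique the
// operator<< above relies on: INSTANCE_TYPE_LIST expands WRITE_TYPE once per
// instance type, so the switch gains one stringizing case per enumerator.
// The enum and list below are illustrative stand-ins, not V8's real ones.
#include <iostream>

#define DEMO_TYPE_LIST(V) V(DEMO_HEAP_NUMBER_TYPE) V(DEMO_ODDBALL_TYPE)

enum DemoInstanceType {
#define DECLARE_TYPE(TYPE) TYPE,
  DEMO_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
};

std::ostream& operator<<(std::ostream& os, DemoInstanceType type) {
  switch (type) {
#define WRITE_TYPE(TYPE) \
  case TYPE:             \
    return os << #TYPE;
    DEMO_TYPE_LIST(WRITE_TYPE)
#undef WRITE_TYPE
  }
  return os << "UNKNOWN";  // Keep the compiler happy.
}

int main() { std::cout << DEMO_ODDBALL_TYPE << "\n"; }  // DEMO_ODDBALL_TYPE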
Handle<HeapType> Object::OptimalType(Isolate* isolate,
Representation representation) {
if (representation.IsNone()) return HeapType::None(isolate);
@@ -59,9 +78,7 @@ Handle<HeapType> Object::OptimalType(Isolate* isolate,
if (representation.IsHeapObject() && IsHeapObject()) {
// We can track only JavaScript objects with stable maps.
Handle<Map> map(HeapObject::cast(this)->map(), isolate);
- if (map->is_stable() &&
- map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
- map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE) {
+ if (map->is_stable() && map->IsJSReceiverMap()) {
return HeapType::Class(map, isolate);
}
}
@@ -333,21 +350,37 @@ Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
return Just(false);
}
} else if (x->IsSymbol()) {
- return Just(x.is_identical_to(y));
+ if (y->IsSymbol()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
} else if (x->IsSimd128Value()) {
- if (!y->IsSimd128Value()) return Just(false);
- return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
- Handle<Simd128Value>::cast(y)));
+ if (y->IsSimd128Value()) {
+ return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
+ Handle<Simd128Value>::cast(y)));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
} else if (x->IsJSReceiver() && !x->IsUndetectableObject()) {
if (y->IsJSReceiver()) {
return Just(x.is_identical_to(y));
- } else if (y->IsNull() || y->IsSimd128Value() || y->IsSymbol() ||
- y->IsUndefined()) {
+ } else if (y->IsNull() || y->IsUndefined()) {
return Just(false);
} else if (y->IsBoolean()) {
y = Oddball::ToNumber(Handle<Oddball>::cast(y));
- }
- if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x)).ToHandle(&x)) {
+ } else if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x))
+ .ToHandle(&x)) {
return Nothing<bool>();
}
} else {
@@ -381,7 +414,9 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
return isolate->factory()->undefined_string();
}
if (object->IsBoolean()) return isolate->factory()->boolean_string();
+ if (object->IsString()) return isolate->factory()->string_string();
if (object->IsSymbol()) return isolate->factory()->symbol_string();
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
if (object->Is##Type()) return isolate->factory()->type##_string();
SIMD128_TYPES(SIMD128_TYPE)
@@ -588,6 +623,23 @@ MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
}
+Maybe<bool> Object::IsArray(Handle<Object> object) {
+ if (object->IsJSArray()) return Just(true);
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ Isolate* isolate = proxy->GetIsolate();
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked,
+ isolate->factory()->NewStringFromAsciiChecked("IsArray")));
+ return Nothing<bool>();
+ }
+ return Object::IsArray(handle(proxy->target(), isolate));
+ }
+ return Just(false);
+}
+
+
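// Hedged caller sketch for Object::IsArray above (V8-internal, assumes a
// live Isolate and HandleScope; not part of this patch). Unlike a bare
// object->IsJSArray() check, the result can be Nothing() when a revoked
// proxy is encountered, so callers must test before unwrapping:
//
//   Maybe<bool> is_array = Object::IsArray(object);
//   if (is_array.IsNothing()) return;    // TypeError is already pending.
//   if (is_array.FromJust()) { /* treat object as an array */ }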
bool Object::IsPromise(Handle<Object> object) {
if (!object->IsJSObject()) return false;
auto js_object = Handle<JSObject>::cast(object);
@@ -613,15 +665,118 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
return isolate->factory()->undefined_value();
}
if (!func->IsCallable()) {
- // TODO(bmeurer): Better error message here?
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCalledNonCallable, func),
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kPropertyNotFunction,
+ func, name, receiver),
Object);
}
return func;
}
+// static
+MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+ // 1. ReturnIfAbrupt(object).
+ // 2. (default elementTypes -- not applicable.)
+ // 3. If Type(obj) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "CreateListFromArrayLike")),
+ FixedArray);
+ }
+ // 4. Let len be ? ToLength(? Get(obj, "length")).
+ Handle<Object> raw_length_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, raw_length_obj,
+ JSReceiver::GetProperty(object, isolate->factory()->length_string()),
+ FixedArray);
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
+ Object::ToLength(isolate, raw_length_obj),
+ FixedArray);
+ uint32_t len;
+ if (!raw_length_number->ToUint32(&len) ||
+ len > static_cast<uint32_t>(FixedArray::kMaxLength)) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayLength),
+ FixedArray);
+ }
+ // 5. Let list be an empty List.
+ Handle<FixedArray> list = isolate->factory()->NewFixedArray(len);
+ // 6. Let index be 0.
+ // 7. Repeat while index < len:
+ for (uint32_t index = 0; index < len; ++index) {
+ // 7a. Let indexName be ToString(index).
+ // 7b. Let next be ? Get(obj, indexName).
+ Handle<Object> next;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, next, Object::GetElement(isolate, object, index), FixedArray);
+ switch (element_types) {
+ case ElementTypes::kAll:
+ // Nothing to do.
+ break;
+ case ElementTypes::kStringAndSymbol: {
+ // 7c. If Type(next) is not an element of elementTypes, throw a
+ // TypeError exception.
+ if (!next->IsName()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNotPropertyName, next),
+ FixedArray);
+ }
+ // 7d. Append next as the last element of list.
+ // Internalize on the fly so we can use pointer identity later.
+ next = isolate->factory()->InternalizeName(Handle<Name>::cast(next));
+ break;
+ }
+ }
+ list->set(index, *next);
+ // 7e. Set index to index + 1. (See loop header.)
+ }
+ // 8. Return list.
+ return list;
+}
+
+
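// Hedged caller sketch for CreateListFromArrayLike above (V8-internal,
// assumes a live isolate; not part of this patch). Use(...) is a
// hypothetical consumer:
//
//   Handle<FixedArray> list;
//   ASSIGN_RETURN_ON_EXCEPTION(
//       isolate, list,
//       Object::CreateListFromArrayLike(isolate, obj, ElementTypes::kAll),
//       Object);
//   for (int i = 0; i < list->length(); ++i) Use(list->get(i));
//
// With ElementTypes::kStringAndSymbol each element is additionally
// internalized, so later pointer-identity comparisons on the names are valid.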
+// static
+Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
+ // Call the "has" trap on proxies.
+ return JSProxy::HasProperty(it->isolate(), it->GetHolder<JSProxy>(),
+ it->GetName());
+ case LookupIterator::INTERCEPTOR: {
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithInterceptor(it);
+ if (!result.IsJust()) return Nothing<bool>();
+ if (result.FromJust() != ABSENT) return Just(true);
+ break;
+ }
+ case LookupIterator::ACCESS_CHECK: {
+ if (it->HasAccess()) break;
+ Maybe<PropertyAttributes> result =
+ JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
+ if (!result.IsJust()) return Nothing<bool>();
+ return Just(result.FromJust() != ABSENT);
+ }
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ // TypedArray out-of-bounds access.
+ return Just(false);
+ case LookupIterator::ACCESSOR:
+ case LookupIterator::DATA:
+ return Just(true);
+ }
+ }
+ return Just(false);
+}
+
+
+// static
MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
LanguageMode language_mode) {
for (; it->IsFound(); it->Next()) {
@@ -630,8 +785,9 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- return JSProxy::GetPropertyWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName());
+ return JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
+ it->GetName(), it->GetReceiver(),
+ language_mode);
case LookupIterator::INTERCEPTOR: {
bool done;
Handle<Object> result;
@@ -656,6 +812,104 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
}
+#define STACK_CHECK(result_value) \
+ do { \
+ StackLimitCheck stack_check(isolate); \
+ if (stack_check.HasOverflowed()) { \
+ isolate->Throw(*isolate->factory()->NewRangeError( \
+ MessageTemplate::kStackOverflow)); \
+ return result_value; \
+ } \
+ } while (false)
+
+
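// The macro above guards the recursive proxy operations added in this patch
// against unbounded handler chains. A hedged illustration of a call site
// returning Maybe<bool> (SomeTrap is a hypothetical name; `isolate` is
// assumed to be in scope, as the macro requires):
//
//   Maybe<bool> JSProxy::SomeTrap(Isolate* isolate, ...) {
//     STACK_CHECK(Nothing<bool>());  // throws RangeError and bails out
//     ...
//   }
//
// Each recursion level, e.g. a proxy whose target is itself a proxy,
// re-checks the limit, so deep nesting fails with a catchable stack overflow
// rather than a crash.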
+// static
+MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ Handle<Object> receiver,
+ LanguageMode language_mode) {
+ if (receiver->IsJSGlobalObject()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kReadGlobalReferenceThroughProxy, name),
+ Object);
+ }
+
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(MaybeHandle<Object>());
+ Handle<Name> trap_name = isolate->factory()->get_string();
+ // 1. Assert: IsPropertyKey(P) is true.
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
+ Object);
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "get").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name), Object);
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 7.a Return target.[[Get]](P, Receiver).
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, name, target);
+ return Object::GetProperty(&it, language_mode);
+ }
+ // 8. Let trapResult be ? Call(trap, handler, «target, P, Receiver»).
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name, receiver};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args), Object);
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN_NULL(target_found);
+ // 10. If targetDesc is not undefined, then
+ if (target_found.FromJust()) {
+ // 10.a. If IsDataDescriptor(targetDesc) and targetDesc.[[Configurable]] is
+ // false and targetDesc.[[Writable]] is false, then
+ // 10.a.i. If SameValue(trapResult, targetDesc.[[Value]]) is false,
+ // throw a TypeError exception.
+ bool inconsistent = PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ !target_desc.writable() &&
+ !trap_result->SameValue(*target_desc.value());
+ if (inconsistent) {
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kProxyGetNonConfigurableData,
+ name, target_desc.value(), trap_result),
+ Object);
+ }
+ // 10.b. If IsAccessorDescriptor(targetDesc) and targetDesc.[[Configurable]]
+ // is false and targetDesc.[[Get]] is undefined, then
+ // 10.b.i. If trapResult is not undefined, throw a TypeError exception.
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.get()->IsUndefined() &&
+ !trap_result->IsUndefined();
+ if (inconsistent) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetNonConfigurableAccessor, name,
+ trap_result),
+ Object);
+ }
+ }
+  // 11. Return trapResult.
+ return trap_result;
+}
+
+
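// The checks in steps 9 and 10 above enforce the ES6 invariants for the
// proxy "get" trap. A hedged JS-level illustration (not part of this patch)
// of the case step 10.a rejects:
//
//   var target = {};
//   Object.defineProperty(target, "x",
//       {value: 1, writable: false, configurable: false});
//   var p = new Proxy(target, {get() { return 2; }});
//   p.x;  // TypeError: trap result 2 differs from the value 1 of the
//         // non-writable, non-configurable data property on the target.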
Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
Handle<Name> name) {
LookupIterator it(object, name,
@@ -672,7 +926,9 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess()) continue;
+ // Support calling this method without an active context, but refuse
+ // access to access-checked objects in that case.
+ if (it->isolate()->context() != nullptr && it->HasAccess()) continue;
// Fall through.
case LookupIterator::JSPROXY:
it->NotFound();
@@ -712,15 +968,16 @@ bool Object::ToInt32(int32_t* value) {
bool Object::ToUint32(uint32_t* value) {
if (IsSmi()) {
int num = Smi::cast(this)->value();
- if (num >= 0) {
- *value = static_cast<uint32_t>(num);
- return true;
- }
+ if (num < 0) return false;
+ *value = static_cast<uint32_t>(num);
+ return true;
}
if (IsHeapNumber()) {
double num = HeapNumber::cast(this)->value();
- if (num >= 0 && FastUI2D(FastD2UI(num)) == num) {
- *value = FastD2UI(num);
+ if (num < 0) return false;
+ uint32_t uint_value = FastD2UI(num);
+ if (FastUI2D(uint_value) == num) {
+ *value = uint_value;
return true;
}
}
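// A standalone sketch (not part of this patch) of the round-trip check the
// rewritten ToUint32 performs for heap numbers: a double is a valid uint32
// exactly when truncating it to uint32 and converting back reproduces it.
// FastD2UI/FastUI2D are modeled here with plain casts, an assumption that is
// only safe once the guard has excluded NaN and out-of-range values.
#include <cstdint>
#include <cstdio>

static bool DoubleToUint32(double num, uint32_t* value) {
  // Reject NaN, negatives, and values the cast below could not represent
  // (the real FastD2UI tolerates these; a plain cast would not).
  if (!(num >= 0.0) || num > 4294967295.0) return false;
  uint32_t uint_value = static_cast<uint32_t>(num);  // ~FastD2UI
  if (static_cast<double>(uint_value) == num) {      // ~FastUI2D round trip
    *value = uint_value;
    return true;
  }
  return false;  // the fractional part was lost in truncation
}

int main() {
  uint32_t v;
  std::printf("%d %d %d\n",
              DoubleToUint32(7.0, &v),             // 1
              DoubleToUint32(7.5, &v),             // 0 (fractional)
              DoubleToUint32(4294967296.0, &v));   // 0 (out of range)
}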
@@ -772,6 +1029,33 @@ Object* FunctionTemplateInfo::GetCompatibleReceiver(Isolate* isolate,
}
+// static
+MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site) {
+ // If called through new, new.target can be:
+ // - a subclass of constructor,
+ // - a proxy wrapper around constructor, or
+ // - the constructor itself.
+ // If called through Reflect.construct, it's guaranteed to be a constructor.
+ Isolate* const isolate = constructor->GetIsolate();
+ DCHECK(constructor->IsConstructor());
+ DCHECK(new_target->IsConstructor());
+ DCHECK(!constructor->has_initial_map() ||
+ constructor->initial_map()->instance_type() != JS_FUNCTION_TYPE);
+
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObjectFromMap(initial_map, NOT_TENURED, site);
+ isolate->counters()->constructed_objects()->Increment();
+ isolate->counters()->constructed_objects_runtime()->Increment();
+ return result;
+}
+
+
Handle<FixedArray> JSObject::EnsureWritableFastElements(
Handle<JSObject> object) {
DCHECK(object->HasFastSmiOrObjectElements());
@@ -786,17 +1070,64 @@ Handle<FixedArray> JSObject::EnsureWritableFastElements(
}
-MaybeHandle<Object> JSProxy::GetPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name) {
+// ES6 9.5.1
+// static
+MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
+ Handle<String> trap_name = isolate->factory()->getPrototypeOf_string();
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return isolate->factory()->undefined_value();
+ STACK_CHECK(MaybeHandle<Object>());
- Handle<Object> args[] = { receiver, name };
- return CallTrap(
- proxy, "get", isolate->derived_get_trap(), arraysize(args), args);
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot.
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
+ Object);
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, trap, GetMethod(handler, trap_name),
+ Object);
+ // 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
+ if (trap->IsUndefined()) {
+ return Object::GetPrototype(isolate, target);
+ }
+ // 7. Let handlerProto be ? Call(trap, handler, «target»).
+ Handle<Object> argv[] = {target};
+ Handle<Object> handler_proto;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, handler_proto,
+ Execution::Call(isolate, trap, handler, arraysize(argv), argv), Object);
+ // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError.
+ if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull())) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid),
+ Object);
+ }
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN_NULL(is_extensible);
+ // 10. If extensibleTarget is true, return handlerProto.
+ if (is_extensible.FromJust()) return handler_proto;
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ Handle<Object> target_proto;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, target_proto,
+ Object::GetPrototype(isolate, target), Object);
+ // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError.
+ if (!handler_proto->SameValue(*target_proto)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetPrototypeOfNonExtensible),
+ Object);
+ }
+ // 13. Return handlerProto.
+ return handler_proto;
}
@@ -862,8 +1193,9 @@ bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
}
-MaybeHandle<Object> Object::SetPropertyWithAccessor(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
Isolate* isolate = it->isolate();
Handle<Object> structure = it->GetAccessors();
Handle<Object> receiver = it->GetReceiver();
@@ -879,21 +1211,24 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- name, receiver),
- Object);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, name, receiver));
+ return Nothing<bool>();
}
v8::AccessorNameSetterCallback call_fun =
v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
- if (call_fun == nullptr) return value;
+ if (call_fun == nullptr) return Just(true);
+    // TODO(verwaest): Shouldn't this case be unreachable (at least in the
+    // long run)? Should we have ExecutableAccessorPairs with a missing
+    // setter that are "writable"? If they aren't writable, shouldn't we
+    // have bailed out earlier?
LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return value;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(true);
}
// Regular accessor.
@@ -901,15 +1236,12 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
if (setter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value);
+ receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
}
- if (is_sloppy(language_mode)) return value;
-
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kNoSetterInCallback,
- it->GetName(), it->GetHolder<JSObject>()),
- Object);
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoSetterInCallback,
+ it->GetName(), it->GetHolder<JSObject>()));
}
@@ -932,31 +1264,33 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
return MaybeHandle<Object>();
}
- Debug* debug = isolate->debug();
- // Handle stepping into a getter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->is_active()) debug->HandleStepIn(getter, false);
-
return Execution::Call(isolate, getter, receiver, 0, NULL);
}
-MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
- Handle<Object> receiver,
- Handle<JSReceiver> setter,
- Handle<Object> value) {
+Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
+ Handle<JSReceiver> setter,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
Isolate* isolate = setter->GetIsolate();
- Debug* debug = isolate->debug();
- // Handle stepping into a setter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->is_active()) debug->HandleStepIn(setter, false);
-
Handle<Object> argv[] = { value };
- RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver,
- arraysize(argv), argv),
- Object);
- return value;
+ RETURN_ON_EXCEPTION_VALUE(isolate, Execution::Call(isolate, setter, receiver,
+ arraysize(argv), argv),
+ Nothing<bool>());
+ return Just(true);
+}
+
+
+// static
+bool Object::IsErrorObject(Isolate* isolate, Handle<Object> object) {
+ if (!object->IsJSObject()) return false;
+ // Use stack_trace_symbol as proxy for [[ErrorData]].
+ Handle<Name> symbol = isolate->factory()->stack_trace_symbol();
+ Maybe<bool> has_stack_trace =
+ JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol);
+ DCHECK(!has_stack_trace.IsNothing());
+ return has_stack_trace.FromJust();
}
@@ -974,6 +1308,9 @@ bool JSObject::AllCanRead(LookupIterator* it) {
}
} else if (it->state() == LookupIterator::INTERCEPTOR) {
if (it->GetInterceptor()->all_can_read()) return true;
+ } else if (it->state() == LookupIterator::JSPROXY) {
+      // Stop the lookup here: proxies never grant the AllCanRead exemption.
+ return false;
}
}
return false;
@@ -994,6 +1331,14 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
GetPropertyWithInterceptor(it, &done), Object);
if (done) return result;
}
+
+  // A cross-origin [[Get]] of a well-known symbol does not throw; it
+  // returns undefined.
+ Handle<Name> name = it->GetName();
+ if (name->IsSymbol() && Symbol::cast(*name)->is_well_known_symbol()) {
+ return it->factory()->undefined_value();
+ }
+
it->isolate()->ReportFailedAccessCheck(checked);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
return it->factory()->undefined_value();
@@ -1021,7 +1366,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
// static
bool JSObject::AllCanWrite(LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
+ for (; it->IsFound() && it->state() != LookupIterator::JSPROXY; it->Next()) {
if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
@@ -1033,17 +1378,16 @@ bool JSObject::AllCanWrite(LookupIterator* it) {
}
-MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value) {
+Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw) {
Handle<JSObject> checked = it->GetHolder<JSObject>();
if (AllCanWrite(it)) {
- // The supplied language-mode is ignored by SetPropertyWithAccessor.
- return SetPropertyWithAccessor(it, value, SLOPPY);
+ return SetPropertyWithAccessor(it, value, should_throw);
}
it->isolate()->ReportFailedAccessCheck(checked);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- return value;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(true);
}
@@ -1057,7 +1401,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
Handle<String>::cast(name));
}
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> property_dictionary(object->global_dictionary());
int entry = property_dictionary->FindEntry(name);
@@ -1093,12 +1437,13 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
-bool Object::HasInPrototypeChain(Isolate* isolate, Object* target) {
- PrototypeIterator iter(isolate, this, PrototypeIterator::START_AT_RECEIVER);
+Maybe<bool> Object::HasInPrototypeChain(Isolate* isolate, Handle<Object> object,
+ Handle<Object> proto) {
+ PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
while (true) {
- iter.AdvanceIgnoringProxies();
- if (iter.IsAtEnd()) return false;
- if (iter.IsAtEnd(target)) return true;
+ if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
+ if (iter.IsAtEnd()) return Just(false);
+ if (iter.IsAtEnd(proto)) return Just(true);
}
}
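// The rewrite above switches AdvanceIgnoringProxies to
// AdvanceFollowingProxies, so prototype-chain walks now invoke proxy
// "getPrototypeOf" traps and can therefore throw. Hedged caller sketch
// (V8-internal, assumes a live isolate; not part of this patch):
//
//   Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, proto);
//   if (result.IsNothing()) return;  // a trap threw or the stack overflowed
//   bool found = result.FromJust();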
@@ -1262,6 +1607,56 @@ bool Object::SameValueZero(Object* other) {
}
+MaybeHandle<Object> Object::ArraySpeciesConstructor(
+ Isolate* isolate, Handle<Object> original_array) {
+ Handle<Context> native_context = isolate->native_context();
+ if (!FLAG_harmony_species) {
+ return Handle<Object>(native_context->array_function(), isolate);
+ }
+ Handle<Object> constructor = isolate->factory()->undefined_value();
+ Maybe<bool> is_array = Object::IsArray(original_array);
+ MAYBE_RETURN_NULL(is_array);
+ if (is_array.FromJust()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor,
+ Object::GetProperty(original_array,
+ isolate->factory()->constructor_string()),
+ Object);
+ if (constructor->IsConstructor()) {
+ Handle<Context> constructor_context;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor_context,
+ JSReceiver::GetFunctionRealm(Handle<JSReceiver>::cast(constructor)),
+ Object);
+ if (*constructor_context != *native_context &&
+ *constructor == constructor_context->array_function()) {
+ constructor = isolate->factory()->undefined_value();
+ }
+ }
+ if (constructor->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor,
+ Object::GetProperty(constructor,
+ isolate->factory()->species_symbol()),
+ Object);
+ if (constructor->IsNull()) {
+ constructor = isolate->factory()->undefined_value();
+ }
+ }
+ }
+ if (constructor->IsUndefined()) {
+ return Handle<Object>(native_context->array_function(), isolate);
+ } else {
+ if (!constructor->IsConstructor()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kSpeciesNotConstructor),
+ Object);
+ }
+ return constructor;
+ }
+}
+
+
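// ArraySpeciesConstructor above implements the ES6 @@species lookup used by
// Array.prototype methods that allocate their result. A hedged JS-level
// illustration (not part of this patch) of what the C++ resolves:
//
//   class MyArray extends Array {}
//   new MyArray(1, 2).map(x => x);   // instance of MyArray
//
//   class Plain extends Array {
//     static get [Symbol.species]() { return Array; }
//   }
//   new Plain(1, 2).map(x => x);     // plain Array again
//
// With --harmony-species disabled, the function short-circuits to the
// realm's Array constructor.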
void Object::ShortPrint(FILE* out) {
OFStream os(out);
os << Brief(this);
@@ -1353,6 +1748,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
+ DCHECK(!resource->IsCompressible());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -1415,6 +1811,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
+ DCHECK(!resource->IsCompressible());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -1552,6 +1949,22 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
break;
}
+ case JS_BOUND_FUNCTION_TYPE: {
+ JSBoundFunction* bound_function = JSBoundFunction::cast(this);
+ Object* name = bound_function->name();
+ accumulator->Add("<JS BoundFunction");
+ if (name->IsString()) {
+ String* str = String::cast(name);
+ if (str->length() > 0) {
+ accumulator->Add(" ");
+ accumulator->Put(str);
+ }
+ }
+ accumulator->Add(
+ " (BoundTargetFunction %p)>",
+ reinterpret_cast<void*>(bound_function->bound_target_function()));
+ break;
+ }
case JS_WEAK_MAP_TYPE: {
accumulator->Add("<JS WeakMap>");
break;
@@ -1661,12 +2074,24 @@ void JSObject::PrintElementsTransition(
}
+// static
+MaybeHandle<JSFunction> Map::GetConstructorFunction(
+ Handle<Map> map, Handle<Context> native_context) {
+ if (map->IsPrimitiveMap()) {
+ int const constructor_function_index = map->GetConstructorFunctionIndex();
+ if (constructor_function_index != kNoConstructorFunctionIndex) {
+ return handle(
+ JSFunction::cast(native_context->get(constructor_function_index)));
+ }
+ }
+ return MaybeHandle<JSFunction>();
+}
+
+
void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
PropertyAttributes attributes) {
OFStream os(file);
- os << "[reconfiguring ";
- constructor_name()->PrintOn(file);
- os << "] ";
+ os << "[reconfiguring]";
Name* name = instance_descriptors()->GetKey(modify_index);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
@@ -1691,9 +2116,7 @@ void Map::PrintGeneralization(FILE* file,
HeapType* old_field_type,
HeapType* new_field_type) {
OFStream os(file);
- os << "[generalizing ";
- constructor_name()->PrintOn(file);
- os << "] ";
+ os << "[generalizing]";
Name* name = instance_descriptors()->GetKey(modify_index);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
@@ -1725,9 +2148,7 @@ void Map::PrintGeneralization(FILE* file,
void JSObject::PrintInstanceMigration(FILE* file,
Map* original_map,
Map* new_map) {
- PrintF(file, "[migrating ");
- map()->constructor_name()->PrintOn(file);
- PrintF(file, "] ");
+ PrintF(file, "[migrating]");
DescriptorArray* o = original_map->instance_descriptors();
DescriptorArray* n = new_map->instance_descriptors();
for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
@@ -1796,8 +2217,12 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case BYTECODE_ARRAY_TYPE:
os << "<BytecodeArray[" << BytecodeArray::cast(this)->length() << "]>";
break;
+ case TRANSITION_ARRAY_TYPE:
+ os << "<TransitionArray[" << TransitionArray::cast(this)->length()
+ << "]>";
+ break;
case FREE_SPACE_TYPE:
- os << "<FreeSpace[" << FreeSpace::cast(this)->Size() << "]>";
+ os << "<FreeSpace[" << FreeSpace::cast(this)->size() << "]>";
break;
#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE: \
@@ -1880,9 +2305,6 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case JS_PROXY_TYPE:
os << "<JSProxy>";
break;
- case JS_FUNCTION_PROXY_TYPE:
- os << "<JSFunctionProxy>";
- break;
case FOREIGN_TYPE:
os << "<Foreign>";
break;
@@ -1900,7 +2322,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
StringStream accumulator(&allocator);
PropertyCell* cell = PropertyCell::cast(this);
cell->value()->ShortPrint(&accumulator);
- os << accumulator.ToCString().get() << " " << cell->property_details();
+ os << accumulator.ToCString().get();
break;
}
case WEAK_CELL_TYPE: {
@@ -1918,12 +2340,33 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
}
-void HeapObject::Iterate(ObjectVisitor* v) {
- // Handle header
- IteratePointer(v, kMapOffset);
- // Handle object body
+void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
+
+
+void HeapObject::IterateBody(ObjectVisitor* v) {
Map* m = map();
- IterateBody(m->instance_type(), SizeFromMap(m), v);
+ IterateBodyFast<ObjectVisitor>(m->instance_type(), SizeFromMap(m), v);
+}
+
+
+void HeapObject::IterateBody(InstanceType type, int object_size,
+ ObjectVisitor* v) {
+ IterateBodyFast<ObjectVisitor>(type, object_size, v);
+}
+
+
+struct CallIsValidSlot {
+ template <typename BodyDescriptor>
+ static bool apply(HeapObject* obj, int offset, int) {
+ return BodyDescriptor::IsValidSlot(obj, offset);
+ }
+};
+
+
+bool HeapObject::IsValidSlot(int offset) {
+ DCHECK_NE(0, offset);
+ return BodyDescriptorApply<CallIsValidSlot, bool>(map()->instance_type(),
+ this, offset, 0);
}
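// A standalone sketch (not part of this patch) of the dispatch pattern
// IsValidSlot above uses: BodyDescriptorApply picks a body descriptor from
// the instance type and applies a caller-supplied functor's static template
// method to it. The descriptor, functor, and dispatcher below are
// illustrative stand-ins for V8's.
#include <cstdio>

struct DemoFixedBody {  // stand-in for a V8 BodyDescriptor
  static bool IsValidSlot(int offset) { return offset == 8; }
};

struct DemoCallIsValidSlot {
  template <typename BodyDescriptor>
  static bool apply(int offset) {
    return BodyDescriptor::IsValidSlot(offset);
  }
};

template <typename Op>
static bool DemoBodyDescriptorApply(int instance_type, int offset) {
  switch (instance_type) {
    case 1:  // pretend instance type 1 uses DemoFixedBody
      return Op::template apply<DemoFixedBody>(offset);
    default:
      return false;
  }
}

int main() {
  std::printf("%d %d\n",
              DemoBodyDescriptorApply<DemoCallIsValidSlot>(1, 8),    // 1
              DemoBodyDescriptorApply<DemoCallIsValidSlot>(1, 16));  // 0
}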
@@ -2045,7 +2488,7 @@ void Simd128Value::CopyBits(void* destination) const {
String* JSReceiver::class_name() {
- if (IsJSFunction() || IsJSFunctionProxy()) {
+ if (IsFunction()) {
return GetHeap()->Function_string();
}
Object* maybe_constructor = map()->GetConstructor();
@@ -2058,31 +2501,89 @@ String* JSReceiver::class_name() {
}
-String* Map::constructor_name() {
- if (is_prototype_map() && prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo* proto_info = PrototypeInfo::cast(prototype_info());
- if (proto_info->constructor_name()->IsString()) {
- return String::cast(proto_info->constructor_name());
+MaybeHandle<String> JSReceiver::BuiltinStringTag(Handle<JSReceiver> object) {
+ Maybe<bool> is_array = Object::IsArray(object);
+ MAYBE_RETURN(is_array, MaybeHandle<String>());
+ Isolate* const isolate = object->GetIsolate();
+ if (is_array.FromJust()) {
+ return isolate->factory()->Array_string();
+ }
+  // TODO(adamk): According to ES2015, we should return "Function" when the
+  // object has a [[Call]] internal method (corresponds to IsCallable). But
+  // this is well cemented in layout tests and might cause web breakage.
+ // if (object->IsCallable()) {
+ // return isolate->factory()->Function_string();
+ // }
+  // TODO(adamk): class_name() is expensive; replace it with instance type
+  // checks where possible.
+ return handle(object->class_name(), isolate);
+}
+
+
+// static
+Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+
+ // If the object was instantiated simply with base == new.target, the
+ // constructor on the map provides the most accurate name.
+ // Don't provide the info for prototypes, since their constructors are
+ // reclaimed and replaced by Object in OptimizeAsPrototype.
+ if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
+ !receiver->map()->is_prototype_map()) {
+ Object* maybe_constructor = receiver->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ String* name = String::cast(constructor->shared()->name());
+ if (name->length() == 0) name = constructor->shared()->inferred_name();
+ if (name->length() != 0 &&
+ !name->Equals(isolate->heap()->Object_string())) {
+ return handle(name, isolate);
+ }
}
}
- Object* maybe_constructor = GetConstructor();
+
+ if (FLAG_harmony_tostring) {
+ Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->to_string_tag_symbol());
+ if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
+ }
+
+ PrototypeIterator iter(isolate, receiver);
+ if (iter.IsAtEnd()) return handle(receiver->class_name());
+ Handle<JSReceiver> start = PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ LookupIterator it(receiver, isolate->factory()->constructor_string(), start,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
+ Handle<String> result = isolate->factory()->Object_string();
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ JSFunction* constructor = JSFunction::cast(*maybe_constructor);
String* name = String::cast(constructor->shared()->name());
- if (name->length() > 0) return name;
- String* inferred_name = constructor->shared()->inferred_name();
- if (inferred_name->length() > 0) return inferred_name;
- Object* proto = prototype();
- if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
+ if (name->length() == 0) name = constructor->shared()->inferred_name();
+ if (name->length() > 0) result = handle(name, isolate);
}
- // TODO(rossberg): what about proxies?
- // If the constructor is not present, return "Object".
- return GetHeap()->Object_string();
+
+ return result.is_identical_to(isolate->factory()->Object_string())
+ ? handle(receiver->class_name())
+ : result;
}
-String* JSReceiver::constructor_name() {
- return map()->constructor_name();
+Context* JSReceiver::GetCreationContext() {
+ if (IsJSBoundFunction()) {
+ return JSBoundFunction::cast(this)->creation_context();
+ }
+ Object* constructor = map()->GetConstructor();
+ JSFunction* function;
+ if (constructor->IsJSFunction()) {
+ function = JSFunction::cast(constructor);
+ } else {
+    // A function's map has null as its constructor, but any JSFunction
+    // knows its own context immediately.
+ CHECK(IsJSFunction());
+ function = JSFunction::cast(this);
+ }
+
+ return function->context()->native_context();
}
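// Hedged caller sketch for GetCreationContext above (V8-internal; not part
// of this patch). Bound functions carry their creation context directly;
// everything else recovers it through the constructor, or through the object
// itself when it is a JSFunction:
//
//   Context* native_context = receiver->GetCreationContext();
//   Isolate* isolate = native_context->GetIsolate();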
@@ -2153,7 +2654,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
PropertyAttributes attributes) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dict(object->global_dictionary());
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
int entry = dict->FindEntry(name);
@@ -2190,21 +2691,6 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
}
-Context* JSObject::GetCreationContext() {
- Object* constructor = this->map()->GetConstructor();
- JSFunction* function;
- if (!constructor->IsJSFunction()) {
- // Functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- function = JSFunction::cast(this);
- } else {
- function = JSFunction::cast(constructor);
- }
-
- return function->context()->native_context();
-}
-
-
MaybeHandle<Object> JSObject::EnqueueChangeRecord(Handle<JSObject> object,
const char* type_str,
Handle<Name> name,
@@ -2274,9 +2760,10 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
}
-static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
- Handle<Map> new_map,
- Isolate* isolate) {
+// static
+void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate) {
if (!FLAG_track_prototype_users) return;
if (!old_map->is_prototype_map()) return;
DCHECK(new_map->is_prototype_map());
@@ -2643,38 +3130,26 @@ static inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
}
-// Invalidates a transition target at |key|, and installs |new_descriptors| over
-// the current instance_descriptors to ensure proper sharing of descriptor
-// arrays.
-// Returns true if the transition target at given key was deprecated.
-bool Map::DeprecateTarget(PropertyKind kind, Name* key,
- PropertyAttributes attributes,
- DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor) {
- bool transition_target_deprecated = false;
- Map* maybe_transition =
- TransitionArray::SearchTransition(this, kind, key, attributes);
- if (maybe_transition != NULL) {
- maybe_transition->DeprecateTransitionTree();
- transition_target_deprecated = true;
+// Installs |new_descriptors| over the current instance_descriptors to ensure
+// proper sharing of descriptor arrays.
+void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
+ LayoutDescriptor* new_layout_descriptor) {
+ // Don't overwrite the empty descriptor array or initial map's descriptors.
+ if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined()) {
+ return;
}
- // Don't overwrite the empty descriptor array.
- if (NumberOfOwnDescriptors() == 0) return transition_target_deprecated;
-
DescriptorArray* to_replace = instance_descriptors();
- Map* current = this;
GetHeap()->incremental_marking()->RecordWrites(to_replace);
+ Map* current = this;
while (current->instance_descriptors() == to_replace) {
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break; // Stop overwriting at initial map.
current->SetEnumLength(kInvalidEnumCacheSentinel);
current->UpdateDescriptors(new_descriptors, new_layout_descriptor);
- Object* next = current->GetBackPointer();
- if (next->IsUndefined()) break;
current = Map::cast(next);
}
-
set_owns_descriptors(false);
- return transition_target_deprecated;
}
@@ -2682,7 +3157,14 @@ Map* Map::FindRootMap() {
Map* result = this;
while (true) {
Object* back = result->GetBackPointer();
- if (back->IsUndefined()) return result;
+ if (back->IsUndefined()) {
+ // Initial map always owns descriptors and doesn't have unused entries
+ // in the descriptor array.
+ DCHECK(result->owns_descriptors());
+ DCHECK_EQ(result->NumberOfOwnDescriptors(),
+ result->instance_descriptors()->number_of_descriptors());
+ return result;
+ }
result = Map::cast(back);
}
}
@@ -2770,7 +3252,7 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
}
-bool FieldTypeIsCleared(Representation rep, Handle<HeapType> type) {
+bool FieldTypeIsCleared(Representation rep, HeapType* type) {
return type->Is(HeapType::None()) && rep.IsHeapObject();
}
@@ -2784,7 +3266,7 @@ Handle<HeapType> Map::GeneralizeFieldType(Representation rep1,
// Cleared field types need special treatment: they represent lost knowledge,
// so we must be conservative, and their generalization with any other type
// is "Any".
- if (FieldTypeIsCleared(rep1, type1) || FieldTypeIsCleared(rep2, type2)) {
+ if (FieldTypeIsCleared(rep1, *type1) || FieldTypeIsCleared(rep2, *type2)) {
return HeapType::Any(isolate);
}
if (type1->NowIs(type2)) return type2;
@@ -2807,7 +3289,7 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
isolate);
if (old_representation.Equals(new_representation) &&
- !FieldTypeIsCleared(new_representation, new_field_type) &&
+ !FieldTypeIsCleared(new_representation, *new_field_type) &&
// Checking old_field_type for being cleared is not necessary because
// the NowIs check below would fail anyway in that case.
new_field_type->NowIs(old_field_type)) {
@@ -3333,9 +3815,6 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
int split_nof = split_map->NumberOfOwnDescriptors();
DCHECK_NE(old_nof, split_nof);
- Handle<LayoutDescriptor> new_layout_descriptor =
- LayoutDescriptor::New(split_map, new_descriptors, old_nof);
-
PropertyKind split_kind;
PropertyAttributes split_attributes;
if (modify_index == split_nof) {
@@ -3346,14 +3825,19 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
split_kind = split_prop_details.kind();
split_attributes = split_prop_details.attributes();
}
- bool transition_target_deprecated = split_map->DeprecateTarget(
- split_kind, old_descriptors->GetKey(split_nof), split_attributes,
- *new_descriptors, *new_layout_descriptor);
- // If |transition_target_deprecated| is true then the transition array
- // already contains entry for given descriptor. This means that the transition
+  // Invalidate the transition target at the split point, if any.
+ Map* maybe_transition = TransitionArray::SearchTransition(
+ *split_map, split_kind, old_descriptors->GetKey(split_nof),
+ split_attributes);
+ if (maybe_transition != NULL) {
+ maybe_transition->DeprecateTransitionTree();
+ }
+
+  // If |maybe_transition| is not NULL then the transition array already
+  // contains an entry for the given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
- if (!transition_target_deprecated &&
+ if (maybe_transition == NULL &&
!TransitionArray::CanHaveMoreTransitions(split_map)) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
new_kind, new_attributes,
@@ -3384,13 +3868,16 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
*old_field_type, *new_field_type);
}
- // Add missing transitions.
- Handle<Map> new_map = split_map;
- for (int i = split_nof; i < old_nof; ++i) {
- new_map = CopyInstallDescriptors(new_map, i, new_descriptors,
- new_layout_descriptor);
- }
- new_map->set_owns_descriptors(true);
+ Handle<LayoutDescriptor> new_layout_descriptor =
+ LayoutDescriptor::New(split_map, new_descriptors, old_nof);
+
+ Handle<Map> new_map =
+ AddMissingTransitions(split_map, new_descriptors, new_layout_descriptor);
+
+  // The deprecated part of the transition tree is no longer reachable, so
+  // replace the current instance descriptors in the surviving part of the
+  // tree with the new descriptors to maintain the descriptor-sharing
+  // invariant.
+ split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
return new_map;
}
@@ -3454,10 +3941,16 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
switch (new_details.type()) {
case DATA: {
HeapType* new_type = new_descriptors->GetFieldType(i);
+ // Cleared field types need special treatment. They represent lost
+ // knowledge, so we must first generalize the new_type to "Any".
+ if (FieldTypeIsCleared(new_details.representation(), new_type)) {
+ return MaybeHandle<Map>();
+ }
PropertyType old_property_type = old_details.type();
if (old_property_type == DATA) {
HeapType* old_type = old_descriptors->GetFieldType(i);
- if (!old_type->NowIs(new_type)) {
+ if (FieldTypeIsCleared(old_details.representation(), old_type) ||
+ !old_type->NowIs(new_type)) {
return MaybeHandle<Map>();
}
} else {
@@ -3502,8 +3995,8 @@ Handle<Map> Map::Update(Handle<Map> map) {
}
-MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
- Handle<Object> value) {
+Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
+ Handle<Object> value) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -3511,7 +4004,7 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->setter()->IsUndefined()) return MaybeHandle<Object>();
+ if (interceptor->setter()->IsUndefined()) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
v8::Local<v8::Value> result;
@@ -3527,9 +4020,10 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
result = args.Call(setter, index, v8::Utils::ToLocal(value));
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return MaybeHandle<Object>();
+ return Just(false);
}
v8::GenericNamedPropertySetterCallback setter =
@@ -3541,13 +4035,15 @@ MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
}
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- if (result.IsEmpty()) return MaybeHandle<Object>();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ if (result.IsEmpty()) return Just(false);
#ifdef DEBUG
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
#endif
- return value;
+ return Just(true);
+ // TODO(neis): In the future, we may want to actually return the interceptor's
+ // result, which then should be a boolean.
}
@@ -3556,15 +4052,19 @@ MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
LookupIterator it(object, name);
- return SetProperty(&it, value, language_mode, store_mode);
+ MAYBE_RETURN_NULL(SetProperty(&it, value, language_mode, store_mode));
+ return value;
}
-MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode,
- bool* found) {
+Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode,
+ bool* found) {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc(it->isolate());
@@ -3581,45 +4081,31 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
if (it->HasAccess()) break;
// Check whether it makes sense to reuse the lookup iterator. Here it
// might still call into setters up the prototype chain.
- return JSObject::SetPropertyWithFailedAccessCheck(it, value);
+ return JSObject::SetPropertyWithFailedAccessCheck(it, value,
+ should_throw);
case LookupIterator::JSPROXY:
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- return JSProxy::SetPropertyWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(), value,
- language_mode);
- } else {
- // TODO(verwaest): Use the MaybeHandle to indicate result.
- bool has_result = false;
- MaybeHandle<Object> maybe_result =
- JSProxy::SetPropertyViaPrototypesWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(),
- value, language_mode, &has_result);
- if (has_result) return maybe_result;
- done = true;
- }
- break;
+ return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
+ value, it->GetReceiver(), language_mode);
case LookupIterator::INTERCEPTOR:
if (it->HolderIsReceiverOrHiddenPrototype()) {
- MaybeHandle<Object> maybe_result =
- JSObject::SetPropertyWithInterceptor(it, value);
- if (!maybe_result.is_null()) return maybe_result;
- if (it->isolate()->has_pending_exception()) return maybe_result;
+ Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+ if (result.IsNothing() || result.FromJust()) return result;
} else {
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
- if (!maybe_attributes.IsJust()) return MaybeHandle<Object>();
+ if (!maybe_attributes.IsJust()) return Nothing<bool>();
done = maybe_attributes.FromJust() != ABSENT;
if (done && (maybe_attributes.FromJust() & READ_ONLY) != 0) {
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
}
break;
case LookupIterator::ACCESSOR: {
if (it->IsReadOnly()) {
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo() &&
@@ -3628,15 +4114,15 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
done = true;
break;
}
- return SetPropertyWithAccessor(it, value, language_mode);
+ return SetPropertyWithAccessor(it, value, should_throw);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
// TODO(verwaest): We should throw an exception.
- return value;
+ return Just(true);
case LookupIterator::DATA:
if (it->IsReadOnly()) {
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
if (it->HolderIsReceiverOrHiddenPrototype()) {
return SetDataProperty(it, value);
@@ -3654,89 +4140,106 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
// If the receiver is the JSGlobalObject, the store was contextual. In case
// the property did not exist yet on the global object itself, we have to
- // throw a reference error in strict mode.
+ // throw a reference error in strict mode. In sloppy mode, we continue.
if (it->GetReceiver()->IsJSGlobalObject() && is_strict(language_mode)) {
- THROW_NEW_ERROR(it->isolate(),
- NewReferenceError(MessageTemplate::kNotDefined, it->name()),
- Object);
+ it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, it->name()));
+ return Nothing<bool>();
}
*found = false;
- return MaybeHandle<Object>();
+ return Nothing<bool>();
}
-MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
+ RETURN_FAILURE(it->isolate(), should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
bool found = false;
- MaybeHandle<Object> result =
+ Maybe<bool> result =
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
- return AddDataProperty(it, value, NONE, language_mode, store_mode);
+ return AddDataProperty(it, value, NONE, should_throw, store_mode);
}
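// A standalone sketch (not part of this patch) of the error-signaling
// convention this refactoring adopts: Maybe<bool> separates "exception
// pending" (Nothing) from "operation refused" (Just(false)), and ShouldThrow
// tells the callee whether a refusal should raise. std::optional and the
// flag below are simplified stand-ins for V8's Maybe and pending exception.
#include <cstdio>
#include <optional>

enum DemoShouldThrow { DEMO_THROW_ON_ERROR, DEMO_DONT_THROW };

static bool exception_pending = false;

// Models RETURN_FAILURE: sloppy mode reports a quiet failure, strict mode
// records a pending exception and signals it with an empty optional.
static std::optional<bool> WriteReadOnly(DemoShouldThrow should_throw) {
  if (should_throw == DEMO_DONT_THROW) return false;  // Just(false)
  exception_pending = true;                           // ~isolate->Throw(...)
  return std::nullopt;                                // Nothing<bool>()
}

int main() {
  std::optional<bool> sloppy = WriteReadOnly(DEMO_DONT_THROW);
  std::optional<bool> strict = WriteReadOnly(DEMO_THROW_ON_ERROR);
  std::printf("sloppy refused quietly: %d, strict threw: %d\n",
              !sloppy.value_or(true),
              !strict.has_value() && exception_pending);
}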
-MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode) {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ Isolate* isolate = it->isolate();
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+
bool found = false;
- MaybeHandle<Object> result =
+ Maybe<bool> result =
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
+ // The property either doesn't exist on the holder or exists there as a data
+ // property.
+
if (!it->GetReceiver()->IsJSReceiver()) {
- return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
- it->GetName(), value, language_mode);
+ return WriteToReadOnlyProperty(it, value, should_throw);
}
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
LookupIterator::Configuration c = LookupIterator::OWN;
LookupIterator own_lookup =
- it->IsElement()
- ? LookupIterator(it->isolate(), it->GetReceiver(), it->index(), c)
- : LookupIterator(it->GetReceiver(), it->name(), c);
+ it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
+ : LookupIterator(receiver, it->name(), c);
for (; own_lookup.IsFound(); own_lookup.Next()) {
switch (own_lookup.state()) {
case LookupIterator::ACCESS_CHECK:
if (!own_lookup.HasAccess()) {
- return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value);
+ return JSObject::SetPropertyWithFailedAccessCheck(&own_lookup, value,
+ should_throw);
}
break;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, language_mode);
+ case LookupIterator::ACCESSOR:
+ return RedefineIncompatibleProperty(isolate, it->GetName(), value,
+ should_throw);
case LookupIterator::DATA: {
PropertyDetails details = own_lookup.property_details();
- if (details.IsConfigurable() || !details.IsReadOnly()) {
- return JSObject::DefineOwnPropertyIgnoreAttributes(
- &own_lookup, value, details.attributes());
- }
- return WriteToReadOnlyProperty(&own_lookup, value, language_mode);
- }
-
- case LookupIterator::ACCESSOR: {
- PropertyDetails details = own_lookup.property_details();
- if (details.IsConfigurable()) {
- return JSObject::DefineOwnPropertyIgnoreAttributes(
- &own_lookup, value, details.attributes());
+ if (details.IsReadOnly()) {
+ return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
}
-
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, language_mode);
+ return SetDataProperty(&own_lookup, value);
}
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY: {
- bool found = false;
- MaybeHandle<Object> result = SetPropertyInternal(
- &own_lookup, value, language_mode, store_mode, &found);
- if (found) return result;
- break;
+ PropertyDescriptor desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(&own_lookup, &desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (!owned.FromJust()) {
+ return JSReceiver::CreateDataProperty(&own_lookup, value,
+ should_throw);
+ }
+ if (PropertyDescriptor::IsAccessorDescriptor(&desc) ||
+ !desc.writable()) {
+ return RedefineIncompatibleProperty(isolate, it->GetName(), value,
+ should_throw);
+ }
+
+ PropertyDescriptor value_desc;
+ value_desc.set_value(value);
+ return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
+ &value_desc, should_throw);
}
case LookupIterator::NOT_FOUND:
@@ -3745,7 +4248,7 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
}
}
- return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
+ return JSObject::AddDataProperty(&own_lookup, value, NONE, should_throw,
store_mode);
}
@@ -3775,38 +4278,49 @@ MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
}
-MaybeHandle<Object> Object::WriteToReadOnlyProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode) {
+Maybe<bool> Object::CannotCreateProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictCannotCreateProperty, name,
+ Object::TypeOf(isolate, receiver), receiver));
+}
+
+
+Maybe<bool> Object::WriteToReadOnlyProperty(LookupIterator* it,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
return WriteToReadOnlyProperty(it->isolate(), it->GetReceiver(),
- it->GetName(), value, language_mode);
+ it->GetName(), value, should_throw);
}
-MaybeHandle<Object> Object::WriteToReadOnlyProperty(
- Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
- Handle<Object> value, LanguageMode language_mode) {
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kStrictReadOnlyProperty, name, receiver),
- Object);
+Maybe<bool> Object::WriteToReadOnlyProperty(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty, name,
+ Object::TypeOf(isolate, receiver), receiver));
}
-MaybeHandle<Object> Object::RedefineNonconfigurableProperty(
- Isolate* isolate, Handle<Object> name, Handle<Object> value,
- LanguageMode language_mode) {
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kRedefineDisallowed, name),
- Object);
+Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
+ Handle<Object> name,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, name));
}
-MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
- Handle<Object> value) {
- // Proxies are handled on the WithHandler path. Other non-JSObjects cannot
- // have own properties.
+Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
+  // Proxies are handled elsewhere (see JSProxy::SetProperty). Other
+  // non-JSObjects cannot have own properties.
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
// Store on the holder which may be hidden behind the receiver.
@@ -3825,8 +4339,8 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// Convert the incoming value to a number for storing into typed arrays.
if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), to_assign,
- Object::ToNumber(value), Object);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
// ToNumber above might modify the receiver, causing the cached
// holder_map to mismatch the actual holder->map() after this point.
// Reload the map to be in consistent state. Other cached state cannot
@@ -3837,7 +4351,8 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// We have to recheck the length. However, it can only change if the
// underlying buffer was neutered, so just check that.
if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
- return value;
+        // TODO(neis): According to the spec, this should throw a TypeError.
+        return Just(true);
}
}
}
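// The re-validation above exists because Object::ToNumber can run arbitrary
// JS. A comment-only JS illustration:
//
//   var ta = new Int8Array(buf);
//   ta[0] = { valueOf() { postMessage('', '*', [buf]); return 1; } };
//
// valueOf() may detach (neuter) the underlying ArrayBuffer, so both the
// cached holder map and the view's length are rechecked after conversion.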
@@ -3851,10 +4366,11 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
// Send the change record if there are observers.
if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
- RETURN_ON_EXCEPTION(it->isolate(), JSObject::EnqueueChangeRecord(
- receiver, "update", it->GetName(),
- maybe_old.ToHandleChecked()),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(),
+ JSObject::EnqueueChangeRecord(receiver, "update", it->GetName(),
+ maybe_old.ToHandleChecked()),
+ Nothing<bool>());
}
#if VERIFY_HEAP
@@ -3862,7 +4378,7 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
receiver->JSObjectVerify();
}
#endif
- return value;
+ return Just(true);
}
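// Caller-side sketch of the Maybe<bool> contract used from here on,
// assuming the V8-internal types declared in this file:
//
//   Maybe<bool> ok = Object::SetDataProperty(&it, value);
//   if (ok.IsNothing()) return Nothing<bool>();  // exception pending
//   DCHECK(ok.FromJust());                       // success otherwise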
@@ -3907,15 +4423,14 @@ MUST_USE_RESULT static MaybeHandle<Object> EnqueueSpliceRecord(
}
-MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
- Handle<Object> value,
- PropertyAttributes attributes,
- LanguageMode language_mode,
- StoreFromKeyed store_mode) {
+Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
+ PropertyAttributes attributes,
+ ShouldThrow should_throw,
+ StoreFromKeyed store_mode) {
DCHECK(!it->GetReceiver()->IsJSProxy());
if (!it->GetReceiver()->IsJSObject()) {
- // TODO(verwaest): Throw a TypeError with a more specific message.
- return WriteToReadOnlyProperty(it, value, language_mode);
+ return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
+ value, should_throw);
}
DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, it->state());
@@ -3924,24 +4439,25 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
// instead. If the prototype is Null, the proxy is detached.
- if (receiver->IsJSGlobalProxy()) return value;
+ if (receiver->IsJSGlobalProxy()) return Just(true);
Isolate* isolate = it->isolate();
if (!receiver->map()->is_extensible() &&
(it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()))) {
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kObjectNotExtensible,
- it->GetName()),
- Object);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
}
if (it->IsElement()) {
if (receiver->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
if (JSArray::WouldChangeReadOnlyLength(array, it->index())) {
- if (is_sloppy(language_mode)) return value;
- return JSArray::ReadOnlyLengthError(array);
+ RETURN_FAILURE(array->GetIsolate(), should_throw,
+ NewTypeError(MessageTemplate::kStrictReadOnlyProperty,
+ isolate->factory()->length_string(),
+ Object::TypeOf(isolate, array), array));
}
if (FLAG_trace_external_array_abuse &&
@@ -3954,8 +4470,8 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
}
}
- MaybeHandle<Object> result =
- JSObject::AddDataElement(receiver, it->index(), value, attributes);
+ Maybe<bool> result = JSObject::AddDataElement(receiver, it->index(), value,
+ attributes, should_throw);
JSObject::ValidateElements(receiver);
return result;
} else {
@@ -3979,10 +4495,10 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
// Send the change record if there are observers.
if (receiver->map()->is_observed() &&
!isolate->IsInternallyUsedPropertyName(it->name())) {
- RETURN_ON_EXCEPTION(isolate, JSObject::EnqueueChangeRecord(
- receiver, "add", it->name(),
- it->factory()->the_hole_value()),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(isolate, JSObject::EnqueueChangeRecord(
+ receiver, "add", it->name(),
+ it->factory()->the_hole_value()),
+ Nothing<bool>());
}
#if VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -3991,7 +4507,7 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
#endif
}
- return value;
+ return Just(true);
}
@@ -4027,15 +4543,13 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
// Replace descriptors by new_descriptors in all maps that share it.
map->GetHeap()->incremental_marking()->RecordWrites(*descriptors);
- Map* walk_map;
- for (Object* current = map->GetBackPointer();
- !current->IsUndefined();
- current = walk_map->GetBackPointer()) {
- walk_map = Map::cast(current);
- if (walk_map->instance_descriptors() != *descriptors) break;
- walk_map->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ Map* current = *map;
+ while (current->instance_descriptors() == *descriptors) {
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break; // Stop overwriting at initial map.
+ current->UpdateDescriptors(*new_descriptors, layout_descriptor);
+ current = Map::cast(next);
}
-
map->UpdateDescriptors(*new_descriptors, layout_descriptor);
}
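// Shape of the rewritten walk (sketch): transitioned maps share one
// descriptor array and their back pointers lead toward the initial map,
//
//   initial_map <- map_a <- map_b <- *map
//
// so starting at *map, every map still referencing *descriptors is switched
// to *new_descriptors; the walk stops at the initial map or at the first
// map that owns a different descriptor array.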
@@ -4264,19 +4778,16 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
return handle(native_context->fast_aliased_arguments_map());
}
- } else {
- Object* maybe_array_maps = map->is_strong()
- ? native_context->js_array_strong_maps()
- : native_context->js_array_maps();
+ } else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
// Reuse map transitions for JSArrays.
- if (maybe_array_maps->IsFixedArray()) {
- DisallowHeapAllocation no_gc;
- FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
- if (array_maps->get(from_kind) == *map) {
- Object* maybe_transitioned_map = array_maps->get(to_kind);
- if (maybe_transitioned_map->IsMap()) {
- return handle(Map::cast(maybe_transitioned_map));
- }
+ DisallowHeapAllocation no_gc;
+ Strength strength = map->is_strong() ? Strength::STRONG : Strength::WEAK;
+ if (native_context->get(Context::ArrayMapIndex(from_kind, strength)) ==
+ *map) {
+ Object* maybe_transitioned_map =
+ native_context->get(Context::ArrayMapIndex(to_kind, strength));
+ if (maybe_transitioned_map->IsMap()) {
+ return handle(Map::cast(maybe_transitioned_map), isolate);
}
}
}
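// Cache layout assumed by the new lookup (sketch): the native context keeps
// one JSArray map slot per (ElementsKind, Strength) pair, addressed as
//
//   native_context->get(Context::ArrayMapIndex(kind, strength));
//
// replacing the old indirection through the js_array_maps FixedArray.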
@@ -4325,290 +4836,291 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
}
-Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Name> name) {
+void JSProxy::Revoke(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
+ if (!proxy->IsRevoked()) proxy->set_handler(isolate->heap()->null_value());
+ DCHECK(proxy->IsRevoked());
+}
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return Just(false);
- Handle<Object> args[] = { name };
- Handle<Object> result;
+Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Name> name) {
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(Nothing<bool>());
+ // 1. (Assert)
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate->factory()->has_string()));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "has").
+ Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result, CallTrap(proxy, "has", isolate->derived_has_trap(),
- arraysize(args), args),
+ isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->has_string()),
Nothing<bool>());
-
- return Just(result->BooleanValue());
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 7a. Return target.[[HasProperty]](P).
+ return JSReceiver::HasProperty(target, name);
+ }
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, P»)).
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ bool boolean_trap_result = trap_result_obj->BooleanValue();
+ // 9. If booleanTrapResult is false, then:
+ if (!boolean_trap_result) {
+ // 9a. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, target, name, &target_desc);
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 9b. If targetDesc is not undefined, then:
+ if (target_found.FromJust()) {
+ // 9b i. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ if (!target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyHasNonConfigurable, name));
+ return Nothing<bool>();
+ }
+ // 9b ii. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 9b iii. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyHasNonExtensible, name));
+ return Nothing<bool>();
+ }
+ }
+ }
+ // 10. Return booleanTrapResult.
+ return Just(boolean_trap_result);
}
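// JS illustration of the invariant enforced in steps 9a-9b (comment-only
// sketch):
//
//   var target = {};
//   Object.defineProperty(target, 'x', {value: 1, configurable: false});
//   var p = new Proxy(target, { has() { return false; } });
//   'x' in p;  // TypeError: kProxyHasNonConfigurable, the trap may not
//              // hide a non-configurable own property of the target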
-MaybeHandle<Object> JSProxy::SetPropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode) {
+Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
+ Handle<Object> value, Handle<Object> receiver,
+ LanguageMode language_mode) {
+ DCHECK(!name->IsPrivate());
Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->set_string();
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return value;
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
- Handle<Object> args[] = { receiver, name, value };
- RETURN_ON_EXCEPTION(
- isolate,
- CallTrap(proxy,
- "set",
- isolate->derived_set_trap(),
- arraysize(args),
- args),
- Object);
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, name, target);
+ return Object::SetSuperProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED);
+ }
- return value;
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name, value, receiver};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, name));
+ }
+
+  // Enforce the [[Set]] invariants (ES6 9.5.9): a trap that reports success
+  // must be consistent with a non-configurable non-writable data property
+  // and with a non-configurable accessor that has no setter.
+ PropertyDescriptor target_desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ bool inconsistent = PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ !target_desc.writable() &&
+ !value->SameValue(*target_desc.value());
+ if (inconsistent) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenData, name));
+ return Nothing<bool>();
+ }
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.set()->IsUndefined();
+ if (inconsistent) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenAccessor, name));
+ return Nothing<bool>();
+ }
+ }
+ return Just(true);
}
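// JS illustration of the enforced [[Set]] invariant (comment-only sketch):
//
//   var target = {};
//   Object.defineProperty(target, 'x',
//       {value: 1, writable: false, configurable: false});
//   var p = new Proxy(target, { set() { return true; } });
//   p.x = 2;  // TypeError: kProxySetFrozenData, the frozen data property
//             // must not appear to change its value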
-MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode, bool* done) {
+Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
+ Handle<Name> name,
+ LanguageMode language_mode) {
+ DCHECK(!name->IsPrivate());
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
Isolate* isolate = proxy->GetIsolate();
- Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->deleteProperty_string();
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) {
- *done = false;
- return isolate->factory()->the_hole_value();
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
}
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
- *done = true; // except where redefined...
- Handle<Object> args[] = { name };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CallTrap(proxy,
- "getPropertyDescriptor",
- Handle<Object>(),
- arraysize(args),
- args),
- Object);
-
- if (result->IsUndefined()) {
- *done = false;
- return isolate->factory()->the_hole_value();
- }
-
- // Emulate [[GetProperty]] semantics for proxies.
- Handle<Object> argv[] = { result };
- Handle<Object> desc;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, desc,
- Execution::Call(isolate,
- isolate->to_complete_property_descriptor(),
- result,
- arraysize(argv),
- argv),
- Object);
-
- // [[GetProperty]] requires to check that all properties are configurable.
- Handle<String> configurable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("configurable_"));
- Handle<Object> configurable =
- Object::GetProperty(desc, configurable_name).ToHandleChecked();
- DCHECK(configurable->IsBoolean());
- if (configurable->IsFalse()) {
- Handle<String> trap = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("getPropertyDescriptor"));
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kProxyPropNotConfigurable,
- handler, name, trap),
- Object);
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ return JSReceiver::DeletePropertyOrElement(target, name, language_mode);
}
- DCHECK(configurable->IsTrue());
- // Check for DataDescriptor.
- Handle<String> hasWritable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("hasWritable_"));
- Handle<Object> hasWritable =
- Object::GetProperty(desc, hasWritable_name).ToHandleChecked();
- DCHECK(hasWritable->IsBoolean());
- if (hasWritable->IsTrue()) {
- Handle<String> writable_name = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("writable_"));
- Handle<Object> writable =
- Object::GetProperty(desc, writable_name).ToHandleChecked();
- DCHECK(writable->IsBoolean());
- *done = writable->IsFalse();
- if (!*done) return isolate->factory()->the_hole_value();
- return WriteToReadOnlyProperty(isolate, receiver, name, value,
- language_mode);
- }
-
- // We have an AccessorDescriptor.
- Handle<String> set_name =
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("set_"));
- Handle<Object> setter = Object::GetProperty(desc, set_name).ToHandleChecked();
- if (!setter->IsUndefined()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value);
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, name));
+ }
+
+  // Enforce the [[Delete]] invariant (ES6 9.5.10): a non-configurable own
+  // property of the target must not be reported as deleted.
+ PropertyDescriptor target_desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust() && !target_desc.configurable()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
+ return Nothing<bool>();
}
-
- if (is_sloppy(language_mode)) return value;
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kNoSetterInCallback, name, proxy),
- Object);
+ return Just(true);
}
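// JS illustration of the enforced [[Delete]] invariant (comment-only
// sketch):
//
//   var target = {};
//   Object.defineProperty(target, 'x', {value: 1, configurable: false});
//   var p = new Proxy(target, { deleteProperty() { return true; } });
//   delete p.x;  // TypeError: kProxyDeletePropertyNonConfigurable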
-MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode) {
- Isolate* isolate = proxy->GetIsolate();
+// static
+MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
+ Handle<Object> handler) {
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
+ JSProxy);
+ }
+ if (target->IsJSProxy() && JSProxy::cast(*target)->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
+ JSProxy);
+ }
+ if (!handler->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
+ JSProxy);
+ }
+ if (handler->IsJSProxy() && JSProxy::cast(*handler)->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
+ JSProxy);
+ }
+ return isolate->factory()->NewJSProxy(Handle<JSReceiver>::cast(target),
+ Handle<JSReceiver>::cast(handler));
+}
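// The four rejections above, in order: non-object target, revoked proxy as
// target, non-object handler, revoked proxy as handler. JS sketch:
//
//   new Proxy(1, {});        // TypeError: kProxyNonObject
//   var r = Proxy.revocable({}, {}); r.revoke();
//   new Proxy(r.proxy, {});  // TypeError: kProxyHandlerOrTargetRevoked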
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return isolate->factory()->false_value();
- Handle<Object> args[] = { name };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CallTrap(proxy,
- "delete",
- Handle<Object>(),
- arraysize(args),
- args),
- Object);
-
- bool result_bool = result->BooleanValue();
- if (is_strict(language_mode) && !result_bool) {
- Handle<Object> handler(proxy->handler(), isolate);
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kProxyHandlerDeleteFailed, handler),
- Object);
+// static
+MaybeHandle<Context> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
+ DCHECK(proxy->map()->is_constructor());
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(proxy->GetIsolate(),
+ NewTypeError(MessageTemplate::kProxyRevoked), Context);
}
- return isolate->factory()->ToBoolean(result_bool);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()));
+ return JSReceiver::GetFunctionRealm(target);
}
-Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name) {
- Isolate* isolate = proxy->GetIsolate();
- HandleScope scope(isolate);
+// static
+MaybeHandle<Context> JSBoundFunction::GetFunctionRealm(
+ Handle<JSBoundFunction> function) {
+ DCHECK(function->map()->is_constructor());
+ return JSReceiver::GetFunctionRealm(
+ handle(function->bound_target_function()));
+}
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return Just(ABSENT);
- Handle<Object> args[] = { name };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result, proxy->CallTrap(proxy, "getPropertyDescriptor",
- Handle<Object>(), arraysize(args), args),
- Nothing<PropertyAttributes>());
+// static
+Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
+ DCHECK(function->map()->is_constructor());
+ return handle(function->context()->native_context());
+}
- if (result->IsUndefined()) return Just(ABSENT);
- Handle<Object> argv[] = { result };
- Handle<Object> desc;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, desc,
- Execution::Call(isolate, isolate->to_complete_property_descriptor(),
- result, arraysize(argv), argv),
- Nothing<PropertyAttributes>());
-
- // Convert result to PropertyAttributes.
- Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("enumerable_"));
- Handle<Object> enumerable;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, enumerable,
- Object::GetProperty(desc, enum_n),
- Nothing<PropertyAttributes>());
- Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("configurable_"));
- Handle<Object> configurable;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, configurable,
- Object::GetProperty(desc, conf_n),
- Nothing<PropertyAttributes>());
- Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("writable_"));
- Handle<Object> writable;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, writable,
- Object::GetProperty(desc, writ_n),
- Nothing<PropertyAttributes>());
- if (!writable->BooleanValue()) {
- Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("set_"));
- Handle<Object> setter;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, setter,
- Object::GetProperty(desc, set_n),
- Nothing<PropertyAttributes>());
- writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
- }
-
- if (configurable->IsFalse()) {
- Handle<Object> handler(proxy->handler(), isolate);
- Handle<String> trap = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("getPropertyDescriptor"));
- Handle<Object> error = isolate->factory()->NewTypeError(
- MessageTemplate::kProxyPropNotConfigurable, handler, name, trap);
- isolate->Throw(*error);
- return Nothing<PropertyAttributes>();
- }
-
- int attributes = NONE;
- if (!enumerable->BooleanValue()) attributes |= DONT_ENUM;
- if (!configurable->BooleanValue()) attributes |= DONT_DELETE;
- if (!writable->BooleanValue()) attributes |= READ_ONLY;
- return Just(static_cast<PropertyAttributes>(attributes));
-}
-
-
-void JSProxy::Fix(Handle<JSProxy> proxy) {
- Isolate* isolate = proxy->GetIsolate();
+// static
+MaybeHandle<Context> JSObject::GetFunctionRealm(Handle<JSObject> object) {
+ DCHECK(object->map()->is_constructor());
+ DCHECK(!object->IsJSFunction());
+ return handle(object->GetCreationContext());
+}
- // Save identity hash.
- Handle<Object> hash(proxy->GetIdentityHash(), isolate);
- if (proxy->IsJSFunctionProxy()) {
- isolate->factory()->BecomeJSFunction(proxy);
- // Code will be set on the JavaScript side.
- } else {
- isolate->factory()->BecomeJSObject(proxy);
+// static
+MaybeHandle<Context> JSReceiver::GetFunctionRealm(Handle<JSReceiver> receiver) {
+ if (receiver->IsJSProxy()) {
+ return JSProxy::GetFunctionRealm(Handle<JSProxy>::cast(receiver));
}
- DCHECK(proxy->IsJSObject());
- // Inherit identity, if it was present.
- if (hash->IsSmi()) {
- JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy),
- Handle<Smi>::cast(hash));
+ if (receiver->IsJSFunction()) {
+ return JSFunction::GetFunctionRealm(Handle<JSFunction>::cast(receiver));
}
-}
-
-MaybeHandle<Object> JSProxy::CallTrap(Handle<JSProxy> proxy,
- const char* name,
- Handle<Object> derived,
- int argc,
- Handle<Object> argv[]) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<Object> handler(proxy->handler(), isolate);
+ if (receiver->IsJSBoundFunction()) {
+ return JSBoundFunction::GetFunctionRealm(
+ Handle<JSBoundFunction>::cast(receiver));
+ }
- Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name);
- Handle<Object> trap;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, trap,
- Object::GetPropertyOrElement(handler, trap_name),
- Object);
+ return JSObject::GetFunctionRealm(Handle<JSObject>::cast(receiver));
+}
- if (trap->IsUndefined()) {
- if (derived.is_null()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kProxyHandlerTrapMissing,
- handler, trap_name),
- Object);
- }
- trap = Handle<Object>(derived);
- }
- return Execution::Call(isolate, trap, handler, argc, argv);
+Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+ HandleScope scope(isolate);
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
+ isolate, it->GetHolder<JSProxy>(), it->GetName(), &desc);
+ MAYBE_RETURN(found, Nothing<PropertyAttributes>());
+ if (!found.FromJust()) return Just(ABSENT);
+ return Just(desc.ToAttributes());
}
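// PropertyDescriptor::ToAttributes folds the descriptor's booleans back
// into the attribute bitfield; a sketch of the mapping:
//
//   attrs |= desc.enumerable()   ? NONE : DONT_ENUM;
//   attrs |= desc.configurable() ? NONE : DONT_DELETE;
//   attrs |= desc.writable()     ? NONE : READ_ONLY;  // data descriptors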
@@ -4686,8 +5198,9 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
DCHECK(object->map()->is_extensible() ||
it.isolate()->IsInternallyUsedPropertyName(name));
#endif
- AddDataProperty(&it, value, attributes, STRICT,
- CERTAINLY_NOT_STORE_FROM_KEYED).Check();
+ CHECK(AddDataProperty(&it, value, attributes, THROW_ON_ERROR,
+ CERTAINLY_NOT_STORE_FROM_KEYED)
+ .IsJust());
}
@@ -4705,6 +5218,15 @@ void ExecutableAccessorInfo::ClearSetter(Handle<ExecutableAccessorInfo> info) {
MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ExecutableAccessorInfoHandling handling) {
+ MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(
+ it, value, attributes, THROW_ON_ERROR, handling));
+ return value;
+}
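// The wrapper keeps the old handle-based contract alive; expanded, it is
// roughly (sketch):
//
//   Maybe<bool> result = DefineOwnPropertyIgnoreAttributes(
//       it, value, attributes, THROW_ON_ERROR, handling);
//   if (result.IsNothing()) return MaybeHandle<Object>();  // exception
//   return value;  // old callers always received the stored value back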
+
+
+Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ ShouldThrow should_throw, ExecutableAccessorInfoHandling handling) {
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
bool is_observed = object->map()->is_observed() &&
(it->IsElement() ||
@@ -4720,8 +5242,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
case LookupIterator::ACCESS_CHECK:
if (!it->HasAccess()) {
it->isolate()->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
- return value;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(true);
}
break;
@@ -4735,10 +5257,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// they throw. Here we should do the same.
case LookupIterator::INTERCEPTOR:
if (handling == DONT_FORCE_FIELD) {
- MaybeHandle<Object> maybe_result =
- JSObject::SetPropertyWithInterceptor(it, value);
- if (!maybe_result.is_null()) return maybe_result;
- if (it->isolate()->has_pending_exception()) return maybe_result;
+ Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+ if (result.IsNothing() || result.FromJust()) return result;
}
break;
@@ -4753,13 +5273,11 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// Ensure the context isn't changed after calling into accessors.
AssertNoContextChange ncc(it->isolate());
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- it->isolate(), result,
- JSObject::SetPropertyWithAccessor(it, value, STRICT), Object);
- DCHECK(result->SameValue(*value));
+ Maybe<bool> result =
+ JSObject::SetPropertyWithAccessor(it, value, should_throw);
+ if (result.IsNothing() || !result.FromJust()) return result;
- if (details.attributes() == attributes) return value;
+ if (details.attributes() == attributes) return Just(true);
// Reconfigure the accessor if attributes mismatch.
Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
@@ -4778,18 +5296,18 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
}
if (is_observed) {
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(
it->isolate(),
EnqueueChangeRecord(object, "reconfigure", it->GetName(),
it->factory()->the_hole_value()),
- Object);
+ Nothing<bool>());
}
- return value;
+ return Just(true);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, STRICT);
+ return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
+ should_throw);
case LookupIterator::DATA: {
PropertyDetails details = it->property_details();
@@ -4802,8 +5320,8 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
// Special case: properties of typed arrays cannot be reconfigured to
// non-writable nor to non-enumerable.
if (it->IsElement() && object->HasFixedTypedArrayElements()) {
- return RedefineNonconfigurableProperty(it->isolate(), it->GetName(),
- value, STRICT);
+ return RedefineIncompatibleProperty(it->isolate(), it->GetName(),
+ value, should_throw);
}
// Reconfigure the data property if the attributes mismatch.
@@ -4815,17 +5333,17 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
if (old_value->SameValue(*value)) {
old_value = it->factory()->the_hole_value();
}
- RETURN_ON_EXCEPTION(it->isolate(),
- EnqueueChangeRecord(object, "reconfigure",
- it->GetName(), old_value),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(), EnqueueChangeRecord(object, "reconfigure",
+ it->GetName(), old_value),
+ Nothing<bool>());
}
- return value;
+ return Just(true);
}
}
}
- return AddDataProperty(it, value, attributes, STRICT,
+ return AddDataProperty(it, value, attributes, should_throw,
CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -4858,27 +5376,6 @@ MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
}
-Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
- Handle<Object> value) {
- DCHECK(it->GetReceiver()->IsJSObject());
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(it);
- if (maybe.IsNothing()) return Nothing<bool>();
-
- if (it->IsFound()) {
- if (!it->IsConfigurable()) return Just(false);
- } else {
- if (!JSObject::cast(*it->GetReceiver())->IsExtensible()) return Just(false);
- }
-
- RETURN_ON_EXCEPTION_VALUE(
- it->isolate(),
- DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
- Nothing<bool>());
-
- return Just(true);
-}
-
-
Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
LookupIterator* it) {
Isolate* isolate = it->isolate();
@@ -4906,6 +5403,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
result = args.Call(query, index);
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
v8::GenericNamedPropertyQueryCallback query =
v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
interceptor->query());
@@ -4931,7 +5429,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
result = args.Call(getter, index);
} else {
Handle<Name> name = it->name();
-
+ DCHECK(!name->IsPrivate());
v8::GenericNamedPropertyGetterCallback getter =
v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
interceptor->getter());
@@ -4955,8 +5453,7 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- return JSProxy::GetPropertyAttributesWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName());
+ return JSProxy::GetPropertyAttributes(it);
case LookupIterator::INTERCEPTOR: {
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -5038,7 +5535,7 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
Handle<Map> new_map,
int expected_additional_properties) {
// The global object is always normalized.
- DCHECK(!object->IsGlobalObject());
+ DCHECK(!object->IsJSGlobalObject());
// JSGlobalProxy must never be normalized
DCHECK(!object->IsJSGlobalProxy());
@@ -5155,7 +5652,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int unused_property_fields,
const char* reason) {
if (object->HasFastProperties()) return;
- DCHECK(!object->IsGlobalObject());
+ DCHECK(!object->IsJSGlobalObject());
Isolate* isolate = object->GetIsolate();
Factory* factory = isolate->factory();
Handle<NameDictionary> dictionary(object->property_dictionary());
@@ -5345,7 +5842,7 @@ void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
dictionary->set_requires_slow_elements();
// TODO(verwaest): Remove this hack.
if (map()->is_prototype_map()) {
- GetHeap()->ClearAllKeyedStoreICs();
+ TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
}
}
@@ -5476,7 +5973,6 @@ Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
}
-
Isolate* isolate = object->GetIsolate();
Handle<Object> maybe_hash(object->GetIdentityHash(), isolate);
@@ -5651,8 +6147,7 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
}
-MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
- LookupIterator* it) {
+Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -5660,7 +6155,7 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->deleter()->IsUndefined()) return MaybeHandle<Object>();
+ if (interceptor->deleter()->IsUndefined()) return Nothing<bool>();
Handle<JSObject> holder = it->GetHolder<JSObject>();
@@ -5675,9 +6170,10 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
ApiIndexedPropertyAccess("interceptor-indexed-delete", *holder, index));
result = args.Call(deleter, index);
} else if (it->name()->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return MaybeHandle<Object>();
+ return Nothing<bool>();
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
v8::GenericNamedPropertyDeleterCallback deleter =
v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
interceptor->deleter());
@@ -5686,25 +6182,26 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
result = args.Call(deleter, v8::Utils::ToLocal(name));
}
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.IsEmpty()) return MaybeHandle<Object>();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.IsEmpty()) return Nothing<bool>();
DCHECK(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
// Rebox CustomArguments::kReturnValueOffset before returning.
- return handle(*result_internal, isolate);
+ return Just(result_internal->BooleanValue());
}
-void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name, int entry) {
+void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
+ Handle<Name> name, int entry) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
- Handle<GlobalDictionary> dictionary(object->global_dictionary());
+ Handle<GlobalDictionary> dictionary(
+ JSObject::cast(*object)->global_dictionary());
DCHECK_NE(GlobalDictionary::kNotFound, entry);
auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
@@ -5724,15 +6221,23 @@ void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
}
-// ECMA-262, 3rd, 8.6.2.5
-MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
- LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
+ LanguageMode language_mode) {
Isolate* isolate = it->isolate();
+
if (it->state() == LookupIterator::JSPROXY) {
- return JSProxy::DeletePropertyWithHandler(it->GetHolder<JSProxy>(),
- it->GetName(), language_mode);
+ return JSProxy::DeletePropertyOrElement(it->GetHolder<JSProxy>(),
+ it->GetName(), language_mode);
}
+ if (it->GetReceiver()->IsJSProxy()) {
+ if (it->state() != LookupIterator::NOT_FOUND) {
+ DCHECK_EQ(LookupIterator::DATA, it->state());
+ DCHECK(it->GetName()->IsPrivate());
+ it->Delete();
+ }
+ return Just(true);
+ }
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
bool is_observed =
@@ -5750,19 +6255,20 @@ MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
case LookupIterator::ACCESS_CHECK:
if (it->HasAccess()) break;
isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return it->factory()->false_value();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(false);
case LookupIterator::INTERCEPTOR: {
- MaybeHandle<Object> maybe_result =
- JSObject::DeletePropertyWithInterceptor(it);
- // Delete with interceptor succeeded. Return result.
- if (!maybe_result.is_null()) return maybe_result;
+ Maybe<bool> result = JSObject::DeletePropertyWithInterceptor(it);
// An exception was thrown in the interceptor. Propagate.
- if (isolate->has_pending_exception()) return maybe_result;
+ if (isolate->has_pending_exception()) return Nothing<bool>();
+ // Delete with interceptor succeeded. Return result.
+ // TODO(neis): In strict mode, we should probably throw if the
+ // interceptor returns false.
+ if (result.IsJust()) return result;
break;
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return it->factory()->true_value();
+ return Just(true);
case LookupIterator::DATA:
if (is_observed) {
old_value = it->GetDataValue();
@@ -5776,52 +6282,1130 @@ MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
receiver->map()->is_strong()
? MessageTemplate::kStrongDeleteProperty
: MessageTemplate::kStrictDeleteProperty;
- THROW_NEW_ERROR(
- isolate, NewTypeError(templ, it->GetName(), receiver), Object);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ templ, it->GetName(), receiver));
+ return Nothing<bool>();
}
- return it->factory()->false_value();
+ return Just(false);
}
it->Delete();
if (is_observed) {
- RETURN_ON_EXCEPTION(isolate,
- JSObject::EnqueueChangeRecord(
- receiver, "delete", it->GetName(), old_value),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::EnqueueChangeRecord(receiver, "delete",
+ it->GetName(), old_value),
+ Nothing<bool>());
}
- return it->factory()->true_value();
+ return Just(true);
}
}
}
- return it->factory()->true_value();
+ return Just(true);
}
-MaybeHandle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
- uint32_t index,
- LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
+ LanguageMode language_mode) {
LookupIterator it(object->GetIsolate(), object, index,
LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
}
-MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
- Handle<Name> name,
- LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
LookupIterator it(object, name, LookupIterator::HIDDEN);
- return JSObject::DeleteProperty(&it, language_mode);
+ return DeleteProperty(&it, language_mode);
}
-MaybeHandle<Object> JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver> object, Handle<Name> name, LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
LookupIterator it = LookupIterator::PropertyOrElement(
name->GetIsolate(), object, name, LookupIterator::HIDDEN);
- return JSObject::DeleteProperty(&it, language_mode);
+ return DeleteProperty(&it, language_mode);
+}
+
+
+// ES6 7.1.14
+MaybeHandle<Object> ToPropertyKey(Isolate* isolate, Handle<Object> value) {
+ // 1. Let key be ToPrimitive(argument, hint String).
+ MaybeHandle<Object> maybe_key =
+ Object::ToPrimitive(value, ToPrimitiveHint::kString);
+ // 2. ReturnIfAbrupt(key).
+ Handle<Object> key;
+ if (!maybe_key.ToHandle(&key)) return key;
+ // 3. If Type(key) is Symbol, then return key.
+ if (key->IsSymbol()) return key;
+ // 4. Return ToString(key).
+ // Extending spec'ed behavior, we'd be happy to return an element index.
+ if (key->IsSmi()) return key;
+ if (key->IsHeapNumber()) {
+ uint32_t uint_value;
+    if (key->ToArrayLength(&uint_value) &&
+ uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
+ }
+ }
+ return Object::ToString(isolate, key);
+}
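// Comment-only sketch of the extended key coercion:
//
//   ToPropertyKey(isolate, Smi 3)     -> Smi 3 (kept as element index)
//   ToPropertyKey(isolate, 3.0)       -> Smi 3 (fits an array index)
//   ToPropertyKey(isolate, a symbol)  -> the symbol unchanged
//   ToPropertyKey(isolate, an object) -> ToString(ToPrimitive(obj, String))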
+
+
+// ES6 19.1.2.4
+// static
+Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key,
+ Handle<Object> attributes) {
+ // 1. If Type(O) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ Handle<String> fun_name =
+ isolate->factory()->InternalizeUtf8String("Object.defineProperty");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name));
+ }
+ // 2. Let key be ToPropertyKey(P).
+ // 3. ReturnIfAbrupt(key).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key, ToPropertyKey(isolate, key));
+ // 4. Let desc be ToPropertyDescriptor(Attributes).
+ // 5. ReturnIfAbrupt(desc).
+ PropertyDescriptor desc;
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+ return isolate->heap()->exception();
+ }
+ // 6. Let success be DefinePropertyOrThrow(O,key, desc).
+ Maybe<bool> success = DefineOwnProperty(
+ isolate, Handle<JSReceiver>::cast(object), key, &desc, THROW_ON_ERROR);
+ // 7. ReturnIfAbrupt(success).
+ MAYBE_RETURN(success, isolate->heap()->exception());
+ CHECK(success.FromJust());
+ // 8. Return O.
+ return *object;
+}
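// This is the runtime entry behind Object.defineProperty (ES6 19.1.2.4).
// JS sketch:
//
//   Object.defineProperty(o, 'x', {value: 1});  // key = 'x', attributes =
//                                               // the descriptor object
//   Object.defineProperty(1, 'x', {});          // TypeError before any
//                                               // key coercion happens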
+
+
+// ES6 19.1.2.3.1
+// static
+MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> properties) {
+ // 1. If Type(O) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ Handle<String> fun_name =
+ isolate->factory()->InternalizeUtf8String("Object.defineProperties");
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name),
+ Object);
+ }
+ // 2. Let props be ToObject(Properties).
+ // 3. ReturnIfAbrupt(props).
+ Handle<JSReceiver> props;
+ if (!Object::ToObject(isolate, properties).ToHandle(&props)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+ Object);
+ }
+ // 4. Let keys be props.[[OwnPropertyKeys]]().
+ // 5. ReturnIfAbrupt(keys).
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(props, JSReceiver::OWN_ONLY, ALL_PROPERTIES), Object);
+ // 6. Let descriptors be an empty List.
+ int capacity = keys->length();
+ std::vector<PropertyDescriptor> descriptors(capacity);
+ size_t descriptors_index = 0;
+ // 7. Repeat for each element nextKey of keys in List order,
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> next_key(keys->get(i), isolate);
+ // 7a. Let propDesc be props.[[GetOwnProperty]](nextKey).
+ // 7b. ReturnIfAbrupt(propDesc).
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, props, next_key, &success, LookupIterator::HIDDEN);
+ DCHECK(success);
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ PropertyAttributes attrs = maybe.FromJust();
+ // 7c. If propDesc is not undefined and propDesc.[[Enumerable]] is true:
+ if (attrs == ABSENT) continue;
+ if (attrs & DONT_ENUM) continue;
+ // 7c i. Let descObj be Get(props, nextKey).
+ // 7c ii. ReturnIfAbrupt(descObj).
+ Handle<Object> desc_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, desc_obj, Object::GetProperty(&it),
+ Object);
+ // 7c iii. Let desc be ToPropertyDescriptor(descObj).
+ success = PropertyDescriptor::ToPropertyDescriptor(
+ isolate, desc_obj, &descriptors[descriptors_index]);
+ // 7c iv. ReturnIfAbrupt(desc).
+ if (!success) return MaybeHandle<Object>();
+ // 7c v. Append the pair (a two element List) consisting of nextKey and
+ // desc to the end of descriptors.
+ descriptors[descriptors_index].set_name(next_key);
+ descriptors_index++;
+ }
+ // 8. For each pair from descriptors in list order,
+ for (size_t i = 0; i < descriptors_index; ++i) {
+ PropertyDescriptor* desc = &descriptors[i];
+ // 8a. Let P be the first element of pair.
+ // 8b. Let desc be the second element of pair.
+ // 8c. Let status be DefinePropertyOrThrow(O, P, desc).
+ Maybe<bool> status =
+ DefineOwnProperty(isolate, Handle<JSReceiver>::cast(object),
+ desc->name(), desc, THROW_ON_ERROR);
+ // 8d. ReturnIfAbrupt(status).
+ if (!status.IsJust()) return MaybeHandle<Object>();
+ CHECK(status.FromJust());
+ }
+  // 9. Return O.
+ return object;
+}
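// The two-phase structure (collect all descriptors, then define) is
// observable. JS sketch:
//
//   Object.defineProperties({}, {
//     a: {value: 1},
//     get b() { throw new Error('boom'); }
//   });  // throws while collecting 'b'; 'a' is never defined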
+
+
+// static
+Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ if (object->IsJSArray()) {
+ return JSArray::DefineOwnProperty(isolate, Handle<JSArray>::cast(object),
+ key, desc, should_throw);
+ }
+ if (object->IsJSProxy()) {
+ return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
+ key, desc, should_throw);
+ }
+ // TODO(jkummerow): Support Modules (ES6 9.4.6.6)
+
+ // OrdinaryDefineOwnProperty, by virtue of calling
+ // DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
+ // and IntegerIndexedExotics (ES6 9.4.5.3), with one exception:
+ // TODO(jkummerow): Setting an indexed accessor on a typed array should throw.
+ return OrdinaryDefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
+ desc, should_throw);
+}
+
+
+// static
+Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ bool success = false;
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::HIDDEN);
+ DCHECK(success); // ...so creating a LookupIterator can't fail.
+
+ // Deal with access checks first.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ if (!it.HasAccess()) {
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(true);
+ }
+ it.Next();
+ }
+
+ return OrdinaryDefineOwnProperty(&it, desc, should_throw);
+}
+
+
+// ES6 9.1.6.1
+// static
+Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ Isolate* isolate = it->isolate();
+ // 1. Let current be O.[[GetOwnProperty]](P).
+ // 2. ReturnIfAbrupt(current).
+ PropertyDescriptor current;
+ MAYBE_RETURN(GetOwnPropertyDescriptor(it, &current), Nothing<bool>());
+
+ // TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
+ // the iterator every time. Currently, the reasons why we need it are:
+ // - handle interceptors correctly
+ // - handle accessors correctly (which might change the holder's map)
+ it->Restart();
+ // 3. Let extensible be the value of the [[Extensible]] internal slot of O.
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
+ bool extensible = JSObject::IsExtensible(object);
+
+ return ValidateAndApplyPropertyDescriptor(isolate, it, extensible, desc,
+ &current, should_throw);
+}
+
+
+// ES6 9.1.6.2
+// static
+Maybe<bool> JSReceiver::IsCompatiblePropertyDescriptor(
+ Isolate* isolate, bool extensible, PropertyDescriptor* desc,
+ PropertyDescriptor* current, Handle<Name> property_name,
+ ShouldThrow should_throw) {
+ // 1. Return ValidateAndApplyPropertyDescriptor(undefined, undefined,
+ // Extensible, Desc, Current).
+ return ValidateAndApplyPropertyDescriptor(
+ isolate, NULL, extensible, desc, current, should_throw, property_name);
+}
+
+
+// ES6 9.1.6.3
+// static
+Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
+ Isolate* isolate, LookupIterator* it, bool extensible,
+ PropertyDescriptor* desc, PropertyDescriptor* current,
+ ShouldThrow should_throw, Handle<Name> property_name) {
+ // We either need a LookupIterator, or a property name.
+ DCHECK((it == NULL) != property_name.is_null());
+ Handle<JSObject> object;
+ if (it != NULL) object = Handle<JSObject>::cast(it->GetReceiver());
+ bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc);
+ bool desc_is_accessor_descriptor =
+ PropertyDescriptor::IsAccessorDescriptor(desc);
+ bool desc_is_generic_descriptor =
+ PropertyDescriptor::IsGenericDescriptor(desc);
+ // 1. (Assert)
+ // 2. If current is undefined, then
+ if (current->is_empty()) {
+ // 2a. If extensible is false, return false.
+ if (!extensible) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kDefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ // 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
+ // (This is equivalent to !IsAccessorDescriptor(desc).)
+ DCHECK((desc_is_generic_descriptor || desc_is_data_descriptor) ==
+ !desc_is_accessor_descriptor);
+ if (!desc_is_accessor_descriptor) {
+ // 2c i. If O is not undefined, create an own data property named P of
+ // object O whose [[Value]], [[Writable]], [[Enumerable]] and
+ // [[Configurable]] attribute values are described by Desc. If the value
+ // of an attribute field of Desc is absent, the attribute of the newly
+ // created property is set to its default value.
+ if (it != NULL) {
+ if (!desc->has_writable()) desc->set_writable(false);
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ Handle<Object> value(
+ desc->has_value()
+ ? desc->value()
+ : Handle<Object>::cast(isolate->factory()->undefined_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineOwnPropertyIgnoreAttributes(
+ it, value, desc->ToAttributes(), JSObject::DONT_FORCE_FIELD);
+ if (result.is_null()) return Nothing<bool>();
+ }
+ } else {
+ // 2d. Else Desc must be an accessor Property Descriptor,
+ DCHECK(desc_is_accessor_descriptor);
+ // 2d i. If O is not undefined, create an own accessor property named P
+ // of object O whose [[Get]], [[Set]], [[Enumerable]] and
+ // [[Configurable]] attribute values are described by Desc. If the value
+ // of an attribute field of Desc is absent, the attribute of the newly
+ // created property is set to its default value.
+ if (it != NULL) {
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ Handle<Object> getter(
+ desc->has_get()
+ ? desc->get()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ Handle<Object> setter(
+ desc->has_set()
+ ? desc->set()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineAccessor(it, getter, setter, desc->ToAttributes());
+ if (result.is_null()) return Nothing<bool>();
+ }
+ }
+ // 2e. Return true.
+ return Just(true);
+ }
+ // 3. Return true, if every field in Desc is absent.
+ // 4. Return true, if every field in Desc also occurs in current and the
+ // value of every field in Desc is the same value as the corresponding field
+ // in current when compared using the SameValue algorithm.
+ if ((!desc->has_enumerable() ||
+ desc->enumerable() == current->enumerable()) &&
+ (!desc->has_configurable() ||
+ desc->configurable() == current->configurable()) &&
+ (!desc->has_value() ||
+ (current->has_value() && current->value()->SameValue(*desc->value()))) &&
+ (!desc->has_writable() ||
+ (current->has_writable() && current->writable() == desc->writable())) &&
+ (!desc->has_get() ||
+ (current->has_get() && current->get()->SameValue(*desc->get()))) &&
+ (!desc->has_set() ||
+ (current->has_set() && current->set()->SameValue(*desc->set())))) {
+ return Just(true);
+ }
+ // 5. If the [[Configurable]] field of current is false, then
+ if (!current->configurable()) {
+ // 5a. Return false, if the [[Configurable]] field of Desc is true.
+ if (desc->has_configurable() && desc->configurable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ // 5b. Return false, if the [[Enumerable]] field of Desc is present and the
+ // [[Enumerable]] fields of current and Desc are the Boolean negation of
+ // each other.
+ if (desc->has_enumerable() && desc->enumerable() != current->enumerable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ }
+
+ bool current_is_data_descriptor =
+ PropertyDescriptor::IsDataDescriptor(current);
+ // 6. If IsGenericDescriptor(Desc) is true, no further validation is required.
+ if (desc_is_generic_descriptor) {
+ // Nothing to see here.
+
+ // 7. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) have
+ // different results, then:
+ } else if (current_is_data_descriptor != desc_is_data_descriptor) {
+ // 7a. Return false, if the [[Configurable]] field of current is false.
+ if (!current->configurable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ // 7b. If IsDataDescriptor(current) is true, then:
+ if (current_is_data_descriptor) {
+ // 7b i. If O is not undefined, convert the property named P of object O
+ // from a data property to an accessor property. Preserve the existing
+ // values of the converted property's [[Configurable]] and [[Enumerable]]
+ // attributes and set the rest of the property's attributes to their
+ // default values.
+ // --> Folded into step 10.
+ } else {
+ // 7c i. If O is not undefined, convert the property named P of object O
+ // from an accessor property to a data property. Preserve the existing
+      // values of the converted property's [[Configurable]] and [[Enumerable]]
+      // attributes and set the rest of the property's attributes to their
+ // default values.
+ // --> Folded into step 10.
+ }
+
+ // 8. Else if IsDataDescriptor(current) and IsDataDescriptor(Desc) are both
+ // true, then:
+ } else if (current_is_data_descriptor && desc_is_data_descriptor) {
+ // 8a. If the [[Configurable]] field of current is false, then:
+ if (!current->configurable()) {
+ // [Strong mode] Disallow changing writable -> readonly for
+ // non-configurable properties.
+ if (it != NULL && current->writable() && desc->has_writable() &&
+ !desc->writable() && object->map()->is_strong()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrongRedefineDisallowed,
+ object, it->GetName()));
+ }
+ // 8a i. Return false, if the [[Writable]] field of current is false and
+ // the [[Writable]] field of Desc is true.
+ if (!current->writable() && desc->has_writable() && desc->writable()) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ // 8a ii. If the [[Writable]] field of current is false, then:
+ if (!current->writable()) {
+ // 8a ii 1. Return false, if the [[Value]] field of Desc is present and
+ // SameValue(Desc.[[Value]], current.[[Value]]) is false.
+ if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ }
+ }
+ } else {
+ // 9. Else IsAccessorDescriptor(current) and IsAccessorDescriptor(Desc)
+ // are both true,
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(current) &&
+ desc_is_accessor_descriptor);
+ // 9a. If the [[Configurable]] field of current is false, then:
+ if (!current->configurable()) {
+ // 9a i. Return false, if the [[Set]] field of Desc is present and
+ // SameValue(Desc.[[Set]], current.[[Set]]) is false.
+ if (desc->has_set() && !desc->set()->SameValue(*current->set())) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ // 9a ii. Return false, if the [[Get]] field of Desc is present and
+ // SameValue(Desc.[[Get]], current.[[Get]]) is false.
+ if (desc->has_get() && !desc->get()->SameValue(*current->get())) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
+ }
+ }
+ }
+
+ // 10. If O is not undefined, then:
+ if (it != NULL) {
+ // 10a. For each field of Desc that is present, set the corresponding
+ // attribute of the property named P of object O to the value of the field.
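+    // Fields that are absent from |desc| keep the attribute values they
+    // have in |current| (see the else branches below).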
+ PropertyAttributes attrs = NONE;
+
+ if (desc->has_enumerable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->enumerable() ? NONE : DONT_ENUM));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current->enumerable() ? NONE : DONT_ENUM));
+ }
+ if (desc->has_configurable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->configurable() ? NONE : DONT_DELETE));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current->configurable() ? NONE : DONT_DELETE));
+ }
+ if (desc_is_data_descriptor ||
+ (desc_is_generic_descriptor && current_is_data_descriptor)) {
+ if (desc->has_writable()) {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (desc->writable() ? NONE : READ_ONLY));
+ } else {
+ attrs = static_cast<PropertyAttributes>(
+ attrs | (current->writable() ? NONE : READ_ONLY));
+ }
+ Handle<Object> value(
+ desc->has_value() ? desc->value()
+ : current->has_value()
+ ? current->value()
+ : Handle<Object>::cast(
+ isolate->factory()->undefined_value()));
+ MaybeHandle<Object> result = JSObject::DefineOwnPropertyIgnoreAttributes(
+ it, value, attrs, JSObject::DONT_FORCE_FIELD);
+ if (result.is_null()) return Nothing<bool>();
+ } else {
+ DCHECK(desc_is_accessor_descriptor ||
+ (desc_is_generic_descriptor &&
+ PropertyDescriptor::IsAccessorDescriptor(current)));
+ Handle<Object> getter(
+ desc->has_get()
+ ? desc->get()
+ : current->has_get()
+ ? current->get()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ Handle<Object> setter(
+ desc->has_set()
+ ? desc->set()
+ : current->has_set()
+ ? current->set()
+ : Handle<Object>::cast(isolate->factory()->null_value()));
+ MaybeHandle<Object> result =
+ JSObject::DefineAccessor(it, getter, setter, attrs);
+ if (result.is_null()) return Nothing<bool>();
+ }
+ }
+
+ // 11. Return true.
+ return Just(true);
+}
+
+
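+// ES6 CreateDataProperty: defines |value| as a writable, enumerable and
+// configurable data property. Non-JSObject receivers go through the full
+// DefineOwnProperty machinery.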
+// static
+Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ DCHECK(!it->check_prototype_chain());
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ Isolate* isolate = receiver->GetIsolate();
+
+ if (receiver->IsJSObject()) {
+ return JSObject::CreateDataProperty(it, value); // Shortcut.
+ }
+
+ PropertyDescriptor new_desc;
+ new_desc.set_value(value);
+ new_desc.set_writable(true);
+ new_desc.set_enumerable(true);
+ new_desc.set_configurable(true);
+
+ return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
+ &new_desc, should_throw);
+}
+
+
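+// JSObject fast path for CreateDataProperty: returns Just(false), without
+// throwing, if an existing own property is non-configurable or if the object
+// is not extensible.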
+Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value) {
+ DCHECK(it->GetReceiver()->IsJSObject());
+ MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
+
+ if (it->IsFound()) {
+ if (!it->IsConfigurable()) return Just(false);
+ } else {
+ if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver())))
+ return Just(false);
+ }
+
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(),
+ DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
+ Nothing<bool>());
+
+ return Just(true);
+}
+
+
+// TODO(jkummerow): Consider unification with FastAsArrayLength() in
+// accessors.cc.
+bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
+ DCHECK(value->IsNumber() || value->IsName());
+ if (value->ToArrayLength(length)) return true;
+ if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
+ return false;
+}
+
+
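+// A valid array index is strictly less than kMaxUInt32 (2^32 - 1); the
+// maximum array length itself is not a valid index.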
+bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
+ return PropertyKeyToArrayLength(index_obj, output) && *output != kMaxUInt32;
+}
+
+
+// ES6 9.4.2.1
+// static
+Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
+ Handle<Object> name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // 1. Assert: IsPropertyKey(P) is true. ("P" is |name|.)
+ // 2. If P is "length", then:
+ // TODO(jkummerow): Check if we need slow string comparison.
+ if (*name == isolate->heap()->length_string()) {
+ // 2a. Return ArraySetLength(A, Desc).
+ return ArraySetLength(isolate, o, desc, should_throw);
+ }
+ // 3. Else if P is an array index, then:
+ uint32_t index = 0;
+ if (PropertyKeyToArrayIndex(name, &index)) {
+ // 3a. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
+ PropertyDescriptor old_len_desc;
+ Maybe<bool> success = GetOwnPropertyDescriptor(
+ isolate, o, isolate->factory()->length_string(), &old_len_desc);
+ // 3b. (Assert)
+ DCHECK(success.FromJust());
+ USE(success);
+ // 3c. Let oldLen be oldLenDesc.[[Value]].
+ uint32_t old_len = 0;
+ CHECK(old_len_desc.value()->ToArrayLength(&old_len));
+ // 3d. Let index be ToUint32(P).
+ // (Already done above.)
+ // 3e. (Assert)
+ // 3f. If index >= oldLen and oldLenDesc.[[Writable]] is false,
+ // return false.
+ if (index >= old_len && old_len_desc.has_writable() &&
+ !old_len_desc.writable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kDefineDisallowed, name));
+ }
+ // 3g. Let succeeded be OrdinaryDefineOwnProperty(A, P, Desc).
+ Maybe<bool> succeeded =
+ OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
+ // 3h. Assert: succeeded is not an abrupt completion.
+ // In our case, if should_throw == THROW_ON_ERROR, it can be!
+ // 3i. If succeeded is false, return false.
+ if (succeeded.IsNothing() || !succeeded.FromJust()) return succeeded;
+ // 3j. If index >= oldLen, then:
+ if (index >= old_len) {
+ // 3j i. Set oldLenDesc.[[Value]] to index + 1.
+ old_len_desc.set_value(isolate->factory()->NewNumberFromUint(index + 1));
+ // 3j ii. Let succeeded be
+ // OrdinaryDefineOwnProperty(A, "length", oldLenDesc).
+ succeeded = OrdinaryDefineOwnProperty(isolate, o,
+ isolate->factory()->length_string(),
+ &old_len_desc, should_throw);
+ // 3j iii. Assert: succeeded is true.
+ DCHECK(succeeded.FromJust());
+ USE(succeeded);
+ }
+ // 3k. Return true.
+ return Just(true);
+ }
+
+ // 4. Return OrdinaryDefineOwnProperty(A, P, Desc).
+ return OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
+}
+
+
+// Part of ES6 9.4.2.4 ArraySetLength.
+// static
+bool JSArray::AnythingToArrayLength(Isolate* isolate,
+ Handle<Object> length_object,
+ uint32_t* output) {
+ // Fast path: check numbers and strings that can be converted directly
+ // and unobservably.
+ if (length_object->ToArrayLength(output)) return true;
+ if (length_object->IsString() &&
+ Handle<String>::cast(length_object)->AsArrayIndex(output)) {
+ return true;
+ }
+ // Slow path: follow steps in ES6 9.4.2.4 "ArraySetLength".
+ // 3. Let newLen be ToUint32(Desc.[[Value]]).
+ Handle<Object> uint32_v;
+ if (!Object::ToUint32(isolate, length_object).ToHandle(&uint32_v)) {
+ // 4. ReturnIfAbrupt(newLen).
+ return false;
+ }
+ // 5. Let numberLen be ToNumber(Desc.[[Value]]).
+ Handle<Object> number_v;
+ if (!Object::ToNumber(length_object).ToHandle(&number_v)) {
+    // 6. ReturnIfAbrupt(numberLen).
+ return false;
+ }
+ // 7. If newLen != numberLen, throw a RangeError exception.
+ if (uint32_v->Number() != number_v->Number()) {
+ Handle<Object> exception =
+ isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
+ isolate->Throw(*exception);
+ return false;
+ }
+ CHECK(uint32_v->ToArrayLength(output));
+ return true;
+}
+
+
+// ES6 9.4.2.4
+// static
+Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // 1. If the [[Value]] field of Desc is absent, then
+ if (!desc->has_value()) {
+ // 1a. Return OrdinaryDefineOwnProperty(A, "length", Desc).
+ return OrdinaryDefineOwnProperty(
+ isolate, a, isolate->factory()->length_string(), desc, should_throw);
+ }
+ // 2. Let newLenDesc be a copy of Desc.
+ // (Actual copying is not necessary.)
+ PropertyDescriptor* new_len_desc = desc;
+ // 3. - 7. Convert Desc.[[Value]] to newLen.
+ uint32_t new_len = 0;
+ if (!AnythingToArrayLength(isolate, desc->value(), &new_len)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ // 8. Set newLenDesc.[[Value]] to newLen.
+ // (Done below, if needed.)
+ // 9. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
+ PropertyDescriptor old_len_desc;
+ Maybe<bool> success = GetOwnPropertyDescriptor(
+ isolate, a, isolate->factory()->length_string(), &old_len_desc);
+ // 10. (Assert)
+ DCHECK(success.FromJust());
+ USE(success);
+ // 11. Let oldLen be oldLenDesc.[[Value]].
+ uint32_t old_len = 0;
+ CHECK(old_len_desc.value()->ToArrayLength(&old_len));
+ // 12. If newLen >= oldLen, then
+ if (new_len >= old_len) {
+ // 8. Set newLenDesc.[[Value]] to newLen.
+ // 12a. Return OrdinaryDefineOwnProperty(A, "length", newLenDesc).
+ new_len_desc->set_value(isolate->factory()->NewNumberFromUint(new_len));
+ return OrdinaryDefineOwnProperty(isolate, a,
+ isolate->factory()->length_string(),
+ new_len_desc, should_throw);
+ }
+ // 13. If oldLenDesc.[[Writable]] is false, return false.
+ if (!old_len_desc.writable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ isolate->factory()->length_string()));
+ }
+ // 14. If newLenDesc.[[Writable]] is absent or has the value true,
+ // let newWritable be true.
+ bool new_writable = false;
+ if (!new_len_desc->has_writable() || new_len_desc->writable()) {
+ new_writable = true;
+ } else {
+ // 15. Else,
+ // 15a. Need to defer setting the [[Writable]] attribute to false in case
+ // any elements cannot be deleted.
+ // 15b. Let newWritable be false. (It's initialized as "false" anyway.)
+ // 15c. Set newLenDesc.[[Writable]] to true.
+ // (Not needed.)
+ }
+  // Most of steps 16 through 19 are handled by JSArray::ObservableSetLength.
+ if (JSArray::ObservableSetLength(a, new_len).is_null()) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ // Steps 19d-ii, 20.
+ if (!new_writable) {
+ PropertyDescriptor readonly;
+ readonly.set_writable(false);
+ Maybe<bool> success = OrdinaryDefineOwnProperty(
+ isolate, a, isolate->factory()->length_string(), &readonly,
+ should_throw);
+ DCHECK(success.FromJust());
+ USE(success);
+ }
+ uint32_t actual_new_len = 0;
+ CHECK(a->length()->ToArrayLength(&actual_new_len));
+ // Steps 19d-v, 21. Return false if there were non-deletable elements.
+ bool result = actual_new_len == new_len;
+ if (!result) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictDeleteProperty,
+ isolate->factory()->NewNumberFromUint(actual_new_len - 1),
+ a));
+ }
+ return Just(result);
+}
+
+
+// ES6 9.5.6
+// static
+Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ STACK_CHECK(Nothing<bool>());
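+  // Private symbols are never exposed to proxy traps; they are handled
+  // directly on the proxy itself.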
+ if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
+ return AddPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
+ should_throw);
+ }
+ Handle<String> trap_name = isolate->factory()->defineProperty_string();
+ // 1. Assert: IsPropertyKey(P) is true.
+ DCHECK(key->IsName() || key->IsNumber());
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "defineProperty").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then:
+ if (trap->IsUndefined()) {
+ // 7a. Return target.[[DefineOwnProperty]](P, Desc).
+ return JSReceiver::DefineOwnProperty(isolate, target, key, desc,
+ should_throw);
+ }
+ // 8. Let descObj be FromPropertyDescriptor(Desc).
+ Handle<Object> desc_obj = desc->ToObject(isolate);
+ // 9. Let booleanTrapResult be
+ // ToBoolean(? Call(trap, handler, «target, P, descObj»)).
+ Handle<Name> property_name =
+ key->IsName()
+ ? Handle<Name>::cast(key)
+ : Handle<Name>::cast(isolate->factory()->NumberToString(key));
+ // Do not leak private property names.
+ DCHECK(!property_name->IsPrivate());
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, property_name, desc_obj};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 10. If booleanTrapResult is false, return false.
+ if (!trap_result_obj->BooleanValue()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, property_name));
+ }
+ // 11. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, key, &target_desc);
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 12. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 13. If Desc has a [[Configurable]] field and if Desc.[[Configurable]]
+ // is false, then:
+ // 13a. Let settingConfigFalse be true.
+ // 14. Else let settingConfigFalse be false.
+ bool setting_config_false = desc->has_configurable() && !desc->configurable();
+ // 15. If targetDesc is undefined, then
+ if (!target_found.FromJust()) {
+ // 15a. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonExtensible, property_name));
+ return Nothing<bool>();
+ }
+ // 15b. If settingConfigFalse is true, throw a TypeError exception.
+ if (setting_config_false) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
+ return Nothing<bool>();
+ }
+ } else {
+ // 16. Else targetDesc is not undefined,
+ // 16a. If IsCompatiblePropertyDescriptor(extensibleTarget, Desc,
+ // targetDesc) is false, throw a TypeError exception.
+ Maybe<bool> valid =
+ IsCompatiblePropertyDescriptor(isolate, extensible_target, desc,
+ &target_desc, property_name, DONT_THROW);
+ MAYBE_RETURN(valid, Nothing<bool>());
+ if (!valid.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyIncompatible, property_name));
+ return Nothing<bool>();
+ }
+ // 16b. If settingConfigFalse is true and targetDesc.[[Configurable]] is
+ // true, throw a TypeError exception.
+ if (setting_config_false && target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
+ return Nothing<bool>();
+ }
+ }
+ // 17. Return true.
+ return Just(true);
+}
+
+
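+// Private symbol properties live in the proxy's own property dictionary and
+// bypass both the handler's traps and the target.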
+// static
+Maybe<bool> JSProxy::AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // Despite the generic name, this can only add private data properties.
+ if (!PropertyDescriptor::IsDataDescriptor(desc) ||
+ desc->ToAttributes() != DONT_ENUM) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+ DCHECK(proxy->map()->is_dictionary_map());
+ Handle<Object> value =
+ desc->has_value()
+ ? desc->value()
+ : Handle<Object>::cast(isolate->factory()->undefined_value());
+
+ LookupIterator it(proxy, private_name);
+
+ if (it.IsFound()) {
+ DCHECK_EQ(LookupIterator::DATA, it.state());
+ DCHECK_EQ(DONT_ENUM, it.property_details().attributes());
+ it.WriteDataValue(value);
+ return Just(true);
+ }
+
+ Handle<NameDictionary> dict(proxy->property_dictionary());
+ PropertyDetails details(DONT_ENUM, DATA, 0, PropertyCellType::kNoCell);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(dict, private_name, value, details);
+ if (!dict.is_identical_to(result)) proxy->set_properties(*result);
+ return Just(true);
+}
+
+
+// static
+Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc) {
+ bool success = false;
+ DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::HIDDEN);
+ DCHECK(success); // ...so creating a LookupIterator can't fail.
+ return GetOwnPropertyDescriptor(&it, desc);
+}
+
+
+// ES6 9.1.5.1
+// Returns true on success, false if the property didn't exist, nothing if
+// an exception was thrown.
+// static
+Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
+ PropertyDescriptor* desc) {
+ Isolate* isolate = it->isolate();
+ // "Virtual" dispatch.
+ if (it->IsFound() && it->GetHolder<JSReceiver>()->IsJSProxy()) {
+ return JSProxy::GetOwnPropertyDescriptor(isolate, it->GetHolder<JSProxy>(),
+ it->GetName(), desc);
+ }
+
+ // 1. (Assert)
+ // 2. If O does not have an own property with key P, return undefined.
+ Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
+ MAYBE_RETURN(maybe, Nothing<bool>());
+ PropertyAttributes attrs = maybe.FromJust();
+ if (attrs == ABSENT) return Just(false);
+ DCHECK(!isolate->has_pending_exception());
+
+ // 3. Let D be a newly created Property Descriptor with no fields.
+ DCHECK(desc->is_empty());
+ // 4. Let X be O's own property whose key is P.
+ // 5. If X is a data property, then
+ bool is_accessor_pair = it->state() == LookupIterator::ACCESSOR &&
+ it->GetAccessors()->IsAccessorPair();
+ if (!is_accessor_pair) {
+ // 5a. Set D.[[Value]] to the value of X's [[Value]] attribute.
+ Handle<Object> value;
+ if (!JSObject::GetProperty(it).ToHandle(&value)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ desc->set_value(value);
+ // 5b. Set D.[[Writable]] to the value of X's [[Writable]] attribute
+ desc->set_writable((attrs & READ_ONLY) == 0);
+ } else {
+ // 6. Else X is an accessor property, so
+ Handle<AccessorPair> accessors =
+ Handle<AccessorPair>::cast(it->GetAccessors());
+ // 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
+ desc->set_get(handle(accessors->GetComponent(ACCESSOR_GETTER), isolate));
+ // 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
+ desc->set_set(handle(accessors->GetComponent(ACCESSOR_SETTER), isolate));
+ }
+
+ // 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
+ desc->set_enumerable((attrs & DONT_ENUM) == 0);
+ // 8. Set D.[[Configurable]] to the value of X's [[Configurable]] attribute.
+ desc->set_configurable((attrs & DONT_DELETE) == 0);
+ // 9. Return D.
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(desc) !=
+ PropertyDescriptor::IsDataDescriptor(desc));
+ return Just(true);
+}
+
+
+// ES6 9.5.5
+// static
+Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ PropertyDescriptor* desc) {
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(Nothing<bool>());
+
+ Handle<String> trap_name =
+ isolate->factory()->getOwnPropertyDescriptor_string();
+ // 1. (Assert)
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "getOwnPropertyDescriptor").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 7a. Return target.[[GetOwnProperty]](P).
+ return JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, desc);
+ }
+ // 8. Let trapResultObj be ? Call(trap, handler, «target, P»).
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 9. If Type(trapResultObj) is neither Object nor Undefined, throw a
+ // TypeError exception.
+ if (!trap_result_obj->IsJSReceiver() && !trap_result_obj->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorInvalid, name));
+ return Nothing<bool>();
+ }
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 11. If trapResultObj is undefined, then
+ if (trap_result_obj->IsUndefined()) {
+ // 11a. If targetDesc is undefined, return undefined.
+ if (!found.FromJust()) return Just(false);
+ // 11b. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ if (!target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorUndefined, name));
+ return Nothing<bool>();
+ }
+ // 11c. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 11d. (Assert)
+ // 11e. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorNonExtensible, name));
+ return Nothing<bool>();
+ }
+ // 11f. Return undefined.
+ return Just(false);
+ }
+ // 12. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 13. Let resultDesc be ? ToPropertyDescriptor(trapResultObj).
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, trap_result_obj,
+ desc)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ // 14. Call CompletePropertyDescriptor(resultDesc).
+ PropertyDescriptor::CompletePropertyDescriptor(isolate, desc);
+ // 15. Let valid be IsCompatiblePropertyDescriptor (extensibleTarget,
+ // resultDesc, targetDesc).
+ Maybe<bool> valid =
+ IsCompatiblePropertyDescriptor(isolate, extensible_target.FromJust(),
+ desc, &target_desc, name, DONT_THROW);
+ MAYBE_RETURN(valid, Nothing<bool>());
+ // 16. If valid is false, throw a TypeError exception.
+ if (!valid.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorIncompatible, name));
+ return Nothing<bool>();
+ }
+ // 17. If resultDesc.[[Configurable]] is false, then
+ if (!desc->configurable()) {
+ // 17a. If targetDesc is undefined or targetDesc.[[Configurable]] is true:
+ if (target_desc.is_empty() || target_desc.configurable()) {
+ // 17a i. Throw a TypeError exception.
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorNonConfigurable,
+ name));
+ return Nothing<bool>();
+ }
+ }
+ // 18. Return resultDesc.
+ return Just(true);
}
@@ -5960,41 +7544,197 @@ bool JSObject::ReferencesObject(Object* obj) {
}
-MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- if (!object->map()->is_extensible()) return object;
+Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
+ IntegrityLevel level,
+ ShouldThrow should_throw) {
+ DCHECK(level == SEALED || level == FROZEN);
- if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
- return PreventExtensionsWithTransition<NONE>(object);
+ if (receiver->IsJSObject()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+ if (!object->HasSloppyArgumentsElements() &&
+ !object->map()->is_observed() &&
+ (!object->map()->is_strong() || level == SEALED)) { // Fast path.
+ if (level == SEALED) {
+ return JSObject::PreventExtensionsWithTransition<SEALED>(object,
+ should_throw);
+ } else {
+ return JSObject::PreventExtensionsWithTransition<FROZEN>(object,
+ should_throw);
+ }
+ }
}
+ Isolate* isolate = receiver->GetIsolate();
+
+ MAYBE_RETURN(JSReceiver::PreventExtensions(receiver, should_throw),
+ Nothing<bool>());
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
+
+ PropertyDescriptor no_conf;
+ no_conf.set_configurable(false);
+
+ PropertyDescriptor no_conf_no_write;
+ no_conf_no_write.set_configurable(false);
+ no_conf_no_write.set_writable(false);
+
+ if (level == SEALED) {
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ MAYBE_RETURN(
+ DefineOwnProperty(isolate, receiver, key, &no_conf, THROW_ON_ERROR),
+ Nothing<bool>());
+ }
+ return Just(true);
+ }
+
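+  // FROZEN: accessor properties merely become non-configurable; data
+  // properties additionally become non-writable.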
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ PropertyDescriptor current_desc;
+ Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, receiver, key, &current_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ PropertyDescriptor desc =
+ PropertyDescriptor::IsAccessorDescriptor(&current_desc)
+ ? no_conf
+ : no_conf_no_write;
+ MAYBE_RETURN(
+ DefineOwnProperty(isolate, receiver, key, &desc, THROW_ON_ERROR),
+ Nothing<bool>());
+ }
+ }
+ return Just(true);
+}
+
+
+Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> object,
+ IntegrityLevel level) {
+ DCHECK(level == SEALED || level == FROZEN);
Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ Maybe<bool> extensible = JSReceiver::IsExtensible(object);
+ MAYBE_RETURN(extensible, Nothing<bool>());
+ if (extensible.FromJust()) return Just(false);
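+  // Being non-extensible is necessary but not sufficient: every own property
+  // must also be non-configurable (and non-writable, for FROZEN data
+  // properties).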
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys, JSReceiver::OwnPropertyKeys(object), Nothing<bool>());
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ PropertyDescriptor current_desc;
+ Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, object, key, &current_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ if (current_desc.configurable()) return Just(false);
+ if (level == FROZEN &&
+ PropertyDescriptor::IsDataDescriptor(&current_desc) &&
+ current_desc.writable()) {
+ return Just(false);
+ }
+ }
+ }
+ return Just(true);
+}
+
+
+Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
+ ShouldThrow should_throw) {
+ if (object->IsJSProxy()) {
+ return JSProxy::PreventExtensions(Handle<JSProxy>::cast(object),
+ should_throw);
+ }
+ DCHECK(object->IsJSObject());
+ return JSObject::PreventExtensions(Handle<JSObject>::cast(object),
+ should_throw);
+}
+
+
+Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
+ ShouldThrow should_throw) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->preventExtensions_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ return JSReceiver::PreventExtensions(target, should_throw);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue()) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+
+ // Enforce the invariant.
+ Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(target_result, Nothing<bool>());
+ if (target_result.FromJust()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyPreventExtensionsExtensible));
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+
+Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
+ ShouldThrow should_throw) {
+ Isolate* isolate = object->GetIsolate();
+
+ if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
+ return PreventExtensionsWithTransition<NONE>(object, should_throw);
+ }
+
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->false_value();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
}
+ if (!object->map()->is_extensible()) return Just(true);
+
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return object;
+ if (iter.IsAtEnd()) return Just(true);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter));
+ return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter),
+ should_throw);
}
- // It's not possible to seal objects with external array elements
- if (object->HasFixedTypedArrayElements()) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
- Object);
- }
-
- // If there are fast elements we normalize.
- Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
- DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
+ if (!object->HasFixedTypedArrayElements()) {
+ // If there are fast elements we normalize.
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ DCHECK(object->HasDictionaryElements() ||
+ object->HasSlowArgumentsElements());
- // Make sure that we never go back to fast case.
- object->RequireSlowElements(*dictionary);
+ // Make sure that we never go back to fast case.
+ object->RequireSlowElements(*dictionary);
+ }
// Do a map transition, other objects with this map may still
// be extensible.
@@ -6006,24 +7746,78 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
DCHECK(!object->map()->is_extensible());
if (object->map()->is_observed()) {
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(
isolate,
EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
isolate->factory()->the_hole_value()),
- Object);
+ Nothing<bool>());
}
- return object;
+ return Just(true);
}
-bool JSObject::IsExtensible() {
- if (IsJSGlobalProxy()) {
- PrototypeIterator iter(GetIsolate(), this);
+Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
+ if (object->IsJSProxy()) {
+ return JSProxy::IsExtensible(Handle<JSProxy>::cast(object));
+ }
+ return Just(JSObject::IsExtensible(Handle<JSObject>::cast(object)));
+}
+
+
+Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->isExtensible_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ return JSReceiver::IsExtensible(target);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+
+ // Enforce the invariant.
+ Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(target_result, Nothing<bool>());
+ if (target_result.FromJust() != trap_result->BooleanValue()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyIsExtensibleInconsistent,
+ factory->ToBoolean(target_result.FromJust())));
+ return Nothing<bool>();
+ }
+ return target_result;
+}
+
+
+bool JSObject::IsExtensible(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
+ return true;
+ }
+ if (object->IsJSGlobalProxy()) {
+ PrototypeIterator iter(isolate, *object);
if (iter.IsAtEnd()) return false;
DCHECK(iter.GetCurrent()->IsJSGlobalObject());
return iter.GetCurrent<JSObject>()->map()->is_extensible();
}
- return map()->is_extensible();
+ return object->map()->is_extensible();
}
@@ -6052,8 +7846,8 @@ static void ApplyAttributesToDictionary(Dictionary* dictionary,
template <PropertyAttributes attrs>
-MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
- Handle<JSObject> object) {
+Maybe<bool> JSObject::PreventExtensionsWithTransition(
+ Handle<JSObject> object, ShouldThrow should_throw) {
STATIC_ASSERT(attrs == NONE || attrs == SEALED || attrs == FROZEN);
// Sealing/freezing sloppy arguments should be handled elsewhere.
@@ -6061,29 +7855,27 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
DCHECK(!object->map()->is_observed());
Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->false_value();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
}
+ if (attrs == NONE && !object->map()->is_extensible()) return Just(true);
+
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return object;
+ if (iter.IsAtEnd()) return Just(true);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return PreventExtensionsWithTransition<attrs>(
- PrototypeIterator::GetCurrent<JSObject>(iter));
- }
-
- // It's not possible to seal or freeze objects with external array elements
- if (object->HasFixedTypedArrayElements()) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kCannotPreventExtExternalArray),
- Object);
+ PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
}
Handle<SeededNumberDictionary> new_element_dictionary;
- if (!object->HasDictionaryElements()) {
+ if (!object->HasFixedTypedArrayElements() &&
+ !object->HasDictionaryElements()) {
int length =
object->IsJSArray()
? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
@@ -6109,7 +7901,8 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
TransitionArray::SearchSpecial(*old_map, *transition_marker);
if (transition != NULL) {
Handle<Map> transition_map(transition, isolate);
- DCHECK(transition_map->has_dictionary_elements());
+ DCHECK(transition_map->has_dictionary_elements() ||
+ transition_map->has_fixed_typed_array_elements());
DCHECK(!transition_map->is_extensible());
JSObject::MigrateToMap(object, transition_map);
} else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
@@ -6128,11 +7921,13 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
Handle<Map> new_map =
Map::Copy(handle(object->map()), "SlowCopyForPreventExtensions");
new_map->set_is_extensible(false);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ if (!new_element_dictionary.is_null()) {
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ }
JSObject::MigrateToMap(object, new_map);
if (attrs != NONE) {
- if (object->IsGlobalObject()) {
+ if (object->IsJSGlobalObject()) {
ApplyAttributesToDictionary(object->global_dictionary(), attrs);
} else {
ApplyAttributesToDictionary(object->property_dictionary(), attrs);
@@ -6140,6 +7935,18 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
}
}
+ // Both seal and preventExtensions always go through without modifications to
+ // typed array elements. Freeze works only if there are no actual elements.
+ if (object->HasFixedTypedArrayElements()) {
+ if (attrs == FROZEN &&
+ JSArrayBufferView::cast(*object)->byte_length()->Number() > 0) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kCannotFreezeArrayBufferView));
+ return Nothing<bool>();
+ }
+ return Just(true);
+ }
+
DCHECK(object->map()->has_dictionary_elements());
if (!new_element_dictionary.is_null()) {
object->set_elements(*new_element_dictionary);
@@ -6154,17 +7961,7 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
}
}
- return object;
-}
-
-
-MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
- return PreventExtensionsWithTransition<FROZEN>(object);
-}
-
-
-MaybeHandle<Object> JSObject::Seal(Handle<JSObject> object) {
- return PreventExtensionsWithTransition<SEALED>(object);
+ return Just(true);
}
@@ -6313,22 +8110,20 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
} else {
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(copy->NumberOfOwnProperties());
- copy->GetOwnPropertyNames(*names, 0);
+ // Only deep copy fields from the object literal expression.
+ // In particular, don't try to copy the length attribute of
+ // an array.
+ PropertyFilter filter = static_cast<PropertyFilter>(
+ ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
+ KeyAccumulator accumulator(isolate, filter);
+ accumulator.NextPrototype();
+ copy->CollectOwnPropertyNames(&accumulator, filter);
+ Handle<FixedArray> names = accumulator.GetKeys();
for (int i = 0; i < names->length(); i++) {
- DCHECK(names->get(i)->IsString());
- Handle<String> key_string(String::cast(names->get(i)));
- Maybe<PropertyAttributes> maybe =
- JSReceiver::GetOwnPropertyAttributes(copy, key_string);
- DCHECK(maybe.IsJust());
- PropertyAttributes attributes = maybe.FromJust();
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
+ DCHECK(names->get(i)->IsName());
+ Handle<Name> name(Name::cast(names->get(i)));
Handle<Object> value =
- Object::GetProperty(copy, key_string).ToHandleChecked();
+ Object::GetProperty(copy, name).ToHandleChecked();
if (value->IsJSObject()) {
Handle<JSObject> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -6337,7 +8132,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
JSObject);
if (copying) {
// Creating object copy for literals. No strict mode needed.
- JSObject::SetProperty(copy, key_string, result, SLOPPY).Assert();
+ JSObject::SetProperty(copy, name, result, SLOPPY).Assert();
}
}
}
@@ -6517,6 +8312,71 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
}
+// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
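+// Returns whether |object| has any own enumerable elements, checking the
+// elements backing store directly for each ElementsKind (and the characters
+// of wrapped strings).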
+bool HasEnumerableElements(JSObject* object) {
+ if (object->IsJSValue()) {
+ Object* value = JSValue::cast(object)->value();
+ if (value->IsString()) {
+ if (String::cast(value)->length() > 0) return true;
+ }
+ }
+ switch (object->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ int length = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : object->elements()->length();
+ return length > 0;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ FixedArray* elements = FixedArray::cast(object->elements());
+ int length = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : elements->length();
+ for (int i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) return true;
+ }
+ return false;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ int length = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : object->elements()->length();
+ // Zero-length arrays would use the empty FixedArray...
+ if (length == 0) return false;
+ // ...so only cast to FixedDoubleArray otherwise.
+ FixedDoubleArray* elements = FixedDoubleArray::cast(object->elements());
+ for (int i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) return true;
+ }
+ return false;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ int length = object->elements()->length();
+ return length > 0;
+ }
+ case DICTIONARY_ELEMENTS: {
+ SeededNumberDictionary* elements =
+ SeededNumberDictionary::cast(object->elements());
+ return elements->NumberOfElementsFilterAttributes(ONLY_ENUMERABLE) > 0;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ // We're approximating non-empty arguments objects here.
+ return true;
+ }
+ UNREACHABLE();
+ return true;
+}
+
+
// Tests for the fast common case for property enumeration:
// - This object and all prototypes have an enum cache (which means that
//   none of them is a proxy, has an interceptor, or needs an access check).
@@ -6533,33 +8393,15 @@ bool JSReceiver::IsSimpleEnum() {
if (current->IsAccessCheckNeeded()) return false;
DCHECK(!current->HasNamedInterceptor());
DCHECK(!current->HasIndexedInterceptor());
- if (current->NumberOfEnumElements() > 0) return false;
+ if (HasEnumerableElements(current)) return false;
if (current != this && enum_length != 0) return false;
}
return true;
}
-static bool FilterKey(Object* key, PropertyAttributes filter) {
- if ((filter & SYMBOLIC) && key->IsSymbol()) {
- return true;
- }
-
- if ((filter & PRIVATE_SYMBOL) &&
- key->IsSymbol() && Symbol::cast(key)->is_private()) {
- return true;
- }
-
- if ((filter & STRING) && !key->IsSymbol()) {
- return true;
- }
-
- return false;
-}
-
-
int Map::NumberOfDescribedProperties(DescriptorFlag which,
- PropertyAttributes filter) {
+ PropertyFilter filter) {
int result = 0;
DescriptorArray* descs = instance_descriptors();
int limit = which == ALL_DESCRIPTORS
@@ -6567,7 +8409,7 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
: NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- !FilterKey(descs->GetKey(i), filter)) {
+ !descs->GetKey(i)->FilterKey(filter)) {
result++;
}
}
@@ -6612,92 +8454,91 @@ static Handle<FixedArray> ReduceFixedArrayTo(
}
-Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result) {
- Isolate* isolate = object->GetIsolate();
- if (object->HasFastProperties()) {
- int own_property_count = object->map()->EnumLength();
- // If the enum length of the given map is set to kInvalidEnumCache, this
- // means that the map itself has never used the present enum cache. The
- // first step to using the cache is to set the enum length of the map by
- // counting the number of own descriptors that are not DONT_ENUM or
- // SYMBOLIC.
- if (own_property_count == kInvalidEnumCacheSentinel) {
- own_property_count = object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_SHOW);
- } else {
- DCHECK(own_property_count == object->map()->NumberOfDescribedProperties(
- OWN_DESCRIPTORS, DONT_SHOW));
- }
-
- if (object->map()->instance_descriptors()->HasEnumCache()) {
- DescriptorArray* desc = object->map()->instance_descriptors();
- Handle<FixedArray> keys(desc->GetEnumCache(), isolate);
-
- // In case the number of properties required in the enum are actually
- // present, we can reuse the enum cache. Otherwise, this means that the
- // enum cache was generated for a previous (smaller) version of the
- // Descriptor Array. In that case we regenerate the enum cache.
- if (own_property_count <= keys->length()) {
- if (cache_result) object->map()->SetEnumLength(own_property_count);
- isolate->counters()->enum_cache_hits()->Increment();
- return ReduceFixedArrayTo(keys, own_property_count);
- }
- }
-
- Handle<Map> map(object->map());
+namespace {
- if (map->instance_descriptors()->IsEmpty()) {
+Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object,
+ bool cache_enum_length) {
+ Handle<Map> map(object->map());
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ int own_property_count = map->EnumLength();
+  // If the enum length of the given map is set to kInvalidEnumCacheSentinel,
+  // the map itself has never used the present enum cache. The first step to
+  // using the cache is to set the enum length of the map by counting the
+  // number of own descriptors that match the ENUMERABLE_STRINGS filter.
+ if (own_property_count == kInvalidEnumCacheSentinel) {
+ own_property_count =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+ } else {
+ DCHECK(
+ own_property_count ==
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
+ }
+
+ if (descs->HasEnumCache()) {
+ Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
+    // If the enum cache contains at least as many properties as required,
+    // we can reuse it. Otherwise it was generated for a previous (smaller)
+    // version of the DescriptorArray, and we regenerate it below.
+ if (own_property_count <= keys->length()) {
isolate->counters()->enum_cache_hits()->Increment();
- if (cache_result) map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
+ if (cache_enum_length) map->SetEnumLength(own_property_count);
+ return ReduceFixedArrayTo(keys, own_property_count);
}
+ }
- isolate->counters()->enum_cache_misses()->Increment();
+ if (descs->IsEmpty()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_enum_length) map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(
- own_property_count);
- Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
- own_property_count);
+ isolate->counters()->enum_cache_misses()->Increment();
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+ Handle<FixedArray> storage =
+ isolate->factory()->NewFixedArray(own_property_count);
+ Handle<FixedArray> indices =
+ isolate->factory()->NewFixedArray(own_property_count);
- int size = map->NumberOfOwnDescriptors();
- int index = 0;
+ int size = map->NumberOfOwnDescriptors();
+ int index = 0;
- for (int i = 0; i < size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- Object* key = descs->GetKey(i);
- if (!(details.IsDontEnum() || key->IsSymbol())) {
- storage->set(index, key);
- if (!indices.is_null()) {
- if (details.type() != DATA) {
- indices = Handle<FixedArray>();
- } else {
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- int load_by_field_index = field_index.GetLoadByFieldIndex();
- indices->set(index, Smi::FromInt(load_by_field_index));
- }
- }
- index++;
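+  // |indices| is dropped below if any enumerable property is not stored as a
+  // DATA field, since only fields support the load-by-field-index fast path.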
+ for (int i = 0; i < size; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Object* key = descs->GetKey(i);
+ if (details.IsDontEnum() || key->IsSymbol()) continue;
+ storage->set(index, key);
+ if (!indices.is_null()) {
+ if (details.type() != DATA) {
+ indices = Handle<FixedArray>();
+ } else {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ int load_by_field_index = field_index.GetLoadByFieldIndex();
+ indices->set(index, Smi::FromInt(load_by_field_index));
}
}
- DCHECK(index == storage->length());
+ index++;
+ }
+ DCHECK(index == storage->length());
- Handle<FixedArray> bridge_storage =
- isolate->factory()->NewFixedArray(
- DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage,
- *storage,
- indices.is_null() ? Object::cast(Smi::FromInt(0))
- : Object::cast(*indices));
- if (cache_result) {
- object->map()->SetEnumLength(own_property_count);
- }
- return storage;
- } else if (object->IsGlobalObject()) {
+ DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
+ if (cache_enum_length) {
+ map->SetEnumLength(own_property_count);
+ }
+ return storage;
+}
+
+} // namespace
+
+
+Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_enum_length) {
+ Isolate* isolate = object->GetIsolate();
+ if (object->HasFastProperties()) {
+ return GetFastEnumPropertyKeys(isolate, object, cache_enum_length);
+ } else if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(object->global_dictionary());
int length = dictionary->NumberOfEnumElements();
if (length == 0) {
@@ -6719,210 +8560,368 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
}
-Handle<FixedArray> KeyAccumulator::GetKeys() {
- if (length_ == 0) {
- return isolate_->factory()->empty_fixed_array();
- }
- if (set_.is_null()) {
- keys_->Shrink(length_);
- return keys_;
- }
- // copy over results from set_
- Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
- for (int i = 0; i < length_; i++) {
- result->set(i, set_->KeyAt(i));
- }
- return result;
-}
+enum IndexedOrNamed { kIndexed, kNamed };
-void KeyAccumulator::AddKey(Handle<Object> key, int check_limit) {
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- DCHECK(key->IsNumber() || key->IsName());
- }
-#endif
- if (!set_.is_null()) {
- set_ = OrderedHashSet::Add(set_, key);
- length_ = set_->NumberOfElements();
- return;
+// Returns |true| on success, |nothing| on exception.
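+// |Callback| must correspond to |type|: v8::IndexedPropertyEnumeratorCallback
+// for kIndexed, v8::GenericNamedPropertyEnumeratorCallback for kNamed.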
+template <class Callback, IndexedOrNamed type>
+static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ PropertyFilter filter,
+ KeyAccumulator* accumulator) {
+ if (type == kIndexed) {
+ if (!object->HasIndexedInterceptor()) return Just(true);
+ } else {
+ if (!object->HasNamedInterceptor()) return Just(true);
}
- // check if we already have the key in the case we are still using
- // the keys_ FixedArray
- check_limit = Min(check_limit, length_);
- for (int i = 0; i < check_limit; i++) {
- Object* current = keys_->get(i);
- if (current->KeyEquals(*key)) return;
+ Handle<InterceptorInfo> interceptor(type == kIndexed
+ ? object->GetIndexedInterceptor()
+ : object->GetNamedInterceptor(),
+ isolate);
+ if ((filter & ONLY_ALL_CAN_READ) && !interceptor->all_can_read()) {
+ return Just(true);
}
- EnsureCapacity(length_);
- keys_->set(length_, *key);
- length_++;
-}
-
-
-void KeyAccumulator::AddKeys(Handle<FixedArray> array,
- FixedArray::KeyFilter filter) {
- int add_length = array->length();
- if (add_length == 0) return;
- if (keys_.is_null() && filter == FixedArray::ALL_KEYS) {
- keys_ = array;
- length_ = keys_->length();
- return;
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *object);
+ v8::Local<v8::Object> result;
+ if (!interceptor->enumerator()->IsUndefined()) {
+ Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
+ const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
+ : "interceptor-named-enum";
+ LOG(isolate, ApiObjectAccess(log_tag, *object));
+ result = args.Call(enum_fun);
}
- PrepareForComparisons(add_length);
- int previous_key_count = length_;
- for (int i = 0; i < add_length; i++) {
- Handle<Object> current(array->get(i), isolate_);
- if (filter == FixedArray::NON_SYMBOL_KEYS && current->IsSymbol()) continue;
- AddKey(current, previous_key_count);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.IsEmpty()) return Just(true);
+ DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
+ (v8::Utils::OpenHandle(*result)->IsJSObject() &&
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*result))
+ ->HasSloppyArgumentsElements()));
+ // The accumulator takes care of string/symbol filtering.
+ if (type == kIndexed) {
+ accumulator->AddElementKeysFromInterceptor(
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+ } else {
+ accumulator->AddKeys(
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
}
+ return Just(true);
}
-void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
- FixedArray::KeyFilter filter) {
- DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
- ElementsAccessor* accessor = array_like->GetElementsAccessor();
- accessor->AddElementsToKeyAccumulator(array_like, this, filter);
-}
-
-
-void KeyAccumulator::PrepareForComparisons(int count) {
- // Depending on how many comparisons we do we should switch to the
- // hash-table-based checks which have a one-time overhead for
- // initializing but O(1) for HasKey checks.
- if (!set_.is_null()) return;
- // This limit was obtained through evaluation of a microbench.
- if (length_ * count < 50) return;
- set_ = OrderedHashSet::Allocate(isolate_, length_);
- for (int i = 0; i < length_; i++) {
- Handle<Object> value(keys_->get(i), isolate_);
- set_ = OrderedHashSet::Add(set_, value);
+// Returns |true| on success, |false| if prototype walking should be stopped,
+// |nothing| if an exception was thrown.
+static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ PropertyFilter* filter,
+ JSReceiver::KeyCollectionType type,
+ KeyAccumulator* accumulator) {
+ accumulator->NextPrototype();
+ // Check access rights if required.
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
+ // The cross-origin spec says that [[Enumerate]] shall return an empty
+ // iterator when it doesn't have access...
+ if (type == JSReceiver::INCLUDE_PROTOS) {
+ return Just(false);
+ }
+ // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
+ DCHECK(type == JSReceiver::OWN_ONLY);
+ *filter = static_cast<PropertyFilter>(*filter | ONLY_ALL_CAN_READ);
}
-}
-
-void KeyAccumulator::EnsureCapacity(int capacity) {
- if (keys_.is_null() || keys_->length() <= capacity) {
- Grow();
- }
-}
+ JSObject::CollectOwnElementKeys(object, accumulator, *filter);
+ // Add the element keys from the interceptor.
+ Maybe<bool> success =
+ GetKeysFromInterceptor<v8::IndexedPropertyEnumeratorCallback, kIndexed>(
+ isolate, receiver, object, *filter, accumulator);
+ MAYBE_RETURN(success, Nothing<bool>());
-void KeyAccumulator::Grow() {
- // The OrderedHashSet handles growing by itself.
- if (!set_.is_null()) return;
- // Otherwise, grow the internal keys_ FixedArray
- int capacity = keys_.is_null() ? 16 : keys_->length() * 2 + 16;
- Handle<FixedArray> new_keys = isolate_->factory()->NewFixedArray(capacity);
- if (keys_.is_null()) {
- keys_ = new_keys;
- return;
- }
- int buffer_length = keys_->length();
- {
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = new_keys->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < buffer_length; i++) {
- new_keys->set(i, keys_->get(i), mode);
- }
+ if (*filter == ENUMERABLE_STRINGS) {
+ // We can cache the computed property keys if access checks are
+ // not needed and no interceptors are involved.
+ //
+ // We do not use the cache if the object has elements; caching the
+ // property names therefore makes no sense for arguments objects,
+ // which always have elements.
+ // Wrapped strings have elements, but no elements array or
+ // dictionary, so the fast inline test for whether to use the cache
+ // would wrongly say yes; we must not create a cache for them either.
+ Handle<JSFunction> arguments_function(
+ JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
+ bool cache_enum_length =
+ ((object->map()->GetConstructor() != *arguments_function) &&
+ !object->IsJSValue() && !object->IsAccessCheckNeeded() &&
+ !object->HasNamedInterceptor() && !object->HasIndexedInterceptor());
+ // Compute the property keys and cache them if possible.
+ Handle<FixedArray> enum_keys =
+ JSObject::GetEnumPropertyKeys(object, cache_enum_length);
+ accumulator->AddKeys(enum_keys);
+ } else {
+ object->CollectOwnPropertyNames(accumulator, *filter);
}
- keys_ = new_keys;
-}
+ // Add the property keys from the interceptor.
+ success = GetKeysFromInterceptor<v8::GenericNamedPropertyEnumeratorCallback,
+ kNamed>(isolate, receiver, object, *filter,
+ accumulator);
+ MAYBE_RETURN(success, Nothing<bool>());
+ return Just(true);
+}
-MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
- KeyCollectionType type) {
- USE(ContainsOnlyValidKeys);
- Isolate* isolate = object->GetIsolate();
- KeyAccumulator accumulator(isolate);
- Handle<JSFunction> arguments_function(
- JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
- PrototypeIterator::WhereToEnd end = type == OWN_ONLY
+// Helper function for JSReceiver::GetKeys() below. Can be called recursively.
+// Returns |true| or |nothing|.
+static Maybe<bool> GetKeys_Internal(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSReceiver> object,
+ JSReceiver::KeyCollectionType type,
+ PropertyFilter filter,
+ KeyAccumulator* accumulator) {
+ PrototypeIterator::WhereToEnd end = type == JSReceiver::OWN_ONLY
? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
- // Only collect keys if access is permitted.
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(end); iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- Handle<JSProxy> proxy = PrototypeIterator::GetCurrent<JSProxy>(iter);
- Handle<Object> args[] = { proxy };
- Handle<Object> names;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, names,
- Execution::Call(isolate,
- isolate->proxy_enumerate(),
- object,
- arraysize(args),
- args),
- FixedArray);
- accumulator.AddKeys(Handle<JSObject>::cast(names), FixedArray::ALL_KEYS);
- break;
- }
-
- Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
-
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() && !isolate->MayAccess(current)) {
- if (iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- isolate->ReportFailedAccessCheck(current);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ Handle<JSReceiver> current =
+ PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ Maybe<bool> result = Just(false); // Dummy initialization.
+ if (current->IsJSProxy()) {
+ if (type == JSReceiver::OWN_ONLY) {
+ result = JSProxy::OwnPropertyKeys(isolate, receiver,
+ Handle<JSProxy>::cast(current),
+ filter, accumulator);
+ } else {
+ DCHECK(type == JSReceiver::INCLUDE_PROTOS);
+ result = JSProxy::Enumerate(
+ isolate, receiver, Handle<JSProxy>::cast(current), accumulator);
}
- break;
+ } else {
+ DCHECK(current->IsJSObject());
+ result = GetKeysFromJSObject(isolate, receiver,
+ Handle<JSObject>::cast(current), &filter,
+ type, accumulator);
}
+ MAYBE_RETURN(result, Nothing<bool>());
+ if (!result.FromJust()) break; // |false| means "stop iterating".
+ }
+ return Just(true);
+}
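For orientation: stripped of proxies, interceptors, access-check reporting and hidden prototypes, the loop in GetKeys_Internal above reduces to a prototype walk in which each level contributes its keys and a level may stop the walk early. A minimal standalone sketch under those assumptions (FakeObject and CollectKeys are hypothetical stand-ins, not V8 API):

#include <iostream>
#include <string>
#include <vector>

struct FakeObject {
  std::vector<std::string> own_keys;
  FakeObject* prototype = nullptr;  // nullptr terminates the chain
  bool access_allowed = true;       // models the failed-access-check case
};

// Mirrors the loop above: a level that reports "stop" (here, a failed
// access check) ends the walk without signalling an error.
void CollectKeys(FakeObject* receiver, bool own_only,
                 std::vector<std::string>* accumulator) {
  for (FakeObject* current = receiver; current != nullptr;
       current = current->prototype) {
    if (!current->access_allowed) break;  // Just(false): stop iterating
    accumulator->insert(accumulator->end(), current->own_keys.begin(),
                        current->own_keys.end());
    // OWN_ONLY ends after the receiver (ignoring hidden prototypes).
    if (own_only) break;
  }
}

int main() {
  FakeObject proto{{"inherited"}};
  FakeObject obj{{"a", "b"}, &proto};
  std::vector<std::string> keys;
  CollectKeys(&obj, /*own_only=*/false, &keys);
  for (const auto& k : keys) std::cout << k << "\n";  // a b inherited
}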
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- accumulator.AddKeys(element_keys, FixedArray::ALL_KEYS);
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForIndexedInterceptor(
- current, object).ToHandle(&result)) {
- accumulator.AddKeys(result, FixedArray::ALL_KEYS);
- }
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
- }
+// ES6 9.5.11
+// Returns false in case of exception.
+// static
+Maybe<bool> JSProxy::Enumerate(Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy,
+ KeyAccumulator* accumulator) {
+ STACK_CHECK(Nothing<bool>());
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked,
+ isolate->factory()->enumerate_string()));
+ return Nothing<bool>();
+ }
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 5. Let trap be ? GetMethod(handler, "enumerate").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->enumerate_string()),
+ Nothing<bool>());
+ // 6. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 6a. Return target.[[Enumerate]]().
+ return GetKeys_Internal(isolate, receiver, target, INCLUDE_PROTOS,
+ ENUMERABLE_STRINGS, accumulator);
+ }
+ // The "proxy_enumerate" helper calls the trap (steps 7 - 9), which returns
+ // a generator; it then iterates over that generator until it's exhausted
+ // and returns an array containing the generated values.
+ Handle<Object> trap_result_array;
+ Handle<Object> args[] = {trap, handler, target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_array,
+ Execution::Call(isolate, isolate->proxy_enumerate(),
+ isolate->factory()->undefined_value(), arraysize(args),
+ args),
+ Nothing<bool>());
+ accumulator->NextPrototype();
+ accumulator->AddKeysFromProxy(Handle<JSObject>::cast(trap_result_array));
+ return Just(true);
+}
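The revocation and trap-lookup steps (2-7) follow a pattern shared with OwnPropertyKeys below: a revoked proxy throws, a missing trap defers to the target, and otherwise the trap result is used. A standalone sketch of just that dispatch (FakeProxy and ProxyKeys are hypothetical stand-ins; exceptions stand in for isolate->Throw()):

#include <functional>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

struct FakeProxy {
  bool revoked = false;
  const std::vector<std::string>* target = nullptr;  // target's own keys
  // An empty optional models an undefined trap on the handler (step 6).
  std::optional<std::function<std::vector<std::string>()>> trap;
};

std::vector<std::string> ProxyKeys(const FakeProxy& proxy) {
  if (proxy.revoked) throw std::runtime_error("proxy revoked");  // step 2
  if (!proxy.trap) return *proxy.target;  // step 6a: defer to the target
  return (*proxy.trap)();                 // steps 7+: call the trap
}

int main() {
  std::vector<std::string> target_keys{"x", "y"};
  FakeProxy p{false, &target_keys, std::nullopt};
  for (const auto& k : ProxyKeys(p)) std::cout << k << "\n";  // x y
}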
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- bool cache_enum_keys =
- ((current->map()->GetConstructor() != *arguments_function) &&
- !current->IsJSValue() && !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() && !current->HasIndexedInterceptor());
- // Compute the property keys and cache them if possible.
- Handle<FixedArray> enum_keys =
- JSObject::GetEnumPropertyKeys(current, cache_enum_keys);
- accumulator.AddKeys(enum_keys, FixedArray::ALL_KEYS);
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
-
- // Add the non-symbol property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForNamedInterceptor(
- current, object).ToHandle(&result)) {
- accumulator.AddKeys(result, FixedArray::NON_SYMBOL_KEYS);
- }
- DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
+// ES6 9.5.12
+// Returns |true| on success, |nothing| in case of exception.
+// static
+Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy,
+ PropertyFilter filter,
+ KeyAccumulator* accumulator) {
+ STACK_CHECK(Nothing<bool>());
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate->factory()->ownKeys_string()));
+ return Nothing<bool>();
+ }
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 5. Let trap be ? GetMethod(handler, "ownKeys").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->ownKeys_string()),
+ Nothing<bool>());
+ // 6. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 6a. Return target.[[OwnPropertyKeys]]().
+ return GetKeys_Internal(isolate, receiver, target, OWN_ONLY, filter,
+ accumulator);
+ }
+ // 7. Let trapResultArray be Call(trap, handler, «target»).
+ Handle<Object> trap_result_array;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_array,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
+ // «String, Symbol»).
+ Handle<FixedArray> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Object::CreateListFromArrayLike(isolate, trap_result_array,
+ ElementTypes::kStringAndSymbol),
+ Nothing<bool>());
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
+ Handle<FixedArray> target_keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_keys,
+ JSReceiver::OwnPropertyKeys(target),
+ Nothing<bool>());
+ // 11. (Assert)
+ // 12. Let targetConfigurableKeys be an empty List.
+ // To save memory, we reuse target_keys and modify it in place.
+ Handle<FixedArray> target_configurable_keys = target_keys;
+ // 13. Let targetNonconfigurableKeys be an empty List.
+ Handle<FixedArray> target_nonconfigurable_keys =
+ isolate->factory()->NewFixedArray(target_keys->length());
+ int nonconfigurable_keys_length = 0;
+ // 14. Repeat, for each element key of targetKeys:
+ for (int i = 0; i < target_keys->length(); ++i) {
+ // 14a. Let desc be ? target.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, target, handle(target_keys->get(i), isolate), &desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
+ if (found.FromJust() && !desc.configurable()) {
+ // 14b i. Append key as an element of targetNonconfigurableKeys.
+ target_nonconfigurable_keys->set(nonconfigurable_keys_length,
+ target_keys->get(i));
+ nonconfigurable_keys_length++;
+ // The key was moved, null it out in the original list.
+ target_keys->set(i, Smi::FromInt(0));
+ } else {
+ // 14c. Else,
+ // 14c i. Append key as an element of targetConfigurableKeys.
+ // (No-op, just keep it in |target_keys|.)
+ }
+ }
+ accumulator->NextPrototype(); // Prepare for accumulating keys.
+ // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
+ // then:
+ if (extensible_target && nonconfigurable_keys_length == 0) {
+ // 15a. Return trapResult.
+ return accumulator->AddKeysFromProxy(proxy, trap_result);
+ }
+ // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
+ Zone set_zone;
+ const int kPresent = 1;
+ const int kGone = 0;
+ IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
+ int unchecked_result_keys_size = trap_result->length();
+ for (int i = 0; i < trap_result->length(); ++i) {
+ DCHECK(trap_result->get(i)->IsUniqueName());
+ unchecked_result_keys.Set(trap_result->get(i), kPresent);
+ }
+ // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
+ for (int i = 0; i < nonconfigurable_keys_length; ++i) {
+ Object* key = target_nonconfigurable_keys->get(i);
+ // 17a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ int* found = unchecked_result_keys.Find(key);
+ if (found == nullptr || *found == kGone) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
+ return Nothing<bool>();
+ }
+ // 17b. Remove key from uncheckedResultKeys.
+ *found = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 18. If extensibleTarget is true, return trapResult.
+ if (extensible_target) {
+ return accumulator->AddKeysFromProxy(proxy, trap_result);
+ }
+ // 19. Repeat, for each key that is an element of targetConfigurableKeys:
+ for (int i = 0; i < target_configurable_keys->length(); ++i) {
+ Object* key = target_configurable_keys->get(i);
+ if (key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
+ // 19a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ int* found = unchecked_result_keys.Find(key);
+ if (found == nullptr || *found == kGone) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
+ return Nothing<bool>();
}
+ // 19b. Remove key from uncheckedResultKeys.
+ *found = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
+ if (unchecked_result_keys_size != 0) {
+ DCHECK_GT(unchecked_result_keys_size, 0);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysNonExtensible));
+ return Nothing<bool>();
}
+ // 21. Return trapResult.
+ return accumulator->AddKeysFromProxy(proxy, trap_result);
+}
+
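The invariant checks in steps 15-21 are easier to follow without handles, weak cells and the Zone-backed IdentityMap: build a presence map over the trap result, consume every key the target insists on, and throw if anything is missing or, for a non-extensible target, left over. A standalone sketch under those assumptions (strings stand in for names, exceptions for isolate->Throw(); not V8 API):

#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

std::vector<std::string> CheckOwnKeysInvariants(
    const std::vector<std::string>& trap_result,
    const std::vector<std::string>& nonconfigurable,
    const std::vector<std::string>& configurable, bool extensible) {
  // Step 15: nothing to verify.
  if (extensible && nonconfigurable.empty()) return trap_result;
  // Step 16: membership map over the trap result (kPresent/kGone).
  std::unordered_map<std::string, bool> unchecked;
  for (const auto& key : trap_result) unchecked[key] = true;
  int remaining = static_cast<int>(trap_result.size());
  auto consume = [&](const std::string& key) {
    auto it = unchecked.find(key);
    if (it == unchecked.end() || !it->second)
      throw std::runtime_error("ownKeys trap result misses key: " + key);
    it->second = false;  // kGone
    --remaining;
  };
  for (const auto& key : nonconfigurable) consume(key);  // step 17
  if (extensible) return trap_result;                    // step 18
  for (const auto& key : configurable) consume(key);     // step 19
  if (remaining != 0)  // step 20: trap reported keys the target lacks
    throw std::runtime_error("ownKeys trap reported extra keys");
  return trap_result;  // step 21
}

int main() {
  try {
    CheckOwnKeysInvariants({"a"}, {"a", "b"}, {}, /*extensible=*/false);
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";  // ownKeys trap result misses key: b
  }
}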
- Handle<FixedArray> keys = accumulator.GetKeys();
+MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
+ KeyCollectionType type,
+ PropertyFilter filter,
+ GetKeysConversion keys_conversion) {
+ USE(ContainsOnlyValidKeys);
+ Isolate* isolate = object->GetIsolate();
+ KeyAccumulator accumulator(isolate, filter);
+ MAYBE_RETURN(
+ GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
+ MaybeHandle<FixedArray>());
+ Handle<FixedArray> keys = accumulator.GetKeys(keys_conversion);
DCHECK(ContainsOnlyValidKeys(keys));
return keys;
}
@@ -6967,31 +8966,41 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ return DefineAccessor(&it, getter, setter, attributes);
+}
- if (it.state() == LookupIterator::ACCESS_CHECK) {
- if (!it.HasAccess()) {
- isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+
+MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes) {
+ Isolate* isolate = it->isolate();
+
+ if (it->state() == LookupIterator::ACCESS_CHECK) {
+ if (!it->HasAccess()) {
+ isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
- it.Next();
+ it->Next();
}
+ Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
// Ignore accessors on typed arrays.
- if (it.IsElement() && object->HasFixedTypedArrayElements()) {
- return it.factory()->undefined_value();
+ if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ return it->factory()->undefined_value();
}
Handle<Object> old_value = isolate->factory()->the_hole_value();
bool is_observed = object->map()->is_observed() &&
- !isolate->IsInternallyUsedPropertyName(name);
+ !isolate->IsInternallyUsedPropertyName(it->GetName());
bool preexists = false;
if (is_observed) {
- CHECK(GetPropertyAttributes(&it).IsJust());
- preexists = it.IsFound();
- if (preexists && (it.state() == LookupIterator::DATA ||
- it.GetAccessors()->IsAccessorInfo())) {
- old_value = GetProperty(&it).ToHandleChecked();
+ CHECK(GetPropertyAttributes(it).IsJust());
+ preexists = it->IsFound();
+ if (preexists && (it->state() == LookupIterator::DATA ||
+ it->GetAccessors()->IsAccessorInfo())) {
+ old_value = GetProperty(it).ToHandleChecked();
}
}
@@ -7000,10 +9009,10 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull() || !setter->IsNull());
if (!getter->IsNull()) {
- it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
+ it->TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
}
if (!setter->IsNull()) {
- it.TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
+ it->TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
}
if (is_observed) {
@@ -7011,7 +9020,8 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
AssertNoContextChange ncc(isolate);
const char* type = preexists ? "reconfigure" : "add";
RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, type, name, old_value), Object);
+ isolate, EnqueueChangeRecord(object, type, it->GetName(), old_value),
+ Object);
}
return isolate->factory()->undefined_value();
@@ -7139,7 +9149,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
}
}
return GetHeap()->undefined_value();
- } else if (IsGlobalObject()) {
+ } else if (IsJSGlobalObject()) {
return global_dictionary()->SlowReverseLookup(value);
} else {
return property_dictionary()->SlowReverseLookup(value);
@@ -7165,7 +9175,8 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
if (!map->is_dictionary_map()) {
new_bit_field3 = IsUnstable::update(new_bit_field3, false);
}
- new_bit_field3 = Counter::update(new_bit_field3, kRetainingCounterStart);
+ new_bit_field3 =
+ ConstructionCounter::update(new_bit_field3, kNoSlackTracking);
result->set_bit_field3(new_bit_field3);
return result;
}
@@ -7261,13 +9272,57 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
}
+Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
+ int in_object_properties,
+ int unused_property_fields) {
+#ifdef DEBUG
+ Isolate* isolate = map->GetIsolate();
+ // Strict and strong function maps have Function as a constructor but the
+ // Function's initial map is a sloppy function map. Same holds for
+ // GeneratorFunction and its initial map.
+ Object* constructor = map->GetConstructor();
+ DCHECK(constructor->IsJSFunction());
+ DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
+ *map == *isolate->strict_function_map() ||
+ *map == *isolate->strong_function_map() ||
+ *map == *isolate->strict_generator_function_map() ||
+ *map == *isolate->strong_generator_function_map());
+#endif
+ // Initial maps must always own their descriptors, and their descriptor
+ // arrays must not contain descriptors that do not belong to the map.
+ DCHECK(map->owns_descriptors());
+ DCHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
+
+ Handle<Map> result = RawCopy(map, instance_size);
+
+ // Please note instance_type and instance_size are set when allocated.
+ result->SetInObjectProperties(in_object_properties);
+ result->set_unused_property_fields(unused_property_fields);
+
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors > 0) {
+ // The copy will use the same descriptors array.
+ result->UpdateDescriptors(map->instance_descriptors(),
+ map->GetLayoutDescriptor());
+ result->SetNumberOfOwnDescriptors(number_of_own_descriptors);
+
+ DCHECK_EQ(result->NumberOfFields(),
+ in_object_properties - unused_property_fields);
+ }
+
+ return result;
+}
+
+
Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
Handle<Map> result = RawCopy(map, map->instance_size());
// Please note instance_type and instance_size are set when allocated.
- result->SetInObjectProperties(map->GetInObjectProperties());
- result->set_unused_property_fields(map->unused_property_fields());
-
+ if (map->IsJSObjectMap()) {
+ result->SetInObjectProperties(map->GetInObjectProperties());
+ result->set_unused_property_fields(map->unused_property_fields());
+ }
result->ClearCodeCache(map->GetHeap());
map->NotifyLeafMapLayoutChange();
return result;
@@ -7280,8 +9335,8 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
// Sanity check. This path is only to be taken if the map owns its descriptor
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
- DCHECK(map->NumberOfOwnDescriptors() ==
- map->instance_descriptors()->number_of_descriptors());
+ DCHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(map);
Handle<Name> name = descriptor->GetKey();
@@ -7346,7 +9401,15 @@ void Map::TraceAllTransitions(Map* map) {
void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag) {
- parent->set_owns_descriptors(false);
+ if (!parent->GetBackPointer()->IsUndefined()) {
+ parent->set_owns_descriptors(false);
+ } else {
+ // |parent| is an initial map and must keep ownership of its descriptors;
+ // there must be no descriptors in the array that do not belong to the map.
+ DCHECK(parent->owns_descriptors());
+ DCHECK_EQ(parent->NumberOfOwnDescriptors(),
+ parent->instance_descriptors()->number_of_descriptors());
+ }
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
#if TRACE_MAPS
@@ -7408,48 +9471,85 @@ Handle<Map> Map::CopyReplaceDescriptors(
}
-// Since this method is used to rewrite an existing transition tree, it can
-// always insert transitions without checking.
-Handle<Map> Map::CopyInstallDescriptors(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
+// Creates a transition tree starting from |split_map| and adds all
+// descriptors starting at index |split_map|.NumberOfOwnDescriptors().
+// The procedure is tricky because of the GC and the special
+// descriptor-marking logic.
+Handle<Map> Map::AddMissingTransitions(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor) {
DCHECK(descriptors->IsSortedNoDuplicates());
+ int split_nof = split_map->NumberOfOwnDescriptors();
+ int nof_descriptors = descriptors->number_of_descriptors();
+ DCHECK_LT(split_nof, nof_descriptors);
+
+ // Start by creating the last map, which will own the full descriptors
+ // array. This is necessary to guarantee that the GC will mark the whole
+ // descriptor array if any of the allocations happening below fail.
+ // The number of unused properties is temporarily incorrect and the layout
+ // descriptor could unnecessarily be in slow mode, but we fix both after
+ // all the other intermediate maps are created.
+ Handle<Map> last_map = CopyDropDescriptors(split_map);
+ last_map->InitializeDescriptors(*descriptors, *full_layout_descriptor);
+ last_map->set_unused_property_fields(0);
+
+ // While creating the intermediate maps we violate the descriptor-sharing
+ // invariant, since the last map is not yet connected to the transition tree
+ // we create here. This is safe because the GC never trims a map's
+ // descriptors if there are no dead transitions from that map, which is
+ // exactly the case for all the intermediate maps we create here.
+ Handle<Map> map = split_map;
+ for (int i = split_nof; i < nof_descriptors - 1; ++i) {
+ Handle<Map> new_map = CopyDropDescriptors(map);
+ InstallDescriptors(map, new_map, i, descriptors, full_layout_descriptor);
+ map = new_map;
+ }
+ map->NotifyLeafMapLayoutChange();
+ InstallDescriptors(map, last_map, nof_descriptors - 1, descriptors,
+ full_layout_descriptor);
+ return last_map;
+}
- Handle<Map> result = CopyDropDescriptors(map);
- result->set_instance_descriptors(*descriptors);
- result->SetNumberOfOwnDescriptors(new_descriptor + 1);
+// Since this method is used to rewrite an existing transition tree, it can
+// always insert transitions without checking.
+void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ DCHECK(descriptors->IsSortedNoDuplicates());
- int unused_property_fields = map->unused_property_fields();
+ child->set_instance_descriptors(*descriptors);
+ child->SetNumberOfOwnDescriptors(new_descriptor + 1);
+
+ int unused_property_fields = parent->unused_property_fields();
PropertyDetails details = descriptors->GetDetails(new_descriptor);
if (details.location() == kField) {
- unused_property_fields = map->unused_property_fields() - 1;
+ unused_property_fields = parent->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
}
}
- result->set_unused_property_fields(unused_property_fields);
+ child->set_unused_property_fields(unused_property_fields);
if (FLAG_unbox_double_fields) {
Handle<LayoutDescriptor> layout_descriptor =
- LayoutDescriptor::AppendIfFastOrUseFull(map, details,
+ LayoutDescriptor::AppendIfFastOrUseFull(parent, details,
full_layout_descriptor);
- result->set_layout_descriptor(*layout_descriptor);
+ child->set_layout_descriptor(*layout_descriptor);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+ CHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
}
#else
- SLOW_DCHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+ SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
#endif
- result->set_visitor_id(Heap::GetStaticVisitorIdForMap(*result));
+ child->set_visitor_id(Heap::GetStaticVisitorIdForMap(*child));
}
Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
- ConnectTransition(map, result, name, SIMPLE_PROPERTY_TRANSITION);
-
- return result;
+ ConnectTransition(parent, child, name, SIMPLE_PROPERTY_TRANSITION);
}
@@ -7488,6 +9588,58 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
}
+Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
+ LanguageMode language_mode, FunctionKind kind) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ // The initial map for a sloppy-mode function is stored in the function
+ // constructor. Initial maps for strict and strong modes are cached as
+ // special transitions, keyed by |strict_function_transition_symbol| and
+ // |strong_function_transition_symbol| respectively.
+ if (language_mode == SLOPPY) return initial_map;
+ Isolate* isolate = initial_map->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<Symbol> transition_symbol;
+
+ int map_index = Context::FunctionMapIndex(language_mode, kind);
+ Handle<Map> function_map(
+ Map::cast(isolate->native_context()->get(map_index)));
+
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ switch (language_mode) {
+ case STRICT:
+ transition_symbol = factory->strict_function_transition_symbol();
+ break;
+ case STRONG:
+ transition_symbol = factory->strong_function_transition_symbol();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ Map* maybe_transition =
+ TransitionArray::SearchSpecial(*initial_map, *transition_symbol);
+ if (maybe_transition != NULL) {
+ return handle(maybe_transition, isolate);
+ }
+ initial_map->NotifyLeafMapLayoutChange();
+
+ // Create a new map, taking the descriptors from |function_map| and all
+ // the other details from |initial_map|.
+ Handle<Map> map =
+ Map::CopyInitialMap(function_map, initial_map->instance_size(),
+ initial_map->GetInObjectProperties(),
+ initial_map->unused_property_fields());
+ map->SetConstructor(initial_map->GetConstructor());
+ map->set_prototype(initial_map->prototype());
+
+ if (TransitionArray::CanHaveMoreTransitions(initial_map)) {
+ Map::ConnectTransition(initial_map, map, transition_symbol,
+ SPECIAL_TRANSITION);
+ }
+ return map;
+}
+
+
Handle<Map> Map::CopyForObserved(Handle<Map> map) {
DCHECK(!map->is_observed());
@@ -7600,26 +9752,9 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
transition_marker, reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- return new_map;
-}
-
-
-Handle<Map> Map::FixProxy(Handle<Map> map, InstanceType type, int size) {
- DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
- DCHECK(map->IsJSProxyMap());
-
- Isolate* isolate = map->GetIsolate();
-
- // Allocate fresh map.
- // TODO(rossberg): Once we optimize proxies, cache these maps.
- Handle<Map> new_map = isolate->factory()->NewMap(type, size);
-
- Handle<Object> prototype(map->prototype(), isolate);
- Map::SetPrototype(new_map, prototype);
-
- map->NotifyLeafMapLayoutChange();
-
+ if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ }
return new_map;
}
@@ -7842,7 +9977,9 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
// Ensure the key is unique.
descriptor->KeyToUniqueName();
+ // Share descriptors only if the map owns its descriptors and is not an
+ // initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
+ !map->GetBackPointer()->IsUndefined() &&
TransitionArray::CanHaveMoreTransitions(map)) {
return ShareDescriptor(map, descriptors, descriptor);
}
@@ -7902,7 +10039,6 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
Handle<DescriptorArray> descriptors =
DescriptorArray::Allocate(desc->GetIsolate(), size, slack);
- DescriptorArray::WhitenessWitness witness(*descriptors);
if (attributes != NONE) {
for (int i = 0; i < size; ++i) {
@@ -7910,7 +10046,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
Name* key = desc->GetKey(i);
PropertyDetails details = desc->GetDetails(i);
// Bulk attribute changes never affect private properties.
- if (!key->IsSymbol() || !Symbol::cast(key)->is_private()) {
+ if (!key->IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
if (details.type() != ACCESSOR_CONSTANT || !value->IsAccessorPair()) {
@@ -7921,11 +10057,11 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
}
Descriptor inner_desc(
handle(key), handle(value, desc->GetIsolate()), details);
- descriptors->Set(i, &inner_desc, witness);
+ descriptors->SetDescriptor(i, &inner_desc);
}
} else {
for (int i = 0; i < size; ++i) {
- descriptors->CopyFrom(i, *desc, witness);
+ descriptors->CopyFrom(i, *desc);
}
}
@@ -7935,6 +10071,22 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
}
+bool DescriptorArray::IsEqualUpTo(DescriptorArray* desc, int nof_descriptors) {
+ for (int i = 0; i < nof_descriptors; i++) {
+ if (GetKey(i) != desc->GetKey(i) || GetValue(i) != desc->GetValue(i)) {
+ return false;
+ }
+ PropertyDetails details = GetDetails(i);
+ PropertyDetails other_details = desc->GetDetails(i);
+ if (details.type() != other_details.type() ||
+ !details.representation().Equals(other_details.representation())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor,
@@ -8616,6 +10768,12 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
}
+bool ArrayList::IsFull() {
+ int capacity = length();
+ return kFirstIndex + Length() == capacity;
+}
+
+
Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
int capacity = array->length();
bool empty = (capacity == 0);
@@ -8641,7 +10799,7 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int size = number_of_descriptors + slack;
if (size == 0) return factory->empty_descriptor_array();
// Allocate the array of keys.
- Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size));
+ Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size), TENURED);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
result->set(kEnumCacheIndex, Smi::FromInt(0));
@@ -8660,36 +10818,40 @@ void DescriptorArray::Replace(int index, Descriptor* descriptor) {
}
-void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache,
- Object* new_index_cache) {
- DCHECK(bridge_storage->length() >= kEnumCacheBridgeLength);
- DCHECK(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
- DCHECK(!IsEmpty());
- DCHECK(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(bridge_storage)->
- set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
- set(kEnumCacheIndex, bridge_storage);
+// static
+void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
+ Isolate* isolate,
+ Handle<FixedArray> new_cache,
+ Handle<FixedArray> new_index_cache) {
+ DCHECK(!descriptors->IsEmpty());
+ FixedArray* bridge_storage;
+ bool needs_new_enum_cache = !descriptors->HasEnumCache();
+ if (needs_new_enum_cache) {
+ bridge_storage = *isolate->factory()->NewFixedArray(
+ DescriptorArray::kEnumCacheBridgeLength);
+ } else {
+ bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheIndex));
+ }
+ bridge_storage->set(kEnumCacheBridgeCacheIndex, *new_cache);
+ bridge_storage->set(kEnumCacheBridgeIndicesCacheIndex,
+ new_index_cache.is_null() ? Object::cast(Smi::FromInt(0))
+ : *new_index_cache);
+ if (needs_new_enum_cache) {
+ descriptors->set(kEnumCacheIndex, bridge_storage);
+ }
}
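The bridge indirection above may be easier to see in miniature: the enum cache lives in a small two-slot "bridge" array that is allocated once on first use and updated in place thereafter. A hypothetical sketch of just that shape (FakeDescriptors and FakeFixedArray are illustrative stand-ins, not V8 API):

#include <iostream>
#include <memory>
#include <vector>

using FakeFixedArray = std::vector<int>;  // stand-in for FixedArray

constexpr int kCacheSlot = 0;    // kEnumCacheBridgeCacheIndex
constexpr int kIndicesSlot = 1;  // kEnumCacheBridgeIndicesCacheIndex

struct FakeDescriptors {
  std::unique_ptr<FakeFixedArray> bridge;  // empty models "no enum cache yet"
};

void SetEnumCache(FakeDescriptors* desc, int keys, int indices) {
  // Allocate the two-slot bridge only on first use; afterwards update it
  // in place, like the needs_new_enum_cache branch above.
  if (!desc->bridge) desc->bridge = std::make_unique<FakeFixedArray>(2);
  (*desc->bridge)[kCacheSlot] = keys;
  (*desc->bridge)[kIndicesSlot] = indices;
}

int main() {
  FakeDescriptors d;
  SetEnumCache(&d, 42, 7);
  SetEnumCache(&d, 43, 8);  // reuses the existing bridge
  std::cout << (*d.bridge)[kCacheSlot] << "\n";  // 43
}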
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src,
- const WhitenessWitness& witness) {
+void DescriptorArray::CopyFrom(int index, DescriptorArray* src) {
Object* value = src->GetValue(index);
PropertyDetails details = src->GetDetails(index);
Descriptor desc(handle(src->GetKey(index)),
handle(value, src->GetIsolate()),
details);
- Set(index, &desc, witness);
+ SetDescriptor(index, &desc);
}
-// We need the whiteness witness since sort will reshuffle the entries in the
-// descriptor array. If the descriptor array were to be black, the shuffling
-// would move a slot that was already recorded as pointing into an evacuation
-// candidate. This would result in missing updates upon evacuation.
void DescriptorArray::Sort() {
// In-place heap sort.
int len = number_of_descriptors();
@@ -9462,8 +11624,10 @@ static void CalculateLineEndsImpl(Isolate* isolate,
if (src_len > 0 && cache->IsLineTerminatorSequence(src[src_len - 1], 0)) {
line_ends->Add(src_len - 1);
- } else if (include_ending_line) {
- // Even if the last line misses a line end, it is counted.
+ }
+ if (include_ending_line) {
+ // Include one character beyond the end of the script. The rewriter uses
+ // that position for the implicit return statement.
line_ends->Add(src_len);
}
}
@@ -10059,12 +12223,6 @@ void String::PrintOn(FILE* file) {
}
-inline static uint32_t ObjectAddressForHashing(Object* object) {
- uint32_t value = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object));
- return value & MemoryChunk::kAlignmentMask;
-}
-
-
int Map::Hash() {
// For performance reasons we only hash the 3 most variable fields of a map:
// constructor, prototype and bit_field2. For predictability reasons we
@@ -10099,7 +12257,15 @@ bool CheckEquivalent(Map* first, Map* second) {
bool Map::EquivalentToForTransition(Map* other) {
- return CheckEquivalent(this, other);
+ if (!CheckEquivalent(this, other)) return false;
+ if (instance_type() == JS_FUNCTION_TYPE) {
+ // JSFunctions require more checks to ensure that a sloppy function is
+ // not equivalent to a strict function.
+ int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
+ return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
+ nof);
+ }
+ return true;
}
@@ -10112,15 +12278,6 @@ bool Map::EquivalentToForNormalization(Map* other,
}
-void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
- // Iterate over all fields in the body but take care in dealing with
- // the code entry.
- IteratePointers(v, kPropertiesOffset, kCodeEntryOffset);
- v->VisitCodeEntry(this->address() + kCodeEntryOffset);
- IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size);
-}
-
-
bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
DisallowHeapAllocation no_gc;
if (shared() == candidate) return true;
@@ -10187,19 +12344,26 @@ void JSFunction::AttemptConcurrentOptimization() {
void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Code> code) {
Isolate* isolate = shared->GetIsolate();
+ if (isolate->serializer_enabled()) return;
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- Handle<Object> value(shared->optimized_code_map(), isolate);
- if (value->IsSmi()) return; // Empty code maps are unsupported.
- Handle<FixedArray> code_map = Handle<FixedArray>::cast(value);
- code_map->set(kSharedCodeIndex, *code);
+ // Empty code maps are unsupported.
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ Handle<WeakCell> cell = isolate->factory()->NewWeakCell(code);
+ // A collection may have occurred and cleared the optimized code map in
+ // the allocation above.
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ shared->optimized_code_map()->set(kSharedCodeIndex, *cell);
+ }
+ }
}
-void SharedFunctionInfo::AddToOptimizedCodeMap(
+void SharedFunctionInfo::AddToOptimizedCodeMapInternal(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
Handle<HeapObject> code, Handle<LiteralsArray> literals,
BailoutId osr_ast_id) {
Isolate* isolate = shared->GetIsolate();
+ if (isolate->serializer_enabled()) return;
DCHECK(*code == isolate->heap()->undefined_value() ||
!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
DCHECK(*code == isolate->heap()->undefined_value() ||
@@ -10207,84 +12371,110 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 4);
Handle<FixedArray> new_code_map;
- Handle<Object> value(shared->optimized_code_map(), isolate);
int entry;
- if (value->IsSmi()) {
- // No optimized code map.
- DCHECK_EQ(0, Smi::cast(*value)->value());
+
+ if (shared->OptimizedCodeMapIsCleared()) {
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
+ new_code_map->set(kSharedCodeIndex, *isolate->factory()->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
entry = kEntriesStart;
} else {
- Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
+ Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
if (entry > kSharedCodeIndex) {
- // Found an existing context-specific entry, it must not contain any code.
- DCHECK_EQ(isolate->heap()->undefined_value(),
- old_code_map->get(entry + kCachedCodeOffset));
+ // Found an existing context-specific entry. If the caller provided
+ // actual code, the entry must not already hold live code.
+ DCHECK(code->IsUndefined() ||
+ WeakCell::cast(old_code_map->get(entry + kCachedCodeOffset))
+ ->cleared());
+
// Just set the code and literals to the entry.
- old_code_map->set(entry + kCachedCodeOffset, *code);
- old_code_map->set(entry + kLiteralsOffset, *literals);
+ if (!code->IsUndefined()) {
+ Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+ old_code_map->set(entry + kCachedCodeOffset, *code_cell);
+ }
+ Handle<WeakCell> literals_cell =
+ isolate->factory()->NewWeakCell(literals);
+ old_code_map->set(entry + kLiteralsOffset, *literals_cell);
return;
}
- // Copy old optimized code map and append one new entry.
- new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
- old_code_map, kEntryLength, TENURED);
- int old_length = old_code_map->length();
- // Zap the old map to avoid any stale entries. Note that this is required
- // for correctness because entries are being treated weakly by the GC.
- MemsetPointer(old_code_map->data_start(), isolate->heap()->the_hole_value(),
- old_length);
- entry = old_length;
+ // Can we reuse an entry?
+ DCHECK(entry < kEntriesStart);
+ int length = old_code_map->length();
+ for (int i = kEntriesStart; i < length; i += kEntryLength) {
+ if (WeakCell::cast(old_code_map->get(i + kContextOffset))->cleared()) {
+ new_code_map = old_code_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < kEntriesStart) {
+ // Copy old optimized code map and append one new entry.
+ new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
+ old_code_map, kEntryLength, TENURED);
+ // TODO(mstarzinger): Temporary workaround. The allocation above might
+ // have flushed the optimized code map and the copy we created is full of
+ // holes. For now we just give up on adding the entry and pretend it got
+ // flushed.
+ if (shared->OptimizedCodeMapIsCleared()) return;
+ entry = old_code_map->length();
+ }
}
- new_code_map->set(entry + kContextOffset, *native_context);
- new_code_map->set(entry + kCachedCodeOffset, *code);
- new_code_map->set(entry + kLiteralsOffset, *literals);
+
+ Handle<WeakCell> code_cell = code->IsUndefined()
+ ? isolate->factory()->empty_weak_cell()
+ : isolate->factory()->NewWeakCell(code);
+ Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ WeakCell* context_cell = native_context->self_weak_cell();
+
+ new_code_map->set(entry + kContextOffset, context_cell);
+ new_code_map->set(entry + kCachedCodeOffset, *code_cell);
+ new_code_map->set(entry + kLiteralsOffset, *literals_cell);
new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
- DCHECK(new_code_map->get(i + kContextOffset)->IsNativeContext());
- Object* code = new_code_map->get(i + kCachedCodeOffset);
- if (code != isolate->heap()->undefined_value()) {
- DCHECK(code->IsCode());
- DCHECK(Code::cast(code)->kind() == Code::OPTIMIZED_FUNCTION);
- }
- DCHECK(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+ WeakCell* cell = WeakCell::cast(new_code_map->get(i + kContextOffset));
+ DCHECK(cell->cleared() || cell->value()->IsNativeContext());
+ cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
+ DCHECK(cell->cleared() ||
+ (cell->value()->IsCode() &&
+ Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
+ cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
+ DCHECK(cell->cleared() || cell->value()->IsFixedArray());
DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
- shared->set_optimized_code_map(*new_code_map);
+
+ FixedArray* old_code_map = shared->optimized_code_map();
+ if (old_code_map != *new_code_map) {
+ shared->set_optimized_code_map(*new_code_map);
+ }
}
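The slot-reuse logic in the middle of this function, minus weak cells and the GC workaround, is a linear scan of a flat {context, code, literals, osr_id} table for a cleared context slot, growing only when none is found. A standalone sketch with hypothetical types (integers stand in for cells; 0 models a cleared WeakCell):

#include <iostream>
#include <vector>

constexpr int kEntryLength = 4;
constexpr int kContextOffset = 0;
constexpr int kCleared = 0;

// Returns the start index of the slot used for |context|.
int AddEntry(std::vector<int>* code_map, int context) {
  int entry = -1;
  // Prefer reusing a slot whose context weak cell was cleared by the GC.
  for (int i = 0; i < static_cast<int>(code_map->size()); i += kEntryLength) {
    if ((*code_map)[i + kContextOffset] == kCleared) {
      entry = i;
      break;
    }
  }
  if (entry < 0) {  // No reusable slot: grow by one entry.
    entry = static_cast<int>(code_map->size());
    code_map->resize(entry + kEntryLength, kCleared);
  }
  (*code_map)[entry + kContextOffset] = context;
  return entry;
}

int main() {
  std::vector<int> code_map{7, 1, 1, 0, kCleared, 1, 1, 0};  // two entries
  std::cout << AddEntry(&code_map, 9) << "\n";  // 4: reuses the cleared slot
}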
void SharedFunctionInfo::ClearOptimizedCodeMap() {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
-
- // If the next map link slot is already used then the function was
- // enqueued with code flushing and we remove it now.
- if (!code_map->get(kNextMapIndex)->IsUndefined()) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictOptimizedCodeMap(this);
- }
-
- DCHECK(code_map->get(kNextMapIndex)->IsUndefined());
- set_optimized_code_map(Smi::FromInt(0));
+ FixedArray* cleared_map = GetHeap()->cleared_optimized_code_map();
+ set_optimized_code_map(cleared_map, SKIP_WRITE_BARRIER);
}
void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
const char* reason) {
DisallowHeapAllocation no_gc;
- if (optimized_code_map()->IsSmi()) return;
+ if (OptimizedCodeMapIsCleared()) return;
Heap* heap = GetHeap();
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ FixedArray* code_map = optimized_code_map();
int dst = kEntriesStart;
int length = code_map->length();
for (int src = kEntriesStart; src < length; src += kEntryLength) {
- DCHECK(code_map->get(src)->IsNativeContext());
- if (code_map->get(src + kCachedCodeOffset) == optimized_code) {
+ DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
+ WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
+ if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
+ optimized_code) {
BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
@@ -10301,7 +12491,8 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
// In case of non-OSR entry just clear the code in order to proceed
// sharing literals.
- code_map->set_undefined(src + kCachedCodeOffset);
+ code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
}
// Keep the src entry by copying it to the dst entry.
@@ -10316,9 +12507,11 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
dst += kEntryLength;
}
- if (code_map->get(kSharedCodeIndex) == optimized_code) {
+ if (WeakCell::cast(code_map->get(kSharedCodeIndex))->value() ==
+ optimized_code) {
// Evict context-independent code as well.
- code_map->set_undefined(kSharedCodeIndex);
+ code_map->set(kSharedCodeIndex, heap->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
@@ -10330,7 +12523,7 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
length - dst);
if (code_map->length() == kEntriesStart &&
- code_map->get(kSharedCodeIndex)->IsUndefined()) {
+ WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
ClearOptimizedCodeMap();
}
}
@@ -10338,14 +12531,14 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ FixedArray* code_map = optimized_code_map();
DCHECK(shrink_by % kEntryLength == 0);
DCHECK(shrink_by <= code_map->length() - kEntriesStart);
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
shrink_by);
if (code_map->length() == kEntriesStart &&
- code_map->get(kSharedCodeIndex)->IsUndefined()) {
+ WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
ClearOptimizedCodeMap();
}
}
@@ -10370,18 +12563,17 @@ static void ShrinkInstanceSize(Map* map, void* data) {
}
-void JSFunction::CompleteInobjectSlackTracking() {
- DCHECK(has_initial_map());
- Map* map = initial_map();
+void Map::CompleteInobjectSlackTracking() {
+ // Has to be an initial map.
+ DCHECK(GetBackPointer()->IsUndefined());
- DCHECK(map->counter() >= Map::kSlackTrackingCounterEnd - 1);
- map->set_counter(Map::kRetainingCounterStart);
+ set_construction_counter(kNoSlackTracking);
- int slack = map->unused_property_fields();
- TransitionArray::TraverseTransitionTree(map, &GetMinInobjectSlack, &slack);
+ int slack = unused_property_fields();
+ TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
- TransitionArray::TraverseTransitionTree(map, &ShrinkInstanceSize, &slack);
+ TransitionArray::TraverseTransitionTree(this, &ShrinkInstanceSize, &slack);
}
}
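The two traversals above implement a classic two-pass shrink: first compute the minimum slack over the whole transition tree, then shrink every map by that amount. A standalone sketch of the idea (FakeMap and Traverse are hypothetical; 8-byte, pointer-sized fields are an assumption):

#include <algorithm>
#include <iostream>
#include <vector>

struct FakeMap {
  int unused_property_fields;
  int instance_size;
  std::vector<FakeMap*> transitions;
};

// Depth-first walk, mirroring TraverseTransitionTree's visitor shape.
void Traverse(FakeMap* map, void (*visit)(FakeMap*, int*), int* data) {
  visit(map, data);
  for (FakeMap* child : map->transitions) Traverse(child, visit, data);
}

int main() {
  FakeMap leaf1{2, 64, {}}, leaf2{3, 64, {}};
  FakeMap root{5, 64, {&leaf1, &leaf2}};
  // Pass 1: minimum slack across the tree.
  int slack = root.unused_property_fields;
  Traverse(&root, [](FakeMap* m, int* s) {
    *s = std::min(*s, m->unused_property_fields);
  }, &slack);
  // Pass 2: shrink every map by that slack.
  if (slack != 0) {
    Traverse(&root, [](FakeMap* m, int* s) {
      m->unused_property_fields -= *s;
      m->instance_size -= *s * 8;  // assumed 8-byte fields
    }, &slack);
  }
  std::cout << "shrunk by " << slack << " fields\n";  // shrunk by 2 fields
}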
@@ -10408,8 +12600,7 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
// static
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
PrototypeOptimizationMode mode) {
- if (object->IsGlobalObject()) return;
- if (object->IsJSGlobalProxy()) return;
+ if (object->IsJSGlobalObject()) return;
if (mode == FAST_PROTOTYPE && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
@@ -10435,13 +12626,9 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
if (!constructor->shared()->IsApiFunction() &&
object->class_name() == isolate->heap()->Object_string()) {
- Handle<String> constructor_name(object->constructor_name(), isolate);
Context* context = constructor->context()->native_context();
JSFunction* object_function = context->object_function();
object->map()->SetConstructor(object_function);
- Handle<PrototypeInfo> proto_info =
- Map::GetOrCreatePrototypeInfo(object, isolate);
- proto_info->set_constructor_name(*constructor_name);
}
}
}
@@ -10471,8 +12658,8 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
break;
}
Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
- if (maybe_proto->IsJSGlobalProxy()) continue;
- // Proxies on the prototype chain are not supported.
+ // Proxies on the prototype chain are not supported. They make it
+ // impossible to make any assumptions about the prototype chain anyway.
if (maybe_proto->IsJSProxy()) return;
Handle<JSObject> proto = Handle<JSObject>::cast(maybe_proto);
Handle<PrototypeInfo> proto_info =
@@ -10505,17 +12692,18 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(user->is_prototype_map());
// If it doesn't have a PrototypeInfo, it was never registered.
if (!user->prototype_info()->IsPrototypeInfo()) return false;
- // If it doesn't have a prototype, it can't be registered.
- if (!user->prototype()->IsJSObject()) return false;
+ // If it had no prototype before, see if it had users that might expect
+ // registration.
+ if (!user->prototype()->IsJSObject()) {
+ Object* users =
+ PrototypeInfo::cast(user->prototype_info())->prototype_users();
+ return users->IsWeakFixedArray();
+ }
Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
Handle<PrototypeInfo> user_info =
Map::GetOrCreatePrototypeInfo(user, isolate);
int slot = user_info->registry_slot();
if (slot == PrototypeInfo::UNREGISTERED) return false;
- if (prototype->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, prototype);
- prototype = PrototypeIterator::GetCurrent<JSObject>(iter);
- }
DCHECK(prototype->map()->is_prototype_map());
Object* maybe_proto_info = prototype->map()->prototype_info();
// User knows its registry slot, prototype info and user registry must exist.
@@ -10564,10 +12752,6 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
void JSObject::InvalidatePrototypeChains(Map* map) {
if (!FLAG_eliminate_prototype_chain_checks) return;
DisallowHeapAllocation no_gc;
- if (map->IsJSGlobalProxyMap()) {
- PrototypeIterator iter(map);
- map = iter.GetCurrent<JSObject>()->map();
- }
InvalidatePrototypeChainsInternal(map);
}
@@ -10604,10 +12788,6 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Handle<Object> maybe_prototype(map->prototype(), isolate);
if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
- if (prototype->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, prototype);
- prototype = PrototypeIterator::GetCurrent<JSObject>(iter);
- }
// Ensure the prototype is registered with its own prototypes so its cell
// will be invalidated when necessary.
JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
@@ -10647,33 +12827,26 @@ Handle<Object> CacheInitialJSArrayMaps(
Handle<Context> native_context, Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
- Factory* factory = native_context->GetIsolate()->factory();
- Handle<FixedArray> maps = factory->NewFixedArrayWithHoles(
- kElementsKindCount, TENURED);
-
+ Strength strength =
+ initial_map->is_strong() ? Strength::STRONG : Strength::WEAK;
Handle<Map> current_map = initial_map;
ElementsKind kind = current_map->elements_kind();
- DCHECK(kind == GetInitialFastElementsKind());
- maps->set(kind, *current_map);
+ DCHECK_EQ(GetInitialFastElementsKind(), kind);
+ native_context->set(Context::ArrayMapIndex(kind, strength), *current_map);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- Map* maybe_elements_transition = current_map->ElementsTransitionMap();
- if (maybe_elements_transition != NULL) {
+ if (Map* maybe_elements_transition = current_map->ElementsTransitionMap()) {
new_map = handle(maybe_elements_transition);
- DCHECK(new_map->elements_kind() == next_kind);
} else {
new_map = Map::CopyAsElementsKind(
current_map, next_kind, INSERT_TRANSITION);
}
- maps->set(next_kind, *new_map);
+ DCHECK_EQ(next_kind, new_map->elements_kind());
+ native_context->set(Context::ArrayMapIndex(next_kind, strength), *new_map);
current_map = new_map;
}
- if (initial_map->is_strong())
- native_context->set_js_array_strong_maps(*maps);
- else
- native_context->set_js_array_maps(*maps);
return initial_map;
}
@@ -10691,9 +12864,7 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
// copy containing the new prototype. Also complete any in-object
// slack tracking that is in progress at this point because it is
// still tracking the old copy.
- if (function->IsInobjectSlackTrackingInProgress()) {
- function->CompleteInobjectSlackTracking();
- }
+ function->CompleteInobjectSlackTrackingIfActive();
Handle<Map> initial_map(function->initial_map(), isolate);
@@ -10763,7 +12934,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
construct_prototype = handle(
- isolate->context()->native_context()->initial_object_prototype(),
+ function->context()->native_context()->initial_object_prototype(),
isolate);
} else {
function->map()->set_non_instance_prototype(false);
@@ -10813,24 +12984,98 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
}
+#ifdef DEBUG
+namespace {
+
+bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
+ switch (instance_type) {
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_FUNCTION_TYPE:
+ return true;
+
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_PROXY_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case FIXED_ARRAY_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ case ODDBALL_TYPE:
+ case FOREIGN_TYPE:
+ case MAP_TYPE:
+ case CODE_TYPE:
+ case CELL_TYPE:
+ case PROPERTY_CELL_TYPE:
+ case WEAK_CELL_TYPE:
+ case SYMBOL_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case FILLER_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ // We must not end up here for these instance types at all.
+ UNREACHABLE();
+ // Fall through.
+ default:
+ return false;
+ }
+}
+
+} // namespace
+#endif
+
+
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
+ DCHECK(function->IsConstructor() || function->shared()->is_generator());
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
+ // The constructor should be compiled for the optimization hints to be
+ // available.
+ Compiler::Compile(function, CLEAR_EXCEPTION);
+
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
- int instance_size;
- int in_object_properties;
if (function->shared()->is_generator()) {
instance_type = JS_GENERATOR_OBJECT_TYPE;
- instance_size = JSGeneratorObject::kSize;
- in_object_properties = 0;
} else {
instance_type = JS_OBJECT_TYPE;
- instance_size = function->shared()->CalculateInstanceSize();
- in_object_properties = function->shared()->CalculateInObjectProperties();
}
+ int instance_size;
+ int in_object_properties;
+ function->CalculateInstanceSize(instance_type, 0, &instance_size,
+ &in_object_properties);
+
Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
if (function->map()->is_strong()) {
map->set_is_strong();
@@ -10850,15 +13095,109 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// Finally link initial map and constructor function.
DCHECK(prototype->IsJSReceiver());
JSFunction::SetInitialMap(function, map, prototype);
-
- if (!function->shared()->is_generator()) {
- function->StartInobjectSlackTracking();
- }
+ map->StartInobjectSlackTracking();
}
-void JSFunction::SetInstanceClassName(String* name) {
- shared()->set_instance_class_name(name);
+// static
+MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target) {
+ EnsureHasInitialMap(constructor);
+
+ Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
+ if (*new_target == *constructor) return constructor_initial_map;
+
+  // Fast case: new.target is a subclass of constructor. The map is cacheable
+ // (and may already have been cached). new.target.prototype is guaranteed to
+ // be a JSReceiver.
+ if (new_target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
+
+    // Check that |function|'s initial map is still in sync with the
+    // |constructor|; otherwise we must create a new initial map for
+    // |function|.
+ if (function->has_initial_map() &&
+ function->initial_map()->GetConstructor() == *constructor) {
+ return handle(function->initial_map(), isolate);
+ }
+
+ // Create a new map with the size and number of in-object properties
+ // suggested by |function|.
+
+ // Link initial map and constructor function if the new.target is actually a
+ // subclass constructor.
+ if (IsSubclassConstructor(function->shared()->kind())) {
+ Handle<Object> prototype(function->instance_prototype(), isolate);
+ InstanceType instance_type = constructor_initial_map->instance_type();
+ DCHECK(CanSubclassHaveInobjectProperties(instance_type));
+ int internal_fields =
+ JSObject::GetInternalFieldCount(*constructor_initial_map);
+ int pre_allocated = constructor_initial_map->GetInObjectProperties() -
+ constructor_initial_map->unused_property_fields();
+ int instance_size;
+ int in_object_properties;
+ function->CalculateInstanceSizeForDerivedClass(
+ instance_type, internal_fields, &instance_size,
+ &in_object_properties);
+
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map =
+ Map::CopyInitialMap(constructor_initial_map, instance_size,
+ in_object_properties, unused_property_fields);
+ map->set_new_target_is_base(false);
+
+ JSFunction::SetInitialMap(function, map, prototype);
+ map->SetConstructor(*constructor);
+ map->StartInobjectSlackTracking();
+ return map;
+ }
+ }
+
+  // Slow path: new.target is either a proxy, or the map cannot be cached.
+ // new.target.prototype is not guaranteed to be a JSReceiver, and may need to
+ // fall back to the intrinsicDefaultProto.
+ Handle<Object> prototype;
+ if (new_target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
+ // Make sure the new.target.prototype is cached.
+ EnsureHasInitialMap(function);
+ prototype = handle(function->prototype(), isolate);
+ } else {
+ Handle<String> prototype_string = isolate->factory()->prototype_string();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ JSReceiver::GetProperty(new_target, prototype_string), Map);
+ // The above prototype lookup might change the constructor and its
+ // prototype, hence we have to reload the initial map.
+ EnsureHasInitialMap(constructor);
+ constructor_initial_map = handle(constructor->initial_map(), isolate);
+ }
+
+ // If prototype is not a JSReceiver, fetch the intrinsicDefaultProto from the
+ // correct realm. Rather than directly fetching the .prototype, we fetch the
+ // constructor that points to the .prototype. This relies on
+ // constructor.prototype being FROZEN for those constructors.
+ if (!prototype->IsJSReceiver()) {
+ Handle<Context> context;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, context,
+ JSReceiver::GetFunctionRealm(new_target), Map);
+ DCHECK(context->IsNativeContext());
+ Handle<Object> maybe_index = JSReceiver::GetDataProperty(
+ constructor, isolate->factory()->native_context_index_symbol());
+ int index = maybe_index->IsSmi() ? Smi::cast(*maybe_index)->value()
+ : Context::OBJECT_FUNCTION_INDEX;
+ Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)));
+ prototype = handle(realm_constructor->prototype(), isolate);
+ }
+
+ Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
+ map->set_new_target_is_base(false);
+ DCHECK(prototype->IsJSReceiver());
+ if (map->prototype() != *prototype) {
+ Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+ }
+ map->SetConstructor(*constructor);
+ return map;
}
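
[Editor's note] The caching rule in GetDerivedMap boils down to: reuse new.target's cached initial map only while that map still records the current base constructor. A minimal standalone sketch of just that rule, with hypothetical Map/Constructor structs rather than V8's types:

    #include <cassert>

    struct Constructor;

    struct Map {
      Constructor* constructor;  // the base constructor this map was made for
    };

    struct Constructor {
      Map* initial_map = nullptr;  // cached initial map, may be missing
    };

    // Reuse the cached map only while it still points back at |base|;
    // otherwise build a fresh one and cache it on |new_target|.
    Map* GetDerivedMap(Constructor* base, Constructor* new_target) {
      if (new_target->initial_map != nullptr &&
          new_target->initial_map->constructor == base) {
        return new_target->initial_map;  // fast path: cache hit
      }
      new_target->initial_map = new Map{base};  // slow path (leaked in sketch)
      return new_target->initial_map;
    }

    int main() {
      Constructor base, derived;
      Map* first = GetDerivedMap(&base, &derived);
      assert(first == GetDerivedMap(&base, &derived));  // second call hits cache
    }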
@@ -10905,7 +13244,7 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
}
-Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<Object> name =
JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
@@ -10914,6 +13253,94 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
}
+Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<Object> name = JSReceiver::GetDataProperty(
+ function, isolate->factory()->display_name_string());
+ if (name->IsString()) return Handle<String>::cast(name);
+ return JSFunction::GetName(function);
+}
+
+
+namespace {
+
+char const kNativeCodeSource[] = "function () { [native code] }";
+
+
+Handle<String> NativeCodeFunctionSourceString(
+ Handle<SharedFunctionInfo> shared_info) {
+ Isolate* const isolate = shared_info->GetIsolate();
+ if (shared_info->name()->IsString()) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(handle(String::cast(shared_info->name()), isolate));
+ builder.AppendCString("() { [native code] }");
+ return builder.Finish().ToHandleChecked();
+ }
+ return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+}
+
+} // namespace
+
+
+// static
+Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+}
+
+
+// static
+Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+
+ // Check if {function} should hide its source code.
+ if (!shared_info->script()->IsScript() ||
+ Script::cast(shared_info->script())->hide_source()) {
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+
+ // Check if we should print {function} as a class.
+ Handle<Object> class_start_position = JSReceiver::GetDataProperty(
+ function, isolate->factory()->class_start_position_symbol());
+ if (class_start_position->IsSmi()) {
+ Handle<Object> class_end_position = JSReceiver::GetDataProperty(
+ function, isolate->factory()->class_end_position_symbol());
+ Handle<String> script_source(
+ String::cast(Script::cast(shared_info->script())->source()), isolate);
+ return isolate->factory()->NewSubString(
+ script_source, Handle<Smi>::cast(class_start_position)->value(),
+ Handle<Smi>::cast(class_end_position)->value());
+ }
+
+ // Check if we have source code for the {function}.
+ if (!shared_info->HasSourceCode()) {
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ if (!shared_info->is_arrow()) {
+ if (shared_info->is_concise_method()) {
+ if (shared_info->is_generator()) builder.AppendCharacter('*');
+ } else {
+ if (shared_info->is_generator()) {
+ builder.AppendCString("function* ");
+ } else {
+ builder.AppendCString("function ");
+ }
+ }
+ if (shared_info->name_should_print_as_anonymous()) {
+ builder.AppendCString("anonymous");
+ } else {
+ builder.AppendString(handle(String::cast(shared_info->name()), isolate));
+ }
+ }
+ builder.AppendString(Handle<String>::cast(shared_info->GetSourceCode()));
+ return builder.Finish().ToHandleChecked();
+}
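
[Editor's note] The builder logic in JSFunction::ToString reduces to choosing a prefix from the function kind and appending the recorded source. A rough std::string rendering of the same decisions; the flag parameters stand in for the SharedFunctionInfo accessors:

    #include <iostream>
    #include <string>

    std::string FunctionToString(bool is_arrow, bool is_concise_method,
                                 bool is_generator, bool anonymous,
                                 const std::string& name,
                                 const std::string& body_source) {
      std::string result;
      if (!is_arrow) {
        if (is_concise_method) {
          if (is_generator) result += '*';  // e.g. "*foo() {...}"
        } else {
          result += is_generator ? "function* " : "function ";
        }
        result += anonymous ? "anonymous" : name;
      }
      result += body_source;  // the recorded "(args) { ... }" source
      return result;
    }

    int main() {
      std::cout << FunctionToString(false, false, true, false, "gen",
                                    "() { yield 1; }")
                << "\n";  // prints: function* gen() { yield 1; }
    }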
+
+
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -11025,11 +13452,11 @@ Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
Handle<Object> property = Object::GetProperty(
script_wrapper, name_or_source_url_key).ToHandleChecked();
DCHECK(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
Handle<Object> result;
// Do not check against pending exception, since this function may be called
// when an exception is already pending.
- if (!Execution::TryCall(method, script_wrapper, 0, NULL).ToHandle(&result)) {
+ if (!Execution::TryCall(isolate, property, script_wrapper, 0, NULL)
+ .ToHandle(&result)) {
return isolate->factory()->undefined_value();
}
return result;
@@ -11084,9 +13511,8 @@ Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
SharedFunctionInfo::Iterator::Iterator(Isolate* isolate)
- : script_iterator_(isolate), sfi_iterator_(NULL) {
- NextScript();
-}
+ : script_iterator_(isolate),
+ sfi_iterator_(isolate->heap()->noscript_shared_function_infos()) {}
bool SharedFunctionInfo::Iterator::NextScript() {
@@ -11109,6 +13535,38 @@ SharedFunctionInfo* SharedFunctionInfo::Iterator::Next() {
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<Object> script_object) {
if (shared->script() == *script_object) return;
+ Isolate* isolate = shared->GetIsolate();
+
+ // Add shared function info to new script's list. If a collection occurs,
+ // the shared function info may be temporarily in two lists.
+ // This is okay because the gc-time processing of these lists can tolerate
+ // duplicates.
+ Handle<Object> list;
+ if (script_object->IsScript()) {
+ Handle<Script> script = Handle<Script>::cast(script_object);
+ list = handle(script->shared_function_infos(), isolate);
+ } else {
+ list = isolate->factory()->noscript_shared_function_infos();
+ }
+
+#ifdef DEBUG
+ {
+ WeakFixedArray::Iterator iterator(*list);
+ SharedFunctionInfo* next;
+ while ((next = iterator.Next<SharedFunctionInfo>())) {
+ DCHECK_NE(next, *shared);
+ }
+ }
+#endif // DEBUG
+ list = WeakFixedArray::Add(list, shared);
+
+ if (script_object->IsScript()) {
+ Handle<Script> script = Handle<Script>::cast(script_object);
+ script->set_shared_function_infos(*list);
+ } else {
+ isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
+ }
+
// Remove shared function info from old script's list.
if (shared->script()->IsScript()) {
Script* old_script = Script::cast(shared->script());
@@ -11117,23 +13575,12 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
WeakFixedArray::cast(old_script->shared_function_infos());
list->Remove(shared);
}
+ } else {
+ // Remove shared function info from root array.
+ Object* list = isolate->heap()->noscript_shared_function_infos();
+ CHECK(WeakFixedArray::cast(list)->Remove(shared));
}
- // Add shared function info to new script's list.
- if (script_object->IsScript()) {
- Handle<Script> script = Handle<Script>::cast(script_object);
- Handle<Object> list(script->shared_function_infos(), shared->GetIsolate());
-#ifdef DEBUG
- {
- WeakFixedArray::Iterator iterator(*list);
- SharedFunctionInfo* next;
- while ((next = iterator.Next<SharedFunctionInfo>())) {
- DCHECK_NE(next, *shared);
- }
- }
-#endif // DEBUG
- list = WeakFixedArray::Add(list, shared);
- script->set_shared_function_infos(*list);
- }
+
// Finally set new script.
shared->set_script(*script_object);
}
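
[Editor's note] Note the ordering SetScript now uses: the shared function info is added to the new list before it is removed from the old one, so a GC scan that happens in between sees a duplicate rather than a missing entry. A toy illustration of add-before-remove over plain vectors:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    void MoveBetweenLists(std::vector<int>* from, std::vector<int>* to,
                          int item) {
      to->push_back(item);  // 1. add to the new list first
      // (an interleaved scan here tolerates the temporary duplicate)
      from->erase(std::remove(from->begin(), from->end(), item), from->end());
    }

    int main() {
      std::vector<int> old_list = {1, 2, 3}, new_list;
      MoveBetweenLists(&old_list, &new_list, 2);
      assert(old_list.size() == 2 && new_list.size() == 1);
    }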
@@ -11172,19 +13619,56 @@ int SharedFunctionInfo::SourceSize() {
}
-int SharedFunctionInfo::CalculateInstanceSize() {
- int instance_size =
- JSObject::kHeaderSize +
- expected_nof_properties() * kPointerSize;
- if (instance_size > JSObject::kMaxInstanceSize) {
- instance_size = JSObject::kMaxInstanceSize;
- }
- return instance_size;
+namespace {
+
+void CalculateInstanceSizeHelper(InstanceType instance_type,
+ int requested_internal_fields,
+ int requested_in_object_properties,
+ int* instance_size,
+ int* in_object_properties) {
+ int header_size = JSObject::GetHeaderSize(instance_type);
+ DCHECK_LE(requested_internal_fields,
+ (JSObject::kMaxInstanceSize - header_size) >> kPointerSizeLog2);
+ *instance_size =
+ Min(header_size +
+ ((requested_internal_fields + requested_in_object_properties)
+ << kPointerSizeLog2),
+ JSObject::kMaxInstanceSize);
+ *in_object_properties = ((*instance_size - header_size) >> kPointerSizeLog2) -
+ requested_internal_fields;
+}
+
+} // namespace
+
+
+void JSFunction::CalculateInstanceSize(InstanceType instance_type,
+ int requested_internal_fields,
+ int* instance_size,
+ int* in_object_properties) {
+ CalculateInstanceSizeHelper(instance_type, requested_internal_fields,
+ shared()->expected_nof_properties(),
+ instance_size, in_object_properties);
}
-int SharedFunctionInfo::CalculateInObjectProperties() {
- return (CalculateInstanceSize() - JSObject::kHeaderSize) / kPointerSize;
+void JSFunction::CalculateInstanceSizeForDerivedClass(
+ InstanceType instance_type, int requested_internal_fields,
+ int* instance_size, int* in_object_properties) {
+ Isolate* isolate = GetIsolate();
+ int expected_nof_properties = 0;
+ for (PrototypeIterator iter(isolate, this,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(); iter.Advance()) {
+ JSFunction* func = iter.GetCurrent<JSFunction>();
+ SharedFunctionInfo* shared = func->shared();
+ expected_nof_properties += shared->expected_nof_properties();
+ if (!IsSubclassConstructor(shared->kind())) {
+ break;
+ }
+ }
+ CalculateInstanceSizeHelper(instance_type, requested_internal_fields,
+ expected_nof_properties, instance_size,
+ in_object_properties);
}
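
[Editor's note] Both entry points funnel into CalculateInstanceSizeHelper, which clamps the requested layout to the maximum instance size and hands back whatever slots remain after the header and internal fields. A standalone sketch of that arithmetic; the constants here are illustrative, not V8's:

    #include <algorithm>
    #include <cstdio>

    const int kPointerSize = 8;
    const int kMaxInstanceSize = 255 * kPointerSize;  // assumed cap
    const int kHeaderSize = 3 * kPointerSize;         // assumed object header

    void CalculateInstanceSize(int internal_fields, int in_object_properties,
                               int* instance_size, int* property_slots) {
      // Request header + all fields, but never exceed the maximum size.
      *instance_size = std::min(
          kHeaderSize + (internal_fields + in_object_properties) * kPointerSize,
          kMaxInstanceSize);
      // Whatever fits after the header and internal fields is property slots.
      *property_slots =
          (*instance_size - kHeaderSize) / kPointerSize - internal_fields;
    }

    int main() {
      int size, props;
      CalculateInstanceSize(2, 300, &size, &props);  // request exceeds the cap
      std::printf("size=%d in-object properties=%d\n", size, props);
    }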
@@ -11300,6 +13784,10 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_dont_crankshaft(lit->flags() &
AstProperties::kDontCrankshaft);
shared_info->set_kind(lit->kind());
+ if (!IsConstructable(lit->kind(), lit->language_mode())) {
+ shared_info->set_construct_stub(
+ *shared_info->GetIsolate()->builtins()->ConstructedNonConstructable());
+ }
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
}
@@ -11316,18 +13804,16 @@ bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
}
-void JSFunction::StartInobjectSlackTracking() {
- DCHECK(has_initial_map() && !IsInobjectSlackTrackingInProgress());
-
- Map* map = initial_map();
+void Map::StartInobjectSlackTracking() {
+ DCHECK(!IsInobjectSlackTrackingInProgress());
// No tracking during the snapshot construction phase.
Isolate* isolate = GetIsolate();
if (isolate->serializer_enabled()) return;
- if (map->unused_property_fields() == 0) return;
+ if (unused_property_fields() == 0) return;
- map->set_counter(Map::kSlackTrackingCounterStart);
+ set_construction_counter(Map::kSlackTrackingCounterStart);
}
@@ -11354,18 +13840,19 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id) {
DisallowHeapAllocation no_gc;
DCHECK(native_context->IsNativeContext());
- Object* value = optimized_code_map();
- if (!value->IsSmi()) {
- FixedArray* optimized_code_map = FixedArray::cast(value);
+ if (!OptimizedCodeMapIsCleared()) {
+ FixedArray* optimized_code_map = this->optimized_code_map();
int length = optimized_code_map->length();
Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (optimized_code_map->get(i + kContextOffset) == native_context &&
+ if (WeakCell::cast(optimized_code_map->get(i + kContextOffset))
+ ->value() == native_context &&
optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
return i;
}
}
- Object* shared_code = optimized_code_map->get(kSharedCodeIndex);
+ Object* shared_code =
+ WeakCell::cast(optimized_code_map->get(kSharedCodeIndex))->value();
if (shared_code->IsCode() && osr_ast_id.IsNone()) {
return kSharedCodeIndex;
}
@@ -11379,18 +13866,27 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
CodeAndLiterals result = {nullptr, nullptr};
int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
if (entry != kNotFound) {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ FixedArray* code_map = optimized_code_map();
if (entry == kSharedCodeIndex) {
- result = {Code::cast(code_map->get(kSharedCodeIndex)), nullptr};
-
+ // We know the weak cell isn't cleared because we made sure of it in
+ // SearchOptimizedCodeMapEntry and performed no allocations since that
+ // call.
+ result = {
+ Code::cast(WeakCell::cast(code_map->get(kSharedCodeIndex))->value()),
+ nullptr};
} else {
DCHECK_LE(entry + kEntryLength, code_map->length());
- Object* code = code_map->get(entry + kCachedCodeOffset);
- result = {code->IsUndefined() ? nullptr : Code::cast(code),
- LiteralsArray::cast(code_map->get(entry + kLiteralsOffset))};
+ WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
+ WeakCell* literals_cell =
+ WeakCell::cast(code_map->get(entry + kLiteralsOffset));
+
+ result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
+ literals_cell->cleared()
+ ? nullptr
+ : LiteralsArray::cast(literals_cell->value())};
}
}
- if (FLAG_trace_opt && !optimized_code_map()->IsSmi() &&
+ if (FLAG_trace_opt && !OptimizedCodeMapIsCleared() &&
result.code == nullptr) {
PrintF("[didn't find optimized code in optimized code map for ");
ShortPrint();
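
[Editor's note] The lookup above scans a flat array of fixed-width entries, now dereferencing weak cells before comparing. Stripped of the weak-cell handling, the search shape is as follows; the entry width and field offsets are illustrative:

    #include <cstdio>
    #include <vector>

    const int kEntryLength = 3;  // assumed layout: context, code, osr ast id
    const int kContextOffset = 0;
    const int kCachedCodeOffset = 1;
    const int kOsrAstIdOffset = 2;

    // Linear scan for the entry matching a (context, osr id) pair.
    int SearchCodeMap(const std::vector<int>& map, int context, int osr_id) {
      for (size_t i = 0; i + kEntryLength <= map.size(); i += kEntryLength) {
        if (map[i + kContextOffset] == context &&
            map[i + kOsrAstIdOffset] == osr_id) {
          return map[i + kCachedCodeOffset];  // cached code for this context
        }
      }
      return -1;  // kNotFound
    }

    int main() {
      std::vector<int> map = {7, 100, 0,   // context 7 -> code 100
                              9, 200, 4};  // context 9, osr 4 -> code 200
      std::printf("%d\n", SearchCodeMap(map, 9, 4));  // prints 200
    }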
@@ -11789,7 +14285,6 @@ void Code::ClearInlineCaches(Code::Kind kind) {
void Code::ClearInlineCaches(Code::Kind* kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -11806,13 +14301,11 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
void SharedFunctionInfo::ClearTypeFeedbackInfo() {
feedback_vector()->ClearSlots(this);
- feedback_vector()->ClearICSlots(this);
}
void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
feedback_vector()->ClearSlotsAtGCTime(this);
- feedback_vector()->ClearICSlotsAtGCTime(this);
}
@@ -12129,6 +14622,17 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
+ case Translation::INTERPRETED_FRAME: {
+ int bytecode_offset = iterator.Next();
+ int shared_info_id = iterator.Next();
+ unsigned height = iterator.Next();
+ Object* shared_info = LiteralArray()->get(shared_info_id);
+ os << "{bytecode_offset=" << bytecode_offset << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
case Translation::JS_FRAME_FUNCTION: {
os << "{function}";
break;
@@ -12188,7 +14692,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
case Translation::DOUBLE_REGISTER: {
int reg_code = iterator.Next();
- os << "{input=" << DoubleRegister::AllocationIndexToString(reg_code)
+ os << "{input=" << DoubleRegister::from_code(reg_code).ToString()
<< "}";
break;
}
@@ -12224,8 +14728,10 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
}
case Translation::LITERAL: {
- unsigned literal_index = iterator.Next();
- os << "{literal_id=" << literal_index << "}";
+ int literal_index = iterator.Next();
+ Object* literal_value = LiteralArray()->get(literal_index);
+ os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
+ << ")}";
break;
}
@@ -12305,8 +14811,6 @@ const char* Code::ICState2String(InlineCacheState state) {
case MEGAMORPHIC: return "MEGAMORPHIC";
case GENERIC: return "GENERIC";
case DEBUG_STUB: return "DEBUG_STUB";
- case DEFAULT:
- return "DEFAULT";
}
UNREACHABLE();
return NULL;
@@ -12358,6 +14862,11 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
if ((name != NULL) && (name[0] != '\0')) {
os << "name = " << name << "\n";
+ } else if (kind() == BUILTIN) {
+ name = GetIsolate()->builtins()->Lookup(instruction_start());
+ if (name != NULL) {
+ os << "name = " << name << "\n";
+ }
}
if (kind() == OPTIMIZED_FUNCTION) {
os << "stack_slots = " << stack_slots() << "\n";
@@ -12498,6 +15007,25 @@ void BytecodeArray::Disassemble(std::ostream& os) {
SNPrintF(buf, "%p", bytecode_start);
os << buf.start() << " : ";
interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
+
+ if (interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+ DCHECK_EQ(bytecode_size, 3);
+ int index = static_cast<int>(ReadUnalignedUInt16(bytecode_start + 1));
+ int offset = Smi::cast(constant_pool()->get(index))->value();
+ SNPrintF(buf, " (%p)", bytecode_start + offset);
+ os << buf.start();
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+ DCHECK_EQ(bytecode_size, 2);
+ int index = static_cast<int>(bytecode_start[1]);
+ int offset = Smi::cast(constant_pool()->get(index))->value();
+ SNPrintF(buf, " (%p)", bytecode_start + offset);
+ os << buf.start();
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+ DCHECK_EQ(bytecode_size, 2);
+ int offset = static_cast<int8_t>(bytecode_start[1]);
+ SNPrintF(buf, " (%p)", bytecode_start + offset);
+ os << buf.start();
+ }
os << "\n";
}
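
[Editor's note] The disassembler resolves a jump target as the jump's own address plus a signed offset, read either from the operand byte or from the constant-pool slot the operand names. A reduced sketch of that arithmetic, with fake opcode values rather than V8's bytecode encoding:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Resolve a jump's absolute position: the signed offset comes either from
    // the operand byte itself or from the constant-pool slot it names.
    int JumpTarget(const std::vector<int>& constant_pool, const uint8_t* code,
                   int jump_pc, bool offset_in_pool) {
      uint8_t operand = code[jump_pc + 1];
      int offset = offset_in_pool ? constant_pool[operand]
                                  : static_cast<int8_t>(operand);
      return jump_pc + offset;  // offsets are relative to the jump bytecode
    }

    int main() {
      std::vector<int> pool = {0, 6};
      uint8_t code[] = {0x10, 1, 0x10, 1};  // two fake jumps naming pool slot 1
      std::printf("%d\n", JumpTarget(pool, code, 2, true));  // prints 8
    }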
@@ -12560,8 +15088,7 @@ MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
uint32_t old_length = 0;
CHECK(old_length_handle->ToArrayLength(&old_length));
- static const PropertyAttributes kNoAttrFilter = NONE;
- int num_elements = array->NumberOfOwnElements(kNoAttrFilter);
+ int num_elements = array->NumberOfOwnElements(ALL_PROPERTIES);
if (num_elements > 0) {
if (old_length == static_cast<uint32_t>(num_elements)) {
// Simple case for arrays without holes.
@@ -12573,7 +15100,7 @@ MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
// TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
// the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- array->GetOwnElementKeys(*keys, kNoAttrFilter);
+ array->GetOwnElementKeys(*keys, ALL_PROPERTIES);
while (num_elements-- > 0) {
uint32_t index = NumberToUint32(keys->get(num_elements));
if (index < new_length) break;
@@ -12643,20 +15170,6 @@ void Map::AddDependentCode(Handle<Map> map,
}
-DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
- Recompute(entries);
-}
-
-
-void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) {
- start_indexes_[0] = 0;
- for (int g = 1; g <= kGroupCount; g++) {
- int count = entries->number_of_entries(static_cast<DependencyGroup>(g - 1));
- start_indexes_[g] = start_indexes_[g - 1] + count;
- }
-}
-
-
Handle<DependentCode> DependentCode::InsertCompilationDependencies(
Handle<DependentCode> entries, DependencyGroup group,
Handle<Foreign> info) {
@@ -12674,44 +15187,54 @@ Handle<DependentCode> DependentCode::InsertWeakCode(
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Object> object) {
- GroupStartIndexes starts(*entries);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- int number_of_entries = starts.number_of_entries();
+ if (entries->length() == 0 || entries->group() > group) {
+ // There is no such group.
+ return DependentCode::New(group, object, entries);
+ }
+ if (entries->group() < group) {
+ // The group comes later in the list.
+ Handle<DependentCode> old_next(entries->next_link());
+ Handle<DependentCode> new_next = Insert(old_next, group, object);
+ if (!old_next.is_identical_to(new_next)) {
+ entries->set_next_link(*new_next);
+ }
+ return entries;
+ }
+ DCHECK_EQ(group, entries->group());
+ int count = entries->count();
// Check for existing entry to avoid duplicates.
- for (int i = start; i < end; i++) {
+ for (int i = 0; i < count; i++) {
if (entries->object_at(i) == *object) return entries;
}
- if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
+ if (entries->length() < kCodesStartIndex + count + 1) {
entries = EnsureSpace(entries);
- // The number of codes can change after Compact and GC.
- starts.Recompute(*entries);
- start = starts.at(group);
- end = starts.at(group + 1);
+ // Count could have changed, reload it.
+ count = entries->count();
}
-
- entries->ExtendGroup(group);
- entries->set_object_at(end, *object);
- entries->set_number_of_entries(group, end + 1 - start);
+ entries->set_object_at(count, *object);
+ entries->set_count(count + 1);
return entries;
}
+Handle<DependentCode> DependentCode::New(DependencyGroup group,
+ Handle<Object> object,
+ Handle<DependentCode> next) {
+ Isolate* isolate = next->GetIsolate();
+ Handle<DependentCode> result = Handle<DependentCode>::cast(
+ isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
+ result->set_next_link(*next);
+ result->set_flags(GroupField::encode(group) | CountField::encode(1));
+ result->set_object_at(0, *object);
+ return result;
+}
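
[Editor's note] This change replaces the old flat array with per-group start indexes by a linked list of nodes, each holding one group's entries, kept sorted by group id. A compact model of Insert/New over that shape, in plain C++ rather than V8's object layout (nodes are deliberately leaked to keep the sketch short):

    #include <cassert>
    #include <vector>

    struct DependentCode {
      int group;                 // group id; the list is sorted ascending
      std::vector<int> entries;  // dependent entries for this group
      DependentCode* next;       // next group in the list
    };

    DependentCode* Insert(DependentCode* head, int group, int entry) {
      if (head == nullptr || head->group > group)  // no such group yet
        return new DependentCode{group, {entry}, head};
      if (head->group < group) {                   // group comes later
        head->next = Insert(head->next, group, entry);
        return head;
      }
      for (int e : head->entries)                  // avoid duplicates
        if (e == entry) return head;
      head->entries.push_back(entry);
      return head;
    }

    int main() {
      DependentCode* list = nullptr;
      list = Insert(list, 2, 10);
      list = Insert(list, 0, 20);
      list = Insert(list, 2, 10);  // duplicate, ignored
      assert(list->group == 0 && list->next->group == 2);
      assert(list->next->entries.size() == 1);
    }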
+
+
Handle<DependentCode> DependentCode::EnsureSpace(
Handle<DependentCode> entries) {
- Isolate* isolate = entries->GetIsolate();
- if (entries->length() == 0) {
- entries = Handle<DependentCode>::cast(
- isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
- for (int g = 0; g < kGroupCount; g++) {
- entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
- }
- return entries;
- }
if (entries->Compact()) return entries;
- GroupStartIndexes starts(*entries);
- int capacity =
- kCodesStartIndex + DependentCode::Grow(starts.number_of_entries());
+ Isolate* isolate = entries->GetIsolate();
+ int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
int grow_by = capacity - entries->length();
return Handle<DependentCode>::cast(
isolate->factory()->CopyFixedArrayAndGrow(entries, grow_by, TENURED));
@@ -12719,46 +15242,47 @@ Handle<DependentCode> DependentCode::EnsureSpace(
bool DependentCode::Compact() {
- GroupStartIndexes starts(this);
- int n = 0;
- for (int g = 0; g < kGroupCount; g++) {
- int start = starts.at(g);
- int end = starts.at(g + 1);
- int count = 0;
- DCHECK(start >= n);
- for (int i = start; i < end; i++) {
- Object* obj = object_at(i);
- if (!obj->IsWeakCell() || !WeakCell::cast(obj)->cleared()) {
- if (i != n + count) {
- copy(i, n + count);
- }
- count++;
+ int old_count = count();
+ int new_count = 0;
+ for (int i = 0; i < old_count; i++) {
+ Object* obj = object_at(i);
+ if (!obj->IsWeakCell() || !WeakCell::cast(obj)->cleared()) {
+ if (i != new_count) {
+ copy(i, new_count);
}
+ new_count++;
}
- if (count != end - start) {
- set_number_of_entries(static_cast<DependencyGroup>(g), count);
- }
- n += count;
}
- return n < starts.number_of_entries();
+ set_count(new_count);
+ for (int i = new_count; i < old_count; i++) {
+ clear_at(i);
+ }
+ return new_count < old_count;
}
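
[Editor's note] Compact is now a single stable pass: live entries slide left over cleared weak cells and the freed tail is zapped. The same loop over a plain vector, with 0 standing in for a cleared cell:

    #include <cassert>
    #include <vector>

    // Returns true if anything was removed.
    bool Compact(std::vector<int>* entries) {
      int old_count = static_cast<int>(entries->size());
      int new_count = 0;
      for (int i = 0; i < old_count; i++) {
        int value = (*entries)[i];
        if (value != 0) (*entries)[new_count++] = value;  // keep live entries
      }
      for (int i = new_count; i < old_count; i++)
        (*entries)[i] = 0;       // zap the tail (the real array cannot shrink)
      entries->resize(new_count);
      return new_count < old_count;
    }

    int main() {
      std::vector<int> entries = {5, 0, 7, 0, 9};
      assert(Compact(&entries));
      assert(entries == (std::vector<int>{5, 7, 9}));
    }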
void DependentCode::UpdateToFinishedCode(DependencyGroup group, Foreign* info,
WeakCell* code_cell) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ next_link()->UpdateToFinishedCode(group, info, code_cell);
+ return;
+ }
+ DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_gc;
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- for (int i = start; i < end; i++) {
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
if (object_at(i) == info) {
set_object_at(i, code_cell);
break;
}
}
-
#ifdef DEBUG
- for (int i = start; i < end; i++) {
+ for (int i = 0; i < count; i++) {
DCHECK(object_at(i) != info);
}
#endif
@@ -12767,34 +15291,36 @@ void DependentCode::UpdateToFinishedCode(DependencyGroup group, Foreign* info,
void DependentCode::RemoveCompilationDependencies(
DependentCode::DependencyGroup group, Foreign* info) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ next_link()->RemoveCompilationDependencies(group, info);
+ return;
+ }
+ DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation;
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
+ int old_count = count();
// Find compilation info wrapper.
int info_pos = -1;
- for (int i = start; i < end; i++) {
+ for (int i = 0; i < old_count; i++) {
if (object_at(i) == info) {
info_pos = i;
break;
}
}
if (info_pos == -1) return; // Not found.
- int gap = info_pos;
- // Use the last of each group to fill the gap in the previous group.
- for (int i = group; i < kGroupCount; i++) {
- int last_of_group = starts.at(i + 1) - 1;
- DCHECK(last_of_group >= gap);
- if (last_of_group == gap) continue;
- copy(last_of_group, gap);
- gap = last_of_group;
- }
- DCHECK(gap == starts.number_of_entries() - 1);
- clear_at(gap); // Clear last gap.
- set_number_of_entries(group, end - start - 1);
+ // Use the last code to fill the gap.
+ if (info_pos < old_count - 1) {
+ copy(old_count - 1, info_pos);
+ }
+ clear_at(old_count - 1);
+ set_count(old_count - 1);
#ifdef DEBUG
- for (int i = start; i < end - 1; i++) {
+ for (int i = 0; i < old_count - 1; i++) {
DCHECK(object_at(i) != info);
}
#endif
@@ -12802,30 +15328,55 @@ void DependentCode::RemoveCompilationDependencies(
bool DependentCode::Contains(DependencyGroup group, WeakCell* code_cell) {
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- for (int i = start; i < end; i++) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return false;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->Contains(group, code_cell);
+ }
+ DCHECK_EQ(group, this->group());
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
if (object_at(i) == code_cell) return true;
}
return false;
}
+bool DependentCode::IsEmpty(DependencyGroup group) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return true;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->IsEmpty(group);
+ }
+ DCHECK_EQ(group, this->group());
+ return count() == 0;
+}
+
+
bool DependentCode::MarkCodeForDeoptimization(
Isolate* isolate,
DependentCode::DependencyGroup group) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return false;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->MarkCodeForDeoptimization(isolate, group);
+ }
+ DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation_scope;
- DependentCode::GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- int code_entries = starts.number_of_entries();
- if (start == end) return false;
-
// Mark all the code that needs to be deoptimized.
bool marked = false;
bool invalidate_embedded_objects = group == kWeakCodeGroup;
- for (int i = start; i < end; i++) {
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
Object* obj = object_at(i);
if (obj->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(obj);
@@ -12846,16 +15397,10 @@ bool DependentCode::MarkCodeForDeoptimization(
info->Abort();
}
}
- // Compact the array by moving all subsequent groups to fill in the new holes.
- for (int src = end, dst = start; src < code_entries; src++, dst++) {
- copy(src, dst);
- }
- // Now the holes are at the end of the array, zap them for heap-verifier.
- int removed = end - start;
- for (int i = code_entries - removed; i < code_entries; i++) {
+ for (int i = 0; i < count; i++) {
clear_at(i);
}
- set_number_of_entries(group, 0);
+ set_count(0);
return marked;
}
@@ -12923,25 +15468,177 @@ Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
}
-MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
- Handle<Object> value,
- bool from_javascript) {
+Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
+ Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw) {
+ if (object->IsJSProxy()) {
+ return JSProxy::SetPrototype(Handle<JSProxy>::cast(object), value,
+ from_javascript, should_throw);
+ }
+ return JSObject::SetPrototype(Handle<JSObject>::cast(object), value,
+ from_javascript, should_throw);
+}
+
+
+// ES6: 9.5.2 [[SetPrototypeOf]] (V)
+// static
+Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
+ // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+ DCHECK(value->IsJSReceiver() || value->IsNull());
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+  // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+  // 7. If trap is undefined, then return target.[[SetPrototypeOf]](V).
+ if (trap->IsUndefined()) {
+ return JSReceiver::SetPrototype(target, value, from_javascript,
+ should_throw);
+ }
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, V»)).
+ Handle<Object> argv[] = {target, value};
+ Handle<Object> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(argv), argv),
+ Nothing<bool>());
+ bool bool_trap_result = trap_result->BooleanValue();
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ if (is_extensible.IsNothing()) return Nothing<bool>();
+ // 10. If extensibleTarget is true, return booleanTrapResult.
+ if (is_extensible.FromJust()) {
+ if (bool_trap_result) return Just(true);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ Handle<Object> target_proto;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_proto,
+ Object::GetPrototype(isolate, target),
+ Nothing<bool>());
+ // 12. If booleanTrapResult is true and SameValue(V, targetProto) is false,
+ // throw a TypeError exception.
+ if (bool_trap_result && !value->SameValue(*target_proto)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetPrototypeOfNonExtensible));
+ return Nothing<bool>();
+ }
+ // 13. Return booleanTrapResult.
+ if (bool_trap_result) return Just(true);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+}
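
[Editor's note] Steps 8-13 above enforce the proxy invariant: a truthy trap result is taken at face value while the target is extensible, but once the target is non-extensible the trap may only report success for the prototype the target actually has. Condensed into a predicate, with booleans standing in for the spec values and a false return modelling both the falsish-result and TypeError outcomes:

    #include <cassert>

    bool ValidateSetPrototypeTrap(bool trap_result, bool target_extensible,
                                  bool value_is_target_proto) {
      if (!trap_result) return false;      // steps 10/13: falsish result fails
      if (target_extensible) return true;  // step 10: nothing left to verify
      return value_is_target_proto;        // step 12: must match reality
    }

    int main() {
      assert(ValidateSetPrototypeTrap(true, true, false));    // extensible: ok
      assert(!ValidateSetPrototypeTrap(true, false, false));  // lie is rejected
      assert(ValidateSetPrototypeTrap(true, false, true));    // truthful: ok
    }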
+
+
+Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
+ Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw) {
+ Isolate* isolate = object->GetIsolate();
+
+ const bool observed = from_javascript && object->map()->is_observed();
+ Handle<Object> old_value;
+ if (observed) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, old_value,
+ Object::GetPrototype(isolate, object),
+ Nothing<bool>());
+ }
+
+ Maybe<bool> result =
+ SetPrototypeUnobserved(object, value, from_javascript, should_throw);
+ MAYBE_RETURN(result, Nothing<bool>());
+
+ if (result.FromJust() && observed) {
+ Handle<Object> new_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, new_value,
+ Object::GetPrototype(isolate, object),
+ Nothing<bool>());
+ if (!new_value->SameValue(*old_value)) {
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::EnqueueChangeRecord(
+ object, "setPrototype",
+ isolate->factory()->proto_string(), old_value),
+ Nothing<bool>());
+ }
+ }
+
+ return result;
+}
+
+
+Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw) {
#ifdef DEBUG
int size = object->Size();
#endif
Isolate* isolate = object->GetIsolate();
+
+ if (from_javascript) {
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
+ }
+ } else {
+ DCHECK(!object->IsAccessCheckNeeded());
+ }
+
// Strong objects may not have their prototype set via __proto__ or
// setPrototypeOf.
if (from_javascript && object->map()->is_strong()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongSetProto, object),
- Object);
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrongSetProto, object));
}
Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
- if (!value->IsJSReceiver() && !value->IsNull()) return value;
+ if (!value->IsJSReceiver() && !value->IsNull()) return Just(true);
+
+ bool dictionary_elements_in_chain =
+ object->map()->DictionaryElementsInPrototypeChainOnly();
+
+ bool all_extensible = object->map()->is_extensible();
+ Handle<JSObject> real_receiver = object;
+ if (from_javascript) {
+ // Find the first object in the chain whose prototype object is not
+ // hidden.
+ PrototypeIterator iter(isolate, real_receiver);
+ while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ // Casting to JSObject is fine because hidden prototypes are never
+ // JSProxies.
+ real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
+ iter.Advance();
+ all_extensible = all_extensible && real_receiver->map()->is_extensible();
+ }
+ }
+ Handle<Map> map(real_receiver->map());
+
+ // Nothing to do if prototype is already set.
+ if (map->prototype() == *value) return Just(true);
// From 8.6.2 Object Internal Methods
// ...
@@ -12951,50 +15648,25 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// Implementation specific extensions that modify [[Class]], [[Prototype]]
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
- if (!object->map()->is_extensible()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kNonExtensibleProto, object),
- Object);
+ if (!all_extensible) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNonExtensibleProto, object));
}
- // Before we can set the prototype we need to be sure
- // prototype cycles are prevented.
- // It is sufficient to validate that the receiver is not in the new prototype
- // chain.
+ // Before we can set the prototype we need to be sure prototype cycles are
+ // prevented. It is sufficient to validate that the receiver is not in the
+ // new prototype chain.
for (PrototypeIterator iter(isolate, *value,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent<JSReceiver>() == *object) {
// Cycle detected.
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCyclicProto),
- Object);
- }
- }
-
- bool dictionary_elements_in_chain =
- object->map()->DictionaryElementsInPrototypeChainOnly();
- Handle<JSObject> real_receiver = object;
-
- if (from_javascript) {
- // Find the first object in the chain whose prototype object is not
- // hidden and set the new prototype on that object.
- PrototypeIterator iter(isolate, real_receiver);
- while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
- iter.Advance();
- if (!real_receiver->map()->is_extensible()) {
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kNonExtensibleProto, object),
- Object);
- }
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kCyclicProto));
}
}
// Set the new prototype of the object.
- Handle<Map> map(real_receiver->map());
-
- // Nothing to do if prototype is already set.
- if (map->prototype() == *value) return value;
isolate->UpdateArrayProtectorOnSetPrototype(real_receiver);
@@ -13009,12 +15681,12 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// If the prototype chain didn't previously have element callbacks, then
// KeyedStoreICs need to be cleared to ensure any that involve this
// map go generic.
- object->GetHeap()->ClearAllKeyedStoreICs();
+ TypeFeedbackVector::ClearAllKeyedStoreICs(isolate);
}
heap->ClearInstanceofCache();
DCHECK(size == object->Size());
- return value;
+ return Just(true);
}
@@ -13128,6 +15800,8 @@ static bool ShouldConvertToFastElements(JSObject* object,
uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
SeededNumberDictionary::kEntrySize;
+
+  // Turn fast if the dictionary saves at most 50% of the space.
return 2 * dictionary_size >= *new_capacity;
}
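
[Editor's note] The new comment documents the heuristic: convert back to fast elements once the dictionary occupies at least half the memory a fast backing store would. A worked instance, assuming an entry size of three words:

    #include <cstdio>

    int main() {
      const int kEntrySize = 3;                // assumed words per entry
      int dictionary_size = 100 * kEntrySize;  // 100 entries -> 300 words
      int new_capacity = 500;                  // fast store would need 500 slots
      bool turn_fast = 2 * dictionary_size >= new_capacity;  // 600 >= 500
      std::printf("turn fast: %s\n", turn_fast ? "yes" : "no");  // yes
    }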
@@ -13137,6 +15811,17 @@ MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes) {
+ MAYBE_RETURN_NULL(
+ AddDataElement(object, index, value, attributes, THROW_ON_ERROR));
+ return value;
+}
+
+
+// static
+Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ ShouldThrow should_throw) {
DCHECK(object->map()->is_extensible());
Isolate* isolate = object->GetIsolate();
@@ -13197,30 +15882,33 @@ MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<String> name = isolate->factory()->Uint32ToString(index);
- RETURN_ON_EXCEPTION(isolate, BeginPerformSplice(array), Object);
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(isolate, BeginPerformSplice(array),
+ Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(
isolate, EnqueueChangeRecord(array, "add", name,
isolate->factory()->the_hole_value()),
- Object);
- RETURN_ON_EXCEPTION(isolate,
- EnqueueChangeRecord(array, "update",
- isolate->factory()->length_string(),
- old_length_handle),
- Object);
- RETURN_ON_EXCEPTION(isolate, EndPerformSplice(array), Object);
+ Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, EnqueueChangeRecord(array, "update",
+ isolate->factory()->length_string(),
+ old_length_handle),
+ Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(isolate, EndPerformSplice(array),
+ Nothing<bool>());
Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- RETURN_ON_EXCEPTION(isolate, EnqueueSpliceRecord(array, old_length, deleted,
- new_length - old_length),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ EnqueueSpliceRecord(array, old_length, deleted,
+ new_length - old_length),
+ Nothing<bool>());
} else if (object->map()->is_observed()) {
Handle<String> name = isolate->factory()->Uint32ToString(index);
- RETURN_ON_EXCEPTION(
+ RETURN_ON_EXCEPTION_VALUE(
isolate, EnqueueChangeRecord(object, "add", name,
isolate->factory()->the_hole_value()),
- Object);
+ Nothing<bool>());
}
- return value;
+ return Just(true);
}
@@ -13423,16 +16111,6 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
}
-MaybeHandle<Object> JSArray::ReadOnlyLengthError(Handle<JSArray> array) {
- Isolate* isolate = array->GetIsolate();
- Handle<Name> length = isolate->factory()->length_string();
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kStrictReadOnlyProperty, length, array),
- Object);
-}
-
-
template <typename BackingStore>
static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
int limit = object->IsJSArray()
@@ -13452,8 +16130,8 @@ int JSObject::GetFastElementsUsage() {
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- // Only JSArray have packed elements.
- return Smi::cast(JSArray::cast(this)->length())->value();
+ return IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value()
+ : store->length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
store = FixedArray::cast(FixedArray::cast(store)->get(1));
// Fall through.
@@ -13489,14 +16167,13 @@ void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
if (this->IsKey(k)) {
- os << " ";
+ os << "\n ";
if (k->IsString()) {
String::cast(k)->StringPrint(os);
} else {
os << Brief(k);
}
- os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i)
- << "\n";
+ os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i);
}
}
}
@@ -13529,16 +16206,6 @@ InterceptorInfo* JSObject::GetNamedInterceptor() {
}
-InterceptorInfo* JSObject::GetIndexedInterceptor() {
- DCHECK(map()->has_indexed_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
- DCHECK(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->indexed_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
bool* done) {
*done = false;
@@ -13567,6 +16234,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
result = args.Call(getter, index);
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
return isolate->factory()->undefined_value();
@@ -13590,59 +16258,11 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
}
-// Compute the property keys from the interceptor.
-MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
- Handle<JSObject> object, Handle<JSReceiver> receiver) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Local<v8::Object> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::GenericNamedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- result = args.Call(enum_fun);
- }
- if (result.IsEmpty()) return MaybeHandle<JSObject>();
- DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
- // Rebox before returning.
- return handle(*v8::Utils::OpenHandle(*result), isolate);
-}
-
-
-// Compute the element keys from the interceptor.
-MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
- Handle<JSObject> object, Handle<JSReceiver> receiver) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Local<v8::Object> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- result = args.Call(enum_fun);
- }
- if (result.IsEmpty()) return MaybeHandle<JSObject>();
- DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
- // Rebox before returning.
- return handle(*v8::Utils::OpenHandle(*result), isolate);
-}
-
-
Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
name->GetIsolate(), object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- if (!maybe_result.IsJust()) return Nothing<bool>();
- return Just(it.IsFound());
+ return HasProperty(&it);
}
@@ -13651,9 +16271,7 @@ Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
LookupIterator it(isolate, object, index,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- if (!maybe_result.IsJust()) return Nothing<bool>();
- return Just(it.IsFound());
+ return HasProperty(&it);
}
@@ -13667,23 +16285,6 @@ Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
}
-int JSObject::NumberOfOwnProperties(PropertyAttributes filter) {
- if (HasFastProperties()) {
- Map* map = this->map();
- if (filter == NONE) return map->NumberOfOwnDescriptors();
- if (filter & DONT_ENUM) {
- int result = map->EnumLength();
- if (result != kInvalidEnumCacheSentinel) return result;
- }
- return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
- } else if (IsGlobalObject()) {
- return global_dictionary()->NumberOfElementsFilterAttributes(filter);
- } else {
- return property_dictionary()->NumberOfElementsFilterAttributes(filter);
- }
-}
-
-
void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
Object* temp = get(i);
set(i, get(j));
@@ -13797,58 +16398,76 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
}
-// Fill in the names of own properties into the supplied storage. The main
-// purpose of this function is to provide reflection information for the object
-// mirrors.
-int JSObject::GetOwnPropertyNames(FixedArray* storage, int index,
- PropertyAttributes filter) {
- DCHECK(storage->length() >= (NumberOfOwnProperties(filter) - index));
+void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
+ PropertyFilter filter) {
if (HasFastProperties()) {
- int start_index = index;
int real_size = map()->NumberOfOwnDescriptors();
- DescriptorArray* descs = map()->instance_descriptors();
+ Handle<DescriptorArray> descs(map()->instance_descriptors());
for (int i = 0; i < real_size; i++) {
- if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- !FilterKey(descs->GetKey(i), filter)) {
- storage->set(index++, descs->GetKey(i));
+ PropertyDetails details = descs->GetDetails(i);
+ if ((details.attributes() & filter) != 0) continue;
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = descs->GetValue(i);
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
}
+ Name* key = descs->GetKey(i);
+ if (key->FilterKey(filter)) continue;
+ keys->AddKey(key);
}
- return index - start_index;
- } else if (IsGlobalObject()) {
- return global_dictionary()->CopyKeysTo(storage, index, filter,
- GlobalDictionary::UNSORTED);
+ } else if (IsJSGlobalObject()) {
+ GlobalDictionary::CollectKeysTo(handle(global_dictionary()), keys, filter);
} else {
- return property_dictionary()->CopyKeysTo(storage, index, filter,
- NameDictionary::UNSORTED);
+ NameDictionary::CollectKeysTo(handle(property_dictionary()), keys, filter);
}
}
-int JSObject::NumberOfOwnElements(PropertyAttributes filter) {
+int JSObject::NumberOfOwnElements(PropertyFilter filter) {
+ // Fast case for objects with no elements.
+ if (!IsJSValue() && HasFastElements()) {
+ uint32_t length =
+ IsJSArray()
+ ? static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(this)->length())->value())
+ : static_cast<uint32_t>(FixedArrayBase::cast(elements())->length());
+ if (length == 0) return 0;
+ }
+ // Compute the number of enumerable elements.
return GetOwnElementKeys(NULL, filter);
}
-int JSObject::NumberOfEnumElements() {
- // Fast case for objects with no elements.
- if (!IsJSValue() && HasFastObjectElements()) {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (length == 0) return 0;
+void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ PropertyFilter filter) {
+ if (filter & SKIP_STRINGS) return;
+ uint32_t string_keys = 0;
+
+ // If this is a String wrapper, add the string indices first,
+ // as they're guaranteed to precede the elements in numerical order
+ // and ascending order is required by ECMA-262, 6th, 9.1.12.
+ if (object->IsJSValue()) {
+ Object* val = JSValue::cast(*object)->value();
+ if (val->IsString() && (filter & ONLY_ALL_CAN_READ) == 0) {
+ String* str = String::cast(val);
+ string_keys = str->length();
+ for (uint32_t i = 0; i < string_keys; i++) {
+ keys->AddKey(i);
+ }
+ }
}
- // Compute the number of enumerable elements.
- return NumberOfOwnElements(static_cast<PropertyAttributes>(DONT_ENUM));
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->CollectElementIndices(object, keys, kMaxUInt32, filter, 0);
}
-int JSObject::GetOwnElementKeys(FixedArray* storage,
- PropertyAttributes filter) {
+int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
int counter = 0;
// If this is a String wrapper, add the string indices first,
- // as they're guaranteed to preced the elements in numerical order
+ // as they're guaranteed to precede the elements in numerical order
// and ascending order is required by ECMA-262, 6th, 9.1.12.
if (IsJSValue()) {
Object* val = JSValue::cast(this)->value();
@@ -13973,8 +16592,36 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
}
-int JSObject::GetEnumElementKeys(FixedArray* storage) {
- return GetOwnElementKeys(storage, static_cast<PropertyAttributes>(DONT_ENUM));
+MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
+ Handle<Object> object) {
+ if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
+ if (object->IsNull()) return isolate->factory()->null_to_string();
+
+ Handle<JSReceiver> receiver;
+ CHECK(Object::ToObject(isolate, object).ToHandle(&receiver));
+
+ Handle<String> tag;
+ if (FLAG_harmony_tostring) {
+ Handle<Object> to_string_tag;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, to_string_tag,
+ GetProperty(receiver, isolate->factory()->to_string_tag_symbol()),
+ String);
+ if (to_string_tag->IsString()) {
+ tag = Handle<String>::cast(to_string_tag);
+ }
+ }
+
+ if (tag.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, tag,
+ JSReceiver::BuiltinStringTag(receiver), String);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("[object ");
+ builder.AppendString(tag);
+ builder.AppendCharacter(']');
+ return builder.Finish();
}
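
[Editor's note] ObjectProtoToString picks the @@toStringTag value when it is a string, falls back to the built-in tag otherwise, and wraps the result in "[object ...]". The string-level equivalent, where builtin_tag stands in for the BuiltinStringTag lookup and an empty string stands in for "tag is not a string":

    #include <cassert>
    #include <string>

    std::string ObjectProtoToString(const std::string& to_string_tag,
                                    const std::string& builtin_tag) {
      const std::string& tag =
          to_string_tag.empty() ? builtin_tag : to_string_tag;
      return "[object " + tag + "]";
    }

    int main() {
      assert(ObjectProtoToString("", "Array") == "[object Array]");
      assert(ObjectProtoToString("Custom", "Object") == "[object Custom]");
    }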
@@ -14093,12 +16740,197 @@ class StringSharedKey : public HashTableKey {
};
+namespace {
+
+JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
+ JSRegExp::Flags value = JSRegExp::kNone;
+ int length = flags->length();
+ // A longer flags string cannot be valid.
+ if (length > 5) return JSRegExp::Flags(0);
+ for (int i = 0; i < length; i++) {
+ JSRegExp::Flag flag = JSRegExp::kNone;
+ switch (flags->Get(i)) {
+ case 'g':
+ flag = JSRegExp::kGlobal;
+ break;
+ case 'i':
+ flag = JSRegExp::kIgnoreCase;
+ break;
+ case 'm':
+ flag = JSRegExp::kMultiline;
+ break;
+ case 'u':
+ if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
+ flag = JSRegExp::kUnicode;
+ break;
+ case 'y':
+ if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
+ flag = JSRegExp::kSticky;
+ break;
+ default:
+ return JSRegExp::Flags(0);
+ }
+ // Duplicate flag.
+ if (value & flag) return JSRegExp::Flags(0);
+ value |= flag;
+ }
+ *success = true;
+ return value;
+}
+
+} // namespace
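
[Editor's note] RegExpFlagsFromString folds the flags string into a bitmask, rejecting unknown letters and duplicates by returning the empty flag set without marking success. The same scheme over plain enums; only g/i/m are handled in this sketch:

    #include <cassert>
    #include <string>

    enum Flags { kNone = 0, kGlobal = 1, kIgnoreCase = 2, kMultiline = 4 };

    Flags ParseFlags(const std::string& flags, bool* success) {
      int value = kNone;
      for (char c : flags) {
        Flags flag = c == 'g' ? kGlobal
                   : c == 'i' ? kIgnoreCase
                   : c == 'm' ? kMultiline : kNone;
        if (flag == kNone || (value & flag)) return kNone;  // unknown/duplicate
        value |= flag;
      }
      *success = true;
      return static_cast<Flags>(value);
    }

    int main() {
      bool ok = false;
      assert(ParseFlags("gim", &ok) == (kGlobal | kIgnoreCase | kMultiline));
      assert(ok);
      ok = false;
      assert(ParseFlags("gg", &ok) == kNone && !ok);  // duplicate rejected
    }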
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern, Flags flags) {
+ Isolate* isolate = pattern->GetIsolate();
+ Handle<JSFunction> constructor = isolate->regexp_function();
+ Handle<JSRegExp> regexp =
+ Handle<JSRegExp>::cast(isolate->factory()->NewJSObject(constructor));
+
+ return JSRegExp::Initialize(regexp, pattern, flags);
+}
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern,
+ Handle<String> flags_string) {
+ Isolate* isolate = pattern->GetIsolate();
+ bool success = false;
+ Flags flags = RegExpFlagsFromString(flags_string, &success);
+ if (!success) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
+ JSRegExp);
+ }
+ return New(pattern, flags);
+}
+
+
+// static
+Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
+ Isolate* const isolate = regexp->GetIsolate();
+ return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
+}
+
+
+template <typename Char>
+inline int CountRequiredEscapes(Handle<String> source) {
+ DisallowHeapAllocation no_gc;
+ int escapes = 0;
+ Vector<const Char> src = source->GetCharVector<Char>();
+ for (int i = 0; i < src.length(); i++) {
+ if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
+ }
+ return escapes;
+}
+
+
+template <typename Char, typename StringType>
+inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
+ Handle<StringType> result) {
+ DisallowHeapAllocation no_gc;
+ Vector<const Char> src = source->GetCharVector<Char>();
+ Vector<Char> dst(result->GetChars(), result->length());
+ int s = 0;
+ int d = 0;
+ while (s < src.length()) {
+ if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
+ dst[d++] = src[s++];
+ }
+ DCHECK_EQ(result->length(), d);
+ return result;
+}
+
+
+MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
+ Handle<String> source) {
+ String::Flatten(source);
+ if (source->length() == 0) return isolate->factory()->query_colon_string();
+ bool one_byte = source->IsOneByteRepresentationUnderneath();
+ int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
+ : CountRequiredEscapes<uc16>(source);
+ if (escapes == 0) return source;
+ int length = source->length() + escapes;
+ if (one_byte) {
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ isolate->factory()->NewRawOneByteString(length),
+ String);
+ return WriteEscapedRegExpSource<uint8_t>(source, result);
+ } else {
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ isolate->factory()->NewRawTwoByteString(length),
+ String);
+ return WriteEscapedRegExpSource<uc16>(source, result);
+ }
+}
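
// A standalone sketch of the two-pass escaping above, with std::string in
// place of V8's sequential strings: count the unescaped '/' characters to
// size the result exactly, then copy while prefixing each with a backslash.
#include <string>

std::string EscapeRegExpSourceSketch(const std::string& src) {
  if (src.empty()) return "(?:)";  // Matches the empty-pattern replacement.
  int escapes = 0;
  for (size_t i = 0; i < src.size(); i++) {
    if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
  }
  if (escapes == 0) return src;
  std::string result;
  result.reserve(src.size() + escapes);
  for (size_t i = 0; i < src.size(); i++) {
    if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) result += '\\';
    result += src[i];
  }
  return result;  // E.g. "a/b" becomes "a\/b"; an escaped "\/" is kept as-is.
}
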
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string) {
+ Isolate* isolate = source->GetIsolate();
+ bool success = false;
+ Flags flags = RegExpFlagsFromString(flags_string, &success);
+ if (!success) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
+ JSRegExp);
+ }
+ return Initialize(regexp, source, flags);
+}
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source, Flags flags) {
+ Isolate* isolate = regexp->GetIsolate();
+ Factory* factory = isolate->factory();
+ // If source is the empty string we set it to "(?:)" instead as
+ // suggested by ECMA-262, 5th, section 15.10.4.1.
+ if (source->length() == 0) source = factory->query_colon_string();
+
+ Handle<String> escaped_source;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
+ EscapeRegExpSource(isolate, source), JSRegExp);
+
+ regexp->set_source(*escaped_source);
+ regexp->set_flags(Smi::FromInt(flags));
+
+ Map* map = regexp->map();
+ Object* constructor = map->GetConstructor();
+ if (constructor->IsJSFunction() &&
+ JSFunction::cast(constructor)->initial_map() == map) {
+ // If we still have the original map, set in-object properties directly.
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ } else {
+ // Map has changed, so use generic, but slower, method.
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->last_index_string(),
+ Handle<Smi>(Smi::FromInt(0), isolate), writable)
+ .Check();
+ }
+
+ RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
+ JSRegExp);
+
+ return regexp;
+}
+
+
// RegExpKey carries the source and flags of a regular expression as key.
class RegExpKey : public HashTableKey {
public:
RegExpKey(Handle<String> string, JSRegExp::Flags flags)
- : string_(string),
- flags_(Smi::FromInt(flags.value())) { }
+ : string_(string), flags_(Smi::FromInt(flags)) {}
// Rather than storing the key in the hash table, a pointer to the
// stored value is stored where the key should be. IsMatch then
@@ -14200,15 +17032,14 @@ class InternalizedStringKey : public HashTableKey {
template<typename Derived, typename Shape, typename Key>
void HashTable<Derived, Shape, Key>::IteratePrefix(ObjectVisitor* v) {
- IteratePointers(v, 0, kElementsStartOffset);
+ BodyDescriptorBase::IteratePointers(this, 0, kElementsStartOffset, v);
}
template<typename Derived, typename Shape, typename Key>
void HashTable<Derived, Shape, Key>::IterateElements(ObjectVisitor* v) {
- IteratePointers(v,
- kElementsStartOffset,
- kHeaderSize + length() * kPointerSize);
+ BodyDescriptorBase::IteratePointers(this, kElementsStartOffset,
+ kHeaderSize + length() * kPointerSize, v);
}
@@ -14395,14 +17226,8 @@ Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
Isolate* isolate = table->GetIsolate();
int capacity = table->Capacity();
int nof = table->NumberOfElements() + n;
- int nod = table->NumberOfDeletedElements();
- // Return if:
- // 50% is still free after adding n elements and
- // at most 50% of the free elements are deleted elements.
- if (nod <= (capacity - nof) >> 1) {
- int needed_free = nof >> 1;
- if (nof + needed_free <= capacity) return table;
- }
+
+ if (table->HasSufficientCapacity(n)) return table;
const int kMinCapacityForPretenure = 256;
bool should_pretenure = pretenure == TENURED ||
@@ -14419,6 +17244,22 @@ Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
}
+template <typename Derived, typename Shape, typename Key>
+bool HashTable<Derived, Shape, Key>::HasSufficientCapacity(int n) {
+ int capacity = Capacity();
+ int nof = NumberOfElements() + n;
+ int nod = NumberOfDeletedElements();
+ // Return true if:
+ // 50% is still free after adding n elements and
+ // at most 50% of the free elements are deleted elements.
+ if (nod <= (capacity - nof) >> 1) {
+ int needed_free = nof >> 1;
+ if (nof + needed_free <= capacity) return true;
+ }
+ return false;
+}
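
// The same rule restated as a standalone predicate (names illustrative):
// after adding n elements at least half the table must remain free, and
// deleted entries may fill at most half of that free space.
bool HasSufficientCapacitySketch(int capacity, int elements, int deleted,
                                 int n) {
  int nof = elements + n;
  if (deleted <= (capacity - nof) >> 1) {
    int needed_free = nof >> 1;
    if (nof + needed_free <= capacity) return true;
  }
  return false;
}
// E.g. capacity 16 with 8 live entries: adding 3 still fits (11 + 5 <= 16),
// but adding 4 does not (12 + 6 > 16), so EnsureCapacity would grow.
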
+
+
template<typename Derived, typename Shape, typename Key>
Handle<Derived> HashTable<Derived, Shape, Key>::Shrink(Handle<Derived> table,
Key key) {
@@ -14585,6 +17426,9 @@ template Handle<UnseededNumberDictionary>
Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
EnsureCapacity(Handle<UnseededNumberDictionary>, int, uint32_t);
+template void Dictionary<NameDictionary, NameDictionaryShape,
+ Handle<Name> >::SetRequiresCopyOnCapacityChange();
+
template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
@@ -14852,8 +17696,8 @@ size_t JSTypedArray::element_size() {
}
-void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
- Handle<Name> name) {
+void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
+ Handle<Name> name) {
DCHECK(!global->HasFastProperties());
auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
@@ -14863,8 +17707,8 @@ void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
// TODO(ishell): rename to EnsureEmptyPropertyCell or something.
-Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
- Handle<GlobalObject> global, Handle<Name> name) {
+Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject> global, Handle<Name> name) {
DCHECK(!global->HasFastProperties());
auto dictionary = handle(global->global_dictionary());
int entry = dictionary->FindEntry(name);
@@ -15313,7 +18157,17 @@ Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices(
}
-template<typename Derived, typename Shape, typename Key>
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::SetRequiresCopyOnCapacityChange() {
+ DCHECK_EQ(0, DerivedHashTable::NumberOfElements());
+ DCHECK_EQ(0, DerivedHashTable::NumberOfDeletedElements());
+ // Make sure that HashTable::EnsureCapacity will create a copy.
+ DerivedHashTable::SetNumberOfDeletedElements(DerivedHashTable::Capacity());
+ DCHECK(!DerivedHashTable::HasSufficientCapacity(1));
+}
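
// In terms of the predicate sketched above: setting the deleted count to the
// full capacity makes e.g. HasSufficientCapacitySketch(8, 0, 8, 1) return
// false, so the next EnsureCapacity call is forced to allocate a fresh copy.
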
+
+
+template <typename Derived, typename Shape, typename Key>
Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
Handle<Derived> dictionary, int n, Key key) {
// Check whether there are enough enumeration indices to add n elements.
@@ -15417,7 +18271,7 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
if (key > kRequiresSlowElementsLimit) {
if (used_as_prototype) {
// TODO(verwaest): Remove this hack.
- GetHeap()->ClearAllKeyedStoreICs();
+ TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
}
set_requires_slow_elements();
return;
@@ -15496,12 +18350,12 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
template <typename Derived, typename Shape, typename Key>
int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
- PropertyAttributes filter) {
+ PropertyFilter filter) {
int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, filter)) {
+ if (this->IsKey(k) && !k->FilterKey(filter)) {
if (this->IsDeleted(i)) continue;
PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
@@ -15517,12 +18371,12 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, NONE)) {
+ if (this->IsKey(k) && !k->FilterKey(ALL_PROPERTIES)) {
if (this->IsDeleted(i)) continue;
PropertyDetails details = this->DetailsAt(i);
if (details.type() == ACCESSOR_CONSTANT) return true;
PropertyAttributes attr = details.attributes();
- if (attr & (READ_ONLY | DONT_DELETE | DONT_ENUM)) return true;
+ if (attr & ALL_ATTRIBUTES_MASK) return true;
}
}
return false;
@@ -15569,19 +18423,19 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(FixedArray* storage) {
template <typename Derived, typename Shape, typename Key>
int Dictionary<Derived, Shape, Key>::CopyKeysTo(
- FixedArray* storage, int index, PropertyAttributes filter,
+ FixedArray* storage, int index, PropertyFilter filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
int start_index = index;
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !FilterKey(k, filter)) {
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) == 0) storage->set(index++, k);
- }
+ if (!this->IsKey(k) || k->FilterKey(filter)) continue;
+ if (this->IsDeleted(i)) continue;
+ PropertyDetails details = this->DetailsAt(i);
+ PropertyAttributes attr = details.attributes();
+ if ((attr & filter) != 0) continue;
+ storage->set(index++, k);
}
if (sort_mode == Dictionary::SORTED) {
storage->SortPairs(storage, index);
@@ -15591,6 +18445,48 @@ int Dictionary<Derived, Shape, Key>::CopyKeysTo(
}
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::CollectKeysTo(
+ Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
+ PropertyFilter filter) {
+ int capacity = dictionary->Capacity();
+ Handle<FixedArray> array =
+ keys->isolate()->factory()->NewFixedArray(dictionary->NumberOfElements());
+ int array_size = 0;
+
+ {
+ DisallowHeapAllocation no_gc;
+ Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
+ for (int i = 0; i < capacity; i++) {
+ Object* k = raw_dict->KeyAt(i);
+ if (!raw_dict->IsKey(k) || k->FilterKey(filter)) continue;
+ if (raw_dict->IsDeleted(i)) continue;
+ PropertyDetails details = raw_dict->DetailsAt(i);
+ if ((details.attributes() & filter) != 0) continue;
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = raw_dict->ValueAt(i);
+ if (accessors->IsPropertyCell()) {
+ accessors = PropertyCell::cast(accessors)->value();
+ }
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ }
+ array->set(array_size++, Smi::FromInt(i));
+ }
+
+ EnumIndexComparator<Derived> cmp(static_cast<Derived*>(raw_dict));
+ Smi** start = reinterpret_cast<Smi**>(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+
+ for (int i = 0; i < array_size; i++) {
+ int index = Smi::cast(array->get(i))->value();
+ keys->AddKey(dictionary->KeyAt(index));
+ }
+}
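
// A standalone sketch of the ordering trick above: live slot indices are
// collected first, sorted by their enumeration index, and only then emitted,
// so keys come out in property creation order rather than hash order.
#include <algorithm>
#include <vector>

struct DictEntrySketch { int key; int enum_index; bool alive; };

std::vector<int> CollectKeysSketch(const std::vector<DictEntrySketch>& dict) {
  std::vector<int> slots;
  for (int i = 0; i < static_cast<int>(dict.size()); i++) {
    if (dict[i].alive) slots.push_back(i);
  }
  std::sort(slots.begin(), slots.end(), [&dict](int a, int b) {
    return dict[a].enum_index < dict[b].enum_index;
  });
  std::vector<int> keys;
  for (int slot : slots) keys.push_back(dict[slot].key);
  return keys;
}
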
+
+
// Backwards lookup (slow).
template<typename Derived, typename Shape, typename Key>
Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
@@ -16098,7 +18994,6 @@ void JSMap::Clear(Handle<JSMap> map) {
void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
Isolate* isolate) {
- DCHECK_EQ(0, weak_collection->map()->GetInObjectProperties());
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
weak_collection->set_table(*table);
}
@@ -16395,6 +19290,39 @@ int BreakPointInfo::GetBreakPointCount() {
}
+// static
+MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target, double tv) {
+ Isolate* const isolate = constructor->GetIsolate();
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ JSObject::New(constructor, new_target), JSDate);
+ if (-DateCache::kMaxTimeInMs <= tv && tv <= DateCache::kMaxTimeInMs) {
+ tv = DoubleToInteger(tv) + 0.0;
+ } else {
+ tv = std::numeric_limits<double>::quiet_NaN();
+ }
+ Handle<Object> value = isolate->factory()->NewNumber(tv);
+ Handle<JSDate>::cast(result)->SetValue(*value, std::isnan(tv));
+ return Handle<JSDate>::cast(result);
+}
+
+
+// static
+double JSDate::CurrentTimeValue(Isolate* isolate) {
+ if (FLAG_log_timer_events || FLAG_prof_cpp) LOG(isolate, CurrentTimeEvent());
+
+ // According to ECMA-262, section 15.9.1, page 117, the precision of
+ // the number in a Date object representing a particular instant in
+ // time is milliseconds. Therefore, we floor the result of getting
+ // the OS time.
+ return Floor(FLAG_verify_predictable
+ ? isolate->heap()->MonotonicallyIncreasingTimeInMs()
+ : base::OS::TimeCurrentMillis());
+}
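
// A standalone sketch of the time-value normalization in JSDate::New above:
// values inside the spec's +/-8.64e15 ms window are truncated toward zero,
// everything else becomes NaN. The constant here is the ES limit, which
// DateCache::kMaxTimeInMs is assumed to match.
#include <cmath>
#include <limits>

double NormalizeTimeValueSketch(double tv) {
  const double kMaxTimeInMs = 864e13;  // 100 million days in milliseconds.
  if (-kMaxTimeInMs <= tv && tv <= kMaxTimeInMs) {
    return std::trunc(tv) + 0.0;  // +0.0 normalizes -0 to +0.
  }
  return std::numeric_limits<double>::quiet_NaN();
}
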
+
+
+// static
Object* JSDate::GetField(Object* object, Smi* index) {
return JSDate::cast(object)->DoGetField(
static_cast<FieldIndex>(index->value()));
@@ -16487,6 +19415,16 @@ Object* JSDate::GetUTCField(FieldIndex index,
}
+// static
+Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
+ Isolate* const isolate = date->GetIsolate();
+ Handle<Object> value = isolate->factory()->NewNumber(v);
+ bool value_is_nan = std::isnan(v);
+ date->SetValue(*value, value_is_nan);
+ return value;
+}
+
+
void JSDate::SetValue(Object* value, bool is_value_nan) {
set_value(value);
if (is_value_nan) {
diff --git a/chromium/v8/src/objects.h b/chromium/v8/src/objects.h
index 225a7db42e0..c55c5c97805 100644
--- a/chromium/v8/src/objects.h
+++ b/chromium/v8/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,6 +11,7 @@
#include "src/assert-scope.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
+#include "src/base/flags.h"
#include "src/base/smart-pointers.h"
#include "src/builtins.h"
#include "src/checks.h"
@@ -50,6 +51,7 @@
// - JSArrayBufferView
// - JSTypedArray
// - JSDataView
+// - JSBoundFunction
// - JSCollection
// - JSSet
// - JSMap
@@ -62,15 +64,12 @@
// - JSFunction
// - JSGeneratorObject
// - JSModule
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
+// - JSGlobalObject
// - JSGlobalProxy
// - JSValue
// - JSDate
// - JSMessageObject
// - JSProxy
-// - JSFunctionProxy
// - FixedArrayBase
// - ByteArray
// - BytecodeArray
@@ -87,6 +86,7 @@
// - OrderedHashSet
// - OrderedHashMap
// - Context
+// - TypeFeedbackMetadata
// - TypeFeedbackVector
// - ScopeInfo
// - TransitionArray
@@ -143,7 +143,6 @@
// - FunctionTemplateInfo
// - ObjectTemplateInfo
// - Script
-// - TypeSwitchInfo
// - DebugInfo
// - BreakPointInfo
// - CodeCache
@@ -179,7 +178,7 @@ enum class ToPrimitiveHint { kDefault, kNumber, kString };
enum class OrdinaryToPrimitiveHint { kNumber, kString };
-enum TypeofMode { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum MutableMode {
@@ -411,6 +410,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
V(WEAK_CELL_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
\
V(JS_MESSAGE_OBJECT_TYPE) \
\
@@ -421,7 +421,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_GENERATOR_OBJECT_TYPE) \
V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
@@ -435,10 +434,11 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_ITERATOR_RESULT_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
+ V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
\
+ V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE) \
- V(JS_FUNCTION_PROXY_TYPE) \
V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
@@ -513,7 +513,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
V(ALLOCATION_SITE, AllocationSite, allocation_site) \
V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
@@ -702,6 +701,7 @@ enum InstanceType {
SHARED_FUNCTION_INFO_TYPE,
CELL_TYPE,
WEAK_CELL_TYPE,
+ TRANSITION_ARRAY_TYPE,
PROPERTY_CELL_TYPE,
PROTOTYPE_INFO_TYPE,
SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE,
@@ -709,11 +709,9 @@ enum InstanceType {
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
- // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
- // NONCALLABLE_JS_OBJECT range.
- JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
- JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ // compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
+ JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
+ JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_MESSAGE_OBJECT_TYPE,
JS_DATE_TYPE,
JS_OBJECT_TYPE,
@@ -721,7 +719,6 @@ enum InstanceType {
JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_TYPE,
JS_GLOBAL_OBJECT_TYPE,
- JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
@@ -734,7 +731,9 @@ enum InstanceType {
JS_ITERATOR_RESULT_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
+ JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
+ JS_BOUND_FUNCTION_TYPE,
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
// Pseudo-types
@@ -747,6 +746,8 @@ enum InstanceType {
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE,
LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
+ FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE,
+ LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
@@ -757,23 +758,11 @@ enum InstanceType {
// are not continuous in this enum! The enum ranges instead reflect the
// external class names, where proxies are treated as either ordinary objects,
// or functions.
- FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
+ FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
LAST_JS_RECEIVER_TYPE = LAST_TYPE,
// Boundaries for testing the types represented as JSObject
FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
LAST_JS_OBJECT_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSProxy
- FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
- LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
- // Boundaries for testing whether the type is a JavaScript object.
- FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
- LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
- // Boundaries for testing the types for which typeof is "object".
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
- // Note that the types for which typeof is "function" are not continuous.
- // Define this so that we can put assertions on discrete checks.
- NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
};
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -782,6 +771,9 @@ STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
+std::ostream& operator<<(std::ostream& os, InstanceType instance_type);
+
+
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
V(FAST_ELEMENTS_SUB_TYPE) \
V(DICTIONARY_ELEMENTS_SUB_TYPE) \
@@ -790,14 +782,13 @@ STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
V(MAP_CODE_CACHE_SUB_TYPE) \
V(SCOPE_INFO_SUB_TYPE) \
V(STRING_TABLE_SUB_TYPE) \
- V(DESCRIPTOR_ARRAY_SUB_TYPE) \
- V(TRANSITION_ARRAY_SUB_TYPE)
+ V(DESCRIPTOR_ARRAY_SUB_TYPE)
enum FixedArraySubInstanceType {
#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = DESCRIPTOR_ARRAY_SUB_TYPE
};
@@ -850,20 +841,22 @@ class ConsString;
class ElementsAccessor;
class FixedArrayBase;
class FunctionLiteral;
-class GlobalObject;
-class JSBuiltinsObject;
+class JSGlobalObject;
+class KeyAccumulator;
class LayoutDescriptor;
class LiteralsArray;
class LookupIterator;
class ObjectHashTable;
class ObjectVisitor;
class PropertyCell;
+class PropertyDescriptor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
class TypeFeedbackInfo;
class TypeFeedbackVector;
class WeakCell;
+class TransitionArray;
// We cannot just say "class HeapType;" if it is created from a template... =8-?
template<class> class TypeImpl;
@@ -943,6 +936,7 @@ template <class C> inline bool Is(Object* obj);
V(DescriptorArray) \
V(TransitionArray) \
V(LiteralsArray) \
+ V(TypeFeedbackMetadata) \
V(TypeFeedbackVector) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
@@ -956,6 +950,7 @@ template <class C> inline bool Is(Object* obj);
V(ScriptContextTable) \
V(NativeContext) \
V(ScopeInfo) \
+ V(JSBoundFunction) \
V(JSFunction) \
V(Code) \
V(Oddball) \
@@ -972,7 +967,6 @@ template <class C> inline bool Is(Object* obj);
V(JSTypedArray) \
V(JSDataView) \
V(JSProxy) \
- V(JSFunctionProxy) \
V(JSSet) \
V(JSMap) \
V(JSSetIterator) \
@@ -991,9 +985,7 @@ template <class C> inline bool Is(Object* obj);
V(PolymorphicCodeCacheHashTable) \
V(MapCache) \
V(Primitive) \
- V(GlobalObject) \
V(JSGlobalObject) \
- V(JSBuiltinsObject) \
V(JSGlobalProxy) \
V(UndetectableObject) \
V(AccessCheckNeeded) \
@@ -1004,6 +996,9 @@ template <class C> inline bool Is(Object* obj);
V(WeakHashTable) \
V(OrderedHashTable)
+// The element-type selection for CreateListFromArrayLike.
+enum class ElementTypes { kAll, kStringAndSymbol };
+
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -1027,6 +1022,25 @@ class Object {
CERTAINLY_NOT_STORE_FROM_KEYED
};
+ enum ShouldThrow { THROW_ON_ERROR, DONT_THROW };
+
+#define RETURN_FAILURE(isolate, should_throw, call) \
+ do { \
+ if ((should_throw) == DONT_THROW) { \
+ return Just(false); \
+ } else { \
+ isolate->Throw(*isolate->factory()->call); \
+ return Nothing<bool>(); \
+ } \
+ } while (false)
+
+#define MAYBE_RETURN(call, value) \
+ do { \
+ if ((call).IsNothing()) return value; \
+ } while (false)
+
+#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
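// A hedged usage sketch for the two macros above (SetFoo and the message
// name are hypothetical): a Maybe<bool>-returning operation reports failure
// either silently or by throwing, depending on the ShouldThrow argument.
//
//   Maybe<bool> SetFoo(Isolate* isolate, ShouldThrow should_throw) {
//     if (/* operation failed */ true) {
//       RETURN_FAILURE(isolate, should_throw,
//                      NewTypeError(MessageTemplate::kHypothetical));
//     }
//     return Just(true);
//   }
//
//   // Propagate a pending exception from a Maybe-returning call:
//   MAYBE_RETURN(SetFoo(isolate, THROW_ON_ERROR), Nothing<bool>());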
+
INLINE(bool IsFixedArrayBase() const);
INLINE(bool IsExternal() const);
INLINE(bool IsAccessorInfo() const);
@@ -1037,13 +1051,18 @@ class Object {
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
#undef DECLARE_STRUCT_PREDICATE
+ // ES6, section 7.2.2 IsArray. NOT to be confused with %_IsArray.
+ MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object);
+
+ // Test for JSBoundFunction or JSFunction.
+ INLINE(bool IsFunction() const);
+
// ES6, section 7.2.3 IsCallable.
INLINE(bool IsCallable() const);
// ES6, section 7.2.4 IsConstructor.
INLINE(bool IsConstructor() const);
- INLINE(bool IsSpecObject()) const;
INLINE(bool IsTemplateInfo()) const;
INLINE(bool IsNameDictionary() const);
INLINE(bool IsGlobalDictionary() const);
@@ -1084,6 +1103,8 @@ class Object {
// 1 all refer to the same property, so this helper will return true.
inline bool KeyEquals(Object* other);
+ inline bool FilterKey(PropertyFilter filter);
+
Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
inline static Handle<Object> NewStorageFor(Isolate* isolate,
@@ -1154,6 +1175,13 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> GetMethod(
Handle<JSReceiver> receiver, Handle<Name> name);
+ // ES6 section 7.3.17 CreateListFromArrayLike
+ MUST_USE_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types);
+
+ // Check whether |object| is an instance of Error or NativeError.
+ static bool IsErrorObject(Isolate* isolate, Handle<Object> object);
+
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
@@ -1211,17 +1239,23 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
LookupIterator* it, LanguageMode language_mode = SLOPPY);
- // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
+ // ES6 [[Set]] (when passed DONT_THROW)
+ // Invariants for this and related functions (unless stated otherwise):
+ // 1) When the result is Nothing, an exception is pending.
+ // 2) When passed THROW_ON_ERROR, the result is never Just(false).
+ // In some cases, an exception is thrown regardless of the ShouldThrow
+ // argument. These cases are either in accordance with the spec or not
+  // covered by it (e.g., concerning API callbacks).
+ MUST_USE_RESULT static Maybe<bool> SetProperty(LookupIterator* it,
+ Handle<Object> value,
+ LanguageMode language_mode,
+ StoreFromKeyed store_mode);
MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
- StoreFromKeyed store_mode);
-
- MUST_USE_RESULT static MaybeHandle<Object> SetSuperProperty(
+ MUST_USE_RESULT static Maybe<bool> SetSuperProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode);
@@ -1230,22 +1264,28 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
+ MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
- Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> RedefineNonconfigurableProperty(
+ Handle<Object> value, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
+ Handle<Object> value, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> RedefineIncompatibleProperty(
Isolate* isolate, Handle<Object> name, Handle<Object> value,
- LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetDataProperty(
- LookupIterator* it, Handle<Object> value);
- MUST_USE_RESULT static MaybeHandle<Object> AddDataProperty(
+ ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> SetDataProperty(LookupIterator* it,
+ Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- LanguageMode language_mode, StoreFromKeyed store_mode);
+ ShouldThrow should_throw, StoreFromKeyed store_mode);
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
+ Handle<JSReceiver> holder, Handle<Name> name, Handle<Object> receiver,
+ LanguageMode language_mode = SLOPPY);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<Object> object, const char* key,
LanguageMode language_mode = SLOPPY);
@@ -1255,16 +1295,15 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
LookupIterator* it, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithAccessor(
- LookupIterator* it, Handle<Object> value, LanguageMode language_mode);
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithAccessor(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
Handle<Object> receiver,
Handle<JSReceiver> getter);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithDefinedSetter(
- Handle<Object> receiver,
- Handle<JSReceiver> setter,
- Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithDefinedSetter(
+ Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
+ ShouldThrow should_throw);
MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
Isolate* isolate, Handle<Object> object, uint32_t index,
@@ -1274,10 +1313,13 @@ class Object {
Isolate* isolate, Handle<Object> object, uint32_t index,
Handle<Object> value, LanguageMode language_mode);
- static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
- Isolate* isolate, Handle<Object> receiver);
+ // Get the first non-hidden prototype.
+ static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
+ Handle<Object> receiver);
- bool HasInPrototypeChain(Isolate* isolate, Object* object);
+ MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> proto);
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
@@ -1304,6 +1346,10 @@ class Object {
// by ES6 Map and Set.
bool SameValueZero(Object* other);
+ // ES6 section 9.4.2.3 ArraySpeciesCreate (part of it)
+ MUST_USE_RESULT static MaybeHandle<Object> ArraySpeciesConstructor(
+ Isolate* isolate, Handle<Object> original_array);
+
// Tries to convert an object to an array length. Returns true and sets the
// output parameter if it succeeds.
inline bool ToArrayLength(uint32_t* index);
@@ -1325,6 +1371,10 @@ class Object {
inline void VerifyApiCallResultType();
+ // ES6 19.1.3.6 Object.prototype.toString
+ MUST_USE_RESULT static MaybeHandle<String> ObjectProtoToString(
+ Isolate* isolate, Handle<Object> object);
+
// Prints this object without details.
void ShortPrint(FILE* out = stdout);
@@ -1357,7 +1407,8 @@ class Object {
Map* GetRootMap(Isolate* isolate);
// Helper for SetProperty and SetSuperProperty.
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyInternal(
+ // Return value is only meaningful if [found] is set to true on return.
+ MUST_USE_RESULT static Maybe<bool> SetPropertyInternal(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode, bool* found);
@@ -1471,13 +1522,6 @@ class MapWord BASE_EMBEDDED {
};
-// The content of an heap object (except for the map pointer). kTaggedValues
-// objects can contain both heap pointers and Smis, kMixedValues can contain
-// heap pointers, Smis, and raw values (e.g. doubles or strings), and kRawValues
-// objects can contain raw values and Smis.
-enum class HeapObjectContents { kTaggedValues, kMixedValues, kRawValues };
-
-
// HeapObject is the superclass for all classes describing heap allocated
// objects.
class HeapObject: public Object {
@@ -1522,21 +1566,38 @@ class HeapObject: public Object {
return reinterpret_cast<Address>(this) - kHeapObjectTag;
}
- // Iterates over pointers contained in the object (including the Map)
+ // Iterates over pointers contained in the object (including the Map).
+  // If the iteration is not performance-critical, use the non-templatized
+  // version.
void Iterate(ObjectVisitor* v);
+ template <typename ObjectVisitor>
+ inline void IterateFast(ObjectVisitor* v);
+
// Iterates over all pointers contained in the object except the
// first map pointer. The object type is given in the first
// parameter. This function does not access the map pointer in the
// object, and so is safe to call while the map pointer is modified.
+  // If the iteration is not performance-critical, use the non-templatized
+  // version.
+ void IterateBody(ObjectVisitor* v);
void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
+ template <typename ObjectVisitor>
+ inline void IterateBodyFast(ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ inline void IterateBodyFast(InstanceType type, int object_size,
+ ObjectVisitor* v);
+
+  // Returns true if the object contains a tagged value at the given offset.
+  // It is used for invalid-slot filtering. If the offset points outside
+ // of the object or to the map word, the result is UNDEFINED (!!!).
+ bool IsValidSlot(int offset);
+
// Returns the heap object's size in bytes
inline int Size();
- // Indicates what type of values this heap object may contain.
- inline HeapObjectContents ContentType();
-
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
// GC internal.
@@ -1590,58 +1651,17 @@ class HeapObject: public Object {
STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
- protected:
- // helpers for calling an ObjectVisitor to iterate over pointers in the
- // half-open range [start, end) specified as integer offsets
- inline void IteratePointers(ObjectVisitor* v, int start, int end);
- // as above, for the single element at "offset"
- inline void IteratePointer(ObjectVisitor* v, int offset);
- // as above, for the next code link of a code object.
- inline void IterateNextCodeLink(ObjectVisitor* v, int offset);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
};
-// This class describes a body of an object of a fixed size
-// in which all pointer fields are located in the [start_offset, end_offset)
-// interval.
-template<int start_offset, int end_offset, int size>
-class FixedBodyDescriptor {
- public:
- static const int kStartOffset = start_offset;
- static const int kEndOffset = end_offset;
- static const int kSize = size;
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor;
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);
- template<typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj) {
- StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
- }
-};
-
-
-// This class describes a body of an object of a variable size
-// in which all pointer fields are located in the [start_offset, object_size)
-// interval.
-template<int start_offset>
-class FlexibleBodyDescriptor {
- public:
- static const int kStartOffset = start_offset;
-
- static inline void IterateBody(HeapObject* obj,
- int object_size,
- ObjectVisitor* v);
-
- template<typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size) {
- StaticVisitor::VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, object_size));
- }
-};
+template <int start_offset>
+class FlexibleBodyDescriptor;
// The HeapNumber class describes heap allocated numbers that cannot be
@@ -1775,10 +1795,26 @@ enum AccessorComponent {
};
+enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
+
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
public:
+ // [properties]: Backing storage for properties.
+ // properties is a FixedArray in the fast case and a Dictionary in the
+ // slow case.
+ DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
+ inline void initialize_properties();
+ inline bool HasFastProperties();
+ // Gets slow properties for non-global objects.
+ inline NameDictionary* property_dictionary();
+
+ // Deletes an existing named property in a normalized object.
+ static void DeleteNormalizedProperty(Handle<JSReceiver> object,
+ Handle<Name> name, int entry);
+
DECLARE_CAST(JSReceiver)
// ES6 section 7.1.1 ToPrimitive
@@ -1788,38 +1824,105 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
+
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(Handle<JSReceiver>,
- Handle<Name> name);
MUST_USE_RESULT static inline Maybe<bool> HasElement(
Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static inline Maybe<bool> HasOwnElement(
- Handle<JSReceiver> object, uint32_t index);
- // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
- MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyOrElement(
+ MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
+ Handle<JSReceiver> object, Handle<Name> name);
+
+ // Implementation of ES6 [[Delete]]
+ MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
+ MUST_USE_RESULT static Maybe<bool> DeleteProperty(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
- LookupIterator* it, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
+ MUST_USE_RESULT static Maybe<bool> DeleteProperty(LookupIterator* it,
+ LanguageMode language_mode);
+ MUST_USE_RESULT static Maybe<bool> DeleteElement(
Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static Object* DefineProperty(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> name,
+ Handle<Object> attributes);
+ MUST_USE_RESULT static MaybeHandle<Object> DefineProperties(
+ Isolate* isolate, Handle<Object> object, Handle<Object> properties);
+
+ // "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ // ES6 7.3.4 (when passed DONT_THROW)
+ MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
+
+ // ES6 9.1.6.1
+ MUST_USE_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ LookupIterator* it, PropertyDescriptor* desc, ShouldThrow should_throw);
+ // ES6 9.1.6.2
+ MUST_USE_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
+ Isolate* isolate, bool extensible, PropertyDescriptor* desc,
+ PropertyDescriptor* current, Handle<Name> property_name,
+ ShouldThrow should_throw);
+ // ES6 9.1.6.3
+ // |it| can be NULL in cases where the ES spec passes |undefined| as the
+ // receiver. Exactly one of |it| and |property_name| must be provided.
+ MUST_USE_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
+ Isolate* isolate, LookupIterator* it, bool extensible,
+ PropertyDescriptor* desc, PropertyDescriptor* current,
+ ShouldThrow should_throw, Handle<Name> property_name = Handle<Name>());
+
+ MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
+ PropertyDescriptor* desc);
+ MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ LookupIterator* it, PropertyDescriptor* desc);
+
+ typedef PropertyAttributes IntegrityLevel;
+
+ // ES6 7.3.14 (when passed DONT_THROW)
+ // 'level' must be SEALED or FROZEN.
+ MUST_USE_RESULT static Maybe<bool> SetIntegrityLevel(
+ Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
+
+ // ES6 7.3.15
+ // 'level' must be SEALED or FROZEN.
+ MUST_USE_RESULT static Maybe<bool> TestIntegrityLevel(
+ Handle<JSReceiver> object, IntegrityLevel lvl);
+
+ // ES6 [[PreventExtensions]] (when passed DONT_THROW)
+ MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSReceiver> object, ShouldThrow should_throw);
+
+ MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSReceiver> object);
+
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
// Returns the class name ([[Class]] property in the specification).
String* class_name();
+ // Returns the builtin string tag used in Object.prototype.toString.
+ MUST_USE_RESULT static MaybeHandle<String> BuiltinStringTag(
+ Handle<JSReceiver> object);
+
  // Returns the constructor name (the name, possibly inferred, of the
// function that was used to instantiate the object).
- String* constructor_name();
+ static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
+
+ Context* GetCreationContext();
MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name);
@@ -1834,6 +1937,12 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
LookupIterator* it);
+ // Set the object's prototype (only JSReceiver and null are allowed values).
+ MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSReceiver> object,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw);
+
static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
Handle<Name> name);
@@ -1851,11 +1960,22 @@ class JSReceiver: public HeapObject {
enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
+ // ES6 [[OwnPropertyKeys]] (modulo return type)
+ MUST_USE_RESULT static MaybeHandle<FixedArray> OwnPropertyKeys(
+ Handle<JSReceiver> object) {
+ return GetKeys(object, JSReceiver::OWN_ONLY, ALL_PROPERTIES,
+ CONVERT_TO_STRING);
+ }
+
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
- Handle<JSReceiver> object,
- KeyCollectionType type);
+ Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
+ GetKeysConversion keys_conversion = KEEP_NUMBERS);
+
+ // Layout description.
+ static const int kPropertiesOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
@@ -1868,17 +1988,15 @@ class JSReceiver: public HeapObject {
// caching.
class JSObject: public JSReceiver {
public:
- // [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case and a Dictionary in the
- // slow case.
- DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
- inline void initialize_properties();
- inline bool HasFastProperties();
- // Gets slow properties for non-global objects.
- inline NameDictionary* property_dictionary();
+ static MUST_USE_RESULT MaybeHandle<JSObject> New(
+ Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site = Handle<AllocationSite>::null());
+
// Gets global object properties.
inline GlobalDictionary* global_dictionary();
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
+
// [elements]: The elements (properties with names that are integers).
//
// Elements can be in two general modes: fast and slow. Each mode
@@ -1954,7 +2072,7 @@ class JSObject: public JSReceiver {
static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object,
uint32_t limit);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithInterceptor(
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithInterceptor(
LookupIterator* it, Handle<Object> value);
// SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to
@@ -1965,6 +2083,11 @@ class JSObject: public JSReceiver {
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ MUST_USE_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
+ LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+ ShouldThrow should_throw,
+ ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+
MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes,
@@ -1991,6 +2114,9 @@ class JSObject: public JSReceiver {
static void AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, PropertyAttributes attributes);
+ MUST_USE_RESULT static Maybe<bool> AddDataElement(
+ Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, ShouldThrow should_throw);
MUST_USE_RESULT static MaybeHandle<Object> AddDataElement(
Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
PropertyAttributes attributes);
@@ -2024,6 +2150,9 @@ class JSObject: public JSReceiver {
PrototypeOptimizationMode mode);
static void ReoptimizeIfPrototype(Handle<JSObject> object);
static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
+ static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate);
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void InvalidatePrototypeChains(Map* map);
@@ -2035,7 +2164,7 @@ class JSObject: public JSReceiver {
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
- InterceptorInfo* GetIndexedInterceptor();
+ inline InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
MUST_USE_RESULT static Maybe<PropertyAttributes>
@@ -2057,6 +2186,10 @@ class JSObject: public JSReceiver {
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes);
+ static MaybeHandle<Object> DefineAccessor(LookupIterator* it,
+ Handle<Object> getter,
+ Handle<Object> setter,
+ PropertyAttributes attributes);
// Defines an AccessorInfo property on the given object.
MUST_USE_RESULT static MaybeHandle<Object> SetAccessor(
@@ -2138,15 +2271,6 @@ class JSObject: public JSReceiver {
inline bool HasNamedInterceptor();
inline bool HasIndexedInterceptor();
- // Computes the enumerable keys from interceptors. Used for debug mirrors and
- // by JSReceiver::GetKeys.
- MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForNamedInterceptor(
- Handle<JSObject> object,
- Handle<JSReceiver> receiver);
- MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForIndexedInterceptor(
- Handle<JSObject> object,
- Handle<JSReceiver> receiver);
-
// Support functions for v8 api (needed for correct interceptor behavior).
MUST_USE_RESULT static Maybe<bool> HasRealNamedProperty(
Handle<JSObject> object, Handle<Name> name);
@@ -2157,36 +2281,31 @@ class JSObject: public JSReceiver {
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
+ static inline int GetHeaderSize(InstanceType instance_type);
inline int GetHeaderSize();
+ static inline int GetInternalFieldCount(Map* map);
inline int GetInternalFieldCount();
inline int GetInternalFieldOffset(int index);
inline Object* GetInternalField(int index);
inline void SetInternalField(int index, Object* value);
inline void SetInternalField(int index, Smi* value);
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- int NumberOfOwnProperties(PropertyAttributes filter = NONE);
- // Fill in details for properties into storage starting at the specified
- // index. Returns the number of properties added.
- int GetOwnPropertyNames(FixedArray* storage, int index,
- PropertyAttributes filter = NONE);
+ void CollectOwnPropertyNames(KeyAccumulator* keys,
+ PropertyFilter filter = ALL_PROPERTIES);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
- int NumberOfOwnElements(PropertyAttributes filter);
- // Returns the number of enumerable elements (ignoring interceptors).
- int NumberOfEnumElements();
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int NumberOfOwnElements(PropertyFilter filter);
// Returns the number of elements on this object filtering out elements
// with the specified attributes (ignoring interceptors).
- int GetOwnElementKeys(FixedArray* storage, PropertyAttributes filter);
- // Count and fill in the enumerable elements into storage.
- // (storage->length() == NumberOfEnumElements()).
- // If storage is NULL, will count the elements without adding
- // them to any storage.
- // Returns the number of enumerable elements.
- int GetEnumElementKeys(FixedArray* storage);
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int GetOwnElementKeys(FixedArray* storage, PropertyFilter filter);
+
+ static void CollectOwnElementKeys(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ PropertyFilter filter);
static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result);
@@ -2247,32 +2366,26 @@ class JSObject: public JSReceiver {
= UPDATE_WRITE_BARRIER);
// Set the object's prototype (only JSReceiver and null are allowed values).
- MUST_USE_RESULT static MaybeHandle<Object> SetPrototype(
- Handle<JSObject> object, Handle<Object> value, bool from_javascript);
+ MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSObject> object,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw);
- // Initializes the body after properties slot, properties slot is
- // initialized by set_properties. Fill the pre-allocated fields with
+  // Initializes the body starting at |start_offset|. It is the responsibility of
+  // the caller to initialize the object header. Fill the pre-allocated fields with
// pre_allocated_value and the rest with filler_value.
  // Note: this call does not update the write barrier; the caller is responsible
  // for ensuring that |filler_value| can be collected without a write barrier here.
- inline void InitializeBody(Map* map,
- Object* pre_allocated_value,
- Object* filler_value);
+ inline void InitializeBody(Map* map, int start_offset,
+ Object* pre_allocated_value, Object* filler_value);
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Disalow further properties to be added to the oject.
- MUST_USE_RESULT static MaybeHandle<Object> PreventExtensions(
- Handle<JSObject> object);
+ MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSObject> object, ShouldThrow should_throw);
- bool IsExtensible();
-
- // ES5 Object.seal
- MUST_USE_RESULT static MaybeHandle<Object> Seal(Handle<JSObject> object);
-
- // ES5 Object.freeze
- MUST_USE_RESULT static MaybeHandle<Object> Freeze(Handle<JSObject> object);
+ static bool IsExtensible(Handle<JSObject> object);
// Called the first time an object is observed with ES7 Object.observe.
static void SetObserved(Handle<JSObject> object);
@@ -2359,10 +2472,6 @@ class JSObject: public JSReceiver {
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // Note that Page::kMaxRegularHeapObjectSize puts a limit on
- // permissible values (see the DCHECK in heap.cc).
- static const int kInitialMaxFastElementArray = 100000;
-
// This constant applies only to the initial map of "global.Object" and
// not to arbitrary other JSObject maps.
static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
@@ -2374,18 +2483,12 @@ class JSObject: public JSReceiver {
static const int kFieldsAdded = 3;
// Layout description.
- static const int kPropertiesOffset = HeapObject::kHeaderSize;
- static const int kElementsOffset = kPropertiesOffset + kPointerSize;
+ static const int kElementsOffset = JSReceiver::kHeaderSize;
static const int kHeaderSize = kElementsOffset + kPointerSize;
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
- class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
- };
-
- Context* GetCreationContext();
+ typedef FlexibleBodyDescriptor<JSReceiver::kPropertiesOffset> BodyDescriptor;
// Enqueue change record for Object.observe. May cause GC.
MUST_USE_RESULT static MaybeHandle<Object> EnqueueChangeRecord(
@@ -2395,10 +2498,6 @@ class JSObject: public JSReceiver {
// Gets the number of currently used elements.
int GetFastElementsUsage();
- // Deletes an existing named property in a normalized object.
- static void DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name, int entry);
-
static bool AllCanRead(LookupIterator* it);
static bool AllCanWrite(LookupIterator* it);
@@ -2415,8 +2514,8 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
LookupIterator* it);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
- LookupIterator* it, Handle<Object> value);
+ MUST_USE_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
// Add a property to a slow-case object.
static void AddSlowProperty(Handle<JSObject> object,
@@ -2424,7 +2523,7 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
- MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
+ MUST_USE_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
LookupIterator* it);
bool ReferencesObjectFromElements(FixedArray* elements,
@@ -2456,8 +2555,12 @@ class JSObject: public JSReceiver {
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
template <PropertyAttributes attrs>
- MUST_USE_RESULT static MaybeHandle<Object> PreventExtensionsWithTransition(
- Handle<JSObject> object);
+ MUST_USE_RESULT static Maybe<bool> PreventExtensionsWithTransition(
+ Handle<JSObject> object, ShouldThrow should_throw);
+
+ MUST_USE_RESULT static Maybe<bool> SetPrototypeUnobserved(
+ Handle<JSObject> object, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2519,8 +2622,6 @@ class FixedArray: public FixedArrayBase {
// Shrink length and insert filler objects.
void Shrink(int length);
- enum KeyFilter { ALL_KEYS, NON_SYMBOL_KEYS };
-
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -2560,10 +2661,7 @@ class FixedArray: public FixedArrayBase {
// object, the prefix of this array is sorted.
void SortPairs(FixedArray* numbers, uint32_t len);
- class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
- };
+ typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
protected:
// Set operation on FixedArray without using write barriers. Can
@@ -2572,13 +2670,6 @@ class FixedArray: public FixedArrayBase {
int index,
Object* value);
- // Set operation on FixedArray without incremental write barrier. Can
- // only be used if the object is guaranteed to be white (whiteness witness
- // is present).
- static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
- int index,
- Object* value);
-
private:
STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
@@ -2717,6 +2808,7 @@ class ArrayList : public FixedArray {
inline Object** Slot(int index);
inline void Set(int index, Object* obj);
inline void Clear(int index, Object* undefined);
+ bool IsFull();
DECLARE_CAST(ArrayList)
private:
@@ -2768,9 +2860,9 @@ class DescriptorArray: public FixedArray {
// Initialize or change the enum cache,
// using the supplied storage for the small "bridge".
- void SetEnumCache(FixedArray* bridge_storage,
- FixedArray* new_cache,
- Object* new_index_cache);
+ static void SetEnumCache(Handle<DescriptorArray> descriptors,
+ Isolate* isolate, Handle<FixedArray> new_cache,
+ Handle<FixedArray> new_index_cache);
bool CanHoldValue(int descriptor, Object* value);
@@ -2827,6 +2919,8 @@ class DescriptorArray: public FixedArray {
// necessary.
INLINE(int SearchWithCache(Name* name, Map* map));
+ bool IsEqualUpTo(DescriptorArray* desc, int nof_descriptors);
+
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
static Handle<DescriptorArray> Allocate(Isolate* isolate,
@@ -2887,23 +2981,6 @@ class DescriptorArray: public FixedArray {
}
private:
- // WhitenessWitness is used to prove that a descriptor array is white
- // (unmarked), so incremental write barriers can be skipped because the
- // marking invariant cannot be broken and slots pointing into evacuation
- // candidates will be discovered when the object is scanned. A witness is
- // always stack-allocated right after creating an array. By allocating a
- // witness, incremental marking is globally disabled. The witness is then
- // passed along wherever needed to statically prove that the array is known to
- // be white.
- class WhitenessWitness {
- public:
- inline explicit WhitenessWitness(DescriptorArray* array);
- inline ~WhitenessWitness();
-
- private:
- IncrementalMarking* marking_;
- };
-
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
public:
@@ -2939,11 +3016,9 @@ class DescriptorArray: public FixedArray {
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
- void CopyFrom(int index, DescriptorArray* src, const WhitenessWitness&);
+ void CopyFrom(int index, DescriptorArray* src);
- inline void Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&);
+ inline void SetDescriptor(int descriptor_number, Descriptor* desc);
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
@@ -3146,6 +3221,9 @@ class HashTable : public HashTableBase {
Key key,
PretenureFlag pretenure = NOT_TENURED);
+ // Returns true if this table has sufficient capacity for adding n elements.
+ bool HasSufficientCapacity(int n);
+
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
@@ -3301,12 +3379,13 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
- int NumberOfElementsFilterAttributes(PropertyAttributes filter);
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int NumberOfElementsFilterAttributes(PropertyFilter filter);
// Returns the number of enumerable elements in the dictionary.
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
int NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
+ return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
}
// Returns true if the dictionary contains any elements that are non-writable,
@@ -3317,8 +3396,13 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Fill in details for properties into storage.
// Returns the number of properties added.
- int CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int CopyKeysTo(FixedArray* storage, int index, PropertyFilter filter,
SortMode sort_mode);
+ // Collect the keys into the given KeyAccumulator, in ascending chronological
+ // order of property creation.
+ static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key> > dictionary,
+ KeyAccumulator* keys, PropertyFilter filter);
// Copies enumerable keys to preallocated fixed array.
void CopyEnumKeysTo(FixedArray* storage);
@@ -3339,6 +3423,9 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
int at_least_space_for,
PretenureFlag pretenure = NOT_TENURED);
+ // Ensures that a new dictionary is created when the capacity is checked.
+ void SetRequiresCopyOnCapacityChange();
+
// Ensure enough space for n additional elements.
static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
@@ -3948,6 +4035,9 @@ class ScopeInfo : public FixedArray {
// or context-allocated?
bool HasAllocatedReceiver();
+ // Does this scope declare a "new.target" binding?
+ bool HasNewTarget();
+
// Is this scope the scope of a named function expression?
bool HasFunctionName();
@@ -4156,9 +4246,10 @@ class ScopeInfo : public FixedArray {
class ReceiverVariableField
: public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
2> {};
+ class HasNewTargetField
+ : public BitField<bool, ReceiverVariableField::kNext, 1> {};
class FunctionVariableField
- : public BitField<VariableAllocationInfo, ReceiverVariableField::kNext,
- 2> {};
+ : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
class FunctionVariableMode
: public BitField<VariableMode, FunctionVariableField::kNext, 3> {};
class AsmModuleField : public BitField<bool, FunctionVariableMode::kNext, 1> {
@@ -4293,7 +4384,6 @@ class BytecodeArray : public FixedArrayBase {
// Dispatched behavior.
inline int BytecodeArraySize();
- inline void BytecodeArrayIterateBody(ObjectVisitor* v);
DECLARE_PRINTER(BytecodeArray)
DECLARE_VERIFIER(BytecodeArray)
@@ -4313,6 +4403,8 @@ class BytecodeArray : public FixedArrayBase {
// Maximal length of a single BytecodeArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
};
@@ -4336,7 +4428,6 @@ class FreeSpace: public HeapObject {
// Accessors for the next field.
inline FreeSpace* next();
- inline FreeSpace** next_address();
inline void set_next(FreeSpace* next);
inline static FreeSpace* cast(HeapObject* obj);
@@ -4379,11 +4470,6 @@ class FixedTypedArrayBase: public FixedArrayBase {
DECL_ACCESSORS(external_pointer, void)
// Dispatched behavior.
- inline void FixedTypedArrayBaseIterateBody(ObjectVisitor* v);
-
- template <typename StaticVisitor>
- inline void FixedTypedArrayBaseIterateBody();
-
DECLARE_CAST(FixedTypedArrayBase)
static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
@@ -4393,6 +4479,8 @@ class FixedTypedArrayBase: public FixedArrayBase {
static const int kDataOffset = kHeaderSize;
+ class BodyDescriptor;
+
inline int size();
static inline int TypedArraySize(InstanceType type, int length);
@@ -4705,9 +4793,9 @@ class Code: public HeapObject {
NUMBER_OF_KINDS
};
- // No more than 16 kinds. The value is currently encoded in four bits in
+ // No more than 32 kinds. The value is currently encoded in five bits in
// Flags.
- STATIC_ASSERT(NUMBER_OF_KINDS <= 16);
+ STATIC_ASSERT(NUMBER_OF_KINDS <= 32);
static const char* Kind2String(Kind kind);
@@ -4809,6 +4897,7 @@ class Code: public HeapObject {
inline bool is_to_boolean_ic_stub();
inline bool is_keyed_stub();
inline bool is_optimized_code();
+ inline bool is_interpreter_entry_trampoline();
inline bool embeds_maps_weakly();
inline bool IsCodeStubOrIC();
@@ -4890,12 +4979,6 @@ class Code: public HeapObject {
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline uint16_t to_boolean_state();
- // [has_function_cache]: For kind STUB tells whether there is a function
- // cache is passed to the stub.
- inline bool has_function_cache();
- inline void set_has_function_cache(bool flag);
-
-
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
inline bool marked_for_deoptimization();
@@ -5020,10 +5103,6 @@ class Code: public HeapObject {
// Dispatched behavior.
inline int CodeSize();
- inline void CodeIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void CodeIterateBody(Heap* heap);
DECLARE_PRINTER(Code)
DECLARE_VERIFIER(Code)
@@ -5122,6 +5201,8 @@ class Code: public HeapObject {
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
+ class BodyDescriptor;
+
// Byte offsets within kKindSpecificFlags1Offset.
static const int kFullCodeFlags = kKindSpecificFlags1Offset;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -5133,19 +5214,18 @@ class Code: public HeapObject {
class ProfilerTicksField : public BitField<int, 4, 28> {};
// Flags layout. BitField<type, shift, size>.
- class ICStateField : public BitField<InlineCacheState, 0, 4> {};
- class TypeField : public BitField<StubType, 4, 1> {};
- class CacheHolderField : public BitField<CacheHolderFlag, 5, 2> {};
- class KindField : public BitField<Kind, 7, 4> {};
+ class ICStateField : public BitField<InlineCacheState, 0, 3> {};
+ class TypeField : public BitField<StubType, 3, 1> {};
+ class CacheHolderField : public BitField<CacheHolderFlag, 4, 2> {};
+ class KindField : public BitField<Kind, 6, 5> {};
class ExtraICStateField: public BitField<ExtraICState, 11,
PlatformSmiTagging::kSmiValueSize - 11 + 1> {}; // NOLINT
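  // Worked example for the widened layout above (values chosen for
  // illustration): the kind now occupies bits 6..10, and flag words are
  // assembled and inspected via the BitField helpers:
  //
  //   uint32_t flags = ICStateField::encode(MONOMORPHIC) |
  //                    CacheHolderField::encode(kCacheOnReceiver) |
  //                    KindField::encode(STUB);
  //   Kind kind = KindField::decode(flags);  // reads bits 6..10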
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
static const int kStackSlotsBitCount = 24;
- static const int kHasFunctionCacheBit =
+ static const int kMarkedForDeoptimizationBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
static const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1;
static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
@@ -5154,10 +5234,8 @@ class Code: public HeapObject {
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
- class HasFunctionCacheField : public BitField<bool, kHasFunctionCacheBit, 1> {
- }; // NOLINT
class MarkedForDeoptimizationField
- : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
+ : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
}; // NOLINT
class CanHaveWeakObjectsField
@@ -5217,24 +5295,24 @@ class Code: public HeapObject {
};
-// This class describes the layout of dependent codes array of a map. The
-// array is partitioned into several groups of dependent codes. Each group
-// contains codes with the same dependency on the map. The array has the
-// following layout for n dependency groups:
-//
-// +----+----+-----+----+---------+----------+-----+---------+-----------+
-// | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined |
-// +----+----+-----+----+---------+----------+-----+---------+-----------+
+// Dependent code is a singly linked list of fixed arrays. Each array contains
+// code objects in weak cells for one dependency group. The suffix of the array
+// can be filled with the undefined value if the number of codes is less than
+// the length of the array.
//
-// The first n elements are Smis, each of them specifies the number of codes
-// in the corresponding group. The subsequent elements contain grouped code
-// objects in weak cells. The suffix of the array can be filled with the
-// undefined value if the number of codes is less than the length of the
-// array. The order of the code objects within a group is not preserved.
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// |
+// V
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// |
+// V
+// empty_fixed_array()
//
-// All code indexes used in the class are counted starting from the first
-// code object of the first group. In other words, code index 0 corresponds
-// to array index n = kCodesStartIndex.
+// The list of fixed arrays is ordered by dependency groups.
class DependentCode: public FixedArray {
public:
@@ -5269,19 +5347,8 @@ class DependentCode: public FixedArray {
static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
- // Array for holding the index of the first code object of each group.
- // The last element stores the total number of code objects.
- class GroupStartIndexes {
- public:
- explicit GroupStartIndexes(DependentCode* entries);
- void Recompute(DependentCode* entries);
- int at(int i) { return start_indexes_[i]; }
- int number_of_entries() { return start_indexes_[kGroupCount]; }
- private:
- int start_indexes_[kGroupCount + 1];
- };
-
bool Contains(DependencyGroup group, WeakCell* code_cell);
+ bool IsEmpty(DependencyGroup group);
static Handle<DependentCode> InsertCompilationDependencies(
Handle<DependentCode> entries, DependencyGroup group,
@@ -5305,8 +5372,12 @@ class DependentCode: public FixedArray {
// The following low-level accessors should only be used by this class
// and the mark compact collector.
- inline int number_of_entries(DependencyGroup group);
- inline void set_number_of_entries(DependencyGroup group, int value);
+ inline DependentCode* next_link();
+ inline void set_next_link(DependentCode* next);
+ inline int count();
+ inline void set_count(int value);
+ inline DependencyGroup group();
+ inline void set_group(DependencyGroup group);
inline Object* object_at(int i);
inline void set_object_at(int i, Object* object);
inline void clear_at(int i);
@@ -5320,10 +5391,9 @@ class DependentCode: public FixedArray {
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Object> object);
+ static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
+ Handle<DependentCode> next);
static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
- // Make a room at the end of the given group by moving out the first
- // code objects of the subsequent groups.
- inline void ExtendGroup(DependencyGroup group);
// Compact by removing cleared weak cells and return true if there was
// any cleared weak cell.
bool Compact();
@@ -5331,7 +5401,14 @@ class DependentCode: public FixedArray {
if (number_of_entries < 5) return number_of_entries + 1;
return number_of_entries * 5 / 4;
}
- static const int kCodesStartIndex = kGroupCount;
+ inline int flags();
+ inline void set_flags(int flags);
+ class GroupField : public BitField<int, 0, 3> {};
+ class CountField : public BitField<int, 3, 27> {};
+ STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
+ static const int kNextLinkIndex = 0;
+ static const int kFlagsIndex = 1;
+ static const int kCodesStartIndex = 2;
};
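// Reading the flags word of a DependentCode array (an illustrative sketch of
// what the group()/count() accessors do internally): GroupField and
// CountField pack both values into the Smi stored at kFlagsIndex.
//
//   int flags = entries->flags();
//   DependencyGroup group =
//       static_cast<DependencyGroup>(GroupField::decode(flags));
//   int count = CountField::decode(flags);
//   // Code slots occupy indices [kCodesStartIndex, kCodesStartIndex + count).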
@@ -5367,6 +5444,8 @@ class Map: public HeapObject {
static const int kNoConstructorFunctionIndex = 0;
inline int GetConstructorFunctionIndex();
inline void SetConstructorFunctionIndex(int value);
+ static MaybeHandle<JSFunction> GetConstructorFunction(
+ Handle<Map> map, Handle<Context> native_context);
// Instance type.
inline InstanceType instance_type();
@@ -5401,18 +5480,66 @@ class Map: public HeapObject {
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
class IsStrong : public BitField<bool, 26, 1> {};
- // Bit 27 is free.
+ class NewTargetIsBase : public BitField<bool, 27, 1> {};
+ // Bit 28 is free.
// Keep this bit field at the very end for better code in
// Builtins::kJSConstructStubGeneric stub.
- // This counter is used for in-object slack tracking and for map aging.
+ // This counter is used for in-object slack tracking.
// The in-object slack tracking is considered enabled when the counter is
- // in the range [kSlackTrackingCounterStart, kSlackTrackingCounterEnd].
- class Counter : public BitField<int, 28, 4> {};
- static const int kSlackTrackingCounterStart = 14;
- static const int kSlackTrackingCounterEnd = 8;
- static const int kRetainingCounterStart = kSlackTrackingCounterEnd - 1;
- static const int kRetainingCounterEnd = 0;
+ // non-zero.
+ class ConstructionCounter : public BitField<int, 29, 3> {};
+ static const int kSlackTrackingCounterStart = 7;
+ static const int kSlackTrackingCounterEnd = 1;
+ static const int kNoSlackTracking = 0;
+ STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounter::kMax);
+
+
+ // Inobject slack tracking is the way to reclaim unused inobject space.
+ //
+ // The instance size is initially determined by adding some slack to
+ // expected_nof_properties (to allow for a few extra properties added
+ // after the constructor). There is no guarantee that the extra space
+ // will not be wasted.
+ //
+ // Here is the algorithm to reclaim the unused inobject space:
+ // - Detect the first constructor call for this JSFunction.
+ // When it happens enter the "in progress" state: initialize construction
+ // counter in the initial_map.
+ // - While the tracking is in progress, initialize unused properties of a new
+ // object with one_pointer_filler_map instead of undefined_value (the "used"
+ // part is initialized with undefined_value as usual). This way they can
+ // be resized quickly and safely.
+ // - Once enough objects have been created compute the 'slack'
+ // (traverse the map transition tree starting from the
+ // initial_map and find the lowest value of unused_property_fields).
+ // - Traverse the transition tree again and decrease the instance size
+ // of every map. Existing objects will resize automatically (they are
+ // filled with one_pointer_filler_map). All further allocations will
+ // use the adjusted instance size.
+ // - SharedFunctionInfo's expected_nof_properties is left unmodified since
+ // allocations made using different closures could actually create different
+ // kinds of objects (see prototype inheritance pattern).
+ //
+ // Important: inobject slack tracking is not attempted during the snapshot
+ // creation.
+
+ static const int kGenerousAllocationCount =
+ kSlackTrackingCounterStart - kSlackTrackingCounterEnd + 1;
+
+ // Starts the tracking by initializing the object-construction countdown counter.
+ void StartInobjectSlackTracking();
+
+ // True if the object-construction countdown counter is in the range
+ // [kSlackTrackingCounterEnd, kSlackTrackingCounterStart].
+ inline bool IsInobjectSlackTrackingInProgress();
+
+ // Does the tracking step.
+ inline void InobjectSlackTrackingStep();
+
+ // Completes inobject slack tracking for the transition tree starting at this
+ // initial map.
+ void CompleteInobjectSlackTracking();
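  // A minimal sketch of the countdown described above (the real definitions
  // live elsewhere; this only illustrates the intended control flow):
  //
  //   void Map::InobjectSlackTrackingStep() {
  //     if (!IsInobjectSlackTrackingInProgress()) return;
  //     int counter = construction_counter();
  //     set_construction_counter(counter - 1);
  //     if (counter == kSlackTrackingCounterEnd) {
  //       CompleteInobjectSlackTracking();  // shrink maps in the tree
  //     }
  //   }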
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5424,7 +5551,7 @@ class Map: public HeapObject {
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
- inline void set_is_constructor(bool value);
+ inline void set_is_constructor();
inline bool is_constructor() const;
// Tells whether the instance with this map should be ignored by the
@@ -5460,6 +5587,8 @@ class Map: public HeapObject {
inline void set_is_strong();
inline bool is_strong();
+ inline void set_new_target_is_base(bool value);
+ inline bool new_target_is_base();
inline void set_is_extensible(bool value);
inline bool is_extensible();
inline void set_is_prototype_map(bool value);
@@ -5550,10 +5679,6 @@ class Map: public HeapObject {
static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
const char* reason);
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- String* constructor_name();
-
// Tells whether the map is used for JSObjects in dictionary mode (ie
// normalized objects, ie objects for which HasFastProperties returns false).
// A map can never be used for both dictionary mode and fast mode JSObjects.
@@ -5636,8 +5761,8 @@ class Map: public HeapObject {
inline bool is_stable();
inline void set_migration_target(bool value);
inline bool is_migration_target();
- inline void set_counter(int value);
- inline int counter();
+ inline void set_construction_counter(int value);
+ inline int construction_counter();
inline void deprecate();
inline bool is_deprecated();
inline bool CanBeDeprecated();
@@ -5653,6 +5778,10 @@ class Map: public HeapObject {
// gathering type feedback. Use TryUpdate in those cases instead.
static Handle<Map> Update(Handle<Map> map);
+ static inline Handle<Map> CopyInitialMap(Handle<Map> map);
+ static Handle<Map> CopyInitialMap(Handle<Map> map, int instance_size,
+ int in_object_properties,
+ int unused_property_fields);
static Handle<Map> CopyDropDescriptors(Handle<Map> map);
static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
Descriptor* descriptor,
@@ -5684,6 +5813,11 @@ class Map: public HeapObject {
ElementsKind kind,
TransitionFlag flag);
+ static Handle<Map> AsLanguageMode(Handle<Map> initial_map,
+ LanguageMode language_mode,
+ FunctionKind kind);
+
+
static Handle<Map> CopyForObserved(Handle<Map> map);
static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
@@ -5729,7 +5863,7 @@ class Map: public HeapObject {
// Returns the number of properties described in instance_descriptors
// filtering out properties with the specified attributes.
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
- PropertyAttributes filter = NONE);
+ PropertyFilter filter = ALL_PROPERTIES);
DECLARE_CAST(Map)
@@ -5779,7 +5913,9 @@ class Map: public HeapObject {
inline bool CanTransition();
+ inline bool IsBooleanMap();
inline bool IsPrimitiveMap();
+ inline bool IsJSReceiverMap();
inline bool IsJSObjectMap();
inline bool IsJSArrayMap();
inline bool IsJSFunctionMap();
@@ -5787,7 +5923,8 @@ class Map: public HeapObject {
inline bool IsJSProxyMap();
inline bool IsJSGlobalProxyMap();
inline bool IsJSGlobalObjectMap();
- inline bool IsGlobalObjectMap();
+ inline bool IsJSTypedArrayMap();
+ inline bool IsJSDataViewMap();
inline bool CanOmitMapChecks();
@@ -5926,9 +6063,9 @@ class Map: public HeapObject {
static void TraceAllTransitions(Map* map);
#endif
- static inline Handle<Map> CopyInstallDescriptorsForTesting(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor);
+ static inline Handle<Map> AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
private:
static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
@@ -5939,9 +6076,13 @@ class Map: public HeapObject {
static Handle<Map> ShareDescriptor(Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor);
- static Handle<Map> CopyInstallDescriptors(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor);
+ static Handle<Map> AddMissingTransitions(
+ Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
+ static void InstallDescriptors(
+ Handle<Map> parent_map, Handle<Map> child_map, int new_descriptor,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
static Handle<Map> CopyAddDescriptor(Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag);
@@ -5969,10 +6110,10 @@ class Map: public HeapObject {
inline void NotifyLeafMapLayoutChange();
void DeprecateTransitionTree();
- bool DeprecateTarget(PropertyKind kind, Name* key,
- PropertyAttributes attributes,
- DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor);
+
+ void ReplaceDescriptors(DescriptorArray* new_descriptors,
+ LayoutDescriptor* new_layout_descriptor);
+
Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
@@ -6052,8 +6193,6 @@ class PrototypeInfo : public Struct {
// given receiver embed the currently valid cell for that receiver's prototype
// during their compilation and check it on execution.
DECL_ACCESSORS(validity_cell, Object)
- // [constructor_name]: User-friendly name of the original constructor.
- DECL_ACCESSORS(constructor_name, Object)
DECLARE_CAST(PrototypeInfo)
@@ -6345,8 +6484,8 @@ class SharedFunctionInfo: public HeapObject {
inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
- // and a shared literals array or Smi(0) if none.
- DECL_ACCESSORS(optimized_code_map, Object)
+ // and a shared literals array.
+ DECL_ACCESSORS(optimized_code_map, FixedArray)
// Returns entry from optimized code map for specified context and OSR entry.
// Note that {code == nullptr, literals == nullptr} indicates no matching
@@ -6358,6 +6497,11 @@ class SharedFunctionInfo: public HeapObject {
// Clear optimized code map.
void ClearOptimizedCodeMap();
+ // We have a special root FixedArray with the right shape and values
+ // to represent the cleared optimized code map. This predicate checks
+ // if that root is installed.
+ inline bool OptimizedCodeMapIsCleared() const;
+
// Removes a specific optimized code object from the optimized code map.
// In case of non-OSR the code reference is cleared from the cache entry but
// the entry itself is left in the map in order to proceed sharing literals.
@@ -6371,13 +6515,17 @@ class SharedFunctionInfo: public HeapObject {
Handle<Code> code);
// Add a new entry to the optimized code map for context-dependent code.
- // |code| is either a code object or an undefined value. In the latter case
- // the entry just maps |native_context, osr_ast_id| pair to |literals| array.
- static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<HeapObject> code,
- Handle<LiteralsArray> literals,
- BailoutId osr_ast_id);
+ inline static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<Code> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id);
+
+ // We may already have cached the code, but want to store literals in the
+ // cache.
+ inline static void AddLiteralsToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<LiteralsArray> literals);
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
@@ -6385,9 +6533,8 @@ class SharedFunctionInfo: public HeapObject {
Handle<Object> script_object);
// Layout description of the optimized code map.
- static const int kNextMapIndex = 0;
- static const int kSharedCodeIndex = 1;
- static const int kEntriesStart = 2;
+ static const int kSharedCodeIndex = 0;
+ static const int kEntriesStart = 1;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
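  // Entry lookup sketch for the layout above (`code_map` names the
  // optimized_code_map array, and kEntryLength, the per-entry stride, is
  // assumed here for illustration):
  //
  //   int base = kEntriesStart + i * kEntryLength;
  //   Object* context  = code_map->get(base + kContextOffset);
  //   Object* code     = code_map->get(base + kCachedCodeOffset);
  //   Object* literals = code_map->get(base + kLiteralsOffset);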
@@ -6579,10 +6726,6 @@ class SharedFunctionInfo: public HeapObject {
// see a binding for it.
DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
- // Indicates whether the function is a bound function created using
- // the bind function.
- DECL_BOOLEAN_ACCESSORS(bound)
-
// Indicates that the function is anonymous (the name field can be set
// through the API, which does not change this flag).
DECL_BOOLEAN_ACCESSORS(is_anonymous)
@@ -6683,12 +6826,9 @@ class SharedFunctionInfo: public HeapObject {
// Source size of this function.
int SourceSize();
- // Calculate the instance size.
- int CalculateInstanceSize();
-
- // Calculate the number of in-object properties.
- int CalculateInObjectProperties();
-
+ // Returns `false` if formal parameters include rest parameters, optional
+ // parameters, or destructuring parameters.
+ // TODO(caitp): make this a flag set during parsing
inline bool has_simple_parameters();
// Initialize a SharedFunctionInfo from a parsed function literal.
@@ -6701,9 +6841,7 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
- // Iterate over all shared function infos that are created from a script.
- // That excludes shared function infos created for API functions and C++
- // builtins.
+ // Iterate over all shared function infos.
class Iterator {
public:
explicit Iterator(Isolate* isolate);
@@ -6776,15 +6914,14 @@ class SharedFunctionInfo: public HeapObject {
// Total size.
static const int kSize = kProfilerTicksOffset + kPointerSize;
#else
- // The only reason to use smi fields instead of int fields
- // is to allow iteration without maps decoding during
- // garbage collections.
- // To avoid wasting space on 64-bit architectures we use
- // the following trick: we group integer fields into pairs
-// The least significant integer in each pair is shifted left by 1.
-// By doing this we guarantee that LSB of each kPointerSize aligned
-// word is not set and thus this word cannot be treated as pointer
-// to HeapObject during old space traversal.
+// The only reason to use smi fields instead of int fields is to allow
+// iteration without maps decoding during garbage collections.
+// To avoid wasting space on 64-bit architectures we use the following trick:
+// we group integer fields into pairs.
+// The least significant integer in each pair is shifted left by 1. By doing
+// this we guarantee that LSB of each kPointerSize aligned word is not set and
+// thus this word cannot be treated as pointer to HeapObject during old space
+// traversal.
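// Worked example of the pairing trick (values illustrative): storing 3 into
// the least significant field of a pair writes 3 << 1 == 6, so the LSB of the
// aligned word stays clear and old-space traversal cannot mistake the word
// for a tagged HeapObject pointer:
//
//   field = value << 1;   // store: low bit guaranteed zero
//   value = field >> 1;   // load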
#if V8_TARGET_LITTLE_ENDIAN
static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
static const int kFormalParameterCountOffset =
@@ -6866,6 +7003,7 @@ class SharedFunctionInfo: public HeapObject {
// Bit positions in compiler_hints.
enum CompilerHints {
+ // byte 0
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kOptimizationDisabled,
@@ -6874,29 +7012,47 @@ class SharedFunctionInfo: public HeapObject {
kStrongModeFunction,
kUsesArguments,
kNeedsHomeObject,
+ // byte 1
kHasDuplicateParameters,
kForceInline,
- kBoundFunction,
+ kIsAsmFunction,
kIsAnonymous,
kNameShouldPrintAsAnonymous,
kIsFunction,
kDontCrankshaft,
kDontFlush,
- kIsArrow,
+ // byte 2
+ kFunctionKind,
+ kIsArrow = kFunctionKind,
kIsGenerator,
kIsConciseMethod,
kIsAccessorFunction,
kIsDefaultConstructor,
kIsSubclassConstructor,
kIsBaseConstructor,
- kInClassLiteral,
- kIsAsmFunction,
+ kIsInObjectLiteral,
+ // byte 3
kDeserialized,
kNeverCompiled,
- kCompilerHintsCount // Pseudo entry
+ kCompilerHintsCount, // Pseudo entry
};
// Add hints for other modes when they're added.
STATIC_ASSERT(LANGUAGE_END == 3);
+ // kFunctionKind has to be byte-aligned
+ STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
+// Make sure that FunctionKind and byte 2 are in sync:
+#define ASSERT_FUNCTION_KIND_ORDER(functionKind, compilerFunctionKind) \
+ STATIC_ASSERT(FunctionKind::functionKind == \
+ 1 << (compilerFunctionKind - kFunctionKind))
+ ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
+ ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
+ ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
+ ASSERT_FUNCTION_KIND_ORDER(kAccessorFunction, kIsAccessorFunction);
+ ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
+ ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
+ ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
+ ASSERT_FUNCTION_KIND_ORDER(kInObjectLiteral, kIsInObjectLiteral);
+#undef ASSERT_FUNCTION_KIND_ORDER
class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
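  // A consequence of the ordering asserted above (sketch): the complete
  // FunctionKind fits into this byte-aligned 8-bit field, so
  //
  //   FunctionKind kind = FunctionKindBits::decode(compiler_hints());
  //
  // reads exactly the bits that make up byte 2 of the hints word.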
@@ -6923,46 +7079,44 @@ class SharedFunctionInfo: public HeapObject {
public:
// Constants for optimizing codegen for strict mode function and
+ // native tests when using integer-width instructions.
+ static const int kStrictModeBit =
+ kStrictModeFunction + kCompilerHintsSmiTagSize;
+ static const int kStrongModeBit =
+ kStrongModeFunction + kCompilerHintsSmiTagSize;
+ static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
+
+ static const int kClassConstructorBits =
+ FunctionKind::kClassConstructor
+ << (kFunctionKind + kCompilerHintsSmiTagSize);
+
+ // Constants for optimizing codegen for strict mode function and
// native tests.
// Allows the use of byte-width instructions.
- static const int kStrictModeBitWithinByte =
- (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
- static const int kStrongModeBitWithinByte =
- (kStrongModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
- static const int kNativeBitWithinByte =
- (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
+ static const int kStrongModeBitWithinByte = kStrongModeBit % kBitsPerByte;
+ static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
- static const int kBoundBitWithinByte =
- (kBoundFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kClassConstructorBitsWithinByte =
+ FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
+ STATIC_ASSERT(kClassConstructorBitsWithinByte < (1 << kBitsPerByte));
#if defined(V8_TARGET_LITTLE_ENDIAN)
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kStrongModeByteOffset =
- kCompilerHintsOffset +
- (kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kNativeByteOffset = kCompilerHintsOffset +
- (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kBoundByteOffset =
- kCompilerHintsOffset +
- (kBoundFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+#define BYTE_OFFSET(compiler_hint) \
+ kCompilerHintsOffset + \
+ (compiler_hint + kCompilerHintsSmiTagSize) / kBitsPerByte
#elif defined(V8_TARGET_BIG_ENDIAN)
- static const int kStrictModeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kStrongModeByteOffset =
- kCompilerHintsOffset + (kCompilerHintsSize - 1) -
- ((kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kNativeByteOffset = kCompilerHintsOffset +
- (kCompilerHintsSize - 1) -
- ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kBoundByteOffset =
- kCompilerHintsOffset + (kCompilerHintsSize - 1) -
- ((kBoundFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+#define BYTE_OFFSET(compiler_hint) \
+ kCompilerHintsOffset + (kCompilerHintsSize - 1) - \
+ ((compiler_hint + kCompilerHintsSmiTagSize) / kBitsPerByte)
#else
#error Unknown byte ordering
#endif
+ static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
+ static const int kStrongModeByteOffset = BYTE_OFFSET(kStrongModeFunction);
+ static const int kNativeByteOffset = BYTE_OFFSET(kNative);
+ static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
+#undef BYTE_OFFSET
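  // Worked example (little-endian, assuming a one-bit Smi tag so that
  // kCompilerHintsSmiTagSize == 1): the strict-mode hint lands in byte
  // (kStrictModeFunction + 1) / kBitsPerByte of the hints word, so a single
  // byte load plus a bit test suffices:
  //
  //   byte b = READ_BYTE_FIELD(shared, kStrictModeByteOffset);
  //   bool is_strict = (b & (1 << kStrictModeBitWithinByte)) != 0;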
private:
// Returns entry from optimized code map for specified context and OSR entry.
@@ -6971,6 +7125,13 @@ class SharedFunctionInfo: public HeapObject {
int SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id);
+ // If code is undefined, then existing code won't be overwritten.
+ static void AddToOptimizedCodeMapInternal(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<HeapObject> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -7063,6 +7224,64 @@ class JSModule: public JSObject {
};
+// JSBoundFunction describes a bound function exotic object.
+class JSBoundFunction : public JSObject {
+ public:
+ // [length]: The bound function "length" property.
+ DECL_ACCESSORS(length, Object)
+
+ // [name]: The bound function "name" property.
+ DECL_ACCESSORS(name, Object)
+
+ // [bound_target_function]: The wrapped function object.
+ DECL_ACCESSORS(bound_target_function, JSReceiver)
+
+ // [bound_this]: The value that is always passed as the this value when
+ // calling the wrapped function.
+ DECL_ACCESSORS(bound_this, Object)
+
+ // [bound_arguments]: A list of values whose elements are used as the first
+ // arguments to any call to the wrapped function.
+ DECL_ACCESSORS(bound_arguments, FixedArray)
+
+ // [creation_context]: The native context in which the function was bound.
+ // TODO(bmeurer, verwaest): Can we (mis)use (unused) constructor field in
+ // the Map instead of putting this into the object? Only required for
+ // JSReceiver::GetCreationContext() anyway.
+ DECL_ACCESSORS(creation_context, Context)
+
+ static MaybeHandle<Context> GetFunctionRealm(
+ Handle<JSBoundFunction> function);
+
+ DECLARE_CAST(JSBoundFunction)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSBoundFunction)
+ DECLARE_VERIFIER(JSBoundFunction)
+
+ // The bound function's string representation implemented according
+ // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSBoundFunction> function);
+
+ // Layout description.
+ static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
+ static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
+ static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
+ static const int kCreationContextOffset =
+ kBoundArgumentsOffset + kPointerSize;
+ static const int kLengthOffset = kCreationContextOffset + kPointerSize;
+ static const int kNameOffset = kLengthOffset + kPointerSize;
+ static const int kSize = kNameOffset + kPointerSize;
+
+ // Indices of in-object properties.
+ static const int kLengthIndex = 0;
+ static const int kNameIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
+};
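// Sketch of the [[Call]] behavior the fields above support (ES6 9.4.1.1,
// simplified; `combined_args`, i.e. bound_arguments concatenated with the
// call's own arguments, is assumed here):
//
//   Handle<JSReceiver> target(function->bound_target_function(), isolate);
//   Handle<Object> receiver(function->bound_this(), isolate);
//   Execution::Call(isolate, target, receiver, total_argc, combined_args);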
+
+
// JSFunction describes JavaScript functions.
class JSFunction: public JSObject {
public:
@@ -7077,6 +7296,9 @@ class JSFunction: public JSObject {
inline Context* context();
inline void set_context(Object* context);
inline JSObject* global_proxy();
+ inline Context* native_context();
+
+ static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
// [code]: The generated code object for this function. Executed
// when the function is invoked, e.g. foo() or new foo(). See
@@ -7087,18 +7309,9 @@ class JSFunction: public JSObject {
inline void set_code_no_write_barrier(Code* code);
inline void ReplaceCode(Code* code);
- // Tells whether this function is builtin.
- inline bool IsBuiltin();
-
// Tells whether this function inlines the given shared function info.
bool Inlines(SharedFunctionInfo* candidate);
- // Tells whether this function should be subject to debugging.
- inline bool IsSubjectToDebugging();
-
- // Tells whether or not the function needs arguments adaption.
- inline bool NeedsArgumentsAdaption();
-
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
@@ -7115,49 +7328,10 @@ class JSFunction: public JSObject {
// Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInOptimizationQueue();
- // Inobject slack tracking is the way to reclaim unused inobject space.
- //
- // The instance size is initially determined by adding some slack to
- // expected_nof_properties (to allow for a few extra properties added
- // after the constructor). There is no guarantee that the extra space
- // will not be wasted.
- //
- // Here is the algorithm to reclaim the unused inobject space:
- // - Detect the first constructor call for this JSFunction.
- // When it happens enter the "in progress" state: initialize construction
- // counter in the initial_map.
- // - While the tracking is in progress create objects filled with
- // one_pointer_filler_map instead of undefined_value. This way they can be
- // resized quickly and safely.
- // - Once enough objects have been created compute the 'slack'
- // (traverse the map transition tree starting from the
- // initial_map and find the lowest value of unused_property_fields).
- // - Traverse the transition tree again and decrease the instance size
- // of every map. Existing objects will resize automatically (they are
- // filled with one_pointer_filler_map). All further allocations will
- // use the adjusted instance size.
- // - SharedFunctionInfo's expected_nof_properties left unmodified since
- // allocations made using different closures could actually create different
- // kind of objects (see prototype inheritance pattern).
- //
- // Important: inobject slack tracking is not attempted during the snapshot
- // creation.
+ // Completes inobject slack tracking on initial map if it is active.
+ inline void CompleteInobjectSlackTrackingIfActive();
- // True if the initial_map is set and the object constructions countdown
- // counter is not zero.
- static const int kGenerousAllocationCount =
- Map::kSlackTrackingCounterStart - Map::kSlackTrackingCounterEnd + 1;
- inline bool IsInobjectSlackTrackingInProgress();
-
- // Starts the tracking.
- // Initializes object constructions countdown counter in the initial map.
- void StartInobjectSlackTracking();
-
- // Completes the tracking.
- void CompleteInobjectSlackTracking();
-
- // [literals_or_bindings]: Fixed array holding either
- // the materialized literals or the bindings of a bound function.
+ // [literals]: Fixed array holding the materialized literals.
//
// If the function contains object, regexp or array literals, the
// literals array prefix contains the object, regexp, and array
@@ -7166,17 +7340,7 @@ class JSFunction: public JSObject {
// or array functions. Performing a dynamic lookup, we might end up
// using the functions from a new context that we should not have
// access to.
- //
- // On bound functions, the array is a (copy-on-write) fixed-array containing
- // the function that was bound, bound this-value and any bound
- // arguments. Bound functions never contain literals.
- DECL_ACCESSORS(literals_or_bindings, FixedArray)
-
- inline LiteralsArray* literals();
- inline void set_literals(LiteralsArray* literals);
-
- inline FixedArray* function_bindings();
- inline void set_function_bindings(FixedArray* bindings);
+ DECL_ACCESSORS(literals, LiteralsArray)
// The initial map for an object created by this constructor.
inline Map* initial_map();
@@ -7185,6 +7349,13 @@ class JSFunction: public JSObject {
inline bool has_initial_map();
static void EnsureHasInitialMap(Handle<JSFunction> function);
+ // Creates a map that matches the constructor's initial map, but with
+ // [[prototype]] being new.target.prototype. Because new.target can be a
+ // JSProxy, this can call back into JavaScript.
+ static MUST_USE_RESULT MaybeHandle<Map> GetDerivedMap(
+ Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target);
+
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
// map. Otherwise, the prototype is put in the initial map field
@@ -7202,25 +7373,9 @@ class JSFunction: public JSObject {
// [[Construct]] from this function will not be allowed.
bool RemovePrototype();
- // Accessor for this function's initial map's [[class]]
- // property. This is primarily used by ECMA native functions. This
- // method sets the class_name field of this function's initial map
- // to a given value. It creates an initial map if this function does
- // not have one. Note that this method does not copy the initial map
- // if it has one already, but simply replaces it with the new value.
- // Instances created afterwards will have a map whose [[class]] is
- // set to 'value', but there is no guarantees on instances created
- // before.
- void SetInstanceClassName(String* name);
-
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
- // Returns `false` if formal parameters include rest parameters, optional
- // parameters, or destructuring parameters.
- // TODO(caitp): make this a flag set during parsing
- inline bool has_simple_parameters();
-
// [next_function_link]: Links functions into various lists, e.g. the list
// of optimized functions hanging off the native_context. The CodeFlusher
// uses this link to chain together flushing candidates. Treated weakly
@@ -7232,9 +7387,35 @@ class JSFunction: public JSObject {
DECLARE_CAST(JSFunction)
- // Iterates the objects, including code objects indirectly referenced
- // through pointers to the first instruction in the code object.
- void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
+ // Calculate the instance size and in-object properties count.
+ void CalculateInstanceSize(InstanceType instance_type,
+ int requested_internal_fields, int* instance_size,
+ int* in_object_properties);
+ void CalculateInstanceSizeForDerivedClass(InstanceType instance_type,
+ int requested_internal_fields,
+ int* instance_size,
+ int* in_object_properties);
+
+ // Visiting policy flags define whether the code entry or next function
+ // should be visited or not.
+ enum BodyVisitingPolicy {
+ kVisitCodeEntry = 1 << 0,
+ kVisitNextFunction = 1 << 1,
+
+ kSkipCodeEntryAndNextFunction = 0,
+ kVisitCodeEntryAndNextFunction = kVisitCodeEntry | kVisitNextFunction
+ };
+ // Iterates the function object according to the visiting policy.
+ template <BodyVisitingPolicy>
+ class BodyDescriptorImpl;
+
+ // Visit the whole object.
+ typedef BodyDescriptorImpl<kVisitCodeEntryAndNextFunction> BodyDescriptor;
+
+ // Don't visit next function.
+ typedef BodyDescriptorImpl<kVisitCodeEntry> BodyDescriptorStrongCode;
+ typedef BodyDescriptorImpl<kSkipCodeEntryAndNextFunction>
+ BodyDescriptorWeakCode;
// Dispatched behavior.
DECLARE_PRINTER(JSFunction)
@@ -7248,26 +7429,29 @@ class JSFunction: public JSObject {
// The function's name if it is configured, otherwise the shared function
// info debug name.
+ static Handle<String> GetName(Handle<JSFunction> function);
+
+ // The function's displayName if it is set, otherwise its name if it is
+ // configured, otherwise the shared function info debug name.
static Handle<String> GetDebugName(Handle<JSFunction> function);
+ // The function's string representation implemented according to
+ // ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSFunction> function);
+
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
- static const int kCodeEntryOffset = JSObject::kHeaderSize;
- static const int kPrototypeOrInitialMapOffset =
- kCodeEntryOffset + kPointerSize;
+ static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
static const int kLiteralsOffset = kContextOffset + kPointerSize;
static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
- static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
+ static const int kCodeEntryOffset = kNonWeakFieldsEndOffset;
+ static const int kNextFunctionLinkOffset = kCodeEntryOffset + kPointerSize;
static const int kSize = kNextFunctionLinkOffset + kPointerSize;
- // Layout of the bound-function binding array.
- static const int kBoundFunctionIndex = 0;
- static const int kBoundThisIndex = 1;
- static const int kBoundArgumentsStartIndex = 2;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
};
@@ -7292,7 +7476,7 @@ class JSGlobalProxy : public JSObject {
DECLARE_CAST(JSGlobalProxy)
- inline bool IsDetachedFrom(GlobalObject* global) const;
+ inline bool IsDetachedFrom(JSGlobalObject* global) const;
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalProxy)
@@ -7308,41 +7492,22 @@ class JSGlobalProxy : public JSObject {
};
-// Common super class for JavaScript global objects and the special
-// builtins global objects.
-class GlobalObject: public JSObject {
+// JavaScript global object.
+class JSGlobalObject : public JSObject {
public:
- // [builtins]: the object holding the runtime routines written in JS.
- DECL_ACCESSORS(builtins, JSBuiltinsObject)
-
// [native context]: the natives corresponding to this global object.
DECL_ACCESSORS(native_context, Context)
// [global proxy]: the global proxy object of the context
DECL_ACCESSORS(global_proxy, JSObject)
- DECLARE_CAST(GlobalObject)
- static void InvalidatePropertyCell(Handle<GlobalObject> object,
+ static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
// Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsurePropertyCell(Handle<GlobalObject> global,
+ static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name);
- // Layout description.
- static const int kBuiltinsOffset = JSObject::kHeaderSize;
- static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
- static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
- static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
-};
-
-
-// JavaScript global object.
-class JSGlobalObject: public GlobalObject {
- public:
DECLARE_CAST(JSGlobalObject)
inline bool IsDetached();
@@ -7352,31 +7517,16 @@ class JSGlobalObject: public GlobalObject {
DECLARE_VERIFIER(JSGlobalObject)
// Layout description.
- static const int kSize = GlobalObject::kHeaderSize;
+ static const int kNativeContextOffset = JSObject::kHeaderSize;
+ static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
+ static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
+ static const int kSize = kHeaderSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalObject);
};
-// Builtins global object which holds the runtime routines written in
-// JavaScript.
-class JSBuiltinsObject: public GlobalObject {
- public:
- DECLARE_CAST(JSBuiltinsObject)
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSBuiltinsObject)
- DECLARE_VERIFIER(JSBuiltinsObject)
-
- // Layout description.
- static const int kSize = GlobalObject::kHeaderSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
-};
-
-
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
class JSValue: public JSObject {
public:
@@ -7403,6 +7553,10 @@ class DateCache;
// Representation for JS date objects.
class JSDate: public JSObject {
public:
+ static MUST_USE_RESULT MaybeHandle<JSDate> New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ double tv);
+
// If one component is NaN, all of them are, indicating a NaN time value.
// [value]: the time value.
DECL_ACCESSORS(value, Object)
@@ -7426,10 +7580,15 @@ class JSDate: public JSObject {
DECLARE_CAST(JSDate)
+ // Returns the time value (UTC) identifying the current time.
+ static double CurrentTimeValue(Isolate* isolate);
+
// Returns the date field with the specified index.
// See FieldIndex for the list of date fields.
static Object* GetField(Object* date, Smi* index);
+ static Handle<Object> SetValue(Handle<JSDate> date, double v);
+
void SetValue(Object* value, bool is_value_nan);
// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ]
@@ -7569,28 +7728,28 @@ class JSRegExp: public JSObject {
// IRREGEXP_NATIVE: Compiled to native code with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
enum Flag {
- NONE = 0,
- GLOBAL = 1,
- IGNORE_CASE = 2,
- MULTILINE = 4,
- STICKY = 8,
- UNICODE_ESCAPES = 16
- };
-
- class Flags {
- public:
- explicit Flags(uint32_t value) : value_(value) { }
- bool is_global() { return (value_ & GLOBAL) != 0; }
- bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
- bool is_multiline() { return (value_ & MULTILINE) != 0; }
- bool is_sticky() { return (value_ & STICKY) != 0; }
- bool is_unicode() { return (value_ & UNICODE_ESCAPES) != 0; }
- uint32_t value() { return value_; }
- private:
- uint32_t value_;
+ kNone = 0,
+ kGlobal = 1 << 0,
+ kIgnoreCase = 1 << 1,
+ kMultiline = 1 << 2,
+ kSticky = 1 << 3,
+ kUnicode = 1 << 4,
};
+ typedef base::Flags<Flag> Flags;
DECL_ACCESSORS(data, Object)
+ DECL_ACCESSORS(flags, Object)
+ DECL_ACCESSORS(source, Object)
+
+ static MaybeHandle<JSRegExp> New(Handle<String> source, Flags flags);
+ static MaybeHandle<JSRegExp> New(Handle<String> source, Handle<String> flags);
+ static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
+
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source, Flags flags);
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string);
inline Type TypeTag();
inline int CaptureCount();
@@ -7619,10 +7778,13 @@ class JSRegExp: public JSObject {
DECLARE_CAST(JSRegExp)
// Dispatched behavior.
+ DECLARE_PRINTER(JSRegExp)
DECLARE_VERIFIER(JSRegExp)
static const int kDataOffset = JSObject::kHeaderSize;
- static const int kSize = kDataOffset + kPointerSize;
+ static const int kSourceOffset = kDataOffset + kPointerSize;
+ static const int kFlagsOffset = kSourceOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
// Indices in the data array.
static const int kTagIndex = 0;
@@ -7671,12 +7833,8 @@ class JSRegExp: public JSObject {
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
// In-object fields.
- static const int kSourceFieldIndex = 0;
- static const int kGlobalFieldIndex = 1;
- static const int kIgnoreCaseFieldIndex = 2;
- static const int kMultilineFieldIndex = 3;
- static const int kLastIndexFieldIndex = 4;
- static const int kInObjectFieldCount = 5;
+ static const int kLastIndexFieldIndex = 0;
+ static const int kInObjectFieldCount = 1;
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
@@ -7691,6 +7849,8 @@ class JSRegExp: public JSObject {
static const int kCodeAgeMask = 0xff;
};
+DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
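// Usage sketch: DEFINE_OPERATORS_FOR_FLAGS lets the enumerators combine into
// a type-safe Flags value (`source` is assumed to be a Handle<String> in
// scope):
//
//   JSRegExp::Flags flags = JSRegExp::kGlobal | JSRegExp::kIgnoreCase;
//   if (flags & JSRegExp::kUnicode) { /* not set here */ }
//   Handle<JSRegExp> re = JSRegExp::New(source, flags).ToHandleChecked();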
+
class CompilationCacheShape : public BaseShape<HashTableKey*> {
public:
@@ -8001,7 +8161,7 @@ class AllocationSite: public Struct {
// Increments the mementos found counter and returns true when the first
// memento was found for a given allocation site.
- inline bool IncrementMementoFoundCount();
+ inline bool IncrementMementoFoundCount(int increment = 1);
inline void IncrementMementoCreateCount();
@@ -8374,6 +8534,11 @@ class Symbol: public Name {
// be used to designate own properties of objects.
DECL_BOOLEAN_ACCESSORS(is_private)
+ // [is_well_known_symbol]: Whether this is a spec-defined well-known symbol,
+ // or not. Well-known symbols do not throw when an access check fails during
+ // a load.
+ DECL_BOOLEAN_ACCESSORS(is_well_known_symbol)
+
DECLARE_CAST(Symbol)
// Dispatched behavior.
@@ -8391,6 +8556,7 @@ class Symbol: public Name {
private:
static const int kPrivateBit = 0;
+ static const int kWellKnownSymbolBit = 1;
const char* PrivateSymbolToName() const;
@@ -9034,11 +9200,7 @@ class ExternalOneByteString : public ExternalString {
DECLARE_CAST(ExternalOneByteString)
- // Garbage collection support.
- inline void ExternalOneByteStringIterateBody(ObjectVisitor* v);
-
- template <typename StaticVisitor>
- inline void ExternalOneByteStringIterateBody();
+ class BodyDescriptor;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
@@ -9073,11 +9235,7 @@ class ExternalTwoByteString: public ExternalString {
DECLARE_CAST(ExternalTwoByteString)
- // Garbage collection support.
- inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ExternalTwoByteStringIterateBody();
+ class BodyDescriptor;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
@@ -9371,7 +9529,7 @@ class WeakCell : public HeapObject {
DECL_ACCESSORS(next, Object)
- inline void clear_next(Heap* heap);
+ inline void clear_next(Object* the_hole_value);
inline bool next_cleared();
@@ -9395,117 +9553,108 @@ class WeakCell : public HeapObject {
// The JSProxy describes ECMAScript Harmony proxies.
class JSProxy: public JSReceiver {
public:
+ MUST_USE_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
+ Handle<Object>,
+ Handle<Object>);
+
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
-
+ // [target]: The target property.
+ DECL_ACCESSORS(target, JSReceiver)
// [hash]: The hash code property (undefined if not initialized yet).
DECL_ACCESSORS(hash, Object)
- DECLARE_CAST(JSProxy)
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
- MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithHandler(
- Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name);
+ DECLARE_CAST(JSProxy)
- // If the handler defines an accessor property with a setter, invoke it.
- // If it defines an accessor property without a setter, or a data property
- // that is read-only, throw. In all these cases set '*done' to true,
- // otherwise set it to false.
- MUST_USE_RESULT
- static MaybeHandle<Object> SetPropertyViaPrototypesWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode, bool* done);
+ INLINE(bool IsRevoked() const);
+ static void Revoke(Handle<JSProxy> proxy);
+
+ // ES6 9.5.1
+ static MaybeHandle<Object> GetPrototype(Handle<JSProxy> receiver);
+
+ // ES6 9.5.2
+ MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSProxy> proxy,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw);
+ // ES6 9.5.3
+ MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
+
+ // ES6 9.5.4 (when passed DONT_THROW)
+ MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSProxy> proxy, ShouldThrow should_throw);
+
+ // ES6 9.5.5
+ MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
+ PropertyDescriptor* desc);
+
+ // ES6 9.5.6
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSProxy> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ // ES6 9.5.7
+ MUST_USE_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name);
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetPropertyAttributesWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name);
- MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, LanguageMode language_mode);
+ // ES6 9.5.8
+ MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
+ Handle<Object> receiver, LanguageMode language_mode);
+
+ // ES6 9.5.9
+ MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<Object> receiver,
+ LanguageMode language_mode);
+
+ // ES6 9.5.10 (when passed SLOPPY)
+ MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
+ Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
- // Turn the proxy into an (empty) JSObject.
- static void Fix(Handle<JSProxy> proxy);
+ // ES6 9.5.11
+ MUST_USE_RESULT static Maybe<bool> Enumerate(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy,
+ KeyAccumulator* accumulator);
- // Initializes the body after the handler slot.
- inline void InitializeBody(int object_size, Object* value);
+ // ES6 9.5.12
+ MUST_USE_RESULT static Maybe<bool> OwnPropertyKeys(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
+ PropertyFilter filter, KeyAccumulator* accumulator);
- // Invoke a trap by name. If the trap does not exist on this's handler,
- // but derived_trap is non-NULL, invoke that instead. May cause GC.
- MUST_USE_RESULT static MaybeHandle<Object> CallTrap(
- Handle<JSProxy> proxy,
- const char* name,
- Handle<Object> derived_trap,
- int argc,
- Handle<Object> args[]);
+ MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ LookupIterator* it);
// Dispatched behavior.
DECLARE_PRINTER(JSProxy)
DECLARE_VERIFIER(JSProxy)
- // Layout description. We add padding so that a proxy has the same
- // size as a virgin JSObject. This is essential for becoming a JSObject
- // upon freeze.
- static const int kHandlerOffset = HeapObject::kHeaderSize;
+ // Layout description.
+ static const int kTargetOffset = JSReceiver::kHeaderSize;
+ static const int kHandlerOffset = kTargetOffset + kPointerSize;
static const int kHashOffset = kHandlerOffset + kPointerSize;
- static const int kPaddingOffset = kHashOffset + kPointerSize;
- static const int kSize = JSObject::kHeaderSize;
- static const int kHeaderSize = kPaddingOffset;
- static const int kPaddingSize = kSize - kPaddingOffset;
-
- STATIC_ASSERT(kPaddingSize >= 0);
-
- typedef FixedBodyDescriptor<kHandlerOffset,
- kPaddingOffset,
- kSize> BodyDescriptor;
-
- private:
- friend class JSReceiver;
-
- MUST_USE_RESULT static Maybe<bool> HasPropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Name> name);
+ static const int kSize = kHashOffset + kPointerSize;
- MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
+ typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
+ BodyDescriptor;
MUST_USE_RESULT Object* GetIdentityHash();
static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
-};
-
-
-class JSFunctionProxy: public JSProxy {
- public:
- // [call_trap]: The call trap.
- DECL_ACCESSORS(call_trap, JSReceiver)
-
- // [construct_trap]: The construct trap.
- DECL_ACCESSORS(construct_trap, Object)
-
- DECLARE_CAST(JSFunctionProxy)
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSFunctionProxy)
- DECLARE_VERIFIER(JSFunctionProxy)
-
- // Layout description.
- static const int kCallTrapOffset = JSProxy::kPaddingOffset;
- static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
- static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
- static const int kSize = JSFunction::kSize;
- static const int kPaddingSize = kSize - kPaddingOffset;
-
- STATIC_ASSERT(kPaddingSize >= 0);
-
- typedef FixedBodyDescriptor<kHandlerOffset,
- kConstructTrapOffset + kPointerSize,
- kSize> BodyDescriptor;
-
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunctionProxy);
+ static Maybe<bool> AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
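The rewritten JSProxy above exposes the ES6 proxy internal methods (9.5.1 through 9.5.12) as static methods returning Maybe<bool> and taking a ShouldThrow mode. A stand-alone sketch of that calling convention, using simplified stand-ins for V8's Maybe and ShouldThrow (an assumed reading of the convention, not the real types):

#include <cassert>
#include <cstdio>

enum ShouldThrow { THROW_ON_ERROR, DONT_THROW };

template <typename T>
struct Maybe {  // stand-in: V8's Maybe is richer than this
  bool has_value;
  T value;
};
template <typename T> Maybe<T> Just(T v) { return {true, v}; }
template <typename T> Maybe<T> Nothing() { return {false, T()}; }

// Shape of a 9.5.x method: Nothing() models "exception scheduled", Just(false)
// models a quiet failure (reachable only when DONT_THROW was requested).
Maybe<bool> PreventExtensions(bool trap_result, ShouldThrow should_throw) {
  if (trap_result) return Just(true);
  if (should_throw == THROW_ON_ERROR) return Nothing<bool>();
  return Just(false);
}

int main() {
  assert(PreventExtensions(false, DONT_THROW).value == false);
  assert(!PreventExtensions(false, THROW_ON_ERROR).has_value);
  std::printf("ok\n");
  return 0;
}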
@@ -9715,6 +9864,20 @@ class JSWeakCollection: public JSObject {
static const int kNextOffset = kTableOffset + kPointerSize;
static const int kSize = kNextOffset + kPointerSize;
+ // Visiting policy defines whether the table and next collection fields
+ // should be visited or not.
+ enum BodyVisitingPolicy { kVisitStrong, kVisitWeak };
+
+ // Iterates the collection object according to the visiting policy.
+ template <BodyVisitingPolicy>
+ class BodyDescriptorImpl;
+
+ // Visit the whole object.
+ typedef BodyDescriptorImpl<kVisitStrong> BodyDescriptor;
+
+ // Don't visit table and next collection fields.
+ typedef BodyDescriptorImpl<kVisitWeak> BodyDescriptorWeak;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
};
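JSWeakCollection above picks its GC body descriptor at compile time: the same template is instantiated once visiting the weak fields and once skipping them. A toy version of the non-type-template-parameter pattern (the printed field names are illustrative, not V8's visitor machinery):

#include <cstdio>

enum BodyVisitingPolicy { kVisitStrong, kVisitWeak };

template <BodyVisitingPolicy policy>
struct BodyDescriptorImpl {
  static void IterateBody() {
    std::printf("visit header fields\n");
    if (policy == kVisitStrong)  // resolved at compile time
      std::printf("visit table and next fields\n");
  }
};

typedef BodyDescriptorImpl<kVisitStrong> BodyDescriptor;
typedef BodyDescriptorImpl<kVisitWeak> BodyDescriptorWeak;

int main() {
  BodyDescriptor::IterateBody();      // visits everything
  BodyDescriptorWeak::IterateBody();  // skips the weak fields
  return 0;
}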
@@ -9793,11 +9956,6 @@ class JSArrayBuffer: public JSObject {
DECLARE_VERIFIER(JSArrayBuffer)
static const int kByteLengthOffset = JSObject::kHeaderSize;
-
- // NOTE: GC will visit objects fields:
- // 1. From JSObject::BodyDescriptor::kStartOffset to kByteLengthOffset +
- // kPointerSize
- // 2. From start of the internal fields and up to the end of them
static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
@@ -9810,11 +9968,9 @@ class JSArrayBuffer: public JSObject {
static const int kSizeWithInternalFields =
kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
- template <typename StaticVisitor>
- static inline void JSArrayBufferIterateBody(Heap* heap, HeapObject* obj);
-
- static inline void JSArrayBufferIterateBody(HeapObject* obj,
- ObjectVisitor* v);
+ // Iterates all fields in the object including internal ones except
+ // kBackingStoreOffset and kBitFieldSlot.
+ class BodyDescriptor;
class IsExternal : public BitField<bool, 1, 1> {};
class IsNeuterable : public BitField<bool, 2, 1> {};
@@ -9920,12 +10076,6 @@ class Foreign: public HeapObject {
DECLARE_CAST(Foreign)
// Dispatched behavior.
- inline void ForeignIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ForeignIterateBody();
-
- // Dispatched behavior.
DECLARE_PRINTER(Foreign)
DECLARE_VERIFIER(Foreign)
@@ -9936,6 +10086,8 @@ class Foreign: public HeapObject {
STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
};
@@ -9957,7 +10109,6 @@ class JSArray: public JSObject {
static bool HasReadOnlyLength(Handle<JSArray> array);
static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
- static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array);
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
@@ -9981,6 +10132,19 @@ class JSArray: public JSObject {
static inline void SetContent(Handle<JSArray> array,
Handle<FixedArrayBase> storage);
+ // ES6 9.4.2.1
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSArray> o, Handle<Object> name,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ static bool AnythingToArrayLength(Isolate* isolate,
+ Handle<Object> length_object,
+ uint32_t* output);
+ MUST_USE_RESULT static Maybe<bool> ArraySetLength(Isolate* isolate,
+ Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
DECLARE_CAST(JSArray)
// Dispatched behavior.
@@ -9994,6 +10158,14 @@ class JSArray: public JSObject {
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
+ // 600 * KB is Page::kMaxRegularHeapObjectSize, defined in spaces.h, which
+ // we do not want to include in objects.h.
+ // Note that Page::kMaxRegularHeapObjectSize has to be in sync with
+ // kInitialMaxFastElementArray, which is checked in a DCHECK in heap.cc.
+ static const int kInitialMaxFastElementArray =
+ (600 * KB - FixedArray::kHeaderSize - kSize - AllocationMemento::kSize) /
+ kPointerSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
};
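The kInitialMaxFastElementArray definition above budgets a regular heap page minus the array's own overhead. The same arithmetic spelled out, with illustrative 64-bit sizes standing in for the real V8 header constants:

#include <cstdio>

int main() {
  const int KB = 1024;
  const int kPointerSize = 8;             // assumption: 64-bit build
  const int kFixedArrayHeaderSize = 16;   // assumed stand-in
  const int kJSArraySize = 32;            // assumed stand-in
  const int kAllocationMementoSize = 16;  // assumed stand-in
  const int kInitialMaxFastElementArray =
      (600 * KB - kFixedArrayHeaderSize - kJSArraySize -
       kAllocationMementoSize) / kPointerSize;
  std::printf("budget: %d elements\n", kInitialMaxFastElementArray);
  return 0;
}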
@@ -10162,6 +10334,7 @@ class AccessCheckInfo: public Struct {
public:
DECL_ACCESSORS(named_callback, Object)
DECL_ACCESSORS(indexed_callback, Object)
+ DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
DECLARE_CAST(AccessCheckInfo)
@@ -10172,7 +10345,8 @@ class AccessCheckInfo: public Struct {
static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
- static const int kDataOffset = kIndexedCallbackOffset + kPointerSize;
+ static const int kCallbackOffset = kIndexedCallbackOffset + kPointerSize;
+ static const int kDataOffset = kCallbackOffset + kPointerSize;
static const int kSize = kDataOffset + kPointerSize;
private:
@@ -10223,6 +10397,7 @@ class CallHandlerInfo: public Struct {
public:
DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
+ DECL_ACCESSORS(fast_handler, Object)
DECLARE_CAST(CallHandlerInfo)
@@ -10232,7 +10407,8 @@ class CallHandlerInfo: public Struct {
static const int kCallbackOffset = HeapObject::kHeaderSize;
static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+ static const int kFastHandlerOffset = kDataOffset + kPointerSize;
+ static const int kSize = kFastHandlerOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
@@ -10254,7 +10430,9 @@ class TemplateInfo: public Struct {
static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
static const int kPropertyAccessorsOffset =
kPropertyListOffset + kPointerSize;
- static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
+ static const int kPropertyIntrinsicsOffset =
+ kPropertyAccessorsOffset + kPointerSize;
+ static const int kHeaderSize = kPropertyIntrinsicsOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
@@ -10359,21 +10537,6 @@ class ObjectTemplateInfo: public TemplateInfo {
};
-class TypeSwitchInfo: public Struct {
- public:
- DECL_ACCESSORS(types, Object)
-
- DECLARE_CAST(TypeSwitchInfo)
-
- // Dispatched behavior.
- DECLARE_PRINTER(TypeSwitchInfo)
- DECLARE_VERIFIER(TypeSwitchInfo)
-
- static const int kTypesOffset = Struct::kHeaderSize;
- static const int kSize = kTypesOffset + kPointerSize;
-};
-
-
// The DebugInfo class holds additional information for a function being
// debugged.
class DebugInfo: public Struct {
@@ -10577,13 +10740,6 @@ class ObjectVisitor BASE_EMBEDDED {
};
-class StructBodyDescriptor : public
- FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
-};
-
-
// BooleanBit is a helper class for setting and getting a bit in an integer.
class BooleanBit : public AllStatic {
public:
@@ -10602,28 +10758,7 @@ class BooleanBit : public AllStatic {
};
-class KeyAccumulator final BASE_EMBEDDED {
- public:
- explicit KeyAccumulator(Isolate* isolate) : isolate_(isolate), length_(0) {}
-
- void AddKey(Handle<Object> key, int check_limit);
- void AddKeys(Handle<FixedArray> array, FixedArray::KeyFilter filter);
- void AddKeys(Handle<JSObject> array, FixedArray::KeyFilter filter);
- void PrepareForComparisons(int count);
- Handle<FixedArray> GetKeys();
-
- int GetLength() { return length_; }
-
- private:
- void EnsureCapacity(int capacity);
- void Grow();
-
- Isolate* isolate_;
- Handle<FixedArray> keys_;
- Handle<OrderedHashSet> set_;
- int length_;
- DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
-};
-} } // namespace v8::internal
+} // NOLINT, false-positive due to second-order macros.
+} // NOLINT, false-positive due to second-order macros.
#endif // V8_OBJECTS_H_
diff --git a/chromium/v8/src/optimizing-compile-dispatcher.cc b/chromium/v8/src/optimizing-compile-dispatcher.cc
index 8e3e96ad000..7062db640d5 100644
--- a/chromium/v8/src/optimizing-compile-dispatcher.cc
+++ b/chromium/v8/src/optimizing-compile-dispatcher.cc
@@ -6,7 +6,6 @@
#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/v8.h"
diff --git a/chromium/v8/src/optimizing-compile-dispatcher.h b/chromium/v8/src/optimizing-compile-dispatcher.h
index ad09dfa734b..9c4e4cb8df1 100644
--- a/chromium/v8/src/optimizing-compile-dispatcher.h
+++ b/chromium/v8/src/optimizing-compile-dispatcher.h
@@ -131,7 +131,7 @@ class OptimizingCompileDispatcher {
// is not safe to access them directly.
int recompilation_delay_;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_OPTIMIZING_COMPILE_DISPATCHER_H_
diff --git a/chromium/v8/src/ostreams.cc b/chromium/v8/src/ostreams.cc
index c3532bdbb14..a7a67f5d2f0 100644
--- a/chromium/v8/src/ostreams.cc
+++ b/chromium/v8/src/ostreams.cc
@@ -71,6 +71,7 @@ std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c) {
std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c) {
if (c.value == '\n') return os << "\\n";
if (c.value == '\r') return os << "\\r";
+ if (c.value == '\t') return os << "\\t";
if (c.value == '\"') return os << "\\\"";
return PrintUC16(os, c.value, IsOK);
}
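The ostreams.cc hunk above adds '\t' to the characters that get a two-character escape when printing as JSON. A self-contained version of the rule for the characters shown (plain char here; V8 operates on UC16 values and falls back to PrintUC16):

#include <iostream>

std::ostream& EscapeForJSON(std::ostream& os, char c) {
  if (c == '\n') return os << "\\n";
  if (c == '\r') return os << "\\r";
  if (c == '\t') return os << "\\t";   // the newly handled case
  if (c == '\"') return os << "\\\"";
  return os << c;                      // stand-in for the PrintUC16 fallback
}

int main() {
  EscapeForJSON(std::cout, '\t') << '\n';  // prints: \t
  return 0;
}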
diff --git a/chromium/v8/src/ostreams.h b/chromium/v8/src/ostreams.h
index 6f8600e7b1c..56f4aa7e45b 100644
--- a/chromium/v8/src/ostreams.h
+++ b/chromium/v8/src/ostreams.h
@@ -36,7 +36,7 @@ class OFStreamBase : public std::streambuf {
class OFStream : public std::ostream {
public:
explicit OFStream(FILE* f);
- ~OFStream();
+ virtual ~OFStream();
private:
OFStreamBase buf_;
diff --git a/chromium/v8/src/parsing/OWNERS b/chromium/v8/src/parsing/OWNERS
new file mode 100644
index 00000000000..a5daeb3b72a
--- /dev/null
+++ b/chromium/v8/src/parsing/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+adamk@chromium.org
+littledan@chromium.org
+marja@chromium.org
+rossberg@chromium.org
+
diff --git a/chromium/v8/src/expression-classifier.h b/chromium/v8/src/parsing/expression-classifier.h
index fb45f41fa16..96ccf871f4c 100644
--- a/chromium/v8/src/expression-classifier.h
+++ b/chromium/v8/src/parsing/expression-classifier.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXPRESSION_CLASSIFIER_H
-#define V8_EXPRESSION_CLASSIFIER_H
+#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H
+#define V8_PARSING_EXPRESSION_CLASSIFIER_H
#include "src/messages.h"
-#include "src/scanner.h"
-#include "src/token.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -19,10 +19,12 @@ class ExpressionClassifier {
Error()
: location(Scanner::Location::invalid()),
message(MessageTemplate::kNone),
+ type(kSyntaxError),
arg(nullptr) {}
Scanner::Location location;
- MessageTemplate::Template message;
+ MessageTemplate::Template message : 30;
+ ParseErrorType type : 2;
const char* arg;
};
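Note the bit-field widths introduced above: message shrinks to 30 bits so the new 2-bit type fits beside it without growing the struct. A compilable illustration, with stand-in enums for MessageTemplate::Template and ParseErrorType:

#include <cstdio>

enum MessageTemplateT { kNoneT };  // assumed stand-in
enum ParseErrorTypeT { kSyntaxErrorT, kReferenceErrorT };

struct Error {
  int location;                  // stand-in for Scanner::Location
  MessageTemplateT message : 30;
  ParseErrorTypeT type : 2;      // packs into the same word as message
  const char* arg;
};

int main() {
  std::printf("sizeof(Error) = %zu\n", sizeof(Error));
  return 0;
}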
@@ -35,17 +37,20 @@ class ExpressionClassifier {
StrictModeFormalParametersProduction = 1 << 5,
StrongModeFormalParametersProduction = 1 << 6,
ArrowFormalParametersProduction = 1 << 7,
+ LetPatternProduction = 1 << 8,
+ CoverInitializedNameProduction = 1 << 9,
ExpressionProductions =
(ExpressionProduction | FormalParameterInitializerProduction),
- PatternProductions =
- (BindingPatternProduction | AssignmentPatternProduction),
+ PatternProductions = (BindingPatternProduction |
+ AssignmentPatternProduction | LetPatternProduction),
FormalParametersProductions = (DistinctFormalParametersProduction |
StrictModeFormalParametersProduction |
StrongModeFormalParametersProduction),
StandardProductions = ExpressionProductions | PatternProductions,
- AllProductions = (StandardProductions | FormalParametersProductions |
- ArrowFormalParametersProduction)
+ AllProductions =
+ (StandardProductions | FormalParametersProductions |
+ ArrowFormalParametersProduction | CoverInitializedNameProduction)
};
enum FunctionProperties { NonSimpleParameter = 1 << 0 };
@@ -100,6 +105,8 @@ class ExpressionClassifier {
return is_valid(StrongModeFormalParametersProduction);
}
+ bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
+
const Error& expression_error() const { return expression_error_; }
const Error& formal_parameter_initializer_error() const {
@@ -128,6 +135,15 @@ class ExpressionClassifier {
return strong_mode_formal_parameter_error_;
}
+ const Error& let_pattern_error() const { return let_pattern_error_; }
+
+ bool has_cover_initialized_name() const {
+ return !is_valid(CoverInitializedNameProduction);
+ }
+ const Error& cover_initialized_name_error() const {
+ return cover_initialized_name_error_;
+ }
+
bool is_simple_parameter_list() const {
return !(function_properties_ & NonSimpleParameter);
}
@@ -146,6 +162,17 @@ class ExpressionClassifier {
expression_error_.arg = arg;
}
+ void RecordExpressionError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ ParseErrorType type, const char* arg = nullptr) {
+ if (!is_valid_expression()) return;
+ invalid_productions_ |= ExpressionProduction;
+ expression_error_.location = loc;
+ expression_error_.message = message;
+ expression_error_.arg = arg;
+ expression_error_.type = type;
+ }
+
void RecordFormalParameterInitializerError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -176,6 +203,13 @@ class ExpressionClassifier {
assignment_pattern_error_.arg = arg;
}
+ void RecordPatternError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ RecordBindingPatternError(loc, message, arg);
+ RecordAssignmentPatternError(loc, message, arg);
+ }
+
void RecordArrowFormalParametersError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -217,6 +251,36 @@ class ExpressionClassifier {
strong_mode_formal_parameter_error_.arg = arg;
}
+ void RecordLetPatternError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_let_pattern()) return;
+ invalid_productions_ |= LetPatternProduction;
+ let_pattern_error_.location = loc;
+ let_pattern_error_.message = message;
+ let_pattern_error_.arg = arg;
+ }
+
+ void RecordCoverInitializedNameError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (has_cover_initialized_name()) return;
+ invalid_productions_ |= CoverInitializedNameProduction;
+ cover_initialized_name_error_.location = loc;
+ cover_initialized_name_error_.message = message;
+ cover_initialized_name_error_.arg = arg;
+ }
+
+ void ForgiveCoverInitializedNameError() {
+ invalid_productions_ &= ~CoverInitializedNameProduction;
+ cover_initialized_name_error_ = Error();
+ }
+
+ void ForgiveAssignmentPatternError() {
+ invalid_productions_ &= ~AssignmentPatternProduction;
+ assignment_pattern_error_ = Error();
+ }
+
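All the Record*/Forgive* methods above follow one bitmask protocol: the first recorded error for a production wins, and forgiving clears that production's bit so it becomes valid again. A minimal model (the bit value mirrors CoverInitializedNameProduction; everything else is illustrative):

#include <cassert>

const unsigned kCoverInitializedName = 1u << 9;

struct ClassifierModel {
  unsigned invalid_productions_ = 0;
  bool is_valid(unsigned p) const { return (invalid_productions_ & p) == 0; }
  void Record(unsigned p) {
    if (!is_valid(p)) return;  // keep the first error only
    invalid_productions_ |= p;
  }
  void Forgive(unsigned p) { invalid_productions_ &= ~p; }
};

int main() {
  ClassifierModel c;
  c.Record(kCoverInitializedName);
  assert(!c.is_valid(kCoverInitializedName));
  c.Forgive(kCoverInitializedName);
  assert(c.is_valid(kCoverInitializedName));
  return 0;
}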
void Accumulate(const ExpressionClassifier& inner,
unsigned productions = StandardProductions) {
// Propagate errors from inner, but don't overwrite already recorded
@@ -249,6 +313,10 @@ class ExpressionClassifier {
if (errors & StrongModeFormalParametersProduction)
strong_mode_formal_parameter_error_ =
inner.strong_mode_formal_parameter_error_;
+ if (errors & LetPatternProduction)
+ let_pattern_error_ = inner.let_pattern_error_;
+ if (errors & CoverInitializedNameProduction)
+ cover_initialized_name_error_ = inner.cover_initialized_name_error_;
}
// As an exception to the above, the result continues to be a valid arrow
@@ -277,9 +345,12 @@ class ExpressionClassifier {
Error duplicate_formal_parameter_error_;
Error strict_mode_formal_parameter_error_;
Error strong_mode_formal_parameter_error_;
+ Error let_pattern_error_;
+ Error cover_initialized_name_error_;
DuplicateFinder* duplicate_finder_;
};
-}
-} // v8::internal
-#endif // V8_EXPRESSION_CLASSIFIER_H
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H
diff --git a/chromium/v8/src/func-name-inferrer.cc b/chromium/v8/src/parsing/func-name-inferrer.cc
index 5006c03eb62..12013afd282 100644
--- a/chromium/v8/src/func-name-inferrer.cc
+++ b/chromium/v8/src/parsing/func-name-inferrer.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/func-name-inferrer.h"
+#include "src/parsing/func-name-inferrer.h"
-#include "src/ast.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-value-factory.h"
#include "src/list-inl.h"
namespace v8 {
diff --git a/chromium/v8/src/func-name-inferrer.h b/chromium/v8/src/parsing/func-name-inferrer.h
index 1be63323ca2..ba38ffeb241 100644
--- a/chromium/v8/src/func-name-inferrer.h
+++ b/chromium/v8/src/parsing/func-name-inferrer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FUNC_NAME_INFERRER_H_
-#define V8_FUNC_NAME_INFERRER_H_
+#ifndef V8_PARSING_FUNC_NAME_INFERRER_H_
+#define V8_PARSING_FUNC_NAME_INFERRER_H_
#include "src/handles.h"
#include "src/zone.h"
@@ -121,6 +121,7 @@ class FuncNameInferrer : public ZoneObject {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_FUNC_NAME_INFERRER_H_
+#endif // V8_PARSING_FUNC_NAME_INFERRER_H_
diff --git a/chromium/v8/src/json-parser.h b/chromium/v8/src/parsing/json-parser.h
index cac49798598..e23c73383eb 100644
--- a/chromium/v8/src/json-parser.h
+++ b/chromium/v8/src/parsing/json-parser.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_JSON_PARSER_H_
-#define V8_JSON_PARSER_H_
+#ifndef V8_PARSING_JSON_PARSER_H_
+#define V8_PARSING_JSON_PARSER_H_
#include "src/char-predicates.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/messages.h"
-#include "src/scanner.h"
-#include "src/token.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
#include "src/transitions.h"
#include "src/types.h"
@@ -761,17 +761,8 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
position_);
}
if (c0 < 0x20) return Handle<String>::null();
- if (static_cast<uint32_t>(c0) >
- unibrow::Utf16::kMaxNonSurrogateCharCode) {
- running_hash =
- StringHasher::AddCharacterCore(running_hash,
- unibrow::Utf16::LeadSurrogate(c0));
- running_hash =
- StringHasher::AddCharacterCore(running_hash,
- unibrow::Utf16::TrailSurrogate(c0));
- } else {
- running_hash = StringHasher::AddCharacterCore(running_hash, c0);
- }
+ running_hash = StringHasher::AddCharacterCore(running_hash,
+ static_cast<uint16_t>(c0));
position++;
if (position >= source_length_) return Handle<String>::null();
c0 = seq_source_->SeqOneByteStringGet(position);
@@ -845,6 +836,7 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
return result;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_JSON_PARSER_H_
+#endif // V8_PARSING_JSON_PARSER_H_
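Context for the ScanJsonString hunk above: this specialization reads characters via SeqOneByteStringGet, so c0 never exceeds 0xFF and therefore never exceeds kMaxNonSurrogateCharCode (0xFFFF); the deleted lead/trail surrogate split was unreachable in this path. A trivial check of that bound:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kMaxNonSurrogateCharCode = 0xFFFF;  // BMP upper bound
  for (uint32_t c0 = 0; c0 <= 0xFF; ++c0) {          // one-byte string range
    assert(c0 <= kMaxNonSurrogateCharCode);          // split never needed
  }
  return 0;
}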
diff --git a/chromium/v8/src/parsing/parameter-initializer-rewriter.cc b/chromium/v8/src/parsing/parameter-initializer-rewriter.cc
new file mode 100644
index 00000000000..003bbebae01
--- /dev/null
+++ b/chromium/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -0,0 +1,88 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/parameter-initializer-rewriter.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+
+class Rewriter final : public AstExpressionVisitor {
+ public:
+ Rewriter(uintptr_t stack_limit, Expression* initializer, Scope* old_scope,
+ Scope* new_scope)
+ : AstExpressionVisitor(stack_limit, initializer),
+ old_scope_(old_scope),
+ new_scope_(new_scope) {}
+
+ private:
+ void VisitExpression(Expression* expr) override {}
+
+ void VisitFunctionLiteral(FunctionLiteral* expr) override;
+ void VisitClassLiteral(ClassLiteral* expr) override;
+ void VisitVariableProxy(VariableProxy* expr) override;
+
+ Scope* old_scope_;
+ Scope* new_scope_;
+};
+
+
+void Rewriter::VisitFunctionLiteral(FunctionLiteral* function_literal) {
+ function_literal->scope()->ReplaceOuterScope(new_scope_);
+}
+
+
+void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
+ class_literal->scope()->ReplaceOuterScope(new_scope_);
+ if (class_literal->extends() != nullptr) {
+ Visit(class_literal->extends());
+ }
+ // No need to visit the constructor since it will have the class
+ // scope on its scope chain.
+ ZoneList<ObjectLiteralProperty*>* props = class_literal->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ Visit(prop->key());
+ }
+ // No need to visit the values, since all values are functions with
+ // the class scope on their scope chain.
+ DCHECK(prop->value()->IsFunctionLiteral());
+ }
+}
+
+
+void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
+ if (proxy->is_resolved()) {
+ Variable* var = proxy->var();
+ DCHECK_EQ(var->mode(), TEMPORARY);
+ if (old_scope_->RemoveTemporary(var)) {
+ var->set_scope(new_scope_);
+ new_scope_->AddTemporary(var);
+ }
+ } else if (old_scope_->RemoveUnresolved(proxy)) {
+ new_scope_->AddUnresolved(proxy);
+ }
+}
+
+
+} // anonymous namespace
+
+
+void RewriteParameterInitializerScope(uintptr_t stack_limit,
+ Expression* initializer, Scope* old_scope,
+ Scope* new_scope) {
+ Rewriter rewriter(stack_limit, initializer, old_scope, new_scope);
+ rewriter.Run();
+}
+
+
+} // namespace internal
+} // namespace v8
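The new Rewriter above re-homes variables when a parameter initializer moves between scopes: temporaries owned by the old scope migrate to the new one, as do unresolved references. A toy model of the remove-then-add handshake (std::vector stands in for V8's scope-owned lists):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Scope { std::vector<int> temps; };  // ints stand in for Variable*

// Mirrors old_scope_->RemoveTemporary(var): true iff var was owned and removed.
bool RemoveTemporary(Scope* s, int var) {
  std::vector<int>::iterator it =
      std::find(s->temps.begin(), s->temps.end(), var);
  if (it == s->temps.end()) return false;
  s->temps.erase(it);
  return true;
}

int main() {
  Scope old_scope, new_scope;
  old_scope.temps.push_back(1);
  old_scope.temps.push_back(2);
  if (RemoveTemporary(&old_scope, 2)) new_scope.temps.push_back(2);
  std::printf("old=%zu new=%zu\n", old_scope.temps.size(),
              new_scope.temps.size());
  return 0;
}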
diff --git a/chromium/v8/src/parsing/parameter-initializer-rewriter.h b/chromium/v8/src/parsing/parameter-initializer-rewriter.h
new file mode 100644
index 00000000000..255534c99e7
--- /dev/null
+++ b/chromium/v8/src/parsing/parameter-initializer-rewriter.h
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
+#define V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
+
+#include "src/ast/ast.h"
+
+namespace v8 {
+namespace internal {
+
+
+void RewriteParameterInitializerScope(uintptr_t stack_limit,
+ Expression* initializer, Scope* old_scope,
+ Scope* new_scope);
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
diff --git a/chromium/v8/src/preparser.h b/chromium/v8/src/parsing/parser-base.h
index 85844a08750..2955b0b9d9f 100644
--- a/chromium/v8/src/preparser.h
+++ b/chromium/v8/src/parsing/parser-base.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PREPARSER_H
-#define V8_PREPARSER_H
+#ifndef V8_PARSING_PARSER_BASE_H
+#define V8_PARSING_PARSER_BASE_H
+#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
-#include "src/expression-classifier.h"
-#include "src/func-name-inferrer.h"
#include "src/hashmap.h"
#include "src/messages.h"
-#include "src/scanner.h"
-#include "src/scopes.h"
-#include "src/token.h"
+#include "src/parsing/expression-classifier.h"
+#include "src/parsing/func-name-inferrer.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,6 @@ struct FormalParametersBase {
bool has_rest = false;
bool is_simple = true;
int materialized_literals_count = 0;
- mutable int rest_array_literal_index = -1;
};
@@ -88,6 +87,7 @@ class ParserBase : public Traits {
typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
typedef typename Traits::Type::Literal LiteralT;
typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
+ typedef typename Traits::Type::StatementList StatementListT;
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
@@ -107,18 +107,16 @@ class ParserBase : public Traits {
stack_overflow_(false),
allow_lazy_(false),
allow_natives_(false),
- allow_harmony_arrow_functions_(false),
allow_harmony_sloppy_(false),
allow_harmony_sloppy_function_(false),
allow_harmony_sloppy_let_(false),
- allow_harmony_rest_parameters_(false),
allow_harmony_default_parameters_(false),
- allow_harmony_spread_calls_(false),
- allow_harmony_destructuring_(false),
- allow_harmony_spread_arrays_(false),
- allow_harmony_new_target_(false),
+ allow_harmony_destructuring_bind_(false),
+ allow_harmony_destructuring_assignment_(false),
allow_strong_mode_(false),
- allow_legacy_const_(true) {}
+ allow_legacy_const_(true),
+ allow_harmony_do_expressions_(false),
+ allow_harmony_function_name_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -126,20 +124,20 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(lazy);
ALLOW_ACCESSORS(natives);
- ALLOW_ACCESSORS(harmony_arrow_functions);
ALLOW_ACCESSORS(harmony_sloppy);
ALLOW_ACCESSORS(harmony_sloppy_function);
ALLOW_ACCESSORS(harmony_sloppy_let);
- ALLOW_ACCESSORS(harmony_rest_parameters);
ALLOW_ACCESSORS(harmony_default_parameters);
- ALLOW_ACCESSORS(harmony_spread_calls);
- ALLOW_ACCESSORS(harmony_destructuring);
- ALLOW_ACCESSORS(harmony_spread_arrays);
- ALLOW_ACCESSORS(harmony_new_target);
+ ALLOW_ACCESSORS(harmony_destructuring_bind);
+ ALLOW_ACCESSORS(harmony_destructuring_assignment);
ALLOW_ACCESSORS(strong_mode);
ALLOW_ACCESSORS(legacy_const);
+ ALLOW_ACCESSORS(harmony_do_expressions);
+ ALLOW_ACCESSORS(harmony_function_name);
#undef ALLOW_ACCESSORS
+ uintptr_t stack_limit() const { return stack_limit_; }
+
protected:
enum AllowRestrictedIdentifiers {
kAllowRestrictedIdentifiers,
@@ -178,6 +176,15 @@ class ParserBase : public Traits {
Scope* outer_scope_;
};
+ struct DestructuringAssignment {
+ public:
+ DestructuringAssignment(ExpressionT expression, Scope* scope)
+ : assignment(expression), scope(scope) {}
+
+ ExpressionT assignment;
+ Scope* scope;
+ };
+
class FunctionState BASE_EMBEDDED {
public:
FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
@@ -230,6 +237,15 @@ class ParserBase : public Traits {
typename Traits::Type::Factory* factory() { return factory_; }
+ const List<DestructuringAssignment>& destructuring_assignments_to_rewrite()
+ const {
+ return destructuring_assignments_to_rewrite_;
+ }
+
+ void AddDestructuringAssignment(DestructuringAssignment pair) {
+ destructuring_assignments_to_rewrite_.Add(pair);
+ }
+
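A plausible reading of the list above (hedged; only the declarations are visible in this hunk): expressions that might turn out to be destructuring assignments are queued while the parse is still ambiguous, and RewriteDestructuringAssignments() replays the queue once the interpretation is settled. A toy model:

#include <cstdio>
#include <vector>

struct PendingRewrite { int expression_id; };  // stand-in for the AST pair

int main() {
  std::vector<PendingRewrite> to_rewrite;
  PendingRewrite a = {1}, b = {2};
  to_rewrite.push_back(a);  // AddDestructuringAssignment(...)
  to_rewrite.push_back(b);
  for (size_t i = 0; i < to_rewrite.size(); ++i)  // the later rewrite pass
    std::printf("rewrite expr %d\n", to_rewrite[i].expression_id);
  return 0;
}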
private:
// Used to assign an index to each literal that needs materialization in
// the function. Includes regexp literals, and boilerplate for object and
@@ -258,6 +274,11 @@ class ParserBase : public Traits {
FunctionState* outer_function_state_;
Scope** scope_stack_;
Scope* outer_scope_;
+
+ List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
+
+ void RewriteDestructuringAssignments();
+
typename Traits::Type::Factory* factory_;
friend class ParserTraits;
@@ -309,16 +330,14 @@ class ParserBase : public Traits {
};
Scope* NewScope(Scope* parent, ScopeType scope_type) {
- // Must always pass the function kind for FUNCTION_SCOPE and ARROW_SCOPE.
+ // Must always pass the function kind for FUNCTION_SCOPE.
DCHECK(scope_type != FUNCTION_SCOPE);
- DCHECK(scope_type != ARROW_SCOPE);
return NewScope(parent, scope_type, kNormalFunction);
}
Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
DCHECK(ast_value_factory());
DCHECK(scope_type != MODULE_SCOPE || FLAG_harmony_modules);
- DCHECK(!IsArrowFunction(kind) || scope_type == ARROW_SCOPE);
Scope* result = new (zone())
Scope(zone(), parent, scope_type, ast_value_factory(), kind);
result->Initialize();
@@ -426,8 +445,7 @@ class ParserBase : public Traits {
}
}
- bool CheckInOrOf(
- bool accept_OF, ForEachStatement::VisitMode* visit_mode, bool* ok) {
+ bool CheckInOrOf(ForEachStatement::VisitMode* visit_mode, bool* ok) {
if (Check(Token::IN)) {
if (is_strong(language_mode())) {
ReportMessageAt(scanner()->location(), MessageTemplate::kStrongForIn);
@@ -436,7 +454,7 @@ class ParserBase : public Traits {
*visit_mode = ForEachStatement::ENUMERATE;
}
return true;
- } else if (accept_OF && CheckContextualKeyword(CStrVector("of"))) {
+ } else if (CheckContextualKeyword(CStrVector("of"))) {
*visit_mode = ForEachStatement::ITERATE;
return true;
}
@@ -466,6 +484,10 @@ class ParserBase : public Traits {
ok);
}
+ void CheckDestructuringElement(ExpressionT element,
+ ExpressionClassifier* classifier, int beg_pos,
+ int end_pos);
+
// Checking the name of a function literal. This has to be done after parsing
// the function, since the function can declare itself strict.
void CheckFunctionName(LanguageMode language_mode, IdentifierT function_name,
@@ -544,12 +566,20 @@ class ParserBase : public Traits {
void ReportClassifierError(const ExpressionClassifier::Error& error) {
Traits::ReportMessageAt(error.location, error.message, error.arg,
- kSyntaxError);
+ error.type);
}
void ValidateExpression(const ExpressionClassifier* classifier, bool* ok) {
- if (!classifier->is_valid_expression()) {
- ReportClassifierError(classifier->expression_error());
+ if (!classifier->is_valid_expression() ||
+ classifier->has_cover_initialized_name()) {
+ const Scanner::Location& a = classifier->expression_error().location;
+ const Scanner::Location& b =
+ classifier->cover_initialized_name_error().location;
+ if (a.beg_pos < 0 || (b.beg_pos >= 0 && a.beg_pos > b.beg_pos)) {
+ ReportClassifierError(classifier->cover_initialized_name_error());
+ } else {
+ ReportClassifierError(classifier->expression_error());
+ }
*ok = false;
}
}
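The two-branch report above picks whichever recorded error appears first in the source, treating beg_pos < 0 as "not recorded". The same predicate, isolated and exercised:

#include <cassert>

// True when the cover-initialized-name error (b) should be reported instead
// of the expression error (a), per the condition in ValidateExpression above.
bool ReportCoverInitializedName(int a_beg_pos, int b_beg_pos) {
  return a_beg_pos < 0 || (b_beg_pos >= 0 && a_beg_pos > b_beg_pos);
}

int main() {
  assert(ReportCoverInitializedName(-1, 10));   // only b exists
  assert(ReportCoverInitializedName(20, 10));   // b is earlier
  assert(!ReportCoverInitializedName(5, 10));   // a is earlier
  assert(!ReportCoverInitializedName(5, -1));   // only a exists
  return 0;
}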
@@ -620,6 +650,13 @@ class ParserBase : public Traits {
}
}
+ void ValidateLetPattern(const ExpressionClassifier* classifier, bool* ok) {
+ if (!classifier->is_valid_let_pattern()) {
+ ReportClassifierError(classifier->let_pattern_error());
+ *ok = false;
+ }
+ }
+
void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
@@ -663,9 +700,20 @@ class ParserBase : public Traits {
IdentifierT ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
bool* ok);
// Parses an identifier or a strict mode future reserved word, and indicates
- // whether it is strict mode future reserved.
- IdentifierT ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ // whether it is strict mode future reserved. Allows passing in is_generator
+ // for the case of parsing the identifier in a function expression, where the
+ // relevant "is_generator" bit is that of the function being parsed, not of
+ // the containing function.
+ IdentifierT ParseIdentifierOrStrictReservedWord(bool is_generator,
+ bool* is_strict_reserved,
bool* ok);
+ IdentifierT ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok) {
+ return ParseIdentifierOrStrictReservedWord(this->is_generator(),
+ is_strict_reserved, ok);
+ }
+
IdentifierT ParseIdentifierName(bool* ok);
// Parses an identifier and determines whether or not it is 'get' or 'set'.
IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get, bool* is_set,
@@ -680,21 +728,37 @@ class ParserBase : public Traits {
ExpressionT ParseExpression(bool accept_IN, bool* ok);
ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
bool* ok);
+ ExpressionT ParseExpression(bool accept_IN, int flags,
+ ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
bool* is_static, bool* is_computed_name,
+ bool* is_identifier, bool* is_escaped_keyword,
ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
ObjectLiteralPropertyT ParsePropertyDefinition(
ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
bool is_static, bool* is_computed_name, bool* has_seen_constructor,
- ExpressionClassifier* classifier, bool* ok);
+ ExpressionClassifier* classifier, IdentifierT* name, bool* ok);
typename Traits::Type::ExpressionList ParseArguments(
Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
bool* ok);
- ExpressionT ParseAssignmentExpression(bool accept_IN,
+
+ enum AssignmentExpressionFlags {
+ kIsNormalAssignment = 0,
+ kIsPossiblePatternElement = 1 << 0,
+ kIsPossibleArrowFormals = 1 << 1
+ };
+
+ ExpressionT ParseAssignmentExpression(bool accept_IN, int flags,
ExpressionClassifier* classifier,
bool* ok);
+ ExpressionT ParseAssignmentExpression(bool accept_IN,
+ ExpressionClassifier* classifier,
+ bool* ok) {
+ return ParseAssignmentExpression(accept_IN, kIsNormalAssignment, classifier,
+ ok);
+ }
ExpressionT ParseYieldExpression(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseConditionalExpression(bool accept_IN,
ExpressionClassifier* classifier,
@@ -711,9 +775,10 @@ class ParserBase : public Traits {
ExpressionT ParseMemberExpression(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseMemberExpressionContinuation(
ExpressionT expression, ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseArrowFunctionLiteral(
- const FormalParametersT& parameters,
- const ExpressionClassifier& classifier, bool* ok);
+ ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
+ const FormalParametersT& parameters,
+ const ExpressionClassifier& classifier,
+ bool* ok);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
ExpressionClassifier* classifier, bool* ok);
void AddTemplateExpression(ExpressionT);
@@ -741,10 +806,40 @@ class ParserBase : public Traits {
ExpressionT CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok);
+ ExpressionT ClassifyAndRewriteReferenceExpression(
+ ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
+ int end_pos, MessageTemplate::Template message,
+ ParseErrorType type = kSyntaxError);
ExpressionT CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok);
+ bool IsValidReferenceExpression(ExpressionT expression);
+
+ bool IsAssignableIdentifier(ExpressionT expression) {
+ if (!Traits::IsIdentifier(expression)) return false;
+ if (is_strict(language_mode()) &&
+ Traits::IsEvalOrArguments(Traits::AsIdentifier(expression))) {
+ return false;
+ }
+ if (is_strong(language_mode()) &&
+ Traits::IsUndefined(Traits::AsIdentifier(expression))) {
+ return false;
+ }
+ return true;
+ }
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations. This checks whether the expression is an eval call, and if
+ // so, forwards the information to the scope.
+ void CheckPossibleEvalCall(ExpressionT expression, Scope* scope) {
+ if (Traits::IsIdentifier(expression) &&
+ Traits::IsEval(Traits::AsIdentifier(expression))) {
+ scope->DeclarationScope()->RecordEvalCall();
+ scope->RecordEvalCall();
+ }
+ }
+
// Used to validate property names in object literals and class literals
enum PropertyKind {
kAccessorProperty,
@@ -827,1102 +922,19 @@ class ParserBase : public Traits {
bool allow_lazy_;
bool allow_natives_;
- bool allow_harmony_arrow_functions_;
bool allow_harmony_sloppy_;
bool allow_harmony_sloppy_function_;
bool allow_harmony_sloppy_let_;
- bool allow_harmony_rest_parameters_;
bool allow_harmony_default_parameters_;
- bool allow_harmony_spread_calls_;
- bool allow_harmony_destructuring_;
- bool allow_harmony_spread_arrays_;
- bool allow_harmony_new_target_;
+ bool allow_harmony_destructuring_bind_;
+ bool allow_harmony_destructuring_assignment_;
bool allow_strong_mode_;
bool allow_legacy_const_;
+ bool allow_harmony_do_expressions_;
+ bool allow_harmony_function_name_;
};
-class PreParserIdentifier {
- public:
- PreParserIdentifier() : type_(kUnknownIdentifier) {}
- static PreParserIdentifier Default() {
- return PreParserIdentifier(kUnknownIdentifier);
- }
- static PreParserIdentifier Eval() {
- return PreParserIdentifier(kEvalIdentifier);
- }
- static PreParserIdentifier Arguments() {
- return PreParserIdentifier(kArgumentsIdentifier);
- }
- static PreParserIdentifier Undefined() {
- return PreParserIdentifier(kUndefinedIdentifier);
- }
- static PreParserIdentifier FutureReserved() {
- return PreParserIdentifier(kFutureReservedIdentifier);
- }
- static PreParserIdentifier FutureStrictReserved() {
- return PreParserIdentifier(kFutureStrictReservedIdentifier);
- }
- static PreParserIdentifier Let() {
- return PreParserIdentifier(kLetIdentifier);
- }
- static PreParserIdentifier Static() {
- return PreParserIdentifier(kStaticIdentifier);
- }
- static PreParserIdentifier Yield() {
- return PreParserIdentifier(kYieldIdentifier);
- }
- static PreParserIdentifier Prototype() {
- return PreParserIdentifier(kPrototypeIdentifier);
- }
- static PreParserIdentifier Constructor() {
- return PreParserIdentifier(kConstructorIdentifier);
- }
- bool IsEval() const { return type_ == kEvalIdentifier; }
- bool IsArguments() const { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
- bool IsUndefined() const { return type_ == kUndefinedIdentifier; }
- bool IsLet() const { return type_ == kLetIdentifier; }
- bool IsStatic() const { return type_ == kStaticIdentifier; }
- bool IsYield() const { return type_ == kYieldIdentifier; }
- bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
- bool IsConstructor() const { return type_ == kConstructorIdentifier; }
- bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() const {
- return type_ == kFutureStrictReservedIdentifier ||
- type_ == kLetIdentifier || type_ == kStaticIdentifier ||
- type_ == kYieldIdentifier;
- }
-
- // Allow identifier->name()[->length()] to work. The preparser
- // does not need the actual positions/lengths of the identifiers.
- const PreParserIdentifier* operator->() const { return this; }
- const PreParserIdentifier raw_name() const { return *this; }
-
- int position() const { return 0; }
- int length() const { return 0; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kLetIdentifier,
- kStaticIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier,
- kUndefinedIdentifier,
- kPrototypeIdentifier,
- kConstructorIdentifier
- };
-
- explicit PreParserIdentifier(Type type) : type_(type) {}
- Type type_;
-
- friend class PreParserExpression;
-};
-
-
-class PreParserExpression {
- public:
- static PreParserExpression Default() {
- return PreParserExpression(TypeField::encode(kExpression));
- }
-
- static PreParserExpression Spread(PreParserExpression expression) {
- return PreParserExpression(TypeField::encode(kSpreadExpression));
- }
-
- static PreParserExpression FromIdentifier(PreParserIdentifier id) {
- return PreParserExpression(TypeField::encode(kIdentifierExpression) |
- IdentifierTypeField::encode(id.type_));
- }
-
- static PreParserExpression BinaryOperation(PreParserExpression left,
- Token::Value op,
- PreParserExpression right) {
- return PreParserExpression(
- TypeField::encode(kBinaryOperationExpression) |
- HasRestField::encode(op == Token::COMMA &&
- right->IsSpreadExpression()));
- }
-
- static PreParserExpression StringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression));
- }
-
- static PreParserExpression UseStrictStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrictField::encode(true));
- }
-
- static PreParserExpression UseStrongStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrongField::encode(true));
- }
-
- static PreParserExpression This() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kThisExpression));
- }
-
- static PreParserExpression ThisProperty() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kThisPropertyExpression));
- }
-
- static PreParserExpression Property() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kPropertyExpression));
- }
-
- static PreParserExpression Call() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kCallExpression));
- }
-
- static PreParserExpression SuperCallReference() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kSuperCallReference));
- }
-
- static PreParserExpression NoTemplateTag() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kNoTemplateTagExpression));
- }
-
- bool IsIdentifier() const {
- return TypeField::decode(code_) == kIdentifierExpression;
- }
-
- PreParserIdentifier AsIdentifier() const {
- DCHECK(IsIdentifier());
- return PreParserIdentifier(IdentifierTypeField::decode(code_));
- }
-
- bool IsStringLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression;
- }
-
- bool IsUseStrictLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseStrictField::decode(code_);
- }
-
- bool IsUseStrongLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseStrongField::decode(code_);
- }
-
- bool IsThis() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kThisExpression;
- }
-
- bool IsThisProperty() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kThisPropertyExpression;
- }
-
- bool IsProperty() const {
- return TypeField::decode(code_) == kExpression &&
- (ExpressionTypeField::decode(code_) == kPropertyExpression ||
- ExpressionTypeField::decode(code_) == kThisPropertyExpression);
- }
-
- bool IsCall() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kCallExpression;
- }
-
- bool IsSuperCallReference() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kSuperCallReference;
- }
-
- bool IsValidReferenceExpression() const {
- return IsIdentifier() || IsProperty();
- }
-
- // At the moment PreParser doesn't track these expression types.
- bool IsFunctionLiteral() const { return false; }
- bool IsCallNew() const { return false; }
-
- bool IsNoTemplateTag() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
- }
-
- bool IsSpreadExpression() const {
- return TypeField::decode(code_) == kSpreadExpression;
- }
-
- bool IsArrowFunctionFormalParametersWithRestParameter() const {
- // Iff the expression classifier has determined that this expression is a
- // valid arrow formal parameter list, return true if the formal parameter
- // list ends with a rest parameter.
- return IsSpreadExpression() ||
- (IsBinaryOperation() && HasRestField::decode(code_));
- }
-
- PreParserExpression AsFunctionLiteral() { return *this; }
-
- bool IsBinaryOperation() const {
- return TypeField::decode(code_) == kBinaryOperationExpression;
- }
-
- // Dummy implementation for making expression->somefunc() work in both Parser
- // and PreParser.
- PreParserExpression* operator->() { return this; }
-
- // More dummy implementations of things PreParser doesn't need to track:
- void set_index(int index) {} // For YieldExpressions
- void set_should_eager_compile() {}
-
- int position() const { return RelocInfo::kNoPosition; }
- void set_function_token_position(int position) {}
-
- private:
- enum Type {
- kExpression,
- kIdentifierExpression,
- kStringLiteralExpression,
- kBinaryOperationExpression,
- kSpreadExpression
- };
-
- enum ExpressionType {
- kThisExpression,
- kThisPropertyExpression,
- kPropertyExpression,
- kCallExpression,
- kSuperCallReference,
- kNoTemplateTagExpression
- };
-
- explicit PreParserExpression(uint32_t expression_code)
- : code_(expression_code) {}
-
- // The first three bits are for the Type.
- typedef BitField<Type, 0, 3> TypeField;
-
- // The rest of the bits are interpreted depending on the value
- // of the Type field, so they can share the storage.
- typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
- typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
- typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
- typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
- IdentifierTypeField;
- typedef BitField<bool, TypeField::kNext, 1> HasRestField;
-
- uint32_t code_;
-};
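The block removed above packs a PreParserExpression into a single uint32_t through BitField typedefs. A minimal BitField in that spirit (V8's real template adds masking helpers and bounds checks):

#include <cassert>
#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static const int kNext = shift + size;  // where the next field may start
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t code) {
    return static_cast<T>((code >> shift) & ((1u << size) - 1));
  }
};

enum Type { kExpression, kIdentifierExpression };
typedef BitField<Type, 0, 3> TypeField;  // "first three bits", as above

int main() {
  uint32_t code = TypeField::encode(kIdentifierExpression);
  assert(TypeField::decode(code) == kIdentifierExpression);
  return 0;
}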
-
-
-// The pre-parser doesn't need to build lists of expressions, identifiers, or
-// the like.
-template <typename T>
-class PreParserList {
- public:
- // These functions make list->Add(some_expression) work (and do nothing).
- PreParserList() : length_(0) {}
- PreParserList* operator->() { return this; }
- void Add(T, void*) { ++length_; }
- int length() const { return length_; }
- private:
- int length_;
-};
-
-
-typedef PreParserList<PreParserExpression> PreParserExpressionList;
-
-
-class PreParserStatement {
- public:
- static PreParserStatement Default() {
- return PreParserStatement(kUnknownStatement);
- }
-
- static PreParserStatement Jump() {
- return PreParserStatement(kJumpStatement);
- }
-
- static PreParserStatement FunctionDeclaration() {
- return PreParserStatement(kFunctionDeclaration);
- }
-
- // Creates expression statement from expression.
- // Preserves being an unparenthesized string literal, possibly
- // "use strict".
- static PreParserStatement ExpressionStatement(
- PreParserExpression expression) {
- if (expression.IsUseStrictLiteral()) {
- return PreParserStatement(kUseStrictExpressionStatement);
- }
- if (expression.IsUseStrongLiteral()) {
- return PreParserStatement(kUseStrongExpressionStatement);
- }
- if (expression.IsStringLiteral()) {
- return PreParserStatement(kStringLiteralExpressionStatement);
- }
- return Default();
- }
-
- bool IsStringLiteral() {
- return code_ == kStringLiteralExpressionStatement;
- }
-
- bool IsUseStrictLiteral() {
- return code_ == kUseStrictExpressionStatement;
- }
-
- bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
-
- bool IsFunctionDeclaration() {
- return code_ == kFunctionDeclaration;
- }
-
- bool IsJumpStatement() {
- return code_ == kJumpStatement;
- }
-
- private:
- enum Type {
- kUnknownStatement,
- kJumpStatement,
- kStringLiteralExpressionStatement,
- kUseStrictExpressionStatement,
- kUseStrongExpressionStatement,
- kFunctionDeclaration
- };
-
- explicit PreParserStatement(Type code) : code_(code) {}
- Type code_;
-};
-
-
-typedef PreParserList<PreParserStatement> PreParserStatementList;
-
-
-class PreParserFactory {
- public:
- explicit PreParserFactory(void* unused_value_factory) {}
- PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewNumberLiteral(double number,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
- PreParserIdentifier js_flags,
- int literal_index,
- bool is_strong,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewArrayLiteral(PreParserExpressionList values,
- int literal_index,
- bool is_strong,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewArrayLiteral(PreParserExpressionList values,
- int first_spread_index, int literal_index,
- bool is_strong, int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
- PreParserExpression value,
- ObjectLiteralProperty::Kind kind,
- bool is_static,
- bool is_computed_name) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
- PreParserExpression value,
- bool is_static,
- bool is_computed_name) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
- int literal_index,
- int boilerplate_properties,
- bool has_function,
- bool is_strong,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewVariableProxy(void* variable) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewProperty(PreParserExpression obj,
- PreParserExpression key,
- int pos) {
- if (obj.IsThis()) {
- return PreParserExpression::ThisProperty();
- }
- return PreParserExpression::Property();
- }
- PreParserExpression NewUnaryOperation(Token::Value op,
- PreParserExpression expression,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewBinaryOperation(Token::Value op,
- PreParserExpression left,
- PreParserExpression right, int pos) {
- return PreParserExpression::BinaryOperation(left, op, right);
- }
- PreParserExpression NewCompareOperation(Token::Value op,
- PreParserExpression left,
- PreParserExpression right, int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewAssignment(Token::Value op,
- PreParserExpression left,
- PreParserExpression right,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewYield(PreParserExpression generator_object,
- PreParserExpression expression,
- Yield::Kind yield_kind,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewConditional(PreParserExpression condition,
- PreParserExpression then_expression,
- PreParserExpression else_expression,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewCountOperation(Token::Value op,
- bool is_prefix,
- PreParserExpression expression,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewCall(PreParserExpression expression,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Call();
- }
- PreParserExpression NewCallNew(PreParserExpression expression,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewCallRuntime(const AstRawString* name,
- const Runtime::Function* function,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserStatement NewReturnStatement(PreParserExpression expression,
- int pos) {
- return PreParserStatement::Default();
- }
- PreParserExpression NewFunctionLiteral(
- PreParserIdentifier name, AstValueFactory* ast_value_factory,
- Scope* scope, PreParserStatementList body, int materialized_literal_count,
- int expected_property_count, int parameter_count,
- FunctionLiteral::ParameterFlag has_duplicate_parameters,
- FunctionLiteral::FunctionType function_type,
- FunctionLiteral::IsFunctionFlag is_function,
- FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
- int position) {
- return PreParserExpression::Default();
- }
-
- PreParserExpression NewSpread(PreParserExpression expression, int pos) {
- return PreParserExpression::Spread(expression);
- }
-
- PreParserExpression NewEmptyParentheses(int pos) {
- return PreParserExpression::Default();
- }
-
- // Return the object itself as AstVisitor and implement the needed
- // dummy method right in this class.
- PreParserFactory* visitor() { return this; }
- int* ast_properties() {
- static int dummy = 42;
- return &dummy;
- }
-};
-
-
-struct PreParserFormalParameters : FormalParametersBase {
- explicit PreParserFormalParameters(Scope* scope)
- : FormalParametersBase(scope) {}
- int arity = 0;
-
- int Arity() const { return arity; }
- PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
-};
-
-
-class PreParser;
-
-class PreParserTraits {
- public:
- struct Type {
- // TODO(marja): To be removed. The Traits object should contain all the data
- // it needs.
- typedef PreParser* Parser;
-
- // PreParser doesn't need to store generator variables.
- typedef void GeneratorVariable;
-
- typedef int AstProperties;
-
- // Return types for traversing functions.
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- typedef PreParserExpression YieldExpression;
- typedef PreParserExpression FunctionLiteral;
- typedef PreParserExpression ClassLiteral;
- typedef PreParserExpression ObjectLiteralProperty;
- typedef PreParserExpression Literal;
- typedef PreParserExpressionList ExpressionList;
- typedef PreParserExpressionList PropertyList;
- typedef PreParserIdentifier FormalParameter;
- typedef PreParserFormalParameters FormalParameters;
- typedef PreParserStatementList StatementList;
-
- // For constructing objects returned by the traversing functions.
- typedef PreParserFactory Factory;
- };
-
- explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
-
- // Helper functions for recursive descent.
- static bool IsEval(PreParserIdentifier identifier) {
- return identifier.IsEval();
- }
-
- static bool IsArguments(PreParserIdentifier identifier) {
- return identifier.IsArguments();
- }
-
- static bool IsEvalOrArguments(PreParserIdentifier identifier) {
- return identifier.IsEvalOrArguments();
- }
-
- static bool IsUndefined(PreParserIdentifier identifier) {
- return identifier.IsUndefined();
- }
-
- static bool IsPrototype(PreParserIdentifier identifier) {
- return identifier.IsPrototype();
- }
-
- static bool IsConstructor(PreParserIdentifier identifier) {
- return identifier.IsConstructor();
- }
-
- // Returns true if the expression is of type "this.foo".
- static bool IsThisProperty(PreParserExpression expression) {
- return expression.IsThisProperty();
- }
-
- static bool IsIdentifier(PreParserExpression expression) {
- return expression.IsIdentifier();
- }
-
- static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
- return expression.AsIdentifier();
- }
-
- static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
- return identifier.IsFutureStrictReserved();
- }
-
- static bool IsBoilerplateProperty(PreParserExpression property) {
- // PreParser doesn't count boilerplate properties.
- return false;
- }
-
- static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
- return false;
- }
-
- static PreParserExpression GetPropertyValue(PreParserExpression property) {
- return PreParserExpression::Default();
- }
-
- // Functions for encapsulating the differences between parsing and
- // preparsing: operations interleaved with the recursive descent.
- static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
- }
-
- static void PushPropertyName(FuncNameInferrer* fni,
- PreParserExpression expression) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
- }
-
- static void InferFunctionName(FuncNameInferrer* fni,
- PreParserExpression expression) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
- }
-
- static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- Scope* scope, PreParserExpression property, bool* has_function) {}
-
- static void CheckAssigningFunctionLiteralToProperty(
- PreParserExpression left, PreParserExpression right) {}
-
- static void CheckPossibleEvalCall(PreParserExpression expression,
- Scope* scope) {
- if (IsIdentifier(expression) && IsEval(AsIdentifier(expression))) {
- scope->DeclarationScope()->RecordEvalCall();
- scope->RecordEvalCall();
- }
- }
-
- static PreParserExpression MarkExpressionAsAssigned(
- PreParserExpression expression) {
- // TODO(marja): To be able to produce the same errors, the preparser needs
- // to start tracking which expressions are variables and which are assigned.
- return expression;
- }
-
- bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
- PreParserExpression y,
- Token::Value op,
- int pos,
- PreParserFactory* factory) {
- return false;
- }
-
- PreParserExpression BuildUnaryExpression(PreParserExpression expression,
- Token::Value op, int pos,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
- Handle<Object> arg, int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
- Handle<Object> arg, int pos) {
- return PreParserExpression::Default();
- }
-
- // Reporting errors.
- void ReportMessageAt(Scanner::Location location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(int start_pos, int end_pos,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
-
- // "null" return type creators.
- static PreParserIdentifier EmptyIdentifier() {
- return PreParserIdentifier::Default();
- }
- static PreParserIdentifier EmptyIdentifierString() {
- return PreParserIdentifier::Default();
- }
- static PreParserExpression EmptyExpression() {
- return PreParserExpression::Default();
- }
- static PreParserExpression EmptyLiteral() {
- return PreParserExpression::Default();
- }
- static PreParserExpression EmptyObjectLiteralProperty() {
- return PreParserExpression::Default();
- }
- static PreParserExpression EmptyFunctionLiteral() {
- return PreParserExpression::Default();
- }
- static PreParserExpressionList NullExpressionList() {
- return PreParserExpressionList();
- }
-
- // Odd-ball literal creators.
- static PreParserExpression GetLiteralTheHole(int position,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- // Producing data during the recursive descent.
- PreParserIdentifier GetSymbol(Scanner* scanner);
- PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
-
- static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
- return PreParserIdentifier::Default();
- }
-
- static PreParserExpression ThisExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::This();
- }
-
- static PreParserExpression SuperPropertyReference(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression SuperCallReference(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::SuperCallReference();
- }
-
- static PreParserExpression NewTargetExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
- int pos, int end_pos) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression ExpressionFromLiteral(
- Token::Value token, int pos, Scanner* scanner,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int start_position, int end_position,
- Scope* scope, PreParserFactory* factory) {
- return PreParserExpression::FromIdentifier(name);
- }
-
- PreParserExpression ExpressionFromString(int pos,
- Scanner* scanner,
- PreParserFactory* factory = NULL);
-
- PreParserExpression GetIterator(PreParserExpression iterable,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpressionList NewExpressionList(int size, Zone* zone) {
- return PreParserExpressionList();
- }
-
- static PreParserStatementList NewStatementList(int size, Zone* zone) {
- return PreParserStatementList();
- }
-
- static PreParserExpressionList NewPropertyList(int size, Zone* zone) {
- return PreParserExpressionList();
- }
-
- static void AddParameterInitializationBlock(
- const PreParserFormalParameters& parameters,
- PreParserStatementList list, bool* ok) {}
-
- V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok) {
- UNREACHABLE();
- }
-
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- V8_INLINE void ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters,
- PreParserExpression expression, const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok);
-
- void ReindexLiterals(const PreParserFormalParameters& parameters) {}
-
- struct TemplateLiteralState {};
-
- TemplateLiteralState OpenTemplateLiteral(int pos) {
- return TemplateLiteralState();
- }
- void AddTemplateSpan(TemplateLiteralState*, bool) {}
- void AddTemplateExpression(TemplateLiteralState*, PreParserExpression) {}
- PreParserExpression CloseTemplateLiteral(TemplateLiteralState*, int,
- PreParserExpression tag) {
- if (IsTaggedTemplate(tag)) {
- // Emulate generation of array literals for the tag callsite:
- // the first is the array of cooked strings, the second of raw strings.
- MaterializeTemplateCallsiteLiterals();
- }
- return EmptyExpression();
- }
- inline void MaterializeTemplateCallsiteLiterals();
- PreParserExpression NoTemplateTag() {
- return PreParserExpression::NoTemplateTag();
- }
- static bool IsTaggedTemplate(const PreParserExpression tag) {
- return !tag.IsNoTemplateTag();
- }
-
- void AddFormalParameter(
- PreParserFormalParameters* parameters, PreParserExpression pattern,
- PreParserExpression initializer, bool is_rest) {
- ++parameters->arity;
- }
- void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
- ExpressionClassifier* classifier) {
- if (!classifier->is_simple_parameter_list()) {
- scope->SetHasNonSimpleParameters();
- }
- }
-
- void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
-
- // Temporary glue; these functions will move to ParserBase.
- PreParserExpression ParseV8Intrinsic(bool* ok);
- PreParserExpression ParseFunctionLiteral(
- PreParserIdentifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- LanguageMode language_mode, bool* ok);
-
- PreParserExpression ParseClassLiteral(PreParserIdentifier name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-
- PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
- return list;
- }
-
- inline void MaterializeUnspreadArgumentsLiterals(int count);
-
- inline PreParserExpression SpreadCall(PreParserExpression function,
- PreParserExpressionList args, int pos);
-
- inline PreParserExpression SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos);
-
- private:
- PreParser* pre_parser_;
-};
-
-
- // Preparsing checks a JavaScript program and emits preparse-data that
- // helps a later parse run faster.
- // See preparse-data-format.h for the data format.
-
- // The PreParser checks that the syntax follows the grammar for JavaScript,
- // and collects some information about the program along the way.
- // The grammar check is performed only to understand the program well
- // enough to deduce information that can be used to speed up later parsing.
- // Finding errors is not the goal of pre-parsing; rather, the goal is to
- // speed up properly written, correct programs.
- // That means that contextual checks (like a label being declared where
- // it is used) are generally omitted.
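-
- // For instance, given "function outer() { function inner() { big(); } }",
- // a preparse pass can record the source extent of 'inner' so that a later
- // full parse may skip its body until it is actually needed (an
- // illustrative sketch of the intent, not of the recorded data format).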
-class PreParser : public ParserBase<PreParserTraits> {
- public:
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- typedef PreParserStatement Statement;
-
- enum PreParseResult {
- kPreParseStackOverflow,
- kPreParseSuccess
- };
-
- PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
- ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
- ast_value_factory, log, this) {}
-
- // Pre-parse the program from the character stream; returns
- // kPreParseSuccess on success (even if parsing failed, the pre-parse
- // data successfully captured the syntax error), and
- // kPreParseStackOverflow if a stack overflow happened during parsing.
- PreParseResult PreParseProgram(int* materialized_literals = 0) {
- Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- PreParserFactory factory(NULL);
- FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
- &factory);
- bool ok = true;
- int start_position = scanner()->peek_location().beg_pos;
- ParseStatementList(Token::EOS, &ok);
- if (stack_overflow()) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner()->current_token());
- } else if (is_strict(scope_->language_mode())) {
- CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
- &ok);
- }
- if (materialized_literals) {
- *materialized_literals = function_state_->materialized_literal_count();
- }
- return kPreParseSuccess;
- }
-
- // Parses a single function literal, from the opening parentheses before
- // parameters to the closing brace after the body.
- // Returns a FunctionEntry describing the body of the function in enough
- // detail that it can be lazily compiled.
- // The scanner is expected to have matched the "function" or "function*"
- // keyword and parameters, and have consumed the initial '{'.
- // At return, unless an error occurred, the scanner is positioned before
- // the final '}'.
- PreParseResult PreParseLazyFunction(
- LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
- ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
-
- private:
- friend class PreParserTraits;
-
- static const int kLazyParseTrialLimit = 200;
-
- // These types form an algebra over syntactic categories that is just
- // rich enough to let us recognize and propagate the constructs that
- // are either being counted in the preparser data, or are important
- // for throwing the correct syntax error exceptions.
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- Statement ParseStatementListItem(bool* ok);
- void ParseStatementList(int end_token, bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
- Statement ParseStatement(bool* ok);
- Statement ParseSubStatement(bool* ok);
- Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseClassDeclaration(bool* ok);
- Statement ParseBlock(bool* ok);
- Statement ParseVariableStatement(VariableDeclarationContext var_context,
- bool* ok);
- Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
- int* num_decl,
- Scanner::Location* first_initializer_loc,
- Scanner::Location* bindings_loc,
- bool* ok);
- Statement ParseExpressionOrLabelledStatement(bool* ok);
- Statement ParseIfStatement(bool* ok);
- Statement ParseContinueStatement(bool* ok);
- Statement ParseBreakStatement(bool* ok);
- Statement ParseReturnStatement(bool* ok);
- Statement ParseWithStatement(bool* ok);
- Statement ParseSwitchStatement(bool* ok);
- Statement ParseDoWhileStatement(bool* ok);
- Statement ParseWhileStatement(bool* ok);
- Statement ParseForStatement(bool* ok);
- Statement ParseThrowStatement(bool* ok);
- Statement ParseTryStatement(bool* ok);
- Statement ParseDebuggerStatement(bool* ok);
- Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseObjectLiteral(bool* ok);
- Expression ParseV8Intrinsic(bool* ok);
-
- V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok);
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- Expression ParseFunctionLiteral(
- Identifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_pos, FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
- LanguageMode language_mode, bool* ok);
- void ParseLazyFunctionLiteralBody(bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
-
- PreParserExpression ParseClassLiteral(PreParserIdentifier name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-};
-
-
-void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
-}
-
-
-void PreParserTraits::MaterializeUnspreadArgumentsLiterals(int count) {
- for (int i = 0; i < count; ++i) {
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- }
-}
-
-
-PreParserExpression PreParserTraits::SpreadCall(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return pre_parser_->factory()->NewCall(function, args, pos);
-}
-
-PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return pre_parser_->factory()->NewCallNew(function, args, pos);
-}
-
-
-void PreParserTraits::ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters,
- PreParserExpression params, const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok) {
- // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
- // lists that are too long.
-
- // Accommodate the array literal for the rest parameter.
- if (params.IsArrowFunctionFormalParametersWithRestParameter()) {
- ++parameters->materialized_literals_count;
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- }
-}
-
-
-PreParserStatementList PreParser::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
-
- ParseStatementList(Token::RBRACE, ok);
- if (!*ok) return PreParserStatementList();
-
- Expect(Token::RBRACE, ok);
- return PreParserStatementList();
-}
-
-
-PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- return pre_parser_->ParseEagerFunctionBody(function_name, pos, parameters,
- kind, function_type, ok);
-}
-
-
template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
@@ -1992,6 +1004,11 @@ void ParserBase<Traits>::GetUnexpectedTokenMessage(
*message = MessageTemplate::kUnexpectedTemplateString;
*arg = nullptr;
break;
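+ // Keywords and reserved words written with Unicode escapes (for
+ // example "\u0069f" for 'if') tokenize separately; ES2015 forbids
+ // escape sequences in them, hence the dedicated message below
+ // (illustrative note).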
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_KEYWORD:
+ *message = MessageTemplate::kInvalidEscapedReservedWord;
+ *arg = nullptr;
+ break;
default:
const char* name = Token::String(token);
DCHECK(name != NULL);
@@ -2029,9 +1046,6 @@ typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
if (!*ok) return Traits::EmptyIdentifier();
ValidateBindingPattern(&classifier, ok);
if (!*ok) return Traits::EmptyIdentifier();
- } else {
- ValidateExpression(&classifier, ok);
- if (!*ok) return Traits::EmptyIdentifier();
}
return result;
@@ -2091,10 +1105,21 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
return name;
} else if (is_sloppy(language_mode()) &&
(next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::ESCAPED_STRICT_RESERVED_WORD ||
next == Token::LET || next == Token::STATIC ||
(next == Token::YIELD && !is_generator()))) {
classifier->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
+ if (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
+ is_strict(language_mode())) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
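+ // 'let' may still be a plain identifier in sloppy mode, but never a
+ // lexical binding name: "let let = 1;" must fail, which the recorded
+ // error enforces if this lands in a lexical declaration (illustrative
+ // note).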
+ if (next == Token::LET) {
+ classifier->RecordLetPatternError(scanner()->location(),
+ MessageTemplate::kLetInLexicalBinding);
+ }
return this->GetSymbol(scanner());
} else {
this->ReportUnexpectedToken(next);
@@ -2105,15 +1130,14 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
template <class Traits>
-typename ParserBase<Traits>::IdentifierT ParserBase<
- Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
- bool* ok) {
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierOrStrictReservedWord(
+ bool is_generator, bool* is_strict_reserved, bool* ok) {
Token::Value next = Next();
if (next == Token::IDENTIFIER) {
*is_strict_reserved = false;
} else if (next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
- next == Token::STATIC ||
- (next == Token::YIELD && !this->is_generator())) {
+ next == Token::STATIC || (next == Token::YIELD && !is_generator)) {
*is_strict_reserved = true;
} else {
ReportUnexpectedToken(next);
@@ -2133,7 +1157,9 @@ ParserBase<Traits>::ParseIdentifierName(bool* ok) {
Token::Value next = Next();
if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
next != Token::LET && next != Token::STATIC && next != Token::YIELD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ next != Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != Token::ESCAPED_KEYWORD &&
+ next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
this->ReportUnexpectedToken(next);
*ok = false;
return Traits::EmptyIdentifier();
@@ -2171,13 +1197,14 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
int literal_index = function_state_->NextMaterializedLiteralIndex();
IdentifierT js_pattern = this->GetNextSymbol(scanner());
- if (!scanner()->ScanRegExpFlags()) {
+ Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
+ if (flags.IsNothing()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
*ok = false;
return Traits::EmptyExpression();
}
- IdentifierT js_flags = this->GetNextSymbol(scanner());
+ int js_flags = flags.FromJust();
Next();
return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index,
is_strong(language_mode()), pos);
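// E.g. for /ab+c/gi the pattern symbol is "ab+c" while 'js_flags' now
// carries the scanned flag bits for "gi" instead of a second identifier
// (a sketch of the changed contract; the encoding lives in the scanner).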
@@ -2216,12 +1243,10 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
// ClassLiteral
// '(' Expression ')'
// TemplateLiteral
+ // do Block
- int beg_pos = scanner()->peek_location().beg_pos;
- int end_pos = scanner()->peek_location().end_pos;
- ExpressionT result = this->EmptyExpression();
- Token::Value token = peek();
- switch (token) {
+ int beg_pos = peek_position();
+ switch (peek()) {
case Token::THIS: {
BindingPatternUnexpectedToken(classifier);
Consume(Token::THIS);
@@ -2231,77 +1256,65 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
if (IsClassConstructor(function_state_->kind())) {
ReportMessage(MessageTemplate::kStrongConstructorThis);
*ok = false;
- break;
+ return this->EmptyExpression();
}
}
- result = this->ThisExpression(scope_, factory(), beg_pos);
- break;
+ return this->ThisExpression(scope_, factory(), beg_pos);
}
case Token::NULL_LITERAL:
case Token::TRUE_LITERAL:
case Token::FALSE_LITERAL:
BindingPatternUnexpectedToken(classifier);
- Next();
- result =
- this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
- break;
+ return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::SMI:
case Token::NUMBER:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenNumber);
- Next();
- result =
- this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
- break;
+ return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::IDENTIFIER:
case Token::LET:
case Token::STATIC:
case Token::YIELD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseAndClassifyIdentifier(classifier, CHECK_OK);
- result = this->ExpressionFromIdentifier(name, beg_pos, end_pos, scope_,
- factory());
- break;
+ return this->ExpressionFromIdentifier(
+ name, beg_pos, scanner()->location().end_pos, scope_, factory());
}
case Token::STRING: {
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenString);
Consume(Token::STRING);
- result = this->ExpressionFromString(beg_pos, scanner(), factory());
- break;
+ return this->ExpressionFromString(beg_pos, scanner(), factory());
}
case Token::ASSIGN_DIV:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- result = this->ParseRegExpLiteral(true, classifier, CHECK_OK);
- break;
+ return this->ParseRegExpLiteral(true, classifier, ok);
case Token::DIV:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- result = this->ParseRegExpLiteral(false, classifier, CHECK_OK);
- break;
+ return this->ParseRegExpLiteral(false, classifier, ok);
case Token::LBRACK:
- if (!allow_harmony_destructuring()) {
+ if (!allow_harmony_destructuring_bind()) {
BindingPatternUnexpectedToken(classifier);
}
- result = this->ParseArrayLiteral(classifier, CHECK_OK);
- break;
+ return this->ParseArrayLiteral(classifier, ok);
case Token::LBRACE:
- if (!allow_harmony_destructuring()) {
+ if (!allow_harmony_destructuring_bind()) {
BindingPatternUnexpectedToken(classifier);
}
- result = this->ParseObjectLiteral(classifier, CHECK_OK);
- break;
+ return this->ParseObjectLiteral(classifier, ok);
- case Token::LPAREN:
+ case Token::LPAREN: {
// Arrow function formal parameters are either a single identifier or a
// list of BindingPattern productions enclosed in parentheses.
// Parentheses are not valid on the LHS of a BindingPattern, so we use the
@@ -2321,28 +1334,17 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
classifier->RecordBindingPatternError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::RPAREN));
- result = factory()->NewEmptyParentheses(beg_pos);
- } else if (allow_harmony_rest_parameters() && Check(Token::ELLIPSIS)) {
+ return factory()->NewEmptyParentheses(beg_pos);
+ } else if (Check(Token::ELLIPSIS)) {
// (...x)=>x. The continuation that looks for the => is in
// ParseAssignmentExpression.
- int ellipsis_pos = scanner()->location().beg_pos;
+ int ellipsis_pos = position();
classifier->RecordExpressionError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::ELLIPSIS));
classifier->RecordNonSimpleParameter();
- Scanner::Location expr_loc = scanner()->peek_location();
- Token::Value tok = peek();
- result = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- // Patterns are not allowed as rest parameters. There is no way we can
- // succeed so go ahead and use the convenient ReportUnexpectedToken
- // interface.
- if (!Traits::IsIdentifier(result)) {
- ReportUnexpectedTokenAt(expr_loc, tok);
- *ok = false;
- return this->EmptyExpression();
- }
- result = factory()->NewSpread(result, ellipsis_pos);
-
+ ExpressionT expr =
+ this->ParseAssignmentExpression(true, classifier, CHECK_OK);
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -2350,14 +1352,19 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
Expect(Token::RPAREN, CHECK_OK);
- } else {
- // Heuristically try to detect immediately called functions before
- // seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
- result = this->ParseExpression(true, classifier, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ return factory()->NewSpread(expr, ellipsis_pos);
}
- break;
+ // Heuristically try to detect immediately called functions before
+ // seeing the call parentheses.
+ parenthesized_function_ = (peek() == Token::FUNCTION);
+ ExpressionT expr = this->ParseExpression(true, kIsPossibleArrowFormals,
+ classifier, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
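+ // E.g. "({x} = y)" keeps its pattern-ness, but "({x}) = y" is marked
+ // parenthesized here and later rejected as a destructuring target;
+ // "(a, b) => a" sees the ARROW and skips the marking (illustrative
+ // note).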
+ if (peek() != Token::ARROW) {
+ expr->set_is_parenthesized();
+ }
+ return expr;
+ }
case Token::CLASS: {
BindingPatternUnexpectedToken(classifier);
@@ -2365,7 +1372,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
ReportMessage(MessageTemplate::kSloppyLexical);
*ok = false;
- break;
+ return this->EmptyExpression();
}
int class_token_position = position();
IdentifierT name = this->EmptyIdentifier();
@@ -2376,10 +1383,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
CHECK_OK);
class_name_location = scanner()->location();
}
- result = this->ParseClassLiteral(name, class_name_location,
- is_strict_reserved_name,
- class_token_position, CHECK_OK);
- break;
+ return this->ParseClassLiteral(name, class_name_location,
+ is_strict_reserved_name,
+ class_token_position, ok);
}
case Token::TEMPLATE_SPAN:
@@ -2387,26 +1393,30 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
classifier->RecordBindingPatternError(
scanner()->peek_location(),
MessageTemplate::kUnexpectedTemplateString);
- result = this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
- classifier, CHECK_OK);
- break;
+ return this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
+ classifier, ok);
case Token::MOD:
if (allow_natives() || extension_ != NULL) {
- result = this->ParseV8Intrinsic(CHECK_OK);
- break;
+ BindingPatternUnexpectedToken(classifier);
+ return this->ParseV8Intrinsic(ok);
}
- // If we're not allowing special syntax we fall-through to the
- // default case.
+ break;
- default: {
- Next();
- ReportUnexpectedToken(token);
- *ok = false;
- }
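+ // Behind the do-expression flag this accepts proposal syntax such as
+ // "var x = do { f(); 42 };" (illustrative note).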
+ case Token::DO:
+ if (allow_harmony_do_expressions()) {
+ BindingPatternUnexpectedToken(classifier);
+ return Traits::ParseDoExpression(ok);
+ }
+ break;
+
+ default:
+ break;
}
- return result;
+ ReportUnexpectedToken(Next());
+ *ok = false;
+ return this->EmptyExpression();
}
@@ -2415,22 +1425,28 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
bool accept_IN, bool* ok) {
ExpressionClassifier classifier;
ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ result = Traits::RewriteNonPattern(result, &classifier, CHECK_OK);
return result;
}
-// Precedence = 1
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+ return ParseExpression(accept_IN, kIsNormalAssignment, classifier, ok);
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+ bool accept_IN, int flags, ExpressionClassifier* classifier, bool* ok) {
// Expression ::
// AssignmentExpression
// Expression ',' AssignmentExpression
ExpressionClassifier binding_classifier;
- ExpressionT result =
- this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
+ ExpressionT result = this->ParseAssignmentExpression(
+ accept_IN, flags, &binding_classifier, CHECK_OK);
classifier->Accumulate(binding_classifier,
ExpressionClassifier::AllProductions);
bool is_simple_parameter_list = this->IsIdentifier(result);
@@ -2444,7 +1460,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
}
Consume(Token::COMMA);
bool is_rest = false;
- if (allow_harmony_rest_parameters() && peek() == Token::ELLIPSIS) {
+ if (peek() == Token::ELLIPSIS) {
// 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList
// appears only as the formal parameters of '(x, y, ...z) => foo', and
// is not itself a valid expression or binding pattern.
@@ -2455,7 +1471,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
}
int pos = position();
ExpressionT right = this->ParseAssignmentExpression(
- accept_IN, &binding_classifier, CHECK_OK);
+ accept_IN, flags, &binding_classifier, CHECK_OK);
if (is_rest) right = factory()->NewSpread(right, pos);
is_simple_parameter_list =
is_simple_parameter_list && this->IsIdentifier(right);
@@ -2466,6 +1482,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
if (!is_simple_parameter_list || seen_rest) {
classifier->RecordNonSimpleParameter();
}
+
return result;
}
@@ -2482,7 +1499,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
int first_spread_index = -1;
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
- bool seen_spread = false;
ExpressionT elem = this->EmptyExpression();
if (peek() == Token::COMMA) {
if (is_strong(language_mode())) {
@@ -2493,26 +1509,36 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
}
elem = this->GetLiteralTheHole(peek_position(), factory());
} else if (peek() == Token::ELLIPSIS) {
- if (!allow_harmony_spread_arrays()) {
- ExpressionUnexpectedToken(classifier);
- }
int start_pos = peek_position();
Consume(Token::ELLIPSIS);
ExpressionT argument =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
elem = factory()->NewSpread(argument, start_pos);
- seen_spread = true;
+
if (first_spread_index < 0) {
first_spread_index = values->length();
}
+
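+ // In a pattern, a rest element must be simple and final: "[...a] = b"
+ // is fine, while "[...a = 1] = b" and "[...a, b] = c" are recorded as
+ // pattern errors below (illustrative note).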
+ if (argument->IsAssignment()) {
+ classifier->RecordPatternError(
+ Scanner::Location(start_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ } else {
+ CheckDestructuringElement(argument, classifier, start_pos,
+ scanner()->location().end_pos);
+ }
+
+ if (peek() == Token::COMMA) {
+ classifier->RecordPatternError(
+ Scanner::Location(start_pos, scanner()->location().end_pos),
+ MessageTemplate::kElementAfterRest);
+ }
} else {
- elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ elem = this->ParseAssignmentExpression(true, kIsPossiblePatternElement,
+ classifier, CHECK_OK);
}
values->Add(elem, zone_);
if (peek() != Token::RBRACK) {
- if (seen_spread) {
- BindingPatternUnexpectedToken(classifier);
- }
Expect(Token::COMMA, CHECK_OK);
}
}
@@ -2529,7 +1555,8 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
IdentifierT* name, bool* is_get, bool* is_set, bool* is_static,
- bool* is_computed_name, ExpressionClassifier* classifier, bool* ok) {
+ bool* is_computed_name, bool* is_identifier, bool* is_escaped_keyword,
+ ExpressionClassifier* classifier, bool* ok) {
Token::Value token = peek();
int pos = peek_position();
@@ -2564,17 +1591,25 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
ExpressionClassifier computed_name_classifier;
ExpressionT expression =
ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(
+ expression, &computed_name_classifier, CHECK_OK);
classifier->Accumulate(computed_name_classifier,
ExpressionClassifier::ExpressionProductions);
Expect(Token::RBRACK, CHECK_OK);
return expression;
}
+ case Token::ESCAPED_KEYWORD:
+ *is_escaped_keyword = true;
+ *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
+ break;
+
case Token::STATIC:
*is_static = true;
// Fall through.
default:
+ *is_identifier = true;
*name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
break;
}
@@ -2591,10 +1626,9 @@ typename ParserBase<Traits>::ObjectLiteralPropertyT
ParserBase<Traits>::ParsePropertyDefinition(
ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
bool is_static, bool* is_computed_name, bool* has_seen_constructor,
- ExpressionClassifier* classifier, bool* ok) {
+ ExpressionClassifier* classifier, IdentifierT* name, bool* ok) {
DCHECK(!in_class || is_static || has_seen_constructor != nullptr);
ExpressionT value = this->EmptyExpression();
- IdentifierT name = this->EmptyIdentifier();
bool is_get = false;
bool is_set = false;
bool name_is_static = false;
@@ -2603,14 +1637,21 @@ ParserBase<Traits>::ParsePropertyDefinition(
Token::Value name_token = peek();
int next_beg_pos = scanner()->peek_location().beg_pos;
int next_end_pos = scanner()->peek_location().end_pos;
+ bool is_identifier = false;
+ bool is_escaped_keyword = false;
ExpressionT name_expression = ParsePropertyName(
- &name, &is_get, &is_set, &name_is_static, is_computed_name, classifier,
+ name, &is_get, &is_set, &name_is_static, is_computed_name, &is_identifier,
+ &is_escaped_keyword, classifier,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (fni_ != nullptr && !*is_computed_name) {
- this->PushLiteralName(fni_, name);
+ this->PushLiteralName(fni_, *name);
}
+ bool escaped_static =
+ is_escaped_keyword &&
+ scanner()->is_literal_contextual_keyword(CStrVector("static"));
+
if (!in_class && !is_generator) {
DCHECK(!is_static);
@@ -2623,13 +1664,14 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
Consume(Token::COLON);
value = this->ParseAssignmentExpression(
- true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ true, kIsPossiblePatternElement, classifier,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
return factory()->NewObjectLiteralProperty(name_expression, value, false,
*is_computed_name);
}
- if (Token::IsIdentifier(name_token, language_mode(),
- this->is_generator()) &&
+ if ((is_identifier || is_escaped_keyword) &&
(peek() == Token::COMMA || peek() == Token::RBRACE ||
peek() == Token::ASSIGN)) {
// PropertyDefinition
@@ -2638,24 +1680,41 @@ ParserBase<Traits>::ParsePropertyDefinition(
//
// CoverInitializedName
// IdentifierReference Initializer?
+ if (!Token::IsIdentifier(name_token, language_mode(),
+ this->is_generator())) {
+ if (!escaped_static) {
+ ReportUnexpectedTokenAt(scanner()->location(), name_token);
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+ }
if (classifier->duplicate_finder() != nullptr &&
scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
classifier->RecordDuplicateFormalParameterError(scanner()->location());
}
+ if (name_token == Token::LET) {
+ classifier->RecordLetPatternError(
+ scanner()->location(), MessageTemplate::kLetInLexicalBinding);
+ }
ExpressionT lhs = this->ExpressionFromIdentifier(
- name, next_beg_pos, next_end_pos, scope_, factory());
+ *name, next_beg_pos, next_end_pos, scope_, factory());
+ CheckDestructuringElement(lhs, classifier, next_beg_pos, next_end_pos);
if (peek() == Token::ASSIGN) {
- this->ExpressionUnexpectedToken(classifier);
Consume(Token::ASSIGN);
ExpressionClassifier rhs_classifier;
ExpressionT rhs = this->ParseAssignmentExpression(
true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ rhs = Traits::RewriteNonPattern(
+ rhs, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
classifier->Accumulate(rhs_classifier,
ExpressionClassifier::ExpressionProductions);
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
RelocInfo::kNoPosition);
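+ // Shorthand with an initializer is only valid if the object literal
+ // turns out to be a pattern: "({a = 1} = o)" later forgives this
+ // error, while "({a = 1})" reports it (illustrative note).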
+ classifier->RecordCoverInitializedNameError(
+ Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidCoverInitializedName);
} else {
value = lhs;
}
@@ -2666,6 +1725,16 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
}
+ if (in_class && escaped_static && !is_static) {
+ ReportUnexpectedTokenAt(scanner()->location(), name_token);
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+
+ // Method definitions are never valid in patterns.
+ classifier->RecordPatternError(
+ Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
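+ // E.g. "({ m() {} } = o)" must fail; only shorthand names and
+ // key:value pairs can serve as destructuring targets (illustrative
+ // note).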
if (is_generator || peek() == Token::LPAREN) {
// MethodDefinition
@@ -2680,7 +1749,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
: FunctionKind::kConciseMethod;
- if (in_class && !is_static && this->IsConstructor(name)) {
+ if (in_class && !is_static && this->IsConstructor(*name)) {
*has_seen_constructor = true;
kind = has_extends ? FunctionKind::kSubclassConstructor
: FunctionKind::kBaseConstructor;
@@ -2689,9 +1758,9 @@ ParserBase<Traits>::ParsePropertyDefinition(
if (!in_class) kind = WithObjectLiteralBit(kind);
value = this->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
- FunctionLiteral::NORMAL_ARITY, language_mode(),
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kNormalArity, language_mode(),
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
return factory()->NewObjectLiteralProperty(name_expression, value,
@@ -2702,21 +1771,26 @@ ParserBase<Traits>::ParsePropertyDefinition(
if (in_class && name_is_static && !is_static) {
// ClassElement (static)
// 'static' MethodDefinition
- return ParsePropertyDefinition(checker, true, has_extends, true,
- is_computed_name, nullptr, classifier, ok);
+ *name = this->EmptyIdentifier();
+ ObjectLiteralPropertyT property = ParsePropertyDefinition(
+ checker, true, has_extends, true, is_computed_name, nullptr, classifier,
+ name, ok);
+ property = Traits::RewriteNonPatternObjectLiteralProperty(property,
+ classifier, ok);
+ return property;
}
if (is_get || is_set) {
// MethodDefinition (Accessors)
// get PropertyName '(' ')' '{' FunctionBody '}'
// set PropertyName '(' PropertySetParameterList ')' '{' FunctionBody '}'
- name = this->EmptyIdentifier();
+ *name = this->EmptyIdentifier();
bool dont_care = false;
name_token = peek();
name_expression = ParsePropertyName(
- &name, &dont_care, &dont_care, &dont_care, is_computed_name, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ name, &dont_care, &dont_care, &dont_care, is_computed_name, &dont_care,
+ &dont_care, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (!*is_computed_name) {
checker->CheckProperty(name_token, kAccessorProperty, is_static,
@@ -2727,9 +1801,9 @@ ParserBase<Traits>::ParsePropertyDefinition(
FunctionKind kind = FunctionKind::kAccessorFunction;
if (!in_class) kind = WithObjectLiteralBit(kind);
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
- is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
+ is_get ? FunctionLiteral::kGetterArity : FunctionLiteral::kSetterArity,
language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
// Make sure the name expression is a string since we need a Name for
@@ -2737,7 +1811,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
// statically we can skip the extra runtime check.
if (!*is_computed_name) {
name_expression =
- factory()->NewStringLiteral(name, name_expression->position());
+ factory()->NewStringLiteral(*name, name_expression->position());
}
return factory()->NewObjectLiteralProperty(
@@ -2776,9 +1850,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
const bool is_static = false;
const bool has_extends = false;
bool is_computed_name = false;
+ IdentifierT name = this->EmptyIdentifier();
ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
&checker, in_class, has_extends, is_static, &is_computed_name, NULL,
- classifier, CHECK_OK);
+ classifier, &name, CHECK_OK);
if (is_computed_name) {
has_computed_names = true;
@@ -2802,6 +1877,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
}
if (fni_ != nullptr) fni_->Infer();
+
+ if (allow_harmony_function_name()) {
+ Traits::SetFunctionNameFromPropertyName(property, name);
+ }
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2832,13 +1911,13 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
bool was_unspread = false;
int unspread_sequences_count = 0;
while (!done) {
- bool is_spread =
- allow_harmony_spread_calls() && (peek() == Token::ELLIPSIS);
int start_pos = peek_position();
- if (is_spread) Consume(Token::ELLIPSIS);
+ bool is_spread = Check(Token::ELLIPSIS);
ExpressionT argument = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
+ argument = Traits::RewriteNonPattern(argument, classifier,
+ CHECK_OK_CUSTOM(NullExpressionList));
if (is_spread) {
if (!spread_arg.IsValid()) {
spread_arg.beg_pos = start_pos;
@@ -2888,7 +1967,7 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
// Precedence = 2
template <class Traits>
typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
ExpressionClassifier* classifier,
bool* ok) {
// AssignmentExpression ::
@@ -2896,7 +1975,9 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
// ArrowFunction
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
-
+ bool maybe_pattern_element = flags & kIsPossiblePatternElement;
+ bool maybe_arrow_formals = flags & kIsPossibleArrowFormals;
+ bool is_destructuring_assignment = false;
int lhs_beg_pos = peek_position();
if (peek() == Token::YIELD && is_generator()) {
@@ -2912,56 +1993,95 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
}
ExpressionT expression = this->ParseConditionalExpression(
accept_IN, &arrow_formals_classifier, CHECK_OK);
- if (allow_harmony_arrow_functions() && peek() == Token::ARROW) {
+ if (peek() == Token::ARROW) {
BindingPatternUnexpectedToken(classifier);
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
parenthesized_formals, CHECK_OK);
Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
Scope* scope =
- this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
+ this->NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+ // Because the arrow's parameters were parsed in the outer scope, any
+ // usage flags that might have been triggered there need to be copied
+ // to the arrow scope.
+ scope_->PropagateUsageFlagsToScope(scope);
FormalParametersT parameters(scope);
if (!arrow_formals_classifier.is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
parameters.is_simple = false;
}
- Scanner::Location duplicate_loc = Scanner::Location::invalid();
- this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
- &duplicate_loc, CHECK_OK);
-
checkpoint.Restore(&parameters.materialized_literals_count);
scope->set_start_position(lhs_beg_pos);
+ Scanner::Location duplicate_loc = Scanner::Location::invalid();
+ this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
+ &duplicate_loc, CHECK_OK);
if (duplicate_loc.IsValid()) {
arrow_formals_classifier.RecordDuplicateFormalParameterError(
duplicate_loc);
}
expression = this->ParseArrowFunctionLiteral(
- parameters, arrow_formals_classifier, CHECK_OK);
+ accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
+ if (maybe_pattern_element) {
+ classifier->RecordPatternError(
+ Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
if (fni_ != nullptr) fni_->Infer();
return expression;
}
+ if (this->IsValidReferenceExpression(expression)) {
+ arrow_formals_classifier.ForgiveAssignmentPatternError();
+ }
+
// "expression" was not itself an arrow function parameter list, but it might
// form part of one. Propagate speculative formal parameter error locations.
- classifier->Accumulate(arrow_formals_classifier,
- ExpressionClassifier::StandardProductions |
- ExpressionClassifier::FormalParametersProductions);
+ classifier->Accumulate(
+ arrow_formals_classifier,
+ ExpressionClassifier::StandardProductions |
+ ExpressionClassifier::FormalParametersProductions |
+ ExpressionClassifier::CoverInitializedNameProduction);
+
+ bool maybe_pattern =
+ (expression->IsObjectLiteral() || expression->IsArrayLiteral()) &&
+ !expression->is_parenthesized();
if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
+ if (maybe_pattern_element) {
+ CheckDestructuringElement(expression, classifier, lhs_beg_pos,
+ scanner()->location().end_pos);
+ }
return expression;
}
- if (!(allow_harmony_destructuring() || allow_harmony_default_parameters())) {
+ if (!(allow_harmony_destructuring_bind() ||
+ allow_harmony_default_parameters())) {
BindingPatternUnexpectedToken(classifier);
}
- expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, scanner()->location().end_pos,
- MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
+ if (allow_harmony_destructuring_assignment() && maybe_pattern &&
+ peek() == Token::ASSIGN) {
+ classifier->ForgiveCoverInitializedNameError();
+ ValidateAssignmentPattern(classifier, CHECK_OK);
+ is_destructuring_assignment = true;
+ } else if (maybe_arrow_formals) {
+ expression = this->ClassifyAndRewriteReferenceExpression(
+ classifier, expression, lhs_beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInAssignment);
+ } else {
+ if (maybe_pattern_element) {
+ CheckDestructuringElement(expression, classifier, lhs_beg_pos,
+ scanner()->location().end_pos);
+ }
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
+ }
+
expression = this->MarkExpressionAsAssigned(expression);
Token::Value op = Next(); // Get assignment operator.
@@ -2973,10 +2093,13 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
int pos = position();
ExpressionClassifier rhs_classifier;
+
ExpressionT right =
this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
- classifier->Accumulate(rhs_classifier,
- ExpressionClassifier::ExpressionProductions);
+ right = Traits::RewriteNonPattern(right, &rhs_classifier, CHECK_OK);
+ classifier->Accumulate(
+ rhs_classifier, ExpressionClassifier::ExpressionProductions |
+ ExpressionClassifier::CoverInitializedNameProduction);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -2987,23 +2110,38 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
function_state_->AddProperty();
}
+ if (op != Token::ASSIGN && maybe_pattern_element) {
+ classifier->RecordAssignmentPatternError(
+ Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
+
this->CheckAssigningFunctionLiteralToProperty(expression, right);
if (fni_ != NULL) {
// Check if the right hand side is a call to avoid inferring a
// name if we're dealing with "a = function(){...}();"-like
// expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST_LEGACY
- || op == Token::ASSIGN)
- && (!right->IsCall() && !right->IsCallNew())) {
+ if ((op == Token::INIT || op == Token::ASSIGN) &&
+ (!right->IsCall() && !right->IsCallNew())) {
fni_->Infer();
} else {
fni_->RemoveLastFunction();
}
}
- return factory()->NewAssignment(op, expression, right, pos);
+ if (op == Token::ASSIGN && allow_harmony_function_name()) {
+ Traits::SetFunctionNameFromIdentifierRef(right, expression);
+ }
+
+ ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
+
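+ // E.g. "[a, b] = pair" reaches this point as a destructuring
+ // assignment; the wrapper queues it so the desugaring can be applied
+ // once the surrounding parse settles (a sketch of the intent).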
+ if (is_destructuring_assignment) {
+ result = factory()->NewRewritableAssignmentExpression(result);
+ Traits::QueueDestructuringAssignmentForRewriting(result);
+ }
+
+ return result;
}
template <class Traits>
@@ -3040,13 +2178,19 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
+ expression =
+ Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
break;
}
}
if (kind == Yield::kDelegating) {
// var iterator = subject[Symbol.iterator]();
- expression = this->GetIterator(expression, factory());
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
+ expression = this->GetIterator(expression, factory(), pos + 1);
}
typename Traits::Type::YieldExpression yield =
factory()->NewYield(generator_object, expression, kind, pos);
return yield;
@@ -3068,6 +2212,7 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
ExpressionT expression =
this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
ArrowFormalParametersUnexpectedToken(classifier);
BindingPatternUnexpectedToken(classifier);
Consume(Token::CONDITIONAL);
@@ -3075,9 +2220,11 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
+ left = Traits::RewriteNonPattern(left, classifier, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
ExpressionT right =
ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
+ right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
return factory()->NewConditional(expression, left, right, pos);
}
@@ -3093,6 +2240,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
+ x = Traits::RewriteNonPattern(x, classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Token::Value op = Next();
@@ -3100,6 +2248,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
int pos = position();
ExpressionT y =
ParseBinaryExpression(prec1 + 1, accept_IN, classifier, CHECK_OK);
+ y = Traits::RewriteNonPattern(y, classifier, CHECK_OK);
if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
factory())) {
@@ -3162,6 +2311,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
op = Next();
int pos = position();
ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
if (is_strong(language_mode())) {
@@ -3188,6 +2338,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
this->MarkExpressionAsAssigned(expression);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
@@ -3219,6 +2370,7 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
Token::Value next = Next();
expression =
@@ -3249,12 +2401,14 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
+ index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
case Token::LPAREN: {
+ result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -3265,7 +2419,8 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
return this->EmptyExpression();
}
int pos;
- if (scanner()->current_token() == Token::IDENTIFIER) {
+ if (scanner()->current_token() == Token::IDENTIFIER ||
+ scanner()->current_token() == Token::SUPER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
pos = position();
@@ -3308,8 +2463,8 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
// implicit binding assignment to the 'this' variable.
if (is_super_call) {
ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
- result = factory()->NewAssignment(Token::INIT_CONST, this_expr,
- result, pos);
+ result =
+ factory()->NewAssignment(Token::INIT, this_expr, result, pos);
}
if (fni_ != NULL) fni_->RemoveLastFunction();
@@ -3376,11 +2531,12 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
if (peek() == Token::SUPER) {
const bool is_new = true;
result = ParseSuperExpression(is_new, classifier, CHECK_OK);
- } else if (allow_harmony_new_target() && peek() == Token::PERIOD) {
+ } else if (peek() == Token::PERIOD) {
return ParseNewTargetExpression(CHECK_OK);
} else {
result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
}
+ result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
@@ -3432,12 +2588,12 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
bool is_strict_reserved_name = false;
Scanner::Location function_name_location = Scanner::Location::invalid();
FunctionLiteral::FunctionType function_type =
- FunctionLiteral::ANONYMOUS_EXPRESSION;
+ FunctionLiteral::kAnonymousExpression;
if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
+ name = ParseIdentifierOrStrictReservedWord(
+ is_generator, &is_strict_reserved_name, CHECK_OK);
function_name_location = scanner()->location();
- function_type = FunctionLiteral::NAMED_EXPRESSION;
+ function_type = FunctionLiteral::kNamedExpression;
}
result = this->ParseFunctionLiteral(
name, function_name_location,
@@ -3445,7 +2601,7 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
+ function_token_position, function_type, FunctionLiteral::kNormalArity,
language_mode(), CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
@@ -3480,6 +2636,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
+ index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
left = factory()->NewProperty(this_expr, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -3515,6 +2672,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
ExpressionT right =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
this->CheckAssigningFunctionLiteralToProperty(left, right);
function_state_->AddProperty();
if (fni_ != NULL) {
@@ -3595,7 +2753,7 @@ ParserBase<Traits>::ParseStrongSuperCallExpression(
// Explicit calls to the super constructor using super() perform an implicit
// binding assignment to the 'this' variable.
ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
- return factory()->NewAssignment(Token::INIT_CONST, this_expr, expr, pos);
+ return factory()->NewAssignment(Token::INIT, this_expr, expr, pos);
}
@@ -3604,8 +2762,8 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseSuperExpression(bool is_new,
ExpressionClassifier* classifier,
bool* ok) {
- int pos = position();
Expect(Token::SUPER, CHECK_OK);
+ int pos = position();
Scope* scope = scope_->ReceiverScope();
FunctionKind kind = scope->function_kind();
@@ -3671,6 +2829,7 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
+ index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -3735,7 +2894,7 @@ void ParserBase<Traits>::ParseFormalParameter(
if (!*ok) return;
if (!Traits::IsIdentifier(pattern)) {
- if (is_rest || !allow_harmony_destructuring()) {
+ if (!allow_harmony_destructuring_bind()) {
ReportUnexpectedToken(next);
*ok = false;
return;
@@ -3746,25 +2905,20 @@ void ParserBase<Traits>::ParseFormalParameter(
classifier->RecordNonSimpleParameter();
}
- if (is_rest) {
- parameters->rest_array_literal_index =
- function_state_->NextMaterializedLiteralIndex();
- ++parameters->materialized_literals_count;
- }
-
ExpressionT initializer = Traits::EmptyExpression();
if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
ExpressionClassifier init_classifier;
initializer = ParseAssignmentExpression(true, &init_classifier, ok);
if (!*ok) return;
- ValidateExpression(&init_classifier, ok);
+ initializer = Traits::RewriteNonPattern(initializer, &init_classifier, ok);
ValidateFormalParameterInitializer(&init_classifier, ok);
if (!*ok) return;
parameters->is_simple = false;
classifier->RecordNonSimpleParameter();
}
- Traits::AddFormalParameter(parameters, pattern, initializer, is_rest);
+ Traits::AddFormalParameter(parameters, pattern, initializer,
+ scanner()->location().end_pos, is_rest);
}
@@ -3794,8 +2948,7 @@ void ParserBase<Traits>::ParseFormalParameterList(
*ok = false;
return;
}
- parameters->has_rest =
- allow_harmony_rest_parameters() && Check(Token::ELLIPSIS);
+ parameters->has_rest = Check(Token::ELLIPSIS);
ParseFormalParameter(parameters, classifier, ok);
if (!*ok) return;
} while (!parameters->has_rest && Check(Token::COMMA));
@@ -3824,14 +2977,14 @@ void ParserBase<Traits>::CheckArityRestrictions(
int param_count, FunctionLiteral::ArityRestriction arity_restriction,
bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok) {
switch (arity_restriction) {
- case FunctionLiteral::GETTER_ARITY:
+ case FunctionLiteral::kGetterArity:
if (param_count != 0) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadGetterArity);
*ok = false;
}
break;
- case FunctionLiteral::SETTER_ARITY:
+ case FunctionLiteral::kSetterArity:
if (param_count != 1) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadSetterArity);
@@ -3873,7 +3026,7 @@ bool ParserBase<Traits>::IsNextLetKeyword() {
template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseArrowFunctionLiteral(
- const FormalParametersT& formal_parameters,
+ bool accept_IN, const FormalParametersT& formal_parameters,
const ExpressionClassifier& formals_classifier, bool* ok) {
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
@@ -3912,7 +3065,6 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
body = this->NewStatementList(0, zone());
this->SkipLazyFunctionBody(&materialized_literal_count,
&expected_property_count, CHECK_OK);
-
if (formal_parameters.materialized_literals_count > 0) {
materialized_literal_count +=
formal_parameters.materialized_literals_count;
@@ -3920,7 +3072,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
} else {
body = this->ParseEagerFunctionBody(
this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
- kArrowFunction, FunctionLiteral::ANONYMOUS_EXPRESSION, CHECK_OK);
+ kArrowFunction, FunctionLiteral::kAnonymousExpression, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -3931,8 +3083,8 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
parenthesized_function_ = false;
ExpressionClassifier classifier;
ExpressionT expression =
- ParseAssignmentExpression(true, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(expression, &classifier, CHECK_OK);
body = this->NewStatementList(1, zone());
this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
body->Add(factory()->NewReturnStatement(expression, pos), zone());
@@ -3959,14 +3111,15 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
if (is_strict(language_mode()) || allow_harmony_sloppy()) {
this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
}
+
+ Traits::RewriteDestructuringAssignments();
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
- this->EmptyIdentifierString(), ast_value_factory(),
- formal_parameters.scope, body, materialized_literal_count,
- expected_property_count, num_parameters,
+ this->EmptyIdentifierString(), formal_parameters.scope, body,
+ materialized_literal_count, expected_property_count, num_parameters,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+ FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldLazyCompile, FunctionKind::kArrowFunction,
formal_parameters.scope->start_position());
@@ -4037,6 +3190,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
int expr_pos = peek_position();
ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
Traits::AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
@@ -4090,21 +3244,33 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok) {
+ ExpressionClassifier classifier;
+ ExpressionT result = ClassifyAndRewriteReferenceExpression(
+ &classifier, expression, beg_pos, end_pos, message, type);
+ ValidateExpression(&classifier, ok);
+ if (!*ok) return this->EmptyExpression();
+ return result;
+}
+
+
+template <typename Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ClassifyAndRewriteReferenceExpression(
+ ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
+ int end_pos, MessageTemplate::Template message, ParseErrorType type) {
Scanner::Location location(beg_pos, end_pos);
if (this->IsIdentifier(expression)) {
if (is_strict(language_mode()) &&
this->IsEvalOrArguments(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, MessageTemplate::kStrictEvalArguments,
- kSyntaxError);
- *ok = false;
- return this->EmptyExpression();
+ classifier->RecordExpressionError(
+ location, MessageTemplate::kStrictEvalArguments, kSyntaxError);
+ return expression;
}
if (is_strong(language_mode()) &&
this->IsUndefined(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, MessageTemplate::kStrongUndefined,
- kSyntaxError);
- *ok = false;
- return this->EmptyExpression();
+ classifier->RecordExpressionError(
+ location, MessageTemplate::kStrongUndefined, kSyntaxError);
+ return expression;
}
}
if (expression->IsValidReferenceExpression()) {
@@ -4116,9 +3282,37 @@ ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT error = this->NewThrowReferenceError(message, pos);
return factory()->NewProperty(expression, error, pos);
} else {
- this->ReportMessageAt(location, message, type);
- *ok = false;
- return this->EmptyExpression();
+ classifier->RecordExpressionError(location, message, type);
+ return expression;
+ }
+}
+
+
+template <typename Traits>
+bool ParserBase<Traits>::IsValidReferenceExpression(ExpressionT expression) {
+ return this->IsAssignableIdentifier(expression) || expression->IsProperty();
+}
+
+
+template <typename Traits>
+void ParserBase<Traits>::CheckDestructuringElement(
+ ExpressionT expression, ExpressionClassifier* classifier, int begin,
+ int end) {
+ static const MessageTemplate::Template message =
+ MessageTemplate::kInvalidDestructuringTarget;
+ const Scanner::Location location(begin, end);
+ if (expression->IsArrayLiteral() || expression->IsObjectLiteral() ||
+ expression->IsAssignment()) {
+ if (expression->is_parenthesized()) {
+ classifier->RecordPatternError(location, message);
+ }
+ return;
+ }
+
+ if (expression->IsProperty()) {
+ classifier->RecordBindingPatternError(location, message);
+ } else if (!this->IsAssignableIdentifier(expression)) {
+ classifier->RecordPatternError(location, message);
}
}
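
CheckDestructuringElement encodes the asymmetry between binding patterns and assignment patterns: a property access like `o.x` is a legal assignment-destructuring target but never a legal binding target, while parenthesized literals are rejected for both. A condensed restatement of the decision table, using simplified enums in place of the AST predicates:

enum class Target {
  kLiteralOrAssignment,   // [..], {..}, or a nested assignment
  kProperty,              // o.x / o[i]
  kAssignableIdentifier,  // plain identifier
  kOther                  // calls, literals, etc.
};

enum class Verdict { kOk, kBindingPatternError, kPatternError };

// Condensed mirror of CheckDestructuringElement's branches.
Verdict Classify(Target t, bool parenthesized) {
  switch (t) {
    case Target::kLiteralOrAssignment:
      // Parenthesized targets like `([a]) = []` are invalid; bare ones
      // are validated recursively elsewhere.
      return parenthesized ? Verdict::kPatternError : Verdict::kOk;
    case Target::kProperty:
      // Legal in assignment patterns (`[o.x] = v`), never in binding
      // patterns (`let [o.x] = v`).
      return Verdict::kBindingPatternError;
    case Target::kAssignableIdentifier:
      return Verdict::kOk;
    case Target::kOther:
      return Verdict::kPatternError;
  }
  return Verdict::kOk;  // unreachable
}
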
@@ -4180,6 +3374,7 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
return;
}
}
-} } // v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_PREPARSER_H
+#endif // V8_PARSING_PARSER_BASE_H
diff --git a/chromium/v8/src/parser.cc b/chromium/v8/src/parsing/parser.cc
index 60a6024608b..b1b8c1316bc 100644
--- a/chromium/v8/src/parser.cc
+++ b/chromium/v8/src/parsing/parser.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/api.h"
-#include "src/ast.h"
-#include "src/ast-literal-reindexer.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/ast-literal-reindexer.h"
+#include "src/ast/scopeinfo.h"
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -14,10 +16,11 @@
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/messages.h"
-#include "src/preparser.h"
+#include "src/parsing/parameter-initializer-rewriter.h"
+#include "src/parsing/parser-base.h"
+#include "src/parsing/rewriter.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/runtime/runtime.h"
-#include "src/scanner-character-streams.h"
-#include "src/scopeinfo.h"
#include "src/string-stream.h"
namespace v8 {
@@ -92,162 +95,6 @@ ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
}
-RegExpBuilder::RegExpBuilder(Zone* zone)
- : zone_(zone),
- pending_empty_(false),
- characters_(NULL),
- terms_(),
- alternatives_()
-#ifdef DEBUG
- , last_added_(ADD_NONE)
-#endif
- {}
-
-
-void RegExpBuilder::FlushCharacters() {
- pending_empty_ = false;
- if (characters_ != NULL) {
- RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
- characters_ = NULL;
- text_.Add(atom, zone());
- LAST(ADD_ATOM);
- }
-}
-
-
-void RegExpBuilder::FlushText() {
- FlushCharacters();
- int num_text = text_.length();
- if (num_text == 0) {
- return;
- } else if (num_text == 1) {
- terms_.Add(text_.last(), zone());
- } else {
- RegExpText* text = new(zone()) RegExpText(zone());
- for (int i = 0; i < num_text; i++)
- text_.Get(i)->AppendToText(text, zone());
- terms_.Add(text, zone());
- }
- text_.Clear();
-}
-
-
-void RegExpBuilder::AddCharacter(uc16 c) {
- pending_empty_ = false;
- if (characters_ == NULL) {
- characters_ = new(zone()) ZoneList<uc16>(4, zone());
- }
- characters_->Add(c, zone());
- LAST(ADD_CHAR);
-}
-
-
-void RegExpBuilder::AddEmpty() {
- pending_empty_ = true;
-}
-
-
-void RegExpBuilder::AddAtom(RegExpTree* term) {
- if (term->IsEmpty()) {
- AddEmpty();
- return;
- }
- if (term->IsTextElement()) {
- FlushCharacters();
- text_.Add(term, zone());
- } else {
- FlushText();
- terms_.Add(term, zone());
- }
- LAST(ADD_ATOM);
-}
-
-
-void RegExpBuilder::AddAssertion(RegExpTree* assert) {
- FlushText();
- terms_.Add(assert, zone());
- LAST(ADD_ASSERT);
-}
-
-
-void RegExpBuilder::NewAlternative() {
- FlushTerms();
-}
-
-
-void RegExpBuilder::FlushTerms() {
- FlushText();
- int num_terms = terms_.length();
- RegExpTree* alternative;
- if (num_terms == 0) {
- alternative = new (zone()) RegExpEmpty();
- } else if (num_terms == 1) {
- alternative = terms_.last();
- } else {
- alternative = new(zone()) RegExpAlternative(terms_.GetList(zone()));
- }
- alternatives_.Add(alternative, zone());
- terms_.Clear();
- LAST(ADD_NONE);
-}
-
-
-RegExpTree* RegExpBuilder::ToRegExp() {
- FlushTerms();
- int num_alternatives = alternatives_.length();
- if (num_alternatives == 0) return new (zone()) RegExpEmpty();
- if (num_alternatives == 1) return alternatives_.last();
- return new(zone()) RegExpDisjunction(alternatives_.GetList(zone()));
-}
-
-
-void RegExpBuilder::AddQuantifierToAtom(
- int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
- if (pending_empty_) {
- pending_empty_ = false;
- return;
- }
- RegExpTree* atom;
- if (characters_ != NULL) {
- DCHECK(last_added_ == ADD_CHAR);
- // Last atom was character.
- Vector<const uc16> char_vector = characters_->ToConstVector();
- int num_chars = char_vector.length();
- if (num_chars > 1) {
- Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new(zone()) RegExpAtom(prefix), zone());
- char_vector = char_vector.SubVector(num_chars - 1, num_chars);
- }
- characters_ = NULL;
- atom = new(zone()) RegExpAtom(char_vector);
- FlushText();
- } else if (text_.length() > 0) {
- DCHECK(last_added_ == ADD_ATOM);
- atom = text_.RemoveLast();
- FlushText();
- } else if (terms_.length() > 0) {
- DCHECK(last_added_ == ADD_ATOM);
- atom = terms_.RemoveLast();
- if (atom->max_match() == 0) {
- // Guaranteed to only match an empty string.
- LAST(ADD_TERM);
- if (min == 0) {
- return;
- }
- terms_.Add(atom, zone());
- return;
- }
- } else {
- // Only call immediately after adding an atom or character!
- UNREACHABLE();
- return;
- }
- terms_.Add(
- new(zone()) RegExpQuantifier(min, max, quantifier_type, atom), zone());
- LAST(ADD_TERM);
-}
-
-
FunctionEntry ParseData::GetFunctionEntry(int start) {
// The current pre-data entry must be a FunctionEntry with the given
// start position.
@@ -344,8 +191,8 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
: FunctionKind::kDefaultBaseConstructor;
Scope* function_scope = NewScope(scope, FUNCTION_SCOPE, kind);
- function_scope->SetLanguageMode(
- static_cast<LanguageMode>(language_mode | STRICT));
+ SetLanguageMode(function_scope,
+ static_cast<LanguageMode>(language_mode | STRICT));
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
@@ -357,26 +204,30 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
kind, &function_factory);
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
- AddAssertIsConstruct(body, pos);
if (call_super) {
- // %_DefaultConstructorCallSuper(new.target, %GetPrototype(<this-fun>))
+ // $super_constructor = %_GetSuperConstructor(<this-function>)
+ // %reflect_construct($super_constructor, arguments, new.target)
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(2, zone());
- VariableProxy* new_target_proxy = scope_->NewUnresolved(
- factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
- pos);
- args->Add(new_target_proxy, zone());
VariableProxy* this_function_proxy = scope_->NewUnresolved(
factory(), ast_value_factory()->this_function_string(),
Variable::NORMAL, pos);
ZoneList<Expression*>* tmp =
new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(this_function_proxy, zone());
- Expression* get_prototype =
- factory()->NewCallRuntime(Runtime::kGetPrototype, tmp, pos);
- args->Add(get_prototype, zone());
+ Expression* super_constructor = factory()->NewCallRuntime(
+ Runtime::kInlineGetSuperConstructor, tmp, pos);
+ args->Add(super_constructor, zone());
+ VariableProxy* arguments_proxy = scope_->NewUnresolved(
+ factory(), ast_value_factory()->arguments_string(), Variable::NORMAL,
+ pos);
+ args->Add(arguments_proxy, zone());
+ VariableProxy* new_target_proxy = scope_->NewUnresolved(
+ factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
+ pos);
+ args->Add(new_target_proxy, zone());
CallRuntime* call = factory()->NewCallRuntime(
- Runtime::kInlineDefaultConstructorCallSuper, args, pos);
+ Context::REFLECT_CONSTRUCT_INDEX, args, pos);
body->Add(factory()->NewReturnStatement(call, pos), zone());
}
@@ -385,10 +236,10 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
}
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- name, ast_value_factory(), function_scope, body,
- materialized_literal_count, expected_property_count, parameter_count,
+ name, function_scope, body, materialized_literal_count,
+ expected_property_count, parameter_count,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+ FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldLazyCompile, kind, pos);
return function_literal;
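
The rewritten default derived constructor no longer goes through a dedicated runtime entry; it forwards through Reflect.construct, so subclass instantiation and new.target propagation share one code path. Conceptually, in the same comment style the patch itself uses for desugarings (illustrative only; the parser emits AST nodes, not source text):

// What DefaultConstructor now builds for a derived class with no
// explicit constructor, as JS-like pseudocode:
//
//   constructor() {
//     return %reflect_construct(
//         %_GetSuperConstructor(<this-function>),  // the super class
//         arguments,                               // forwarded arguments
//         new.target);                             // preserves new.target
//   }
//
// The three args->Add(...) calls above append exactly these operands,
// in this order.
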
@@ -524,17 +375,6 @@ void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
}
-void ParserTraits::CheckPossibleEvalCall(Expression* expression,
- Scope* scope) {
- VariableProxy* callee = expression->AsVariableProxy();
- if (callee != NULL &&
- callee->raw_name() == parser_->ast_value_factory()->eval_string()) {
- scope->DeclarationScope()->RecordEvalCall();
- scope->RecordEvalCall();
- }
-}
-
-
Expression* ParserTraits::MarkExpressionAsAssigned(Expression* expression) {
VariableProxy* proxy =
expression != NULL ? expression->AsVariableProxy() : NULL;
@@ -550,49 +390,52 @@ bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
double y_val = y->AsLiteral()->raw_value()->AsNumber();
+ bool x_has_dot = (*x)->AsLiteral()->raw_value()->ContainsDot();
+ bool y_has_dot = y->AsLiteral()->raw_value()->ContainsDot();
+ bool has_dot = x_has_dot || y_has_dot;
switch (op) {
case Token::ADD:
- *x = factory->NewNumberLiteral(x_val + y_val, pos);
+ *x = factory->NewNumberLiteral(x_val + y_val, pos, has_dot);
return true;
case Token::SUB:
- *x = factory->NewNumberLiteral(x_val - y_val, pos);
+ *x = factory->NewNumberLiteral(x_val - y_val, pos, has_dot);
return true;
case Token::MUL:
- *x = factory->NewNumberLiteral(x_val * y_val, pos);
+ *x = factory->NewNumberLiteral(x_val * y_val, pos, has_dot);
return true;
case Token::DIV:
- *x = factory->NewNumberLiteral(x_val / y_val, pos);
+ *x = factory->NewNumberLiteral(x_val / y_val, pos, has_dot);
return true;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
default:
@@ -616,13 +459,14 @@ Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->AsNumber();
+ bool has_dot = literal->ContainsDot();
switch (op) {
case Token::ADD:
return expression;
case Token::SUB:
- return factory->NewNumberLiteral(-value, pos);
+ return factory->NewNumberLiteral(-value, pos, has_dot);
case Token::BIT_NOT:
- return factory->NewNumberLiteral(~DoubleToInt32(value), pos);
+ return factory->NewNumberLiteral(~DoubleToInt32(value), pos, has_dot);
default:
break;
}
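
The new has_dot plumbing in this and the preceding hunk preserves how a numeric literal was spelled: `1` and `1.0` have the same value but different ContainsDot() flags, and constant folding must carry the flag forward rather than recompute it from the folded value. A minimal sketch, assuming a simplified literal type:

struct NumberLiteral {
  double value;
  bool has_dot;  // written with a '.' in the source text
};

// `1.0 + 1` folds to an integral value but should still report
// ContainsDot(), which matters to consumers that care how the literal
// was spelled (e.g. asm.js-style typing of literals).
NumberLiteral FoldAdd(NumberLiteral x, NumberLiteral y) {
  return {x.value + y.value, x.has_dot || y.has_dot};
}

NumberLiteral FoldNegate(NumberLiteral x) {
  return {-x.value, x.has_dot};  // mirrors BuildUnaryExpression's SUB case
}
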
@@ -851,10 +695,9 @@ Expression* ParserTraits::ExpressionFromString(int pos, Scanner* scanner,
Expression* ParserTraits::GetIterator(Expression* iterable,
- AstNodeFactory* factory) {
+ AstNodeFactory* factory, int pos) {
Expression* iterator_symbol_literal =
factory->NewSymbolLiteral("iterator_symbol", RelocInfo::kNoPosition);
- int pos = iterable->position();
Expression* prop =
factory->NewProperty(iterable, iterator_symbol_literal, pos);
Zone* zone = parser_->zone();
@@ -913,18 +756,17 @@ Parser::Parser(ParseInfo* info)
DCHECK(!info->script().is_null() || info->source_stream() != NULL);
set_allow_lazy(info->allow_lazy_parsing());
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
- set_allow_harmony_arrow_functions(FLAG_harmony_arrow_functions);
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
- set_allow_harmony_rest_parameters(FLAG_harmony_rest_parameters);
set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
- set_allow_harmony_spread_calls(FLAG_harmony_spread_calls);
- set_allow_harmony_destructuring(FLAG_harmony_destructuring);
- set_allow_harmony_spread_arrays(FLAG_harmony_spread_arrays);
- set_allow_harmony_new_target(FLAG_harmony_new_target);
+ set_allow_harmony_destructuring_bind(FLAG_harmony_destructuring_bind);
+ set_allow_harmony_destructuring_assignment(
+ FLAG_harmony_destructuring_assignment);
set_allow_strong_mode(FLAG_strong_mode);
set_allow_legacy_const(FLAG_legacy_const);
+ set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
+ set_allow_harmony_function_name(FLAG_harmony_function_name);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -1050,6 +892,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
FunctionState function_state(&function_state_, &scope_, scope,
kNormalFunction, &function_factory);
+ // Don't count the mode in the use counters--give the program a chance
+ // to enable script/module-wide strict/strong mode below.
scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
@@ -1067,7 +911,15 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
}
- if (ok && (is_strict(language_mode()) || allow_harmony_sloppy())) {
+ if (ok && is_sloppy(language_mode()) && allow_harmony_sloppy_function()) {
+ // TODO(littledan): Function bindings on the global object that modify
+ // pre-existing bindings should be made writable, enumerable and
+ // nonconfigurable if possible, whereas this code will leave attributes
+ // unchanged if the property already exists.
+ InsertSloppyBlockFunctionVarBindings(scope, &ok);
+ }
+ if (ok && (is_strict(language_mode()) || allow_harmony_sloppy() ||
+ allow_harmony_destructuring_bind())) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -1082,14 +934,14 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
}
if (ok) {
+ ParserTraits::RewriteDestructuringAssignments();
result = factory()->NewFunctionLiteral(
- ast_value_factory()->empty_string(), ast_value_factory(), scope_,
- body, function_state.materialized_literal_count(),
+ ast_value_factory()->empty_string(), scope_, body,
+ function_state.materialized_literal_count(),
function_state.expected_property_count(), 0,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval,
- FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction,
- 0);
+ FunctionLiteral::kGlobalOrEval, FunctionLiteral::kShouldLazyCompile,
+ FunctionKind::kNormalFunction, 0);
}
}
@@ -1175,17 +1027,28 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
DCHECK(is_sloppy(scope->language_mode()) ||
is_strict(info->language_mode()));
DCHECK(info->language_mode() == shared_info->language_mode());
- FunctionLiteral::FunctionType function_type = shared_info->is_expression()
- ? (shared_info->is_anonymous()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION)
- : FunctionLiteral::DECLARATION;
+ FunctionLiteral::FunctionType function_type =
+ shared_info->is_expression()
+ ? (shared_info->is_anonymous()
+ ? FunctionLiteral::kAnonymousExpression
+ : FunctionLiteral::kNamedExpression)
+ : FunctionLiteral::kDeclaration;
bool ok = true;
if (shared_info->is_arrow()) {
+ // TODO(adamk): We should construct this scope from the ScopeInfo.
Scope* scope =
- NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- scope->SetLanguageMode(shared_info->language_mode());
+ NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+
+ // These two bits only need to be explicitly set because we're
+ // not passing the ScopeInfo to the Scope constructor.
+ // TODO(adamk): Remove these calls once the above NewScope call
+ // passes the ScopeInfo.
+ if (shared_info->scope_info()->CallsEval()) {
+ scope->RecordEvalCall();
+ }
+ SetLanguageMode(scope, shared_info->language_mode());
+
scope->set_start_position(shared_info->start_position());
ExpressionClassifier formals_classifier;
ParserFormalParameters formals(scope);
@@ -1211,8 +1074,10 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
if (ok) {
checkpoint.Restore(&formals.materialized_literals_count);
+ // Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
+ // not be observable, or else the preparser would have failed.
Expression* expression =
- ParseArrowFunctionLiteral(formals, formals_classifier, &ok);
+ ParseArrowFunctionLiteral(true, formals, formals_classifier, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -1239,7 +1104,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck,
shared_info->kind(), RelocInfo::kNoPosition, function_type,
- FunctionLiteral::NORMAL_ARITY, shared_info->language_mode(), &ok);
+ FunctionLiteral::kNormalArity, shared_info->language_mode(), &ok);
}
// Make sure the results agree.
DCHECK(ok == (result != NULL));
@@ -1328,13 +1193,11 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
// Strong mode implies strict mode. If there are several "use strict"
// / "use strong" directives, do the strict mode changes only once.
if (is_sloppy(scope_->language_mode())) {
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ RaiseLanguageMode(STRICT);
}
if (use_strong_found) {
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRONG));
+ RaiseLanguageMode(STRONG);
if (IsClassConstructor(function_state_->kind())) {
// "use strong" cannot occur in a class constructor body, to avoid
// unintuitive strong class object semantics.
@@ -1370,11 +1233,18 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
// incremented after parsing is done.
++use_counts_[v8::Isolate::kUseAsm];
scope_->SetAsmModule();
+ } else {
+ // Should not change mode, but will increment UseCounter
+ // if appropriate. Ditto usages below.
+ RaiseLanguageMode(SLOPPY);
}
} else {
// End of the directive prologue.
directive_prologue = false;
+ RaiseLanguageMode(SLOPPY);
}
+ } else {
+ RaiseLanguageMode(SLOPPY);
}
body->Add(stat, zone());
@@ -1451,8 +1321,7 @@ void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
// ModuleItem*
DCHECK(scope_->is_module_scope());
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ RaiseLanguageMode(STRICT);
while (peek() != Token::EOS) {
Statement* stat = ParseModuleItem(CHECK_OK);
@@ -1706,7 +1575,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
int pos = peek_position();
ExpressionClassifier classifier;
Expression* expr = ParseAssignmentExpression(true, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
ExpectSemicolon(CHECK_OK);
result = factory()->NewExpressionStatement(expr, pos);
@@ -1927,7 +1796,7 @@ Statement* Parser::ParseSubStatement(ZoneList<const AstRawString*>* labels,
factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
Target target(&this->target_stack_, result);
Statement* statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
- if (result) result->AddStatement(statement, zone());
+ if (result) result->statements()->Add(statement, zone());
return result;
}
}
@@ -2011,9 +1880,11 @@ VariableProxy* Parser::NewUnresolved(const AstRawString* name,
// scope.
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
- return DeclarationScope(mode)->NewUnresolved(
- factory(), name, Variable::NORMAL, scanner()->location().beg_pos,
- scanner()->location().end_pos);
+ Scope* scope =
+ IsLexicalVariableMode(mode) ? scope_ : scope_->DeclarationScope();
+ return scope->NewUnresolved(factory(), name, Variable::NORMAL,
+ scanner()->location().beg_pos,
+ scanner()->location().end_pos);
}
@@ -2024,6 +1895,7 @@ Variable* Parser::Declare(Declaration* declaration,
DCHECK(proxy->raw_name() != NULL);
const AstRawString* name = proxy->raw_name();
VariableMode mode = declaration->mode();
+ bool is_function_declaration = declaration->IsFunctionDeclaration();
if (scope == nullptr) scope = scope_;
Scope* declaration_scope =
IsLexicalVariableMode(mode) ? scope : scope->DeclarationScope();
@@ -2050,7 +1922,7 @@ Variable* Parser::Declare(Declaration* declaration,
// Declare the name.
Variable::Kind kind = Variable::NORMAL;
int declaration_group_start = -1;
- if (declaration->IsFunctionDeclaration()) {
+ if (is_function_declaration) {
kind = Variable::FUNCTION;
} else if (declaration->IsVariableDeclaration() &&
declaration->AsVariableDeclaration()->is_class_declaration()) {
@@ -2061,8 +1933,11 @@ Variable* Parser::Declare(Declaration* declaration,
var = declaration_scope->DeclareLocal(
name, mode, declaration->initialization(), kind, kNotAssigned,
declaration_group_start);
- } else if (IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode()) ||
+ } else if (((IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode())) &&
+ // Allow duplicate function decls for web compat, see bug 4693.
+ (is_strict(language_mode()) || !is_function_declaration ||
+ !var->is_function())) ||
((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
!declaration_scope->is_script_scope())) {
// The name was declared in this scope before; check for conflicting
@@ -2079,7 +1954,9 @@ Variable* Parser::Declare(Declaration* declaration,
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
DCHECK(IsDeclaredVariableMode(var->mode()));
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
+ if (is_strict(language_mode()) ||
+ (allow_harmony_sloppy() && mode != CONST_LEGACY &&
+ var->mode() != CONST_LEGACY)) {
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
if (declaration_kind == DeclarationDescriptor::NORMAL) {
@@ -2108,6 +1985,7 @@ Variable* Parser::Declare(Declaration* declaration,
var = new (zone()) Variable(declaration_scope, name, mode, kind,
declaration->initialization(), kNotAssigned);
var->AllocateTo(VariableLocation::LOOKUP, -1);
+ var->SetFromEval();
resolve = true;
}
@@ -2195,7 +2073,8 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time not when reparsing
// because of lazy compilation.
- DeclarationScope(VAR)->ForceEagerCompilation();
+ // TODO(adamk): Should this be ClosureScope()?
+ scope_->DeclarationScope()->ForceEagerCompilation();
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
@@ -2207,8 +2086,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
return factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition),
+ factory()->NewAssignment(Token::INIT, proxy, lit, RelocInfo::kNoPosition),
pos);
}
@@ -2235,7 +2113,7 @@ Statement* Parser::ParseFunctionDeclaration(
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- pos, FunctionLiteral::DECLARATION, FunctionLiteral::NORMAL_ARITY,
+ pos, FunctionLiteral::kDeclaration, FunctionLiteral::kNormalArity,
language_mode(), CHECK_OK);
// Even if we're not at the top-level of the global or a function
@@ -2323,9 +2201,8 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
outer_class_variable->AsClassVariable()->declaration_group_start());
}
- Token::Value init_op =
- is_strong(language_mode()) ? Token::INIT_CONST : Token::INIT_LET;
- Assignment* assignment = factory()->NewAssignment(init_op, proxy, value, pos);
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
if (names) names->Add(name, zone());
@@ -2333,35 +2210,8 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
}
-Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
- return ParseScopedBlock(labels, ok);
- }
-
- // Block ::
- // '{' Statement* '}'
-
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
- // Construct block expecting 16 statements.
- Block* result =
- factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Target target(&this->target_stack_, result);
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- result->AddStatement(stat, zone());
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
- return result;
-}
-
-
-Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
- bool* ok) {
+Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels,
+ bool finalize_block_scope, bool* ok) {
// The harmony mode uses block elements instead of statements.
//
// Block ::
@@ -2381,25 +2231,22 @@ Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
while (peek() != Token::RBRACE) {
Statement* stat = ParseStatementListItem(CHECK_OK);
if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat, zone());
+ body->statements()->Add(stat, zone());
}
}
}
Expect(Token::RBRACE, CHECK_OK);
block_scope->set_end_position(scanner()->location().end_pos);
- block_scope = block_scope->FinalizeBlockScope();
+ if (finalize_block_scope) {
+ block_scope = block_scope->FinalizeBlockScope();
+ }
body->set_scope(block_scope);
return body;
}
-const AstRawString* Parser::DeclarationParsingResult::SingleName() const {
- if (declarations.length() != 1) return nullptr;
- const Declaration& declaration = declarations.at(0);
- if (declaration.pattern->IsVariableProxy()) {
- return declaration.pattern->AsVariableProxy()->raw_name();
- }
- return nullptr;
+Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
+ return ParseBlock(labels, true, ok);
}
@@ -2469,8 +2316,6 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// need initialization. 'var' declared bindings are always initialized
// immediately by their declaration nodes.
parsing_result->descriptor.needs_init = false;
- parsing_result->descriptor.is_const = false;
- parsing_result->descriptor.init_op = Token::INIT_VAR;
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
@@ -2483,28 +2328,22 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
Consume(Token::CONST);
if (is_sloppy(language_mode()) && allow_legacy_const()) {
parsing_result->descriptor.mode = CONST_LEGACY;
- parsing_result->descriptor.init_op = Token::INIT_CONST_LEGACY;
++use_counts_[v8::Isolate::kLegacyConst];
} else {
DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = CONST;
- parsing_result->descriptor.init_op = Token::INIT_CONST;
}
- parsing_result->descriptor.is_const = true;
parsing_result->descriptor.needs_init = true;
} else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = LET;
parsing_result->descriptor.needs_init = true;
- parsing_result->descriptor.init_op = Token::INIT_LET;
} else {
UNREACHABLE(); // by current callers
}
- parsing_result->descriptor.declaration_scope =
- DeclarationScope(parsing_result->descriptor.mode);
parsing_result->descriptor.scope = scope_;
parsing_result->descriptor.hoist_scope = nullptr;
@@ -2519,6 +2358,7 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (!first_declaration) Consume(Token::COMMA);
Expression* pattern;
+ int decl_pos = peek_position();
{
ExpressionClassifier pattern_classifier;
Token::Value next = peek();
@@ -2526,13 +2366,21 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (!*ok) return;
ValidateBindingPattern(&pattern_classifier, ok);
if (!*ok) return;
- if (!allow_harmony_destructuring() && !pattern->IsVariableProxy()) {
+ if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
+ ValidateLetPattern(&pattern_classifier, ok);
+ if (!*ok) return;
+ }
+ if (!allow_harmony_destructuring_bind() && !pattern->IsVariableProxy()) {
ReportUnexpectedToken(next);
*ok = false;
return;
}
}
+ bool is_pattern =
+ (pattern->IsObjectLiteral() || pattern->IsArrayLiteral()) &&
+ !pattern->is_parenthesized();
+
Scanner::Location variable_loc = scanner()->location();
const AstRawString* single_name =
pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
@@ -2544,22 +2392,21 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
is_for_iteration_variable =
var_context == kForStatement &&
(peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
- if (is_for_iteration_variable && parsing_result->descriptor.mode == CONST) {
+ if (is_for_iteration_variable &&
+ (parsing_result->descriptor.mode == CONST ||
+ parsing_result->descriptor.mode == CONST_LEGACY)) {
parsing_result->descriptor.needs_init = false;
}
Expression* value = NULL;
// Harmony consts have non-optional initializers.
int initializer_position = RelocInfo::kNoPosition;
- if (peek() == Token::ASSIGN || (parsing_result->descriptor.mode == CONST &&
- !is_for_iteration_variable)) {
- Expect(Token::ASSIGN, ok);
- if (!*ok) return;
+ if (Check(Token::ASSIGN)) {
ExpressionClassifier classifier;
value = ParseAssignmentExpression(var_context != kForStatement,
&classifier, ok);
if (!*ok) return;
- ValidateExpression(&classifier, ok);
+ value = ParserTraits::RewriteNonPattern(value, &classifier, ok);
if (!*ok) return;
variable_loc.end_pos = scanner()->location().end_pos;
@@ -2576,9 +2423,33 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
fni_->RemoveLastFunction();
}
}
+
+ if (allow_harmony_function_name() && single_name) {
+ if (value->IsFunctionLiteral()) {
+ auto function_literal = value->AsFunctionLiteral();
+ if (function_literal->is_anonymous()) {
+ function_literal->set_raw_name(single_name);
+ }
+ } else if (value->IsClassLiteral()) {
+ auto class_literal = value->AsClassLiteral();
+ if (class_literal->raw_name() == nullptr) {
+ class_literal->set_raw_name(single_name);
+ }
+ }
+ }
+
// End position of the initializer is after the assignment expression.
initializer_position = scanner()->location().end_pos;
} else {
+ if ((parsing_result->descriptor.mode == CONST || is_pattern) &&
+ !is_for_iteration_variable) {
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(decl_pos, scanner()->location().end_pos),
+ MessageTemplate::kDeclarationMissingInitializer,
+ is_pattern ? "destructuring" : "const");
+ *ok = false;
+ return;
+ }
// End position of the initializer is after the variable.
initializer_position = position();
}
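
The new check above turns a missing initializer into an early error for both `const` declarations and any destructuring declaration, except when the binding is the loop variable of a for-in/for-of (where the loop itself supplies the value). A condensed sketch of the rule:

// Simplified mirror of the missing-initializer check: returns the error
// to report, or nullptr when the declaration is fine.
const char* MissingInitializerError(bool has_initializer, bool is_const,
                                    bool is_pattern,
                                    bool is_for_iteration_variable) {
  if (has_initializer || is_for_iteration_variable) return nullptr;
  if (is_pattern) return "kDeclarationMissingInitializer (destructuring)";
  if (is_const) return "kDeclarationMissingInitializer (const)";
  return nullptr;  // plain `let x;` and `var x;` stay legal
}
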
@@ -2646,7 +2517,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
} else {
expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
}
- ValidateExpression(&classifier, CHECK_OK);
+ expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
switch (peek()) {
case Token::SEMICOLON:
Consume(Token::SEMICOLON);
@@ -2864,7 +2735,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// Is rewritten as:
//
// return (temp = expr) === undefined ? this :
- // %_IsSpecObject(temp) ? temp : throw new TypeError(...);
+ // %_IsJSReceiver(temp) ? temp : throw new TypeError(...);
Variable* temp = scope_->NewTemporary(
ast_value_factory()->empty_string());
Assignment* assign = factory()->NewAssignment(
@@ -2874,14 +2745,14 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
NewThrowTypeError(MessageTemplate::kDerivedConstructorReturn,
ast_value_factory()->empty_string(), pos);
- // %_IsSpecObject(temp)
+ // %_IsJSReceiver(temp)
ZoneList<Expression*>* is_spec_object_args =
new (zone()) ZoneList<Expression*>(1, zone());
is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
- Runtime::kInlineIsSpecObject, is_spec_object_args, pos);
+ Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
- // %_IsSpecObject(temp) ? temp : throw_expression
+ // %_IsJSReceiver(temp) ? temp : throw_expression
Expression* is_object_conditional = factory()->NewConditional(
is_spec_object_call, factory()->NewVariableProxy(temp),
throw_expression, pos);
@@ -2896,6 +2767,8 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
is_undefined, ThisExpression(scope_, factory(), pos),
is_object_conditional, pos);
}
+
+ return_value->MarkTail();
}
ExpectSemicolon(CHECK_OK);
@@ -2939,13 +2812,28 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
scope_->DeclarationScope()->RecordWithStatement();
Scope* with_scope = NewScope(scope_, WITH_SCOPE);
- Statement* stmt;
+ Block* body;
{ BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
- stmt = ParseSubStatement(labels, CHECK_OK);
+
+ // The body of the with statement must be enclosed in an additional
+ // lexical scope in case the body is a FunctionDeclaration.
+ body = factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ {
+ BlockState block_state(&scope_, block_scope);
+ Target target(&this->target_stack_, body);
+ Statement* stmt = ParseSubStatement(labels, CHECK_OK);
+ body->statements()->Add(stmt, zone());
+ block_scope->set_end_position(scanner()->location().end_pos);
+ block_scope = block_scope->FinalizeBlockScope();
+ body->set_scope(block_scope);
+ }
+
with_scope->set_end_position(scanner()->location().end_pos);
}
- return factory()->NewWithStatement(with_scope, expr, stmt, pos);
+ return factory()->NewWithStatement(with_scope, expr, body, pos);
}
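
Wrapping the `with` body in its own block scope matters when the body is a single FunctionDeclaration (legal in sloppy mode): the declaration needs a lexical scope to bind in, and that scope must sit inside the with-scope so name resolution in the body still consults the with-object first. A sketch of the resulting nesting, assuming trivially simplified scope objects:

enum class ScopeKind { kWith, kBlock };

struct Scope {
  ScopeKind kind;
  const Scope* outer;
};

// Mirrors the nesting ParseWithStatement now builds for `with (o) stmt;`.
int main() {
  Scope script{ScopeKind::kBlock, nullptr};
  Scope with_scope{ScopeKind::kWith, &script};
  Scope body_block{ScopeKind::kBlock, &with_scope};  // hosts `stmt`
  (void)body_block;
}
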
@@ -3019,12 +2907,12 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
tag->position());
Statement* tag_statement =
factory()->NewExpressionStatement(tag_assign, RelocInfo::kNoPosition);
- switch_block->AddStatement(tag_statement, zone());
+ switch_block->statements()->Add(tag_statement, zone());
// make statement: undefined;
// This is needed so the tag isn't returned as the value, in case the switch
// statements don't have a value.
- switch_block->AddStatement(
+ switch_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
@@ -3054,7 +2942,7 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
cases->Add(clause, zone());
}
switch_statement->Initialize(tag_read, cases);
- cases_block->AddStatement(switch_statement, zone());
+ cases_block->statements()->Add(switch_statement, zone());
}
Expect(Token::RBRACE, CHECK_OK);
@@ -3062,7 +2950,7 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
cases_scope = cases_scope->FinalizeBlockScope();
cases_block->set_scope(cases_scope);
- switch_block->AddStatement(cases_block, zone());
+ switch_block->statements()->Add(cases_block, zone());
return switch_block;
}
@@ -3114,21 +3002,76 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
- const AstRawString* name = NULL;
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
catch_scope = NewScope(scope_, CATCH_SCOPE);
catch_scope->set_start_position(scanner()->location().beg_pos);
- name = ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- Expect(Token::RPAREN, CHECK_OK);
+ ExpressionClassifier pattern_classifier;
+ Expression* pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ ValidateBindingPattern(&pattern_classifier, CHECK_OK);
+
+ const AstRawString* name = ast_value_factory()->dot_catch_string();
+ bool is_simple = pattern->IsVariableProxy();
+ if (is_simple) {
+ auto proxy = pattern->AsVariableProxy();
+ scope_->RemoveUnresolved(proxy);
+ name = proxy->raw_name();
+ }
catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
Variable::NORMAL);
- BlockState block_state(&scope_, catch_scope);
- catch_block = ParseBlock(NULL, CHECK_OK);
+
+ Expect(Token::RPAREN, CHECK_OK);
+
+ {
+ BlockState block_state(&scope_, catch_scope);
+
+ // TODO(adamk): Make a version of ParseBlock that takes a scope and
+ // a block.
+ catch_block =
+ factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+
+ block_scope->set_start_position(scanner()->location().beg_pos);
+ {
+ BlockState block_state(&scope_, block_scope);
+ Target target(&this->target_stack_, catch_block);
+
+ if (!is_simple) {
+ DeclarationDescriptor descriptor;
+ descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
+ descriptor.parser = this;
+ descriptor.scope = scope_;
+ descriptor.hoist_scope = nullptr;
+ descriptor.mode = LET;
+ descriptor.needs_init = true;
+ descriptor.declaration_pos = pattern->position();
+ descriptor.initialization_pos = pattern->position();
+
+ DeclarationParsingResult::Declaration decl(
+ pattern, pattern->position(),
+ factory()->NewVariableProxy(catch_variable));
+
+ PatternRewriter::DeclareAndInitializeVariables(
+ catch_block, &descriptor, &decl, nullptr, CHECK_OK);
+ }
+
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ Statement* stat = ParseStatementListItem(CHECK_OK);
+ if (stat && !stat->IsEmpty()) {
+ catch_block->statements()->Add(stat, zone());
+ }
+ }
+ Consume(Token::RBRACE);
+ }
+ block_scope->set_end_position(scanner()->location().end_pos);
+ block_scope = block_scope->FinalizeBlockScope();
+ catch_block->set_scope(block_scope);
+ }
catch_scope->set_end_position(scanner()->location().end_pos);
tok = peek();
@@ -3153,7 +3096,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
factory()->NewTryCatchStatement(try_block, catch_scope, catch_variable,
catch_block, RelocInfo::kNoPosition);
try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- try_block->AddStatement(statement, zone());
+ try_block->statements()->Add(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
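
The rewritten CATCH clause above generalizes `catch (e)` to arbitrary binding patterns: a simple identifier keeps the old fast path, while a pattern binds the thrown value to a synthetic `.catch` variable and destructures it into fresh `let` bindings at the top of the catch block. A comment sketch of the conceptual source-to-source view (illustrative only; the parser builds the AST directly):

//   try { f(); } catch ({a, b}) { g(a, b); }
//
// behaves as if written:
//
//   try { f(); } catch (.catch) {     // synthetic simple binding
//     { let {a, b} = .catch;          // declared by PatternRewriter
//       g(a, b); }                    // original catch body
//   }
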
@@ -3219,7 +3162,7 @@ WhileStatement* Parser::ParseWhileStatement(
}
-// !%_IsSpecObject(result = iterator.next()) &&
+// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
Expression* Parser::BuildIteratorNextResult(Expression* iterator,
Variable* result, int pos) {
@@ -3235,12 +3178,12 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
Expression* left =
factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
- // %_IsSpecObject(...)
+ // %_IsJSReceiver(...)
ZoneList<Expression*>* is_spec_object_args =
new (zone()) ZoneList<Expression*>(1, zone());
is_spec_object_args->Add(left, zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
- Runtime::kInlineIsSpecObject, is_spec_object_args, pos);
+ Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
// %ThrowIteratorResultNotAnObject(result)
Expression* result_proxy_again = factory()->NewVariableProxy(result);
@@ -3258,9 +3201,10 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
void Parser::InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each,
- Expression* subject,
- Statement* body) {
+ Expression* each, Expression* subject,
+ Statement* body,
+ bool is_destructuring) {
+ DCHECK(!is_destructuring || allow_harmony_destructuring_assignment());
ForOfStatement* for_of = stmt->AsForOfStatement();
if (for_of != NULL) {
@@ -3275,17 +3219,22 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* assign_each;
// iterator = subject[Symbol.iterator]()
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
assign_iterator = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(subject, factory()), subject->position());
+ GetIterator(subject, factory(), subject->position() - 2),
+ subject->position());
- // !%_IsSpecObject(result = iterator.next()) &&
+ // !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
{
// result = iterator.next()
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- next_result =
- BuildIteratorNextResult(iterator_proxy, result, subject->position());
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
+ next_result = BuildIteratorNextResult(iterator_proxy, result,
+ subject->position() - 1);
}
// result.done
@@ -3306,6 +3255,10 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
result_proxy, value_literal, RelocInfo::kNoPosition);
assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
RelocInfo::kNoPosition);
+ if (is_destructuring) {
+ assign_each = PatternRewriter::RewriteDestructuringAssignment(
+ this, assign_each->AsAssignment(), scope_);
+ }
}
for_of->Initialize(each, subject, body,
@@ -3314,6 +3267,23 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
result_done,
assign_each);
} else {
+ if (is_destructuring) {
+ Variable* temp =
+ scope_->NewTemporary(ast_value_factory()->empty_string());
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
+ Expression* assign_each = PatternRewriter::RewriteDestructuringAssignment(
+ this, factory()->NewAssignment(Token::ASSIGN, each, temp_proxy,
+ RelocInfo::kNoPosition),
+ scope_);
+ auto block =
+ factory()->NewBlock(nullptr, 2, false, RelocInfo::kNoPosition);
+ block->statements()->Add(factory()->NewExpressionStatement(
+ assign_each, RelocInfo::kNoPosition),
+ zone());
+ block->statements()->Add(body, zone());
+ body = block;
+ each = factory()->NewVariableProxy(temp);
+ }
stmt->Initialize(each, subject, body);
}
}
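In the non-for-of branch above, a destructuring target is routed through a temporary: the loop enumerates into the temp, and a statement prepended to the body destructures the temp into the user's pattern. Roughly, in TypeScript terms:

    // for ([a, b] in obj) body   ~>   for (.temp in obj) { [a, b] = .temp; body }
    let a: string, b: string;
    for (const temp in { ab: 0, cd: 1 }) {
      [a, b] = temp;        // the prepended assign_each statement
      console.log(a, b);    // "a b", then "c d"
    }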
@@ -3323,16 +3293,18 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok) {
- // ES6 13.6.3.4 specifies that on each loop iteration the let variables are
- // copied into a new environment. After copying, the "next" statement of the
- // loop is executed to update the loop variables. The loop condition is
- // checked and the loop body is executed.
+ // ES6 13.7.4.8 specifies that on each loop iteration the let variables are
+ // copied into a new environment. Moreover, the "next" statement must be
+ // evaluated not in the environment of the just completed iteration but in
+ // that of the upcoming one. We achieve this with the following desugaring.
+ // Extra care is needed to preserve the completion value of the original loop.
//
- // We rewrite a for statement of the form
+ // We are given a for statement of the form
//
// labels: for (let/const x = i; cond; next) body
//
- // into
+ // and rewrite it as follows. Here we write {{ ... }} for init-blocks, i.e.,
+ // blocks whose ignore_completion_value_ flag is set.
//
// {
// let/const x = i;
@@ -3340,29 +3312,21 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// first = 1;
// undefined;
// outer: for (;;) {
- // { // This block's only function is to ensure that the statements it
- // // contains do not affect the normal completion value. This is
- // // accomplished by setting its ignore_completion_value bit.
- // // No new lexical scope is introduced, so lexically scoped variables
- // // declared here will be scoped to the outer for loop.
- // let/const x = temp_x;
- // if (first == 1) {
- // first = 0;
- // } else {
- // next;
- // }
- // flag = 1;
- // }
+ // let/const x = temp_x;
+ // {{ if (first == 1) {
+ // first = 0;
+ // } else {
+ // next;
+ // }
+ // flag = 1;
+ // if (!cond) break;
+ // }}
// labels: for (; flag == 1; flag = 0, temp_x = x) {
- // if (cond) {
- // body
- // } else {
- // break outer;
- // }
- // }
- // if (flag == 1) {
- // break;
+ // body
// }
+ // {{ if (flag == 1) // Body used break.
+ // break;
+ // }}
// }
// }
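For reference, the copying mandated by ES6 13.7.4.8 is observable whenever the body creates closures: each iteration of a let-declared loop gets fresh bindings. A small TypeScript check:

    const fns: Array<() => number> = [];
    for (let i = 0; i < 3; i++) {
      fns.push(() => i);            // captures this iteration's copy of i
    }
    console.log(fns.map(f => f())); // [0, 1, 2], not [3, 3, 3]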
@@ -3370,11 +3334,11 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Scope* for_scope = scope_;
ZoneList<Variable*> temps(names->length(), zone());
- Block* outer_block = factory()->NewBlock(NULL, names->length() + 3, false,
+ Block* outer_block = factory()->NewBlock(NULL, names->length() + 4, false,
RelocInfo::kNoPosition);
// Add statement: let/const x = i.
- outer_block->AddStatement(init, zone());
+ outer_block->statements()->Add(init, zone());
const AstRawString* temp_name = ast_value_factory()->dot_for_string();
@@ -3388,7 +3352,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
Statement* assignment_statement = factory()->NewExpressionStatement(
assignment, RelocInfo::kNoPosition);
- outer_block->AddStatement(assignment_statement, zone());
+ outer_block->statements()->Add(assignment_statement, zone());
temps.Add(temp, zone());
}
@@ -3402,11 +3366,11 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, first_proxy, const1, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- outer_block->AddStatement(assignment_statement, zone());
+ outer_block->statements()->Add(assignment_statement, zone());
}
// make statement: undefined;
- outer_block->AddStatement(
+ outer_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
@@ -3419,7 +3383,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// in this function that looks up break targets.
ForStatement* outer_loop =
factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
- outer_block->AddStatement(outer_loop, zone());
+ outer_block->statements()->Add(outer_loop, zone());
outer_block->set_scope(for_scope);
scope_ = inner_scope;
@@ -3427,7 +3391,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Block* inner_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
Block* ignore_completion_block = factory()->NewBlock(
- NULL, names->length() + 2, true, RelocInfo::kNoPosition);
+ NULL, names->length() + 3, true, RelocInfo::kNoPosition);
ZoneList<Variable*> inner_vars(names->length(), zone());
// For each let variable x:
// make statement: let/const x = temp_x.
@@ -3439,14 +3403,13 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
inner_vars.Add(declaration->proxy()->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
- Assignment* assignment =
- factory()->NewAssignment(is_const ? Token::INIT_CONST : Token::INIT_LET,
- proxy, temp_proxy, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
DCHECK(init->position() != RelocInfo::kNoPosition);
proxy->var()->set_initializer_position(init->position());
- ignore_completion_block->AddStatement(assignment_statement, zone());
+ ignore_completion_block->statements()->Add(assignment_statement, zone());
}
// Make statement: if (first == 1) { first = 0; } else { next; }
@@ -3472,7 +3435,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
}
Statement* clear_first_or_next = factory()->NewIfStatement(
compare, clear_first, next, RelocInfo::kNoPosition);
- ignore_completion_block->AddStatement(clear_first_or_next, zone());
+ ignore_completion_block->statements()->Add(clear_first_or_next, zone());
}
Variable* flag = scope_->NewTemporary(temp_name);
@@ -3484,9 +3447,19 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- ignore_completion_block->AddStatement(assignment_statement, zone());
+ ignore_completion_block->statements()->Add(assignment_statement, zone());
}
- inner_block->AddStatement(ignore_completion_block, zone());
+
+ // Make statement: if (!cond) break.
+ if (cond) {
+ Statement* stop =
+ factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+ Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(
+ factory()->NewIfStatement(cond, noop, stop, cond->position()), zone());
+ }
+
+ inner_block->statements()->Add(ignore_completion_block, zone());
// Make cond expression for main loop: flag == 1.
Expression* flag_cond = NULL;
{
@@ -3524,23 +3497,14 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
compound_next, RelocInfo::kNoPosition);
}
- // Make statement: if (cond) { body; } else { break outer; }
- Statement* body_or_stop = body;
- if (cond) {
- Statement* stop =
- factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
- body_or_stop =
- factory()->NewIfStatement(cond, body, stop, cond->position());
- }
-
// Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
// Note that we re-use the original loop node, which retains its labels
// and ensures that any break or continue statements in body point to
// the right place.
- loop->Initialize(NULL, flag_cond, compound_next_statement, body_or_stop);
- inner_block->AddStatement(loop, zone());
+ loop->Initialize(NULL, flag_cond, compound_next_statement, body);
+ inner_block->statements()->Add(loop, zone());
- // Make statement: if (flag == 1) { break; }
+ // Make statement: {{if (flag == 1) break;}}
{
Expression* compare = NULL;
// Make compare expresion: flag == 1.
@@ -3555,7 +3519,10 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
Statement* if_flag_break =
factory()->NewIfStatement(compare, stop, empty, RelocInfo::kNoPosition);
- inner_block->AddStatement(if_flag_break, zone());
+ Block* ignore_completion_block =
+ factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(if_flag_break, zone());
+ inner_block->statements()->Add(ignore_completion_block, zone());
}
inner_scope->set_end_position(scanner()->location().end_pos);
@@ -3594,12 +3561,11 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
int num_decl = parsing_result.declarations.length();
bool accept_IN = num_decl >= 1;
- bool accept_OF = true;
ForEachStatement::VisitMode mode;
int each_beg_pos = scanner()->location().beg_pos;
int each_end_pos = scanner()->location().end_pos;
- if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
+ if (accept_IN && CheckInOrOf(&mode, ok)) {
if (!*ok) return nullptr;
if (num_decl != 1) {
const char* loop_type =
@@ -3610,9 +3576,12 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
*ok = false;
return nullptr;
}
+ DeclarationParsingResult::Declaration& decl =
+ parsing_result.declarations[0];
if (parsing_result.first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
- IsLexicalVariableMode(parsing_result.descriptor.mode))) {
+ IsLexicalVariableMode(parsing_result.descriptor.mode) ||
+ !decl.pattern->IsVariableProxy())) {
if (mode == ForEachStatement::ITERATE) {
ReportMessageAt(parsing_result.first_initializer_loc,
MessageTemplate::kForOfLoopInitializer);
@@ -3625,23 +3594,22 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
return nullptr;
}
- DCHECK(parsing_result.declarations.length() == 1);
Block* init_block = nullptr;
// special case for legacy for (var/const x =.... in)
if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
- parsing_result.declarations[0].initializer != nullptr) {
+ decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+ const AstRawString* name =
+ decl.pattern->AsVariableProxy()->raw_name();
VariableProxy* single_var = scope_->NewUnresolved(
- factory(), parsing_result.SingleName(), Variable::NORMAL,
- each_beg_pos, each_end_pos);
+ factory(), name, Variable::NORMAL, each_beg_pos, each_end_pos);
init_block = factory()->NewBlock(
nullptr, 2, true, parsing_result.descriptor.declaration_pos);
- init_block->AddStatement(
+ init_block->statements()->Add(
factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::ASSIGN, single_var,
- parsing_result.declarations[0].initializer,
- RelocInfo::kNoPosition),
+ factory()->NewAssignment(Token::ASSIGN, single_var,
+ decl.initializer,
+ RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
zone());
}
@@ -3684,9 +3652,6 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
auto each_initialization_block =
factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
{
- DCHECK(parsing_result.declarations.length() == 1);
- DeclarationParsingResult::Declaration decl =
- parsing_result.declarations[0];
auto descriptor = parsing_result.descriptor;
descriptor.declaration_pos = RelocInfo::kNoPosition;
descriptor.initialization_pos = RelocInfo::kNoPosition;
@@ -3699,11 +3664,12 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
CHECK_OK);
}
- body_block->AddStatement(each_initialization_block, zone());
- body_block->AddStatement(body, zone());
+ body_block->statements()->Add(each_initialization_block, zone());
+ body_block->statements()->Add(body, zone());
VariableProxy* temp_proxy =
factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
- InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
+ false);
scope_ = for_scope;
body_scope->set_end_position(scanner()->location().end_pos);
body_scope = body_scope->FinalizeBlockScope();
@@ -3736,7 +3702,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
for_scope = for_scope->FinalizeBlockScope();
// Parsed for-in loop w/ variable declarations.
if (init_block != nullptr) {
- init_block->AddStatement(loop, zone());
+ init_block->statements()->Add(loop, zone());
if (for_scope != nullptr) {
init_block->set_scope(for_scope);
}
@@ -3754,20 +3720,34 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
} else {
int lhs_beg_pos = peek_position();
- Expression* expression = ParseExpression(false, CHECK_OK);
+ ExpressionClassifier classifier;
+ Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
ForEachStatement::VisitMode mode;
- bool accept_OF = expression->IsVariableProxy();
is_let_identifier_expression =
expression->IsVariableProxy() &&
expression->AsVariableProxy()->raw_name() ==
ast_value_factory()->let_string();
- if (CheckInOrOf(accept_OF, &mode, ok)) {
- if (!*ok) return nullptr;
- expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, lhs_end_pos,
- MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
+ bool is_for_each = CheckInOrOf(&mode, ok);
+ if (!*ok) return nullptr;
+ bool is_destructuring =
+ is_for_each && allow_harmony_destructuring_assignment() &&
+ (expression->IsArrayLiteral() || expression->IsObjectLiteral());
+
+ if (is_destructuring) {
+ ValidateAssignmentPattern(&classifier, CHECK_OK);
+ } else {
+ expression =
+ ParserTraits::RewriteNonPattern(expression, &classifier, CHECK_OK);
+ }
+
+ if (is_for_each) {
+ if (!is_destructuring) {
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_beg_pos, lhs_end_pos,
+ MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
+ }
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
@@ -3776,12 +3756,29 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
+ // Make a block around the statement in case a lexical binding
+ // is introduced, e.g. by a FunctionDeclaration.
+ // This block must not use for_scope as its scope because if a
+ // lexical binding is introduced which overlaps with the for-in/of,
+ // expressions in head of the loop should actually have variables
+ // resolved in the outer scope.
+ Scope* body_scope = NewScope(for_scope, BLOCK_SCOPE);
+ scope_ = body_scope;
+ Block* block =
+ factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
Statement* body = ParseSubStatement(NULL, CHECK_OK);
- InitializeForEachStatement(loop, expression, enumerable, body);
+ block->statements()->Add(body, zone());
+ InitializeForEachStatement(loop, expression, enumerable, block,
+ is_destructuring);
scope_ = saved_scope;
+ body_scope->set_end_position(scanner()->location().end_pos);
+ body_scope = body_scope->FinalizeBlockScope();
+ if (body_scope != nullptr) {
+ block->set_scope(body_scope);
+ }
for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
- DCHECK(for_scope == NULL);
+ DCHECK(for_scope == nullptr);
// Parsed for-in loop.
return loop;
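The is_destructuring path admits an array or object literal as the loop target, validating it as an assignment pattern instead of rewriting it as a plain reference. The accepted surface form, in TypeScript:

    let x = 0, y = 0;
    for ([x, y] of [[1, 2], [3, 4]]) {  // head is an assignment pattern
      console.log(x + y);               // 3, then 7
    }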
@@ -3851,11 +3848,21 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// const x = i;
// for (; c; n) b
// }
- DCHECK(init != NULL);
+ //
+ // or, desugar
+ // for (; c; n) b
+ // into
+ // {
+ // for (; c; n) b
+ // }
+ // just in case b introduces a lexical binding some other way, e.g., if b
+ // is a FunctionDeclaration.
Block* block =
factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
- block->AddStatement(init, zone());
- block->AddStatement(loop, zone());
+ if (init != nullptr) {
+ block->statements()->Add(init, zone());
+ }
+ block->statements()->Add(loop, zone());
block->set_scope(for_scope);
loop->Initialize(NULL, cond, next, body);
result = block;
@@ -3967,9 +3974,6 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
if (is_rest) {
expr = expr->AsSpread()->expression();
parameters->has_rest = true;
- parameters->rest_array_literal_index =
- parser_->function_state_->NextMaterializedLiteralIndex();
- ++parameters->materialized_literals_count;
}
if (parameters->is_simple) {
parameters->is_simple = !is_rest && expr->IsVariableProxy();
@@ -3989,9 +3993,36 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
DCHECK(!assignment->is_compound());
initializer = assignment->value();
expr = assignment->target();
+
+ // TODO(adamk): Only call this if necessary.
+ RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
+ parser_->scope_, parameters->scope);
}
- AddFormalParameter(parameters, expr, initializer, is_rest);
+ // TODO(adamk): params_loc.end_pos is not the correct initializer position,
+ // but it should be conservative enough to trigger hole checks for variables
+ // referenced in the initializer (if any).
+ AddFormalParameter(parameters, expr, initializer, params_loc.end_pos,
+ is_rest);
+}
+
+
+DoExpression* Parser::ParseDoExpression(bool* ok) {
+ // AssignmentExpression ::
+ // do '{' StatementList '}'
+ int pos = peek_position();
+
+ Expect(Token::DO, CHECK_OK);
+ Variable* result =
+ scope_->NewTemporary(ast_value_factory()->dot_result_string());
+ Block* block = ParseBlock(nullptr, false, CHECK_OK);
+ DoExpression* expr = factory()->NewDoExpression(block, result, pos);
+ if (!Rewriter::Rewrite(this, expr, ast_value_factory())) {
+ *ok = false;
+ return nullptr;
+ }
+ block->set_scope(block->scope()->FinalizeBlockScope());
+ return expr;
}
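ParseDoExpression implements the do-expression proposal: the completion value of the block becomes the value of the expression, captured here in the .result temporary. Without the feature, the behavior can be approximated in TypeScript with an immediately invoked arrow:

    // do { const t = 21; t * 2 }  ~  (() => { const t = 21; return t * 2; })()
    const doubled = (() => {
      const t = 21;
      return t * 2;   // stands in for the block's completion value
    })();
    console.log(doubled); // 42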
@@ -4025,10 +4056,7 @@ void ParserTraits::ReindexLiterals(const ParserFormalParameters& parameters) {
for (const auto p : parameters.params) {
if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
- }
-
- if (parameters.has_rest) {
- parameters.rest_array_literal_index = reindexer.NextIndex();
+ if (p.initializer != nullptr) reindexer.Reindex(p.initializer);
}
DCHECK(reindexer.count() <=
@@ -4096,14 +4124,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// nested function, and hoisting works normally relative to that.
Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
- Scope* scope = function_type == FunctionLiteral::DECLARATION &&
+ Scope* scope = function_type == FunctionLiteral::kDeclaration &&
is_sloppy(language_mode) &&
!allow_harmony_sloppy_function() &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
: NewScope(scope_, FUNCTION_SCOPE, kind);
- scope->SetLanguageMode(language_mode);
+ SetLanguageMode(scope, language_mode);
ZoneList<Statement*>* body = NULL;
int arity = -1;
int materialized_literal_count = -1;
@@ -4230,7 +4258,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// - The function literal shouldn't be hinted to eagerly compile.
bool use_temp_zone =
FLAG_lazy && !allow_natives() && extension_ == NULL && allow_lazy() &&
- function_type == FunctionLiteral::DECLARATION &&
+ function_type == FunctionLiteral::kDeclaration &&
eager_compile_hint != FunctionLiteral::kShouldEagerCompile;
// Open a new BodyScope, which sets our AstNodeFactory to allocate in the
// new temporary zone if the preconditions are satisfied, and ensures that
@@ -4284,9 +4312,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
}
- if (is_strict(language_mode) || allow_harmony_sloppy()) {
+ if (is_strict(language_mode) || allow_harmony_sloppy() ||
+ allow_harmony_destructuring_bind()) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
+
+ if (body) {
+ // If the body can be inspected, rewrite queued destructuring assignments.
+ ParserTraits::RewriteDestructuringAssignments();
+ }
}
bool has_duplicate_parameters =
@@ -4296,14 +4330,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
: FunctionLiteral::kNoDuplicateParameters;
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- function_name, ast_value_factory(), scope, body,
- materialized_literal_count, expected_property_count, arity,
- duplicate_parameters, function_type, FunctionLiteral::kIsFunction,
+ function_name, scope, body, materialized_literal_count,
+ expected_property_count, arity, duplicate_parameters, function_type,
eager_compile_hint, kind, pos);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
+ if (scope->has_rest_parameter()) {
+ function_literal->set_dont_optimize_reason(kRestParameter);
+ }
+
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
@@ -4335,7 +4372,7 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
- scope_->SetLanguageMode(entry.language_mode());
+ SetLanguageMode(scope_, entry.language_mode());
if (entry.uses_super_property()) scope_->RecordSuperPropertyUsage();
if (entry.calls_eval()) scope_->RecordEvalCall();
return;
@@ -4371,7 +4408,7 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
total_preparse_skipped_ += scope_->end_position() - function_block_pos;
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
- scope_->SetLanguageMode(logger.language_mode());
+ SetLanguageMode(scope_, logger.language_mode());
if (logger.uses_super_property()) {
scope_->RecordSuperPropertyUsage();
}
@@ -4389,21 +4426,6 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
}
-void Parser::AddAssertIsConstruct(ZoneList<Statement*>* body, int pos) {
- ZoneList<Expression*>* arguments =
- new (zone()) ZoneList<Expression*>(0, zone());
- CallRuntime* construct_check = factory()->NewCallRuntime(
- Runtime::kInlineIsConstructCall, arguments, pos);
- CallRuntime* non_callable_error = factory()->NewCallRuntime(
- Runtime::kThrowConstructorNonCallableError, arguments, pos);
- IfStatement* if_statement = factory()->NewIfStatement(
- factory()->NewUnaryOperation(Token::NOT, construct_check, pos),
- factory()->NewReturnStatement(non_callable_error, pos),
- factory()->NewEmptyStatement(pos), pos);
- body->Add(if_statement, zone());
-}
-
-
Statement* Parser::BuildAssertIsCoercible(Variable* var) {
// if (var === null || var === undefined)
// throw /* type error kNonCoercible) */;
@@ -4430,6 +4452,36 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
}
+class InitializerRewriter : public AstExpressionVisitor {
+ public:
+ InitializerRewriter(uintptr_t stack_limit, Expression* root, Parser* parser,
+ Scope* scope)
+ : AstExpressionVisitor(stack_limit, root),
+ parser_(parser),
+ scope_(scope) {}
+
+ private:
+ void VisitExpression(Expression* expr) {
+ RewritableAssignmentExpression* to_rewrite =
+ expr->AsRewritableAssignmentExpression();
+ if (to_rewrite == nullptr || to_rewrite->is_rewritten()) return;
+
+ Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
+ scope_);
+ }
+
+ private:
+ Parser* parser_;
+ Scope* scope_;
+};
+
+
+void Parser::RewriteParameterInitializer(Expression* expr, Scope* scope) {
+ InitializerRewriter rewriter(stack_limit_, expr, this, scope);
+ rewriter.Run();
+}
+
+
Block* Parser::BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok) {
DCHECK(!parameters.is_simple);
@@ -4438,23 +4490,32 @@ Block* Parser::BuildParameterInitializationBlock(
factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
for (int i = 0; i < parameters.params.length(); ++i) {
auto parameter = parameters.params[i];
+ if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.parser = this;
- descriptor.declaration_scope = scope_;
descriptor.scope = scope_;
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
- descriptor.is_const = false;
descriptor.needs_init = true;
descriptor.declaration_pos = parameter.pattern->position();
+ // The position that will be used by the AssignmentExpression
+ // which copies from the temp parameter to the pattern.
+ //
+ // TODO(adamk): Should this be RelocInfo::kNoPosition, since
+ // it's just copying from a temp var to the real param var?
descriptor.initialization_pos = parameter.pattern->position();
- descriptor.init_op = Token::INIT_LET;
+ // The initializer position, which will end up in
+ // Variable::initializer_position(), is used for hole check elimination.
+ int initializer_position = parameter.pattern->position();
Expression* initial_value =
factory()->NewVariableProxy(parameters.scope->parameter(i));
if (parameter.initializer != nullptr) {
// IS_UNDEFINED($param) ? initializer : $param
- DCHECK(!parameter.is_rest);
+
+ // Ensure initializer is rewritten
+ RewriteParameterInitializer(parameter.initializer, scope_);
+
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
factory()->NewVariableProxy(parameters.scope->parameter(i)),
@@ -4464,83 +4525,7 @@ Block* Parser::BuildParameterInitializationBlock(
condition, parameter.initializer, initial_value,
RelocInfo::kNoPosition);
descriptor.initialization_pos = parameter.initializer->position();
- } else if (parameter.is_rest) {
- // $rest = [];
- // for (var $argument_index = $rest_index;
- // $argument_index < %_ArgumentsLength();
- // ++$argument_index) {
- // %AppendElement($rest, %_Arguments($argument_index));
- // }
- // let <param> = $rest;
- DCHECK(parameter.pattern->IsVariableProxy());
- DCHECK_EQ(i, parameters.params.length() - 1);
-
- int pos = parameter.pattern->position();
- Variable* temp_var = parameters.scope->parameter(i);
- auto empty_values = new (zone()) ZoneList<Expression*>(0, zone());
- auto empty_array = factory()->NewArrayLiteral(
- empty_values, parameters.rest_array_literal_index,
- is_strong(language_mode()), RelocInfo::kNoPosition);
-
- auto init_array = factory()->NewAssignment(
- Token::INIT_VAR, factory()->NewVariableProxy(temp_var), empty_array,
- RelocInfo::kNoPosition);
-
- auto loop = factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
-
- auto argument_index =
- parameters.scope->NewTemporary(ast_value_factory()->empty_string());
- auto init = factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::INIT_VAR, factory()->NewVariableProxy(argument_index),
- factory()->NewSmiLiteral(i, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- auto empty_arguments = new (zone()) ZoneList<Expression*>(0, zone());
-
- // $arguments_index < arguments.length
- auto cond = factory()->NewCompareOperation(
- Token::LT, factory()->NewVariableProxy(argument_index),
- factory()->NewCallRuntime(Runtime::kInlineArgumentsLength,
- empty_arguments, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- // ++argument_index
- auto next = factory()->NewExpressionStatement(
- factory()->NewCountOperation(
- Token::INC, true, factory()->NewVariableProxy(argument_index),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- // %_Arguments($arguments_index)
- auto arguments_args = new (zone()) ZoneList<Expression*>(1, zone());
- arguments_args->Add(factory()->NewVariableProxy(argument_index), zone());
-
- // %AppendElement($rest, %_Arguments($arguments_index))
- auto append_element_args = new (zone()) ZoneList<Expression*>(2, zone());
-
- append_element_args->Add(factory()->NewVariableProxy(temp_var), zone());
- append_element_args->Add(
- factory()->NewCallRuntime(Runtime::kInlineArguments, arguments_args,
- RelocInfo::kNoPosition),
- zone());
-
- auto body = factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- loop->Initialize(init, cond, next, body);
-
- init_block->AddStatement(
- factory()->NewExpressionStatement(init_array, RelocInfo::kNoPosition),
- zone());
-
- init_block->AddStatement(loop, zone());
-
- descriptor.initialization_pos = pos;
+ initializer_position = parameter.initializer_end_position;
}
Scope* param_scope = scope_;
@@ -4559,7 +4544,7 @@ Block* Parser::BuildParameterInitializationBlock(
{
BlockState block_state(&scope_, param_scope);
DeclarationParsingResult::Declaration decl(
- parameter.pattern, parameter.pattern->position(), initial_value);
+ parameter.pattern, initializer_position, initial_value);
PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
&decl, nullptr, CHECK_OK);
}
@@ -4569,7 +4554,7 @@ Block* Parser::BuildParameterInitializationBlock(
if (param_scope != nullptr) {
CheckConflictingVarDeclarations(param_scope, CHECK_OK);
}
- init_block->AddStatement(param_block, zone());
+ init_block->statements()->Add(param_block, zone());
}
}
return init_block;
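The IS_UNDEFINED($param) ? initializer : $param conditional assembled above has simple surface semantics: a default fires only when the argument is exactly undefined. A TypeScript sketch of the synthesized check:

    function f(aArg?: number): number {
      const a = aArg === undefined ? 42 : aArg; // IS_UNDEFINED($param) ? init : $param
      return a;
    }
    console.log(f());  // 42: undefined triggers the initializer
    console.log(f(0)); // 0: falsy but defined arguments pass through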
@@ -4586,7 +4571,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
static const int kFunctionNameAssignmentIndex = 0;
- if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
+ if (function_type == FunctionLiteral::kNamedExpression) {
DCHECK(function_name != NULL);
// If we have a named function expression, we add a local variable
// declaration to the body of the function with the name of the
@@ -4597,12 +4582,6 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
result->Add(NULL, zone());
}
- // For concise constructors, check that they are constructed,
- // not called.
- if (IsClassConstructor(kind)) {
- AddAssertIsConstruct(result, pos);
- }
-
ZoneList<Statement*>* body = result;
Scope* inner_scope = scope_;
Block* inner_block = nullptr;
@@ -4627,7 +4606,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
VariableProxy* init_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
- Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
+ Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
@@ -4664,7 +4643,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
if (!parameters.is_simple) {
DCHECK_NOT_NULL(inner_scope);
DCHECK_EQ(body, inner_block->statements());
- scope_->SetLanguageMode(inner_scope->language_mode());
+ SetLanguageMode(scope_, inner_scope->language_mode());
Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
DCHECK_NOT_NULL(init_block);
@@ -4672,25 +4651,20 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
inner_scope = inner_scope->FinalizeBlockScope();
if (inner_scope != nullptr) {
CheckConflictingVarDeclarations(inner_scope, CHECK_OK);
+ InsertShadowingVarBindingInitializers(inner_block);
}
result->Add(init_block, zone());
result->Add(inner_block, zone());
}
- if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
+ if (function_type == FunctionLiteral::kNamedExpression) {
// Now that we know the language mode, we can create the const assignment
// in the previously reserved spot.
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxies as is the case now.
- Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
- bool use_strict_const = is_strict(scope_->language_mode()) ||
- (!allow_legacy_const() && allow_harmony_sloppy());
- if (use_strict_const) {
- fvar_init_op = Token::INIT_CONST;
- }
- VariableMode fvar_mode = use_strict_const ? CONST : CONST_LEGACY;
+ VariableMode fvar_mode = is_strict(language_mode()) ? CONST : CONST_LEGACY;
Variable* fvar = new (zone())
Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
kCreatedInitialized, kNotAssigned);
@@ -4699,11 +4673,10 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
scope_->DeclareFunctionVar(fvar_declaration);
- VariableProxy* fproxy = scope_->NewUnresolved(factory(), function_name);
- fproxy->BindTo(fvar);
+ VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
result->Set(kFunctionNameAssignmentIndex,
factory()->NewExpressionStatement(
- factory()->NewAssignment(fvar_init_op, fproxy,
+ factory()->NewAssignment(Token::INIT, fproxy,
factory()->NewThisFunction(pos),
RelocInfo::kNoPosition),
RelocInfo::kNoPosition));
@@ -4728,16 +4701,14 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_->set_allow_lazy(true);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(harmony_arrow_functions);
SET_ALLOW(harmony_sloppy);
SET_ALLOW(harmony_sloppy_let);
- SET_ALLOW(harmony_rest_parameters);
SET_ALLOW(harmony_default_parameters);
- SET_ALLOW(harmony_spread_calls);
- SET_ALLOW(harmony_destructuring);
- SET_ALLOW(harmony_spread_arrays);
- SET_ALLOW(harmony_new_target);
+ SET_ALLOW(harmony_destructuring_bind);
+ SET_ALLOW(harmony_destructuring_assignment);
SET_ALLOW(strong_mode);
+ SET_ALLOW(harmony_do_expressions);
+ SET_ALLOW(harmony_function_name);
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4774,8 +4745,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
BlockState block_state(&scope_, block_scope);
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ RaiseLanguageMode(STRICT);
scope_->SetScopeName(name);
VariableProxy* proxy = NULL;
@@ -4793,7 +4763,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
block_scope->set_start_position(scanner()->location().end_pos);
ExpressionClassifier classifier;
extends = ParseLeftHandSideExpression(&classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ extends = ParserTraits::RewriteNonPattern(extends, &classifier, CHECK_OK);
} else {
block_scope->set_start_position(scanner()->location().end_pos);
}
@@ -4815,10 +4785,12 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
bool is_computed_name = false; // Classes do not care about computed
// property names here.
ExpressionClassifier classifier;
+ const AstRawString* name = nullptr;
ObjectLiteral::Property* property = ParsePropertyDefinition(
&checker, in_class, has_extends, is_static, &is_computed_name,
- &has_seen_constructor, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ &has_seen_constructor, &classifier, &name, CHECK_OK);
+ property = ParserTraits::RewriteNonPatternObjectLiteralProperty(
+ property, &classifier, CHECK_OK);
if (has_seen_constructor && constructor == NULL) {
constructor = GetPropertyValue(property)->AsFunctionLiteral();
@@ -4828,6 +4800,10 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
}
if (fni_ != NULL) fni_->Infer();
+
+ if (allow_harmony_function_name()) {
+ SetFunctionNameFromPropertyName(property, name);
+ }
}
Expect(Token::RBRACE, CHECK_OK);
@@ -4838,15 +4814,13 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
block_scope->language_mode());
}
+ // Note that we do not finalize this block scope because strong
+ // mode uses it as a sentinel value indicating an anonymous class.
block_scope->set_end_position(end_pos);
if (name != NULL) {
DCHECK_NOT_NULL(proxy);
proxy->var()->set_initializer_position(end_pos);
- } else {
- // Unnamed classes should not have scopes (the scope will be empty).
- DCHECK_EQ(block_scope->num_var_or_const(), 0);
- block_scope = nullptr;
}
return factory()->NewClassLiteral(name, block_scope, proxy, extends,
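SetFunctionNameFromPropertyName, gated on allow_harmony_function_name above, covers the ES6 named-evaluation rules: function values pick up a .name from the property or binding they are assigned to. Observable behavior, assuming an engine with the feature:

    const obj = { greet() {} };
    console.log(obj.greet.name); // "greet": shorthand methods are named
    const fn = function () {};
    console.log(fn.name);        // "fn": inferred from the variable binding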
@@ -4867,7 +4841,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
ExpressionClassifier classifier;
ZoneList<Expression*>* args =
ParseArguments(&spread_pos, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ args = RewriteNonPatternArguments(args, &classifier, CHECK_OK);
DCHECK(!spread_pos.IsValid());
@@ -4929,8 +4903,7 @@ Literal* Parser::GetLiteralUndefined(int position) {
void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
Declaration* decl = scope->CheckConflictingVarDeclarations();
if (decl != NULL) {
- // In harmony mode we treat conflicting variable bindinds as early
- // errors. See ES5 16 for a definition of early errors.
+ // In ES6, conflicting variable bindings are early errors.
const AstRawString* name = decl->proxy()->raw_name();
int position = decl->proxy()->position();
Scanner::Location location = position == RelocInfo::kNoPosition
@@ -4943,6 +4916,31 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
}
+void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
+ // For each var-binding that shadows a parameter, insert an assignment
+ // initializing the variable with the parameter.
+ Scope* inner_scope = inner_block->scope();
+ DCHECK(inner_scope->is_declaration_scope());
+ Scope* function_scope = inner_scope->outer_scope();
+ DCHECK(function_scope->is_function_scope());
+ ZoneList<Declaration*>* decls = inner_scope->declarations();
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ if (decl->mode() != VAR || !decl->IsVariableDeclaration()) continue;
+ const AstRawString* name = decl->proxy()->raw_name();
+ Variable* parameter = function_scope->LookupLocal(name);
+ if (parameter == nullptr) continue;
+ VariableProxy* to = inner_scope->NewUnresolved(factory(), name);
+ VariableProxy* from = factory()->NewVariableProxy(parameter);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, to, from, RelocInfo::kNoPosition);
+ Statement* statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
+ inner_block->statements()->InsertAt(0, statement, zone());
+ }
+}
+
+
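This covers a corner of ES6 FunctionDeclarationInstantiation: with non-simple parameters, body-level var bindings live in an inner scope, and a var sharing a parameter's name must start out holding the parameter's value. A sketch (assuming the redeclaration is accepted, as it is in plain JavaScript):

    function g(x = 1) {     // default value makes the parameter list non-simple
      var x: number;        // shadows the parameter in the inner body scope
      console.log(x);       // 1: initialized from the parameter, per the code above
    }
    g();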
void Parser::InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok) {
// For each variable which is used as a function declaration in a sloppy
// block,
@@ -5059,917 +5057,8 @@ void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
- bool multiline, bool unicode, Isolate* isolate,
- Zone* zone)
- : isolate_(isolate),
- zone_(zone),
- error_(error),
- captures_(NULL),
- in_(in),
- current_(kEndMarker),
- next_pos_(0),
- capture_count_(0),
- has_more_(true),
- multiline_(multiline),
- unicode_(unicode),
- simple_(false),
- contains_anchor_(false),
- is_scanned_for_captures_(false),
- failed_(false) {
- Advance();
-}
-
-
-uc32 RegExpParser::Next() {
- if (has_next()) {
- return in()->Get(next_pos_);
- } else {
- return kEndMarker;
- }
-}
-
-
-void RegExpParser::Advance() {
- if (next_pos_ < in()->length()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
- ReportError(CStrVector(Isolate::kStackOverflowMessage));
- } else if (zone()->excess_allocation()) {
- ReportError(CStrVector("Regular expression too large"));
- } else {
- current_ = in()->Get(next_pos_);
- next_pos_++;
- }
- } else {
- current_ = kEndMarker;
- // Advance so that position() points to 1-after-the-last-character. This is
- // important so that Reset() to this position works correctly.
- next_pos_ = in()->length() + 1;
- has_more_ = false;
- }
-}
-
-
-void RegExpParser::Reset(int pos) {
- next_pos_ = pos;
- has_more_ = (pos < in()->length());
- Advance();
-}
-
-
-void RegExpParser::Advance(int dist) {
- next_pos_ += dist - 1;
- Advance();
-}
-
-
-bool RegExpParser::simple() {
- return simple_;
-}
-
-
-bool RegExpParser::IsSyntaxCharacter(uc32 c) {
- return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
- c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
- c == '{' || c == '}' || c == '|';
-}
-
-
-RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
- failed_ = true;
- *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
- // Zip to the end to make sure that no more input is read.
- current_ = kEndMarker;
- next_pos_ = in()->length();
- return NULL;
-}
-
-
-// Pattern ::
-// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
- RegExpTree* result = ParseDisjunction(CHECK_FAILED);
- DCHECK(!has_more());
- // If the result of parsing is a literal string atom, and it has the
- // same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
- simple_ = true;
- }
- return result;
-}
-
-
-// Disjunction ::
-// Alternative
-// Alternative | Disjunction
-// Alternative ::
-// [empty]
-// Term Alternative
-// Term ::
-// Assertion
-// Atom
-// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
- // Used to store current state while parsing subexpressions.
- RegExpParserState initial_state(NULL, INITIAL, 0, zone());
- RegExpParserState* stored_state = &initial_state;
- // Cache the builder in a local variable for quick access.
- RegExpBuilder* builder = initial_state.builder();
- while (true) {
- switch (current()) {
- case kEndMarker:
- if (stored_state->IsSubexpression()) {
- // Inside a parenthesized group when hitting end of input.
- ReportError(CStrVector("Unterminated group") CHECK_FAILED);
- }
- DCHECK_EQ(INITIAL, stored_state->group_type());
- // Parsing completed successfully.
- return builder->ToRegExp();
- case ')': {
- if (!stored_state->IsSubexpression()) {
- ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
- }
- DCHECK_NE(INITIAL, stored_state->group_type());
-
- Advance();
- // End disjunction parsing and convert builder content to new single
- // regexp atom.
- RegExpTree* body = builder->ToRegExp();
-
- int end_capture_index = captures_started();
-
- int capture_index = stored_state->capture_index();
- SubexpressionType group_type = stored_state->group_type();
-
- // Restore previous state.
- stored_state = stored_state->previous_state();
- builder = stored_state->builder();
-
- // Build result of subexpression.
- if (group_type == CAPTURE) {
- RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
- captures_->at(capture_index - 1) = capture;
- body = capture;
- } else if (group_type != GROUPING) {
- DCHECK(group_type == POSITIVE_LOOKAHEAD ||
- group_type == NEGATIVE_LOOKAHEAD);
- bool is_positive = (group_type == POSITIVE_LOOKAHEAD);
- body = new(zone()) RegExpLookahead(body,
- is_positive,
- end_capture_index - capture_index,
- capture_index);
- }
- builder->AddAtom(body);
- // For compatibility with JSC and ES3, we allow quantifiers after
- // lookaheads, and break in all cases.
- break;
- }
- case '|': {
- Advance();
- builder->NewAlternative();
- continue;
- }
- case '*':
- case '+':
- case '?':
- return ReportError(CStrVector("Nothing to repeat"));
- case '^': {
- Advance();
- if (multiline_) {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
- } else {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
- set_contains_anchor();
- }
- continue;
- }
- case '$': {
- Advance();
- RegExpAssertion::AssertionType assertion_type =
- multiline_ ? RegExpAssertion::END_OF_LINE :
- RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new(zone()) RegExpAssertion(assertion_type));
- continue;
- }
- case '.': {
- Advance();
- // everything except \x0a, \x0d, \u2028 and \u2029
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape('.', ranges, zone());
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '(': {
- SubexpressionType subexpr_type = CAPTURE;
- Advance();
- if (current() == '?') {
- switch (Next()) {
- case ':':
- subexpr_type = GROUPING;
- break;
- case '=':
- subexpr_type = POSITIVE_LOOKAHEAD;
- break;
- case '!':
- subexpr_type = NEGATIVE_LOOKAHEAD;
- break;
- default:
- ReportError(CStrVector("Invalid group") CHECK_FAILED);
- break;
- }
- Advance(2);
- } else {
- if (captures_ == NULL) {
- captures_ = new(zone()) ZoneList<RegExpCapture*>(2, zone());
- }
- if (captures_started() >= kMaxCaptures) {
- ReportError(CStrVector("Too many captures") CHECK_FAILED);
- }
- captures_->Add(NULL, zone());
- }
- // Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state, subexpr_type,
- captures_started(), zone());
- builder = stored_state->builder();
- continue;
- }
- case '[': {
- RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
- builder->AddAtom(atom);
- break;
- }
- // Atom ::
- // \ AtomEscape
- case '\\':
- switch (Next()) {
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- case 'b':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
- continue;
- case 'B':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
- continue;
- // AtomEscape ::
- // CharacterClassEscape
- //
- // CharacterClassEscape :: one of
- // d D s S w W
- case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
- uc32 c = Next();
- Advance(2);
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape(c, ranges, zone());
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '1': case '2': case '3': case '4': case '5': case '6':
- case '7': case '8': case '9': {
- int index = 0;
- if (ParseBackReferenceIndex(&index)) {
- RegExpCapture* capture = NULL;
- if (captures_ != NULL && index <= captures_->length()) {
- capture = captures_->at(index - 1);
- }
- if (capture == NULL) {
- builder->AddEmpty();
- break;
- }
- RegExpTree* atom = new(zone()) RegExpBackReference(capture);
- builder->AddAtom(atom);
- break;
- }
- uc32 first_digit = Next();
- if (first_digit == '8' || first_digit == '9') {
- // If the 'u' flag is present, only syntax characters can be escaped,
- // no other identity escapes are allowed. If the 'u' flag is not
- // present, all identity escapes are allowed.
- if (!FLAG_harmony_unicode_regexps || !unicode_) {
- builder->AddCharacter(first_digit);
- Advance(2);
- } else {
- return ReportError(CStrVector("Invalid escape"));
- }
- break;
- }
- }
- // FALLTHROUGH
- case '0': {
- Advance();
- uc32 octal = ParseOctalLiteral();
- builder->AddCharacter(octal);
- break;
- }
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance(2);
- builder->AddCharacter('\f');
- break;
- case 'n':
- Advance(2);
- builder->AddCharacter('\n');
- break;
- case 'r':
- Advance(2);
- builder->AddCharacter('\r');
- break;
- case 't':
- Advance(2);
- builder->AddCharacter('\t');
- break;
- case 'v':
- Advance(2);
- builder->AddCharacter('\v');
- break;
- case 'c': {
- Advance();
- uc32 controlLetter = Next();
- // Special case if it is an ASCII letter.
- // Convert lower case letters to uppercase.
- uc32 letter = controlLetter & ~('a' ^ 'A');
- if (letter < 'A' || 'Z' < letter) {
- // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
- // This is outside the specification. We match JSC in
- // reading the backslash as a literal character instead
- // of as starting an escape.
- builder->AddCharacter('\\');
- } else {
- Advance(2);
- builder->AddCharacter(controlLetter & 0x1f);
- }
- break;
- }
- case 'x': {
- Advance(2);
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- builder->AddCharacter(value);
- } else if (!FLAG_harmony_unicode_regexps || !unicode_) {
- builder->AddCharacter('x');
- } else {
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- return ReportError(CStrVector("Invalid escape"));
- }
- break;
- }
- case 'u': {
- Advance(2);
- uc32 value;
- if (ParseUnicodeEscape(&value)) {
- builder->AddCharacter(value);
- } else if (!FLAG_harmony_unicode_regexps || !unicode_) {
- builder->AddCharacter('u');
- } else {
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- return ReportError(CStrVector("Invalid unicode escape"));
- }
- break;
- }
- default:
- Advance();
- // If the 'u' flag is present, only syntax characters can be escaped, no
- // other identity escapes are allowed. If the 'u' flag is not present,
- // all identity escapes are allowed.
- if (!FLAG_harmony_unicode_regexps || !unicode_ ||
- IsSyntaxCharacter(current())) {
- builder->AddCharacter(current());
- Advance();
- } else {
- return ReportError(CStrVector("Invalid escape"));
- }
- break;
- }
- break;
- case '{': {
- int dummy;
- if (ParseIntervalQuantifier(&dummy, &dummy)) {
- ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
- }
- // fallthrough
- }
- default:
- builder->AddCharacter(current());
- Advance();
- break;
- } // end switch(current())
-
- int min;
- int max;
- switch (current()) {
- // QuantifierPrefix ::
- // *
- // +
- // ?
- // {
- case '*':
- min = 0;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '+':
- min = 1;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '?':
- min = 0;
- max = 1;
- Advance();
- break;
- case '{':
- if (ParseIntervalQuantifier(&min, &max)) {
- if (max < min) {
- ReportError(CStrVector("numbers out of order in {} quantifier.")
- CHECK_FAILED);
- }
- break;
- } else {
- continue;
- }
- default:
- continue;
- }
- RegExpQuantifier::QuantifierType quantifier_type = RegExpQuantifier::GREEDY;
- if (current() == '?') {
- quantifier_type = RegExpQuantifier::NON_GREEDY;
- Advance();
- } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
- // FLAG_regexp_possessive_quantifier is a debug-only flag.
- quantifier_type = RegExpQuantifier::POSSESSIVE;
- Advance();
- }
- builder->AddQuantifierToAtom(min, max, quantifier_type);
- }
-}
-
-
-#ifdef DEBUG
-// Currently only used in a DCHECK.
-static bool IsSpecialClassEscape(uc32 c) {
- switch (c) {
- case 'd': case 'D':
- case 's': case 'S':
- case 'w': case 'W':
- return true;
- default:
- return false;
- }
-}
-#endif
-
-
-// In order to know whether an escape is a backreference or not we have to scan
-// the entire regexp and find the number of capturing parentheses. However we
-// don't want to scan the regexp twice unless it is necessary. This mini-parser
-// is called when needed. It can see the difference between capturing and
-// noncapturing parentheses and can skip character classes and backslash-escaped
-// characters.
-void RegExpParser::ScanForCaptures() {
- // Start with captures started previous to current position
- int capture_count = captures_started();
- // Add count of captures after this position.
- int n;
- while ((n = current()) != kEndMarker) {
- Advance();
- switch (n) {
- case '\\':
- Advance();
- break;
- case '[': {
- int c;
- while ((c = current()) != kEndMarker) {
- Advance();
- if (c == '\\') {
- Advance();
- } else {
- if (c == ']') break;
- }
- }
- break;
- }
- case '(':
- if (current() != '?') capture_count++;
- break;
- }
- }
- capture_count_ = capture_count;
- is_scanned_for_captures_ = true;
-}
-
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
- DCHECK_EQ('\\', current());
- DCHECK('1' <= Next() && Next() <= '9');
- // Try to parse a decimal literal that is no greater than the total number
- // of left capturing parentheses in the input.
- int start = position();
- int value = Next() - '0';
- Advance(2);
- while (true) {
- uc32 c = current();
- if (IsDecimalDigit(c)) {
- value = 10 * value + (c - '0');
- if (value > kMaxCaptures) {
- Reset(start);
- return false;
- }
- Advance();
- } else {
- break;
- }
- }
- if (value > captures_started()) {
- if (!is_scanned_for_captures_) {
- int saved_position = position();
- ScanForCaptures();
- Reset(saved_position);
- }
- if (value > capture_count_) {
- Reset(start);
- return false;
- }
- }
- *index_out = value;
- return true;
-}
-
-
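Context for the two-phase scan: \1 is ambiguous until the pattern's total capture count is known. It is a back reference only if that many groups exist; otherwise it falls back to a legacy octal escape (Annex B), which is why ScanForCaptures may run ahead of the current parse position. From TypeScript:

    console.log(/(a)\1/.test("aa"));  // true: \1 back-references group 1
    console.log(/a\1/.test("a\x01")); // true: no group 1, so \1 reparses as octal 1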
-// QuantifierPrefix ::
-// { DecimalDigits }
-// { DecimalDigits , }
-// { DecimalDigits , DecimalDigits }
-//
-// Returns true if parsing succeeds, and set the min_out and max_out
-// values. Values are truncated to RegExpTree::kInfinity if they overflow.
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
- DCHECK_EQ(current(), '{');
- int start = position();
- Advance();
- int min = 0;
- if (!IsDecimalDigit(current())) {
- Reset(start);
- return false;
- }
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (min > (RegExpTree::kInfinity - next) / 10) {
- // Overflow. Skip past remaining decimal digits and return -1.
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- min = RegExpTree::kInfinity;
- break;
- }
- min = 10 * min + next;
- Advance();
- }
- int max = 0;
- if (current() == '}') {
- max = min;
- Advance();
- } else if (current() == ',') {
- Advance();
- if (current() == '}') {
- max = RegExpTree::kInfinity;
- Advance();
- } else {
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (max > (RegExpTree::kInfinity - next) / 10) {
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- max = RegExpTree::kInfinity;
- break;
- }
- max = 10 * max + next;
- Advance();
- }
- if (current() != '}') {
- Reset(start);
- return false;
- }
- Advance();
- }
- } else {
- Reset(start);
- return false;
- }
- *min_out = min;
- *max_out = max;
- return true;
-}
-
-
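The {min,max} grammar handled by ParseIntervalQuantifier, including the out-of-order error reported above, is observable directly (message wording varies by engine; V8's appears in the ReportError call in this code):

    console.log(/a{2,4}/.test("aaa")); // true: three 'a's fit in {2,4}
    try {
      new RegExp("a{4,2}");            // max < min
    } catch (e) {
      console.log((e as Error).message); // e.g. "numbers out of order in {} quantifier"
    }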
-uc32 RegExpParser::ParseOctalLiteral() {
- DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
- // For compatibility with some other browsers (not all), we parse
- // up to three octal digits with a value below 256.
- uc32 value = current() - '0';
- Advance();
- if ('0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- if (value < 32 && '0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- }
- }
- return value;
-}
-
-
-bool RegExpParser::ParseHexEscape(int length, uc32* value) {
- int start = position();
- uc32 val = 0;
- for (int i = 0; i < length; ++i) {
- uc32 c = current();
- int d = HexValue(c);
- if (d < 0) {
- Reset(start);
- return false;
- }
- val = val * 16 + d;
- Advance();
- }
- *value = val;
- return true;
-}
-
-
-bool RegExpParser::ParseUnicodeEscape(uc32* value) {
- // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
- // allowed). In the latter case, the number of hex digits between { } is
- // arbitrary. \ and u have already been read.
- if (current() == '{' && FLAG_harmony_unicode_regexps && unicode_) {
- int start = position();
- Advance();
- if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
- if (current() == '}') {
- Advance();
- return true;
- }
- }
- Reset(start);
- return false;
- }
- // \u but no {, or \u{...} escapes not allowed.
- return ParseHexEscape(4, value);
-}
-
-
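ParseUnicodeEscape accepts \u{...} only under the unicode flag (and FLAG_harmony_unicode_regexps); without /u, ParseHexEscape fails on the brace and the 'u' is taken literally, so \u{3} degrades to a quantified 'u'. Checkable in TypeScript:

    console.log(/\u{1F600}/u.test("\u{1F600}")); // true: code point escape under /u
    console.log(/\u{3}/.test("uuu"));            // true: literal 'u' with a {3} quantifier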
-bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
- uc32 x = 0;
- int d = HexValue(current());
- if (d < 0) {
- return false;
- }
- while (d >= 0) {
- x = x * 16 + d;
- if (x > max_value) {
- return false;
- }
- Advance();
- d = HexValue(current());
- }
- *value = x;
- return true;
-}
-
-
-uc32 RegExpParser::ParseClassCharacterEscape() {
- DCHECK(current() == '\\');
- DCHECK(has_next() && !IsSpecialClassEscape(Next()));
- Advance();
- switch (current()) {
- case 'b':
- Advance();
- return '\b';
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance();
- return '\f';
- case 'n':
- Advance();
- return '\n';
- case 'r':
- Advance();
- return '\r';
- case 't':
- Advance();
- return '\t';
- case 'v':
- Advance();
- return '\v';
- case 'c': {
- uc32 controlLetter = Next();
- uc32 letter = controlLetter & ~('A' ^ 'a');
- // For compatibility with JSC, inside a character class
- // we also accept digits and underscore as control characters.
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_' ||
- (letter >= 'A' && letter <= 'Z')) {
- Advance(2);
- // Control letters are mapped to ASCII control characters in the range
- // 0x00-0x1f.
- return controlLetter & 0x1f;
- }
- // We match JSC in reading the backslash as a literal
- // character instead of as starting an escape.
- return '\\';
- }
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7':
- // For compatibility, we interpret a decimal escape that isn't
- // a back reference (and therefore either \0 or not valid according
- // to the specification) as a 1..3 digit octal character code.
- return ParseOctalLiteral();
- case 'x': {
- Advance();
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- return value;
- }
- if (!FLAG_harmony_unicode_regexps || !unicode_) {
- // If \x is not followed by a two-digit hexadecimal, treat it
- // as an identity escape.
- return 'x';
- }
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- ReportError(CStrVector("Invalid escape"));
- return 0;
- }
- case 'u': {
- Advance();
- uc32 value;
- if (ParseUnicodeEscape(&value)) {
- return value;
- }
- if (!FLAG_harmony_unicode_regexps || !unicode_) {
- return 'u';
- }
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- ReportError(CStrVector("Invalid unicode escape"));
- return 0;
- }
- default: {
- uc32 result = current();
- // If the 'u' flag is present, only syntax characters can be escaped; no
- // other identity escapes are allowed. If the 'u' flag is not present, all
- // identity escapes are allowed.
- if (!FLAG_harmony_unicode_regexps || !unicode_ ||
- IsSyntaxCharacter(result)) {
- Advance();
- return result;
- }
- ReportError(CStrVector("Invalid escape"));
- return 0;
- }
- }
- return 0;
-}
-
-
-CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
- DCHECK_EQ(0, *char_class);
- uc32 first = current();
- if (first == '\\') {
- switch (Next()) {
- case 'w': case 'W': case 'd': case 'D': case 's': case 'S': {
- *char_class = Next();
- Advance(2);
- return CharacterRange::Singleton(0); // Return dummy value.
- }
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- default:
- uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
- return CharacterRange::Singleton(c);
- }
- } else {
- Advance();
- return CharacterRange::Singleton(first);
- }
-}
-
-
-static const uc16 kNoCharClass = 0;
-
-// Adds a range or a pre-defined character class to the character ranges.
-// If char_class is not kNoCharClass, it's interpreted as a class
-// escape (i.e., 's' means whitespace, from '\s').
-static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
- uc16 char_class,
- CharacterRange range,
- Zone* zone) {
- if (char_class != kNoCharClass) {
- CharacterRange::AddClassEscape(char_class, ranges, zone);
- } else {
- ranges->Add(range, zone);
- }
-}
-
-
-RegExpTree* RegExpParser::ParseCharacterClass() {
- static const char* kUnterminated = "Unterminated character class";
- static const char* kRangeOutOfOrder = "Range out of order in character class";
-
- DCHECK_EQ(current(), '[');
- Advance();
- bool is_negated = false;
- if (current() == '^') {
- is_negated = true;
- Advance();
- }
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- while (has_more() && current() != ']') {
- uc16 char_class = kNoCharClass;
- CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
- if (current() == '-') {
- Advance();
- if (current() == kEndMarker) {
- // If we reach the end we break out of the loop and let the
- // following code report an error.
- break;
- } else if (current() == ']') {
- AddRangeOrEscape(ranges, char_class, first, zone());
- ranges->Add(CharacterRange::Singleton('-'), zone());
- break;
- }
- uc16 char_class_2 = kNoCharClass;
- CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
- if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
- // Either end is an escaped character class. Treat the '-' verbatim.
- AddRangeOrEscape(ranges, char_class, first, zone());
- ranges->Add(CharacterRange::Singleton('-'), zone());
- AddRangeOrEscape(ranges, char_class_2, next, zone());
- continue;
- }
- if (first.from() > next.to()) {
- return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
- }
- ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
- } else {
- AddRangeOrEscape(ranges, char_class, first, zone());
- }
- }
- if (!has_more()) {
- return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
- }
- Advance();
- if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything(), zone());
- is_negated = !is_negated;
- }
- return new(zone()) RegExpCharacterClass(ranges, is_negated);
-}
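// The '-' handling above gives these interpretations (illustrative cases,
// derived from the code rather than quoted from the spec):
//   [a-c]   range 'a'..'c'
//   [a-]    'a' plus a literal '-' (a dash directly before ']' is literal)
//   [\d-x]  \d, literal '-', 'x' (a class escape cannot bound a range)
//   [c-a]   error: "Range out of order in character class"
//   []      no ranges, so Everything is added and the negation flipped:
//           it matches nothing, while [^] matches any character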
-
-
-// ----------------------------------------------------------------------------
// The Parser interface.
-bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, bool multiline,
- bool unicode, RegExpCompileData* result) {
- DCHECK(result != NULL);
- RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
- RegExpTree* tree = parser.ParsePattern();
- if (parser.failed()) {
- DCHECK(tree == NULL);
- DCHECK(!result->error.is_null());
- } else {
- DCHECK(tree != NULL);
- DCHECK(result->error.is_null());
- result->tree = tree;
- int capture_count = parser.captures_started();
- result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
- result->contains_anchor = parser.contains_anchor();
- result->capture_count = capture_count;
- }
- return !parser.failed();
-}
-
bool Parser::ParseStatic(ParseInfo* info) {
Parser parser(info);
@@ -6125,7 +5214,6 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
args->Add(factory()->NewSmiLiteral(hash_obj->value(), pos), zone());
- this->CheckPossibleEvalCall(tag, scope_);
Expression* call_site = factory()->NewCallRuntime(
Context::GET_TEMPLATE_CALL_SITE_INDEX, args, start);
@@ -6239,12 +5327,13 @@ Expression* Parser::SpreadCall(Expression* function,
int pos) {
if (function->IsSuperCallReference()) {
// Super calls
- // %reflect_construct(%GetPrototype(<this-function>), args, new.target))
+ // $super_constructor = %_GetSuperConstructor(<this-function>)
+ // %reflect_construct($super_constructor, args, new.target)
ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
- Expression* get_prototype =
- factory()->NewCallRuntime(Runtime::kGetPrototype, tmp, pos);
- args->InsertAt(0, get_prototype, zone());
+ Expression* super_constructor = factory()->NewCallRuntime(
+ Runtime::kInlineGetSuperConstructor, tmp, pos);
+ args->InsertAt(0, super_constructor, zone());
args->Add(function->AsSuperCallReference()->new_target_var(), zone());
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args,
pos);
@@ -6287,5 +5376,173 @@ Expression* Parser::SpreadCallNew(Expression* function,
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
}
+
+
+void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
+ v8::Isolate::UseCounterFeature feature;
+ if (is_sloppy(mode))
+ feature = v8::Isolate::kSloppyMode;
+ else if (is_strong(mode))
+ feature = v8::Isolate::kStrongMode;
+ else if (is_strict(mode))
+ feature = v8::Isolate::kStrictMode;
+ else
+ UNREACHABLE();
+ ++use_counts_[feature];
+ scope->SetLanguageMode(mode);
+}
+
+
+void Parser::RaiseLanguageMode(LanguageMode mode) {
+ SetLanguageMode(scope_,
+ static_cast<LanguageMode>(scope_->language_mode() | mode));
+}
+
+
+void ParserTraits::RewriteDestructuringAssignments() {
+ parser_->RewriteDestructuringAssignments();
+}
+
+
+Expression* ParserTraits::RewriteNonPattern(
+ Expression* expr, const ExpressionClassifier* classifier, bool* ok) {
+ return parser_->RewriteNonPattern(expr, classifier, ok);
+}
+
+
+ZoneList<Expression*>* ParserTraits::RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok) {
+ return parser_->RewriteNonPatternArguments(args, classifier, ok);
+}
+
+
+ObjectLiteralProperty* ParserTraits::RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok) {
+ return parser_->RewriteNonPatternObjectLiteralProperty(property, classifier,
+ ok);
+}
+
+
+Expression* Parser::RewriteNonPattern(Expression* expr,
+ const ExpressionClassifier* classifier,
+ bool* ok) {
+ // For the time being, this does no rewriting at all.
+ ValidateExpression(classifier, ok);
+ return expr;
+}
+
+
+ZoneList<Expression*>* Parser::RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok) {
+ // For the time being, this does no rewriting at all.
+ ValidateExpression(classifier, ok);
+ return args;
+}
+
+
+ObjectLiteralProperty* Parser::RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok) {
+ if (property != nullptr) {
+ Expression* key = RewriteNonPattern(property->key(), classifier, ok);
+ property->set_key(key);
+ Expression* value = RewriteNonPattern(property->value(), classifier, ok);
+ property->set_value(value);
+ }
+ return property;
+}
+
+
+void Parser::RewriteDestructuringAssignments() {
+ FunctionState* func = function_state_;
+ if (!allow_harmony_destructuring_assignment()) return;
+ const List<DestructuringAssignment>& assignments =
+ func->destructuring_assignments_to_rewrite();
+ for (int i = assignments.length() - 1; i >= 0; --i) {
+ // Rewrite list in reverse, so that nested assignment patterns are rewritten
+ // correctly.
+ DestructuringAssignment pair = assignments.at(i);
+ RewritableAssignmentExpression* to_rewrite =
+ pair.assignment->AsRewritableAssignmentExpression();
+ Scope* scope = pair.scope;
+ DCHECK_NOT_NULL(to_rewrite);
+ if (!to_rewrite->is_rewritten()) {
+ PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope);
+ }
+ }
+}
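// The reverse walk matters because inner assignment patterns finish parsing,
// and are queued, before the outer expression that contains them. Walking the
// queue backwards rewrites the outermost pattern first; that rewrite recurses
// into its nested patterns, so their own queue entries are later skipped by
// the is_rewritten() check. An assumed illustration:
//   [a, {b} = {}] = [0, {}];
//   queue order:   inner "{b} = {}" first, outer "[...] = [...]" second
//   rewrite order: outer first (also rewriting the inner pattern), then the
//                  inner entry is a no-op.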
+
+
+void ParserTraits::QueueDestructuringAssignmentForRewriting(Expression* expr) {
+ DCHECK(expr->IsRewritableAssignmentExpression());
+ parser_->function_state_->AddDestructuringAssignment(
+ Parser::DestructuringAssignment(expr, parser_->scope_));
+}
+
+
+void ParserTraits::SetFunctionNameFromPropertyName(
+ ObjectLiteralProperty* property, const AstRawString* name) {
+ Expression* value = property->value();
+ if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
+
+ // TODO(adamk): Support computed names.
+ if (property->is_computed_name()) return;
+ DCHECK_NOT_NULL(name);
+
+ // Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
+ // of an object literal.
+ if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
+
+ if (value->IsFunctionLiteral()) {
+ auto function = value->AsFunctionLiteral();
+ if (function->is_anonymous()) {
+ if (property->kind() == ObjectLiteralProperty::GETTER) {
+ function->set_raw_name(parser_->ast_value_factory()->NewConsString(
+ parser_->ast_value_factory()->get_space_string(), name));
+ } else if (property->kind() == ObjectLiteralProperty::SETTER) {
+ function->set_raw_name(parser_->ast_value_factory()->NewConsString(
+ parser_->ast_value_factory()->set_space_string(), name));
+ } else {
+ function->set_raw_name(name);
+ DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
+ }
+ }
+ } else {
+ DCHECK(value->IsClassLiteral());
+ DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
+ auto class_literal = value->AsClassLiteral();
+ if (class_literal->raw_name() == nullptr) {
+ class_literal->set_raw_name(name);
+ }
+ }
+}
+
+
+void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
+ Expression* identifier) {
+ if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
+ if (!identifier->IsVariableProxy()) return;
+
+ auto name = identifier->AsVariableProxy()->raw_name();
+ DCHECK_NOT_NULL(name);
+
+ if (value->IsFunctionLiteral()) {
+ auto function = value->AsFunctionLiteral();
+ if (function->is_anonymous()) {
+ function->set_raw_name(name);
+ }
+ } else {
+ DCHECK(value->IsClassLiteral());
+ auto class_literal = value->AsClassLiteral();
+ if (class_literal->raw_name() == nullptr) {
+ class_literal->set_raw_name(name);
+ }
+ }
+}
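// Both helpers implement ES2015 function-name inference for anonymous
// function and class literals; in JS terms the effect is roughly:
//   var f = function() {};         // f.name === "f"   (identifier-ref path)
//   var o = { g: function() {} };  // o.g.name === "g" (property-name path)
//   var p = { get x() {} };        // accessor reported as "get x"
// A named function expression keeps its own name; only anonymous literals
// are renamed, and computed property keys are skipped (see the TODO above).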
+
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/parser.h b/chromium/v8/src/parsing/parser.h
index cf4cdad66bb..7d50221334d 100644
--- a/chromium/v8/src/parser.h
+++ b/chromium/v8/src/parsing/parser.h
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSER_H_
-#define V8_PARSER_H_
+#ifndef V8_PARSING_PARSER_H_
+#define V8_PARSING_PARSER_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h" // TODO(titzer): remove this include dependency
+#include "src/parsing/parser-base.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparser.h"
#include "src/pending-compilation-error-handler.h"
-#include "src/preparse-data.h"
-#include "src/preparse-data-format.h"
-#include "src/preparser.h"
-#include "src/scopes.h"
namespace v8 {
@@ -151,6 +152,10 @@ class ParseInfo {
context_ = Handle<Context>(*context_);
}
+#ifdef DEBUG
+ bool script_is_native() { return script_->type() == Script::TYPE_NATIVE; }
+#endif // DEBUG
+
private:
// Various configuration flags for parsing.
enum Flag {
@@ -284,252 +289,6 @@ class ParseData {
};
// ----------------------------------------------------------------------------
-// REGEXP PARSING
-
-// A BufferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, optimized for the case of adding and removing a single
-// element. The last element added is stored outside the backing list, and if
-// no more than one element is ever added, the ZoneList isn't even allocated.
-// Elements must not be NULL pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(NULL), last_(NULL) {}
-
- // Adds an element at the end of the list. The element is buffered and can
- // be read via last() or removed via RemoveLast() until the next call to
- // Add(), RemoveLast(), or GetList().
- void Add(T* value, Zone* zone) {
- if (last_ != NULL) {
- if (list_ == NULL) {
- list_ = new(zone) ZoneList<T*>(initial_size, zone);
- }
- list_->Add(last_, zone);
- }
- last_ = value;
- }
-
- T* last() {
- DCHECK(last_ != NULL);
- return last_;
- }
-
- T* RemoveLast() {
- DCHECK(last_ != NULL);
- T* result = last_;
- if ((list_ != NULL) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = NULL;
- return result;
- }
-
- T* Get(int i) {
- DCHECK((0 <= i) && (i < length()));
- if (list_ == NULL) {
- DCHECK_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- DCHECK(last_ != NULL);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = NULL;
- last_ = NULL;
- }
-
- int length() {
- int length = (list_ == NULL) ? 0 : list_->length();
- return length + ((last_ == NULL) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == NULL) {
- list_ = new(zone) ZoneList<T*>(initial_size, zone);
- }
- if (last_ != NULL) {
- list_->Add(last_, zone);
- last_ = NULL;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
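// Usage sketch (assumed caller, not from the patch): a builder can Add() once
// and never pay for a ZoneList allocation; only a second Add() or GetList()
// forces the buffered element into the backing list.
//   BufferedZoneList<RegExpTree, 2> terms;
//   terms.Add(atom, zone);               // buffered in last_, no allocation
//   if (terms.length() == 1) {
//     RegExpTree* only = terms.RemoveLast();             // still no ZoneList
//   } else {
//     ZoneList<RegExpTree*>* all = terms.GetList(zone);  // flushes last_
//   }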
-
-
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder: public ZoneObject {
- public:
- explicit RegExpBuilder(Zone* zone);
- void AddCharacter(uc16 character);
- // "Adds" an empty expression. Does nothing except consume a
- // following quantifier.
- void AddEmpty();
- void AddAtom(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- void AddQuantifierToAtom(
- int min, int max, RegExpQuantifier::QuantifierType type);
- RegExpTree* ToRegExp();
-
- private:
- void FlushCharacters();
- void FlushText();
- void FlushTerms();
- Zone* zone() const { return zone_; }
-
- Zone* zone_;
- bool pending_empty_;
- ZoneList<uc16>* characters_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum {ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM} last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
-
-
-class RegExpParser BASE_EMBEDDED {
- public:
- RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
- bool unicode, Isolate* isolate, Zone* zone);
-
- static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
- bool multiline, bool unicode,
- RegExpCompileData* result);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
- RegExpTree* ParseCharacterClass();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handled specially.
- uc32 ParseClassCharacterEscape();
-
- // Checks whether the next characters form a hexadecimal number of exactly
- // 'length' digits, and sets *value if they do.
- bool ParseHexEscape(int length, uc32* value);
- bool ParseUnicodeEscape(uc32* value);
- bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
-
- uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful, it
- // stores the result in the output parameter and returns true. If it
- // fails, it pushes back the characters read so they can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- CharacterRange ParseClassAtom(uc16* char_class);
- RegExpTree* ReportError(Vector<const char> message);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_ == NULL ? 0 : captures_->length(); }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
-
- static bool IsSyntaxCharacter(uc32 c);
-
- static const int kMaxCaptures = 1 << 16;
- static const uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAHEAD,
- NEGATIVE_LOOKAHEAD,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- int disjunction_capture_index,
- Zone* zone)
- : previous_state_(previous_state),
- builder_(new(zone) RegExpBuilder(zone)),
- group_type_(group_type),
- disjunction_capture_index_(disjunction_capture_index) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != NULL; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() { return group_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() { return disjunction_capture_index_; }
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- SubexpressionType group_type_;
- // Stored disjunction's capture index (if any).
- int disjunction_capture_index_;
- };
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- uc32 Next();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- Isolate* isolate_;
- Zone* zone_;
- Handle<String>* error_;
- ZoneList<RegExpCapture*>* captures_;
- FlatStringReader* in_;
- uc32 current_;
- int next_pos_;
- // The capture count is only valid after we have scanned for captures.
- int capture_count_;
- bool has_more_;
- bool multiline_;
- bool unicode_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool failed_;
-};
-
-// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
class Parser;
@@ -539,12 +298,17 @@ class SingletonLogger;
struct ParserFormalParameters : FormalParametersBase {
struct Parameter {
Parameter(const AstRawString* name, Expression* pattern,
- Expression* initializer, bool is_rest)
- : name(name), pattern(pattern), initializer(initializer),
+ Expression* initializer, int initializer_end_position,
+ bool is_rest)
+ : name(name),
+ pattern(pattern),
+ initializer(initializer),
+ initializer_end_position(initializer_end_position),
is_rest(is_rest) {}
const AstRawString* name;
Expression* pattern;
Expression* initializer;
+ int initializer_end_position;
bool is_rest;
bool is_simple() const {
return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
@@ -652,11 +416,6 @@ class ParserTraits {
static void CheckAssigningFunctionLiteralToProperty(Expression* left,
Expression* right);
- // Keep track of eval() calls since they disable all local variable
- // optimizations. This checks if expression is an eval call, and if yes,
- // forwards the information to scope.
- void CheckPossibleEvalCall(Expression* expression, Scope* scope);
-
// Determine if the expression is a variable proxy and mark it as being used
// in an assignment or with a increment/decrement operator.
static Expression* MarkExpressionAsAssigned(Expression* expression);
@@ -763,7 +522,8 @@ class ParserTraits {
Scope* scope, AstNodeFactory* factory);
Expression* ExpressionFromString(int pos, Scanner* scanner,
AstNodeFactory* factory);
- Expression* GetIterator(Expression* iterable, AstNodeFactory* factory);
+ Expression* GetIterator(Expression* iterable, AstNodeFactory* factory,
+ int pos);
ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
}
@@ -781,9 +541,10 @@ class ParserTraits {
V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
FunctionKind kind = kNormalFunction);
- V8_INLINE void AddFormalParameter(
- ParserFormalParameters* parameters, Expression* pattern,
- Expression* initializer, bool is_rest);
+ V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
+ Expression* pattern,
+ Expression* initializer,
+ int initializer_end_position, bool is_rest);
V8_INLINE void DeclareFormalParameter(
Scope* scope, const ParserFormalParameters::Parameter& parameter,
ExpressionClassifier* classifier);
@@ -796,6 +557,8 @@ class ParserTraits {
const Scanner::Location& params_loc,
Scanner::Location* duplicate_loc, bool* ok);
+ V8_INLINE DoExpression* ParseDoExpression(bool* ok);
+
void ReindexLiterals(const ParserFormalParameters& parameters);
// Temporary glue; these functions will move to ParserBase.
@@ -874,6 +637,28 @@ class ParserTraits {
ZoneList<v8::internal::Expression*>* args,
int pos);
+ // Rewrite all DestructuringAssignments in the current FunctionState.
+ V8_INLINE void RewriteDestructuringAssignments();
+
+ V8_INLINE void QueueDestructuringAssignmentForRewriting(
+ Expression* assignment);
+
+ void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
+ const AstRawString* name);
+
+ void SetFunctionNameFromIdentifierRef(Expression* value,
+ Expression* identifier);
+
+ // Rewrite expressions that are not used as patterns
+ V8_INLINE Expression* RewriteNonPattern(
+ Expression* expr, const ExpressionClassifier* classifier, bool* ok);
+ V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok);
+ V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok);
+
private:
Parser* parser_;
};
@@ -925,7 +710,6 @@ class Parser : public ParserBase<ParserTraits> {
void SetCachedData(ParseInfo* info);
- bool inside_with() const { return scope_->inside_with(); }
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
}
@@ -936,10 +720,6 @@ class Parser : public ParserBase<ParserTraits> {
bool produce_cached_parse_data() const {
return compile_options_ == ScriptCompiler::kProduceParserCache;
}
- Scope* DeclarationScope(VariableMode mode) {
- return IsLexicalVariableMode(mode)
- ? scope_ : scope_->DeclarationScope();
- }
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
@@ -968,22 +748,22 @@ class Parser : public ParserBase<ParserTraits> {
bool* ok);
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
+ Block* ParseBlock(ZoneList<const AstRawString*>* labels,
+ bool finalize_block_scope, bool* ok);
Block* ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
bool* ok);
+ DoExpression* ParseDoExpression(bool* ok);
struct DeclarationDescriptor {
enum Kind { NORMAL, PARAMETER };
Parser* parser;
- Scope* declaration_scope;
Scope* scope;
Scope* hoist_scope;
VariableMode mode;
- bool is_const;
bool needs_init;
int declaration_pos;
int initialization_pos;
- Token::Value init_op;
Kind declaration_kind;
};
@@ -1007,7 +787,6 @@ class Parser : public ParserBase<ParserTraits> {
Block* BuildInitializationBlock(ZoneList<const AstRawString*>* names,
bool* ok);
- const AstRawString* SingleName() const;
DeclarationDescriptor descriptor;
List<Declaration> declarations;
@@ -1022,6 +801,13 @@ class Parser : public ParserBase<ParserTraits> {
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok);
+ static void RewriteDestructuringAssignment(
+ Parser* parser, RewritableAssignmentExpression* expr, Scope* scope);
+
+ static Expression* RewriteDestructuringAssignment(Parser* parser,
+ Assignment* assignment,
+ Scope* scope);
+
void set_initializer_position(int pos) { initializer_position_ = pos; }
private:
@@ -1031,30 +817,58 @@ class Parser : public ParserBase<ParserTraits> {
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- virtual void Visit(AstNode* node) override;
+ void Visit(AstNode* node) override;
+
+ enum PatternContext {
+ BINDING,
+ INITIALIZER,
+ ASSIGNMENT,
+ ASSIGNMENT_INITIALIZER
+ };
+
+ PatternContext context() const { return context_; }
+ void set_context(PatternContext context) { context_ = context; }
void RecurseIntoSubpattern(AstNode* pattern, Expression* value) {
Expression* old_value = current_value_;
current_value_ = value;
+ recursion_level_++;
pattern->Accept(this);
+ recursion_level_--;
current_value_ = old_value;
}
+ void VisitObjectLiteral(ObjectLiteral* node, Variable** temp_var);
+ void VisitArrayLiteral(ArrayLiteral* node, Variable** temp_var);
+
+ bool IsBindingContext() const { return IsBindingContext(context_); }
+ bool IsInitializerContext() const { return context_ != ASSIGNMENT; }
+ bool IsAssignmentContext() const { return IsAssignmentContext(context_); }
+ bool IsAssignmentContext(PatternContext c) const;
+ bool IsBindingContext(PatternContext c) const;
+ bool IsSubPattern() const { return recursion_level_ > 1; }
+ PatternContext SetAssignmentContextIfNeeded(Expression* node);
+ PatternContext SetInitializerContextIfNeeded(Expression* node);
+
Variable* CreateTempVar(Expression* value = nullptr);
- AstNodeFactory* factory() const { return descriptor_->parser->factory(); }
+ AstNodeFactory* factory() const { return parser_->factory(); }
AstValueFactory* ast_value_factory() const {
- return descriptor_->parser->ast_value_factory();
+ return parser_->ast_value_factory();
}
- bool inside_with() const { return descriptor_->parser->inside_with(); }
- Zone* zone() const { return descriptor_->parser->zone(); }
+ Zone* zone() const { return parser_->zone(); }
+ Scope* scope() const { return scope_; }
+ Scope* scope_;
+ Parser* parser_;
+ PatternContext context_;
Expression* pattern_;
int initializer_position_;
Block* block_;
const DeclarationDescriptor* descriptor_;
ZoneList<const AstRawString*>* names_;
Expression* current_value_;
+ int recursion_level_;
bool* ok_;
};
@@ -1085,25 +899,23 @@ class Parser : public ParserBase<ParserTraits> {
TryStatement* ParseTryStatement(bool* ok);
DebuggerStatement* ParseDebuggerStatement(bool* ok);
- // Support for harmony block scoped bindings.
- Block* ParseScopedBlock(ZoneList<const AstRawString*>* labels, bool* ok);
-
- // !%_IsSpecObject(result = iterator.next()) &&
+ // !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
int pos);
// Initialize the components of a for-in / for-of statement.
- void InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each,
- Expression* subject,
- Statement* body);
+ void InitializeForEachStatement(ForEachStatement* stmt, Expression* each,
+ Expression* subject, Statement* body,
+ bool is_destructuring);
Statement* DesugarLexicalBindingsInForStatement(
Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok);
+ void RewriteDoExpression(Expression* expr, bool* ok);
+
FunctionLiteral* ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
@@ -1123,8 +935,8 @@ class Parser : public ParserBase<ParserTraits> {
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
- // For harmony block scoping mode: Check if the scope has conflicting var/let
- // declarations from different scopes. It covers for example
+ // Check if the scope has conflicting var/let declarations from different
+ // scopes. This covers for example
//
// function f() { { { var x; } let x; } }
// function g() { { var x; let x; } }
@@ -1134,6 +946,10 @@ class Parser : public ParserBase<ParserTraits> {
// hoisted over such a scope.
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+ // Insert initializer statements for var-bindings shadowing parameter bindings
+ // from a non-simple parameter list.
+ void InsertShadowingVarBindingInitializers(Block* block);
+
// Implement sloppy block-scoped functions, ES2015 Annex B 3.3
void InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok);
@@ -1147,7 +963,6 @@ class Parser : public ParserBase<ParserTraits> {
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
- void AddAssertIsConstruct(ZoneList<Statement*>* body, int pos);
Statement* BuildAssertIsCoercible(Variable* var);
// Factory methods.
@@ -1193,6 +1008,23 @@ class Parser : public ParserBase<ParserTraits> {
Expression* SpreadCallNew(Expression* function,
ZoneList<v8::internal::Expression*>* args, int pos);
+ void SetLanguageMode(Scope* scope, LanguageMode mode);
+ void RaiseLanguageMode(LanguageMode mode);
+
+ V8_INLINE void RewriteDestructuringAssignments();
+
+ V8_INLINE Expression* RewriteNonPattern(
+ Expression* expr, const ExpressionClassifier* classifier, bool* ok);
+ V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok);
+ V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok);
+
+ friend class InitializerRewriter;
+ void RewriteParameterInitializer(Expression* expr, Scope* scope);
+
Scanner scanner_;
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
@@ -1244,6 +1076,7 @@ ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
function_type, ok);
}
+
void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
bool* ok) {
parser_->CheckConflictingVarDeclarations(scope, ok);
@@ -1320,19 +1153,18 @@ Expression* ParserTraits::SpreadCallNew(
}
-void ParserTraits::AddFormalParameter(
- ParserFormalParameters* parameters,
- Expression* pattern, Expression* initializer, bool is_rest) {
- bool is_simple =
- !is_rest && pattern->IsVariableProxy() && initializer == nullptr;
- DCHECK(parser_->allow_harmony_destructuring() ||
- parser_->allow_harmony_rest_parameters() ||
- parser_->allow_harmony_default_parameters() || is_simple);
+void ParserTraits::AddFormalParameter(ParserFormalParameters* parameters,
+ Expression* pattern,
+ Expression* initializer,
+ int initializer_end_position,
+ bool is_rest) {
+ bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
const AstRawString* name = is_simple
? pattern->AsVariableProxy()->raw_name()
: parser_->ast_value_factory()->empty_string();
parameters->params.Add(
- ParserFormalParameters::Parameter(name, pattern, initializer, is_rest),
+ ParserFormalParameters::Parameter(name, pattern, initializer,
+ initializer_end_position, is_rest),
parameters->scope->zone());
}
@@ -1342,8 +1174,10 @@ void ParserTraits::DeclareFormalParameter(
ExpressionClassifier* classifier) {
bool is_duplicate = false;
bool is_simple = classifier->is_simple_parameter_list();
- auto name = parameter.name;
- auto mode = is_simple ? VAR : TEMPORARY;
+ auto name = is_simple || parameter.is_rest
+ ? parameter.name
+ : parser_->ast_value_factory()->empty_string();
+ auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
if (!is_simple) scope->SetHasNonSimpleParameters();
bool is_optional = parameter.initializer != nullptr;
Variable* var = scope->DeclareParameter(
@@ -1373,6 +1207,14 @@ void ParserTraits::AddParameterInitializationBlock(
}
}
}
-} } // namespace v8::internal
-#endif // V8_PARSER_H_
+
+DoExpression* ParserTraits::ParseDoExpression(bool* ok) {
+ return parser_->ParseDoExpression(ok);
+}
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PARSER_H_
diff --git a/chromium/v8/src/pattern-rewriter.cc b/chromium/v8/src/parsing/pattern-rewriter.cc
index e4c602aa488..6e20282785d 100644
--- a/chromium/v8/src/pattern-rewriter.cc
+++ b/chromium/v8/src/parsing/pattern-rewriter.cc
@@ -2,35 +2,129 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/messages.h"
-#include "src/parser.h"
+#include "src/parsing/parameter-initializer-rewriter.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
-
void Parser::PatternRewriter::DeclareAndInitializeVariables(
Block* block, const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok) {
PatternRewriter rewriter;
+ rewriter.scope_ = declaration_descriptor->scope;
+ rewriter.parser_ = declaration_descriptor->parser;
+ rewriter.context_ = BINDING;
rewriter.pattern_ = declaration->pattern;
rewriter.initializer_position_ = declaration->initializer_position;
rewriter.block_ = block;
rewriter.descriptor_ = declaration_descriptor;
rewriter.names_ = names;
rewriter.ok_ = ok;
+ rewriter.recursion_level_ = 0;
rewriter.RecurseIntoSubpattern(rewriter.pattern_, declaration->initializer);
}
+void Parser::PatternRewriter::RewriteDestructuringAssignment(
+ Parser* parser, RewritableAssignmentExpression* to_rewrite, Scope* scope) {
+ PatternRewriter rewriter;
+
+ DCHECK(!to_rewrite->is_rewritten());
+
+ bool ok = true;
+ rewriter.scope_ = scope;
+ rewriter.parser_ = parser;
+ rewriter.context_ = ASSIGNMENT;
+ rewriter.pattern_ = to_rewrite;
+ rewriter.block_ = nullptr;
+ rewriter.descriptor_ = nullptr;
+ rewriter.names_ = nullptr;
+ rewriter.ok_ = &ok;
+ rewriter.recursion_level_ = 0;
+
+ rewriter.RecurseIntoSubpattern(rewriter.pattern_, nullptr);
+ DCHECK(ok);
+}
+
+
+Expression* Parser::PatternRewriter::RewriteDestructuringAssignment(
+ Parser* parser, Assignment* assignment, Scope* scope) {
+ DCHECK_NOT_NULL(assignment);
+ DCHECK_EQ(Token::ASSIGN, assignment->op());
+ auto to_rewrite =
+ parser->factory()->NewRewritableAssignmentExpression(assignment);
+ RewriteDestructuringAssignment(parser, to_rewrite, scope);
+ return to_rewrite->expression();
+}
+
+
+bool Parser::PatternRewriter::IsAssignmentContext(PatternContext c) const {
+ return c == ASSIGNMENT || c == ASSIGNMENT_INITIALIZER;
+}
+
+
+bool Parser::PatternRewriter::IsBindingContext(PatternContext c) const {
+ return c == BINDING || c == INITIALIZER;
+}
+
+
+Parser::PatternRewriter::PatternContext
+Parser::PatternRewriter::SetAssignmentContextIfNeeded(Expression* node) {
+ PatternContext old_context = context();
+ if (node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN) {
+ set_context(ASSIGNMENT);
+ }
+ return old_context;
+}
+
+
+Parser::PatternRewriter::PatternContext
+Parser::PatternRewriter::SetInitializerContextIfNeeded(Expression* node) {
+ // Set appropriate initializer context for BindingElement and
+ // AssignmentElement nodes.
+ PatternContext old_context = context();
+ bool is_destructuring_assignment =
+ node->IsRewritableAssignmentExpression() &&
+ !node->AsRewritableAssignmentExpression()->is_rewritten();
+ bool is_assignment =
+ node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN;
+ if (is_destructuring_assignment || is_assignment) {
+ switch (old_context) {
+ case BINDING:
+ set_context(INITIALIZER);
+ break;
+ case ASSIGNMENT:
+ set_context(ASSIGNMENT_INITIALIZER);
+ break;
+ default:
+ break;
+ }
+ }
+ return old_context;
+}
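// Taken together, the two helpers form a small state machine over
// PatternContext (summary derived from the code above):
//   SetAssignmentContextIfNeeded:  any context -> ASSIGNMENT on a plain
//                                  lhs = rhs node
//   SetInitializerContextIfNeeded: BINDING    -> INITIALIZER
//                                  ASSIGNMENT -> ASSIGNMENT_INITIALIZER
// Other contexts pass through unchanged, and callers restore the previous
// context afterwards via set_context(old_context).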
+
+
void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
Expression* value = current_value_;
- descriptor_->scope->RemoveUnresolved(pattern->AsVariableProxy());
+
+ if (IsAssignmentContext()) {
+ // In an assignment context, simply perform the assignment.
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, pattern, value, pattern->position());
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, pattern->position()),
+ zone());
+ return;
+ }
+
+ descriptor_->scope->RemoveUnresolved(pattern);
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
@@ -47,15 +141,14 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// For let/const declarations in harmony mode, we can also immediately
// pre-resolve the proxy because it resides in the same scope as the
// declaration.
- Parser* parser = descriptor_->parser;
const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy = parser->NewUnresolved(name, descriptor_->mode);
+ VariableProxy* proxy = parser_->NewUnresolved(name, descriptor_->mode);
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, descriptor_->mode, descriptor_->scope,
descriptor_->declaration_pos);
- Variable* var = parser->Declare(declaration, descriptor_->declaration_kind,
- descriptor_->mode != VAR, ok_,
- descriptor_->hoist_scope);
+ Variable* var =
+ parser_->Declare(declaration, descriptor_->declaration_kind,
+ descriptor_->mode != VAR, ok_, descriptor_->hoist_scope);
if (!*ok_) return;
DCHECK_NOT_NULL(var);
DCHECK(!proxy->is_resolved() || proxy->var() == var);
@@ -63,9 +156,11 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK(initializer_position_ != RelocInfo::kNoPosition);
- if (descriptor_->declaration_scope->num_var_or_const() >
- kMaxNumFunctionLocals) {
- parser->ReportMessage(MessageTemplate::kTooManyVariables);
+ Scope* declaration_scope = IsLexicalVariableMode(descriptor_->mode)
+ ? descriptor_->scope
+ : descriptor_->scope->DeclarationScope();
+ if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
+ parser_->ReportMessage(MessageTemplate::kTooManyVariables);
*ok_ = false;
return;
}
@@ -99,8 +194,8 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// The "variable" c initialized to x is the same as the declared
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = descriptor_->is_const
- ? descriptor_->declaration_scope
+ Scope* initialization_scope = IsImmutableVariableMode(descriptor_->mode)
+ ? declaration_scope
: descriptor_->scope;
@@ -134,7 +229,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
zone());
CallRuntime* initialize;
- if (descriptor_->is_const) {
+ if (IsImmutableVariableMode(descriptor_->mode)) {
arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
@@ -157,7 +252,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// we're in a with. The initialization value should not
// necessarily be stored in the global object in that case,
// which is why we need to generate a separate assignment node.
- if (value != NULL && !inside_with()) {
+ if (value != NULL && !descriptor_->scope->inside_with()) {
arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
// Construct the call to Runtime_InitializeVarGlobal
@@ -171,11 +266,11 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
}
if (initialize != NULL) {
- block_->AddStatement(
+ block_->statements()->Add(
factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
zone());
}
- } else if (value != nullptr && (descriptor_->needs_init ||
+ } else if (value != nullptr && (descriptor_->mode == CONST_LEGACY ||
IsLexicalVariableMode(descriptor_->mode))) {
// Constant initializations always assign to the declared constant which
// is always at the function scope level. This is only relevant for
@@ -187,11 +282,12 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK_NOT_NULL(proxy);
DCHECK_NOT_NULL(proxy->var());
DCHECK_NOT_NULL(value);
- Assignment* assignment = factory()->NewAssignment(
- descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
- block_->AddStatement(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
+ // Add break location for destructured sub-pattern.
+ int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, pos), zone());
value = NULL;
}
@@ -203,24 +299,24 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// if they are inside a 'with' statement - they may change a 'with' object
// property).
VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
- Assignment* assignment = factory()->NewAssignment(
- descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
- block_->AddStatement(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
+ // Add break location for destructured sub-pattern.
+ int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, pos), zone());
}
}
Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
- auto temp = descriptor_->parser->scope_->NewTemporary(
- ast_value_factory()->empty_string());
+ auto temp = scope()->NewTemporary(ast_value_factory()->empty_string());
if (value != nullptr) {
auto assignment = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), value,
RelocInfo::kNoPosition);
- block_->AddStatement(
+ block_->statements()->Add(
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
zone());
}
@@ -228,24 +324,98 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
}
-void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
- auto temp = CreateTempVar(current_value_);
+void Parser::PatternRewriter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ if (!IsAssignmentContext()) {
+ // Mark the assignment as rewritten to prevent redundant rewriting, and
+ // perform BindingPattern rewriting.
+ DCHECK(!node->is_rewritten());
+ node->Rewrite(node->expression());
+ return node->expression()->Accept(this);
+ }
- block_->AddStatement(descriptor_->parser->BuildAssertIsCoercible(temp),
- zone());
+ if (node->is_rewritten()) return;
+ DCHECK(IsAssignmentContext());
+ Assignment* assign = node->expression()->AsAssignment();
+ DCHECK_NOT_NULL(assign);
+ DCHECK_EQ(Token::ASSIGN, assign->op());
+
+ auto initializer = assign->value();
+ auto value = initializer;
+
+ if (IsInitializerContext()) {
+ // let {<pattern> = <init>} = <value>
+ // becomes
+ // temp = <value>;
+ // <pattern> = temp === undefined ? <init> : temp;
+ auto temp_var = CreateTempVar(current_value_);
+ Expression* is_undefined = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(temp_var),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ value = factory()->NewConditional(is_undefined, initializer,
+ factory()->NewVariableProxy(temp_var),
+ RelocInfo::kNoPosition);
+ }
+
+ PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
+ int pos = assign->position();
+ Block* old_block = block_;
+ block_ = factory()->NewBlock(nullptr, 8, false, pos);
+ Variable* temp = nullptr;
+ Expression* pattern = assign->target();
+ Expression* old_value = current_value_;
+ current_value_ = value;
+ if (pattern->IsObjectLiteral()) {
+ VisitObjectLiteral(pattern->AsObjectLiteral(), &temp);
+ } else {
+ DCHECK(pattern->IsArrayLiteral());
+ VisitArrayLiteral(pattern->AsArrayLiteral(), &temp);
+ }
+ DCHECK_NOT_NULL(temp);
+ current_value_ = old_value;
+ Expression* expr = factory()->NewDoExpression(block_, temp, pos);
+ node->Rewrite(expr);
+ block_ = old_block;
+ if (block_) {
+ block_->statements()->Add(factory()->NewExpressionStatement(expr, pos),
+ zone());
+ }
+ return set_context(old_context);
+}
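// In the assignment path the rewritten pattern is wrapped in a DoExpression
// whose completion value is the temp holding the right-hand side, so the
// whole assignment expression still evaluates to its RHS. Shape produced
// (simplified sketch):
//   [a, b] = rhs   ~>   do {
//     temp = rhs;
//     /* iterate temp, assigning a and b */
//     temp   // completion value: the RHS
//   }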
+
+
+void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
+ Variable** temp_var) {
+ auto temp = *temp_var = CreateTempVar(current_value_);
+
+ block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
for (ObjectLiteralProperty* property : *pattern->properties()) {
+ PatternContext context = SetInitializerContextIfNeeded(property->value());
RecurseIntoSubpattern(
property->value(),
factory()->NewProperty(factory()->NewVariableProxy(temp),
property->key(), RelocInfo::kNoPosition));
+ set_context(context);
}
}
-void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
- auto iterator = CreateTempVar(
- descriptor_->parser->GetIterator(current_value_, factory()));
+void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* node) {
+ Variable* temp_var = nullptr;
+ VisitObjectLiteral(node, &temp_var);
+}
+
+
+void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
+ Variable** temp_var) {
+ auto temp = *temp_var = CreateTempVar(current_value_);
+
+ block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
+
+ auto iterator = CreateTempVar(parser_->GetIterator(
+ factory()->NewVariableProxy(temp), factory(), RelocInfo::kNoPosition));
auto done = CreateTempVar(
factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
auto result = CreateTempVar();
@@ -258,18 +428,19 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
break;
}
+ PatternContext context = SetInitializerContextIfNeeded(value);
// if (!done) {
// result = IteratorNext(iterator);
// v = (done = result.done) ? undefined : result.value;
// }
auto next_block =
factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
- next_block->AddStatement(factory()->NewExpressionStatement(
- descriptor_->parser->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator),
- result, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
+ next_block->statements()->Add(factory()->NewExpressionStatement(
+ parser_->BuildIteratorNextResult(
+ factory()->NewVariableProxy(iterator),
+ result, RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
auto assign_to_done = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(done),
@@ -287,7 +458,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
- next_block->AddStatement(
+ next_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewAssignment(Token::ASSIGN,
factory()->NewVariableProxy(v), next_value,
@@ -301,11 +472,12 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
RelocInfo::kNoPosition),
next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
- block_->AddStatement(if_statement, zone());
+ block_->statements()->Add(if_statement, zone());
if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
}
+ set_context(context);
}
if (spread != nullptr) {
@@ -316,7 +488,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
empty_exprs,
// Reuse pattern's literal index - it is unused since there is no
// actual literal allocated.
- node->literal_index(), is_strong(descriptor_->parser->language_mode()),
+ node->literal_index(), is_strong(scope()->language_mode()),
RelocInfo::kNoPosition));
auto arguments = new (zone()) ZoneList<Expression*>(2, zone());
@@ -334,8 +506,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
RelocInfo::kNoPosition),
factory()->NewEmptyStatement(RelocInfo::kNoPosition),
RelocInfo::kNoPosition);
- block_->AddStatement(if_statement, zone());
-
+ block_->statements()->Add(if_statement, zone());
RecurseIntoSubpattern(spread->expression(),
factory()->NewVariableProxy(array));
@@ -343,31 +514,58 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
}
+void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
+ Variable* temp_var = nullptr;
+ VisitArrayLiteral(node, &temp_var);
+}
+
+
void Parser::PatternRewriter::VisitAssignment(Assignment* node) {
// let {<pattern> = <init>} = <value>
// becomes
// temp = <value>;
// <pattern> = temp === undefined ? <init> : temp;
- DCHECK(node->op() == Token::ASSIGN);
+ DCHECK_EQ(Token::ASSIGN, node->op());
+
+ auto initializer = node->value();
+ auto value = initializer;
auto temp = CreateTempVar(current_value_);
- Expression* is_undefined = factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(temp),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
- Expression* value = factory()->NewConditional(
- is_undefined, node->value(), factory()->NewVariableProxy(temp),
- RelocInfo::kNoPosition);
+
+ if (IsInitializerContext()) {
+ Expression* is_undefined = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(temp),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ value = factory()->NewConditional(is_undefined, initializer,
+ factory()->NewVariableProxy(temp),
+ RelocInfo::kNoPosition);
+ }
+
+ if (IsBindingContext() &&
+ descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
+ scope()->is_arrow_scope()) {
+ RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
+ scope()->outer_scope(), scope());
+ }
+
+ PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
RecurseIntoSubpattern(node->target(), value);
+ set_context(old_context);
}
-void Parser::PatternRewriter::VisitSpread(Spread* node) {
- UNREACHABLE();
-}
+// =============== AssignmentPattern only ==================
+
+void Parser::PatternRewriter::VisitProperty(v8::internal::Property* node) {
+ DCHECK(IsAssignmentContext());
+ auto value = current_value_;
+ Assignment* assignment =
+ factory()->NewAssignment(Token::ASSIGN, node, value, node->position());
-void Parser::PatternRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
- UNREACHABLE();
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
}
@@ -393,9 +591,10 @@ NOT_A_PATTERN(Conditional)
NOT_A_PATTERN(ContinueStatement)
NOT_A_PATTERN(CountOperation)
NOT_A_PATTERN(DebuggerStatement)
+NOT_A_PATTERN(DoExpression)
NOT_A_PATTERN(DoWhileStatement)
NOT_A_PATTERN(EmptyStatement)
-NOT_A_PATTERN(SloppyBlockFunctionStatement)
+NOT_A_PATTERN(EmptyParentheses)
NOT_A_PATTERN(ExportDeclaration)
NOT_A_PATTERN(ExpressionStatement)
NOT_A_PATTERN(ForInStatement)
@@ -407,9 +606,10 @@ NOT_A_PATTERN(IfStatement)
NOT_A_PATTERN(ImportDeclaration)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
-NOT_A_PATTERN(Property)
NOT_A_PATTERN(RegExpLiteral)
NOT_A_PATTERN(ReturnStatement)
+NOT_A_PATTERN(SloppyBlockFunctionStatement)
+NOT_A_PATTERN(Spread)
NOT_A_PATTERN(SuperPropertyReference)
NOT_A_PATTERN(SuperCallReference)
NOT_A_PATTERN(SwitchStatement)
diff --git a/chromium/v8/src/preparse-data-format.h b/chromium/v8/src/parsing/preparse-data-format.h
index 560693f67eb..f7d9f68ccea 100644
--- a/chromium/v8/src/preparse-data-format.h
+++ b/chromium/v8/src/parsing/preparse-data-format.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PREPARSE_DATA_FORMAT_H_
-#define V8_PREPARSE_DATA_FORMAT_H_
+#ifndef V8_PARSING_PREPARSE_DATA_FORMAT_H_
+#define V8_PARSING_PREPARSE_DATA_FORMAT_H_
namespace v8 {
namespace internal {
@@ -35,6 +35,7 @@ struct PreparseDataConstants {
};
-} } // namespace v8::internal.
+} // namespace internal
+} // namespace v8.
-#endif // V8_PREPARSE_DATA_FORMAT_H_
+#endif // V8_PARSING_PREPARSE_DATA_FORMAT_H_
diff --git a/chromium/v8/src/preparse-data.cc b/chromium/v8/src/parsing/preparse-data.cc
index ffbfbab633b..d02cd63d669 100644
--- a/chromium/v8/src/preparse-data.cc
+++ b/chromium/v8/src/parsing/preparse-data.cc
@@ -5,9 +5,9 @@
#include "src/base/logging.h"
#include "src/globals.h"
#include "src/hashmap.h"
-#include "src/parser.h"
-#include "src/preparse-data.h"
-#include "src/preparse-data-format.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparse-data-format.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/preparse-data.h b/chromium/v8/src/parsing/preparse-data.h
index f7ed1ed91ad..dbe1022d1e1 100644
--- a/chromium/v8/src/preparse-data.h
+++ b/chromium/v8/src/parsing/preparse-data.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PREPARSE_DATA_H_
-#define V8_PREPARSE_DATA_H_
+#ifndef V8_PARSING_PREPARSE_DATA_H_
+#define V8_PARSING_PREPARSE_DATA_H_
#include "src/allocation.h"
#include "src/hashmap.h"
#include "src/messages.h"
-#include "src/preparse-data-format.h"
+#include "src/parsing/preparse-data-format.h"
namespace v8 {
namespace internal {
@@ -206,6 +206,7 @@ class CompleteParserRecorder : public ParserRecorder {
};
-} } // namespace v8::internal.
+} // namespace internal
+} // namespace v8.
-#endif // V8_PREPARSE_DATA_H_
+#endif // V8_PARSING_PREPARSE_DATA_H_
diff --git a/chromium/v8/src/preparser.cc b/chromium/v8/src/parsing/preparser.cc
index 1bdcb85db79..64511acc39a 100644
--- a/chromium/v8/src/preparser.cc
+++ b/chromium/v8/src/parsing/preparser.cc
@@ -11,9 +11,10 @@
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/preparse-data.h"
-#include "src/preparse-data-format.h"
-#include "src/preparser.h"
+#include "src/parsing/parser-base.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparser.h"
#include "src/unicode.h"
#include "src/utils.h"
@@ -111,8 +112,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
FunctionState top_state(&function_state_, &scope_, top_scope, kNormalFunction,
&top_factory);
scope_->SetLanguageMode(language_mode);
- Scope* function_scope = NewScope(
- scope_, IsArrowFunction(kind) ? ARROW_SCOPE : FUNCTION_SCOPE, kind);
+ Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE, kind);
if (!has_simple_parameters) function_scope->SetHasNonSimpleParameters();
PreParserFactory function_factory(NULL);
FunctionState function_state(&function_state_, &scope_, function_scope, kind,
@@ -451,8 +451,8 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- pos, FunctionLiteral::DECLARATION,
- FunctionLiteral::NORMAL_ARITY, language_mode(),
+ pos, FunctionLiteral::kDeclaration,
+ FunctionLiteral::kNormalArity, language_mode(),
CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -478,19 +478,12 @@ PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Block ::
- // '{' Statement* '}'
+ // '{' StatementList '}'
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
Expect(Token::LBRACE, CHECK_OK);
Statement final = Statement::Default();
while (peek() != Token::RBRACE) {
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
- final = ParseStatementListItem(CHECK_OK);
- } else {
- final = ParseStatement(CHECK_OK);
- }
+ final = ParseStatementListItem(CHECK_OK);
}
Expect(Token::RBRACE, ok);
return final;
@@ -503,8 +496,8 @@ PreParser::Statement PreParser::ParseVariableStatement(
// VariableStatement ::
// VariableDeclarations ';'
- Statement result = ParseVariableDeclarations(var_context, nullptr, nullptr,
- nullptr, CHECK_OK);
+ Statement result = ParseVariableDeclarations(
+ var_context, nullptr, nullptr, nullptr, nullptr, nullptr, CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
}
@@ -516,9 +509,9 @@ PreParser::Statement PreParser::ParseVariableStatement(
// to initialize it properly. This mechanism is also used for the parsing
// of 'for-in' loops.
PreParser::Statement PreParser::ParseVariableDeclarations(
- VariableDeclarationContext var_context, int* num_decl,
- Scanner::Location* first_initializer_loc, Scanner::Location* bindings_loc,
- bool* ok) {
+ VariableDeclarationContext var_context, int* num_decl, bool* is_lexical,
+ bool* is_binding_pattern, Scanner::Location* first_initializer_loc,
+ Scanner::Location* bindings_loc, bool* ok) {
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
//
@@ -533,7 +526,8 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// ConstBinding ::
// BindingPattern '=' AssignmentExpression
bool require_initializer = false;
- bool is_strict_const = false;
+ bool lexical = false;
+ bool is_pattern = false;
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
@@ -557,12 +551,13 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
if (is_strict(language_mode()) ||
(allow_harmony_sloppy() && !allow_legacy_const())) {
DCHECK(var_context != kStatement);
- is_strict_const = true;
- require_initializer = var_context != kForStatement;
+ require_initializer = true;
+ lexical = true;
}
} else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
+ lexical = true;
} else {
*ok = false;
return Statement::Default();
@@ -577,26 +572,35 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
do {
// Parse binding pattern.
if (nvars > 0) Consume(Token::COMMA);
+ int decl_pos = peek_position();
+ PreParserExpression pattern = PreParserExpression::Default();
{
ExpressionClassifier pattern_classifier;
Token::Value next = peek();
- PreParserExpression pattern =
- ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
+ if (lexical) {
+ ValidateLetPattern(&pattern_classifier, CHECK_OK);
+ }
- if (!allow_harmony_destructuring() && !pattern.IsIdentifier()) {
+ if (!allow_harmony_destructuring_bind() && !pattern.IsIdentifier()) {
ReportUnexpectedToken(next);
*ok = false;
return Statement::Default();
}
}
+ is_pattern = (pattern.IsObjectLiteral() || pattern.IsArrayLiteral()) &&
+ !pattern.is_parenthesized();
+
+ bool is_for_iteration_variable =
+ var_context == kForStatement &&
+ (peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
+
Scanner::Location variable_loc = scanner()->location();
nvars++;
- if (peek() == Token::ASSIGN || require_initializer ||
- // require initializers for multiple consts.
- (is_strict_const && peek() == Token::COMMA)) {
- Expect(Token::ASSIGN, CHECK_OK);
+ if (Check(Token::ASSIGN)) {
ExpressionClassifier classifier;
ParseAssignmentExpression(var_context != kForStatement, &classifier,
CHECK_OK);
@@ -606,6 +610,14 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
if (first_initializer_loc && !first_initializer_loc->IsValid()) {
*first_initializer_loc = variable_loc;
}
+ } else if ((require_initializer || is_pattern) &&
+ !is_for_iteration_variable) {
+ PreParserTraits::ReportMessageAt(
+ Scanner::Location(decl_pos, scanner()->location().end_pos),
+ MessageTemplate::kDeclarationMissingInitializer,
+ is_pattern ? "destructuring" : "const");
+ *ok = false;
+ return Statement::Default();
}
} while (peek() == Token::COMMA);
@@ -614,7 +626,9 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
Scanner::Location(bindings_start, scanner()->location().end_pos);
}
- if (num_decl != NULL) *num_decl = nvars;
+ if (num_decl != nullptr) *num_decl = nvars;
+ if (is_lexical != nullptr) *is_lexical = lexical;
+ if (is_binding_pattern != nullptr) *is_binding_pattern = is_pattern;
return Statement::Default();
}
@@ -903,14 +917,15 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
(peek() == Token::LET && IsNextLetKeyword())) {
int decl_count;
+ bool is_lexical;
+ bool is_binding_pattern;
Scanner::Location first_initializer_loc = Scanner::Location::invalid();
Scanner::Location bindings_loc = Scanner::Location::invalid();
- ParseVariableDeclarations(kForStatement, &decl_count,
- &first_initializer_loc, &bindings_loc,
- CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
+ &is_binding_pattern, &first_initializer_loc,
+ &bindings_loc, CHECK_OK);
bool accept_IN = decl_count >= 1;
- bool accept_OF = true;
- if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
+ if (accept_IN && CheckInOrOf(&mode, ok)) {
if (!*ok) return Statement::Default();
if (decl_count != 1) {
const char* loop_type =
@@ -922,7 +937,8 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
return Statement::Default();
}
if (first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) || mode == ForEachStatement::ITERATE)) {
+ (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
+ is_lexical || is_binding_pattern)) {
if (mode == ForEachStatement::ITERATE) {
ReportMessageAt(first_initializer_loc,
MessageTemplate::kForOfLoopInitializer);
@@ -941,15 +957,29 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
} else {
int lhs_beg_pos = peek_position();
- Expression lhs = ParseExpression(false, CHECK_OK);
+ ExpressionClassifier classifier;
+ Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
is_let_identifier_expression =
lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
- if (CheckInOrOf(lhs.IsIdentifier(), &mode, ok)) {
- if (!*ok) return Statement::Default();
- lhs = CheckAndRewriteReferenceExpression(
- lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError, CHECK_OK);
+ bool is_for_each = CheckInOrOf(&mode, ok);
+ if (!*ok) return Statement::Default();
+ bool is_destructuring = is_for_each &&
+ allow_harmony_destructuring_assignment() &&
+ (lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
+
+ if (is_destructuring) {
+ ValidateAssignmentPattern(&classifier, CHECK_OK);
+ } else {
+ ValidateExpression(&classifier, CHECK_OK);
+ }
+
+ if (is_for_each) {
+ if (!is_destructuring) {
+ lhs = CheckAndRewriteReferenceExpression(
+ lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+ kSyntaxError, CHECK_OK);
+ }
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
ParseSubStatement(CHECK_OK);
@@ -1024,9 +1054,12 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ ExpressionClassifier pattern_classifier;
+ ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ ValidateBindingPattern(&pattern_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
{
+ // TODO(adamk): Make this CATCH_SCOPE
Scope* with_scope = NewScope(scope_, WITH_SCOPE);
BlockState block_state(&scope_, with_scope);
ParseBlock(CHECK_OK);
@@ -1201,10 +1234,11 @@ PreParserExpression PreParser::ParseClassLiteral(
const bool is_static = false;
bool is_computed_name = false; // Classes do not care about computed
// property names here.
+ Identifier name;
ExpressionClassifier classifier;
ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
&is_computed_name, &has_seen_constructor,
- &classifier, CHECK_OK);
+ &classifier, &name, CHECK_OK);
ValidateExpression(&classifier, CHECK_OK);
}
@@ -1234,7 +1268,25 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
return Expression::Default();
}
+
+PreParserExpression PreParser::ParseDoExpression(bool* ok) {
+ // AssignmentExpression ::
+ // do '{' StatementList '}'
+ Expect(Token::DO, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+ Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ {
+ BlockState block_state(&scope_, block_scope);
+ while (peek() != Token::RBRACE) {
+ ParseStatementListItem(CHECK_OK);
+ }
+ Expect(Token::RBRACE, CHECK_OK);
+ return PreParserExpression::Default();
+ }
+}
+
#undef CHECK_OK
-} } // v8::internal
+} // namespace internal
+} // namespace v8
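
Every ParseXxx function in the preparser hunks above threads a trailing bool* ok parameter, and the CHECK_OK macro (note the #undef at the end of the file) turns a failed sub-parse into an early return with a default value. The following self-contained sketch shows the usual shape of the idiom; the macro body here is an illustration, not a verbatim copy of src/parsing/preparser.cc:

    #include <iostream>

    struct Statement {
      static Statement Default() { return Statement(); }
    };

    // CHECK_OK supplies the trailing `ok` argument and bails out of the
    // enclosing function on failure; the dangling `((void)0` is closed by
    // the `)` and `;` already present at the call site.
    #define CHECK_OK ok);                     \
      if (!*ok) return Statement::Default();  \
      ((void)0

    Statement ParseInner(bool* ok) {
      *ok = false;  // simulate a syntax error in a nested production
      return Statement::Default();
    }

    Statement ParseOuter(bool* ok) {
      ParseInner(CHECK_OK);  // ParseInner(ok); if (!*ok) return Default()...
      std::cout << "not reached when ParseInner fails\n";
      return Statement::Default();
    }

    int main() {
      bool ok = true;
      ParseOuter(&ok);
      std::cout << (ok ? "ok" : "syntax error") << "\n";
    }

This is also why signature changes such as ParseVariableDeclarations gaining the is_lexical and is_binding_pattern out-parameters keep bool* ok in the final position: the macro assumes a variable named ok is in scope at every call site.
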
diff --git a/chromium/v8/src/parsing/preparser.h b/chromium/v8/src/parsing/preparser.h
new file mode 100644
index 00000000000..59100f1ae96
--- /dev/null
+++ b/chromium/v8/src/parsing/preparser.h
@@ -0,0 +1,1175 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSER_H
+#define V8_PARSING_PREPARSER_H
+
+#include "src/ast/scopes.h"
+#include "src/bailout-reason.h"
+#include "src/hashmap.h"
+#include "src/messages.h"
+#include "src/parsing/expression-classifier.h"
+#include "src/parsing/func-name-inferrer.h"
+#include "src/parsing/parser-base.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+
+class PreParserIdentifier {
+ public:
+ PreParserIdentifier() : type_(kUnknownIdentifier) {}
+ static PreParserIdentifier Default() {
+ return PreParserIdentifier(kUnknownIdentifier);
+ }
+ static PreParserIdentifier Eval() {
+ return PreParserIdentifier(kEvalIdentifier);
+ }
+ static PreParserIdentifier Arguments() {
+ return PreParserIdentifier(kArgumentsIdentifier);
+ }
+ static PreParserIdentifier Undefined() {
+ return PreParserIdentifier(kUndefinedIdentifier);
+ }
+ static PreParserIdentifier FutureReserved() {
+ return PreParserIdentifier(kFutureReservedIdentifier);
+ }
+ static PreParserIdentifier FutureStrictReserved() {
+ return PreParserIdentifier(kFutureStrictReservedIdentifier);
+ }
+ static PreParserIdentifier Let() {
+ return PreParserIdentifier(kLetIdentifier);
+ }
+ static PreParserIdentifier Static() {
+ return PreParserIdentifier(kStaticIdentifier);
+ }
+ static PreParserIdentifier Yield() {
+ return PreParserIdentifier(kYieldIdentifier);
+ }
+ static PreParserIdentifier Prototype() {
+ return PreParserIdentifier(kPrototypeIdentifier);
+ }
+ static PreParserIdentifier Constructor() {
+ return PreParserIdentifier(kConstructorIdentifier);
+ }
+ bool IsEval() const { return type_ == kEvalIdentifier; }
+ bool IsArguments() const { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
+ bool IsUndefined() const { return type_ == kUndefinedIdentifier; }
+ bool IsLet() const { return type_ == kLetIdentifier; }
+ bool IsStatic() const { return type_ == kStaticIdentifier; }
+ bool IsYield() const { return type_ == kYieldIdentifier; }
+ bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
+ bool IsConstructor() const { return type_ == kConstructorIdentifier; }
+ bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() const {
+ return type_ == kFutureStrictReservedIdentifier ||
+ type_ == kLetIdentifier || type_ == kStaticIdentifier ||
+ type_ == kYieldIdentifier;
+ }
+
+ // Allow identifier->name()[->length()] to work. The preparser
+ // does not need the actual positions/lengths of the identifiers.
+ const PreParserIdentifier* operator->() const { return this; }
+ const PreParserIdentifier raw_name() const { return *this; }
+
+ int position() const { return 0; }
+ int length() const { return 0; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kLetIdentifier,
+ kStaticIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier,
+ kUndefinedIdentifier,
+ kPrototypeIdentifier,
+ kConstructorIdentifier
+ };
+
+ explicit PreParserIdentifier(Type type) : type_(type) {}
+ Type type_;
+
+ friend class PreParserExpression;
+};
+
+
+class PreParserExpression {
+ public:
+ static PreParserExpression Default() {
+ return PreParserExpression(TypeField::encode(kExpression));
+ }
+
+ static PreParserExpression Spread(PreParserExpression expression) {
+ return PreParserExpression(TypeField::encode(kSpreadExpression));
+ }
+
+ static PreParserExpression FromIdentifier(PreParserIdentifier id) {
+ return PreParserExpression(TypeField::encode(kIdentifierExpression) |
+ IdentifierTypeField::encode(id.type_));
+ }
+
+ static PreParserExpression BinaryOperation(PreParserExpression left,
+ Token::Value op,
+ PreParserExpression right) {
+ return PreParserExpression(TypeField::encode(kBinaryOperationExpression));
+ }
+
+ static PreParserExpression Assignment() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kAssignment));
+ }
+
+ static PreParserExpression ObjectLiteral() {
+ return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
+ }
+
+ static PreParserExpression ArrayLiteral() {
+ return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
+ }
+
+ static PreParserExpression StringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression));
+ }
+
+ static PreParserExpression UseStrictStringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+ IsUseStrictField::encode(true));
+ }
+
+ static PreParserExpression UseStrongStringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+ IsUseStrongField::encode(true));
+ }
+
+ static PreParserExpression This() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kThisExpression));
+ }
+
+ static PreParserExpression ThisProperty() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kThisPropertyExpression));
+ }
+
+ static PreParserExpression Property() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kPropertyExpression));
+ }
+
+ static PreParserExpression Call() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kCallExpression));
+ }
+
+ static PreParserExpression SuperCallReference() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kSuperCallReference));
+ }
+
+ static PreParserExpression NoTemplateTag() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kNoTemplateTagExpression));
+ }
+
+ bool IsIdentifier() const {
+ return TypeField::decode(code_) == kIdentifierExpression;
+ }
+
+ PreParserIdentifier AsIdentifier() const {
+ DCHECK(IsIdentifier());
+ return PreParserIdentifier(IdentifierTypeField::decode(code_));
+ }
+
+ bool IsAssignment() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kAssignment;
+ }
+
+ bool IsObjectLiteral() const {
+ return TypeField::decode(code_) == kObjectLiteralExpression;
+ }
+
+ bool IsArrayLiteral() const {
+ return TypeField::decode(code_) == kArrayLiteralExpression;
+ }
+
+ bool IsStringLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression;
+ }
+
+ bool IsUseStrictLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression &&
+ IsUseStrictField::decode(code_);
+ }
+
+ bool IsUseStrongLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression &&
+ IsUseStrongField::decode(code_);
+ }
+
+ bool IsThis() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kThisExpression;
+ }
+
+ bool IsThisProperty() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kThisPropertyExpression;
+ }
+
+ bool IsProperty() const {
+ return TypeField::decode(code_) == kExpression &&
+ (ExpressionTypeField::decode(code_) == kPropertyExpression ||
+ ExpressionTypeField::decode(code_) == kThisPropertyExpression);
+ }
+
+ bool IsCall() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kCallExpression;
+ }
+
+ bool IsSuperCallReference() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kSuperCallReference;
+ }
+
+ bool IsValidReferenceExpression() const {
+ return IsIdentifier() || IsProperty();
+ }
+
+ // At the moment PreParser doesn't track these expression types.
+ bool IsFunctionLiteral() const { return false; }
+ bool IsCallNew() const { return false; }
+
+ bool IsNoTemplateTag() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
+ }
+
+ bool IsSpreadExpression() const {
+ return TypeField::decode(code_) == kSpreadExpression;
+ }
+
+ PreParserExpression AsFunctionLiteral() { return *this; }
+
+ bool IsBinaryOperation() const {
+ return TypeField::decode(code_) == kBinaryOperationExpression;
+ }
+
+ // Dummy implementation for making expression->somefunc() work in both Parser
+ // and PreParser.
+ PreParserExpression* operator->() { return this; }
+
+ // More dummy implementations of things PreParser doesn't need to track:
+ void set_index(int index) {} // For YieldExpressions
+ void set_should_eager_compile() {}
+
+ int position() const { return RelocInfo::kNoPosition; }
+ void set_function_token_position(int position) {}
+
+ // Parenthesized expressions in the form `( Expression )`.
+ void set_is_parenthesized() {
+ code_ = ParenthesizedField::update(code_, true);
+ }
+ bool is_parenthesized() const { return ParenthesizedField::decode(code_); }
+
+ private:
+ enum Type {
+ kExpression,
+ kIdentifierExpression,
+ kStringLiteralExpression,
+ kBinaryOperationExpression,
+ kSpreadExpression,
+ kObjectLiteralExpression,
+ kArrayLiteralExpression
+ };
+
+ enum ExpressionType {
+ kThisExpression,
+ kThisPropertyExpression,
+ kPropertyExpression,
+ kCallExpression,
+ kSuperCallReference,
+ kNoTemplateTagExpression,
+ kAssignment
+ };
+
+ explicit PreParserExpression(uint32_t expression_code)
+ : code_(expression_code) {}
+
+ // The first three bits are for the Type.
+ typedef BitField<Type, 0, 3> TypeField;
+
+ // The high-order bit applies only to nodes which would inherit from the
+ // Expression AST node; this is necessary because Expression nodes may be
+ // represented as multiple Types, not exclusively through kExpression.
+ // TODO(caitp, adamk): clean up PreParserExpression bitfields.
+ typedef BitField<bool, 31, 1> ParenthesizedField;
+
+ // The rest of the bits are interpreted depending on the value
+ // of the Type field, so they can share the storage.
+ typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
+ typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
+ typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
+ typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
+ IdentifierTypeField;
+ typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
+
+ uint32_t code_;
+};
+
+
+// The pre-parser doesn't need to build lists of expressions, identifiers, or
+// the like.
+template <typename T>
+class PreParserList {
+ public:
+ // These functions make list->Add(some_expression) work (and do nothing).
+ PreParserList() : length_(0) {}
+ PreParserList* operator->() { return this; }
+ void Add(T, void*) { ++length_; }
+ int length() const { return length_; }
+ private:
+ int length_;
+};
+
+
+typedef PreParserList<PreParserExpression> PreParserExpressionList;
+
+
+class PreParserStatement {
+ public:
+ static PreParserStatement Default() {
+ return PreParserStatement(kUnknownStatement);
+ }
+
+ static PreParserStatement Jump() {
+ return PreParserStatement(kJumpStatement);
+ }
+
+ static PreParserStatement FunctionDeclaration() {
+ return PreParserStatement(kFunctionDeclaration);
+ }
+
+ // Creates an expression statement from an expression.
+ // Preserves whether the expression is an unparenthesized string literal,
+ // possibly a "use strict" directive.
+ static PreParserStatement ExpressionStatement(
+ PreParserExpression expression) {
+ if (expression.IsUseStrictLiteral()) {
+ return PreParserStatement(kUseStrictExpressionStatement);
+ }
+ if (expression.IsUseStrongLiteral()) {
+ return PreParserStatement(kUseStrongExpressionStatement);
+ }
+ if (expression.IsStringLiteral()) {
+ return PreParserStatement(kStringLiteralExpressionStatement);
+ }
+ return Default();
+ }
+
+ bool IsStringLiteral() {
+ return code_ == kStringLiteralExpressionStatement;
+ }
+
+ bool IsUseStrictLiteral() {
+ return code_ == kUseStrictExpressionStatement;
+ }
+
+ bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
+
+ bool IsFunctionDeclaration() {
+ return code_ == kFunctionDeclaration;
+ }
+
+ bool IsJumpStatement() {
+ return code_ == kJumpStatement;
+ }
+
+ private:
+ enum Type {
+ kUnknownStatement,
+ kJumpStatement,
+ kStringLiteralExpressionStatement,
+ kUseStrictExpressionStatement,
+ kUseStrongExpressionStatement,
+ kFunctionDeclaration
+ };
+
+ explicit PreParserStatement(Type code) : code_(code) {}
+ Type code_;
+};
+
+
+typedef PreParserList<PreParserStatement> PreParserStatementList;
+
+
+class PreParserFactory {
+ public:
+ explicit PreParserFactory(void* unused_value_factory) {}
+ PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewNumberLiteral(double number,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
+ int js_flags, int literal_index,
+ bool is_strong, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int literal_index,
+ bool is_strong,
+ int pos) {
+ return PreParserExpression::ArrayLiteral();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int first_spread_index, int literal_index,
+ bool is_strong, int pos) {
+ return PreParserExpression::ArrayLiteral();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value,
+ ObjectLiteralProperty::Kind kind,
+ bool is_static,
+ bool is_computed_name) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value,
+ bool is_static,
+ bool is_computed_name) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
+ int literal_index,
+ int boilerplate_properties,
+ bool has_function,
+ bool is_strong,
+ int pos) {
+ return PreParserExpression::ObjectLiteral();
+ }
+ PreParserExpression NewVariableProxy(void* variable) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewProperty(PreParserExpression obj,
+ PreParserExpression key,
+ int pos) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisProperty();
+ }
+ return PreParserExpression::Property();
+ }
+ PreParserExpression NewUnaryOperation(Token::Value op,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewBinaryOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::BinaryOperation(left, op, right);
+ }
+ PreParserExpression NewCompareOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRewritableAssignmentExpression(
+ PreParserExpression expression) {
+ return expression;
+ }
+ PreParserExpression NewAssignment(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return PreParserExpression::Assignment();
+ }
+ PreParserExpression NewYield(PreParserExpression generator_object,
+ PreParserExpression expression,
+ Yield::Kind yield_kind,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewConditional(PreParserExpression condition,
+ PreParserExpression then_expression,
+ PreParserExpression else_expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCountOperation(Token::Value op,
+ bool is_prefix,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCall(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Call();
+ }
+ PreParserExpression NewCallNew(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCallRuntime(const AstRawString* name,
+ const Runtime::Function* function,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserStatement NewReturnStatement(PreParserExpression expression,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+ PreParserExpression NewFunctionLiteral(
+ PreParserIdentifier name, Scope* scope, PreParserStatementList body,
+ int materialized_literal_count, int expected_property_count,
+ int parameter_count,
+ FunctionLiteral::ParameterFlag has_duplicate_parameters,
+ FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
+ int position) {
+ return PreParserExpression::Default();
+ }
+
+ PreParserExpression NewSpread(PreParserExpression expression, int pos) {
+ return PreParserExpression::Spread(expression);
+ }
+
+ PreParserExpression NewEmptyParentheses(int pos) {
+ return PreParserExpression::Default();
+ }
+
+ // Return the object itself as AstVisitor and implement the needed
+ // dummy method right in this class.
+ PreParserFactory* visitor() { return this; }
+ int* ast_properties() {
+ static int dummy = 42;
+ return &dummy;
+ }
+};
+
+
+struct PreParserFormalParameters : FormalParametersBase {
+ explicit PreParserFormalParameters(Scope* scope)
+ : FormalParametersBase(scope) {}
+ int arity = 0;
+
+ int Arity() const { return arity; }
+ PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
+};
+
+
+class PreParser;
+
+class PreParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef PreParser* Parser;
+
+ // PreParser doesn't need to store generator variables.
+ typedef void GeneratorVariable;
+
+ typedef int AstProperties;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserExpression YieldExpression;
+ typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ClassLiteral;
+ typedef PreParserExpression ObjectLiteralProperty;
+ typedef PreParserExpression Literal;
+ typedef PreParserExpressionList ExpressionList;
+ typedef PreParserExpressionList PropertyList;
+ typedef PreParserIdentifier FormalParameter;
+ typedef PreParserFormalParameters FormalParameters;
+ typedef PreParserStatementList StatementList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef PreParserFactory Factory;
+ };
+
+ explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+
+ // Helper functions for recursive descent.
+ static bool IsEval(PreParserIdentifier identifier) {
+ return identifier.IsEval();
+ }
+
+ static bool IsArguments(PreParserIdentifier identifier) {
+ return identifier.IsArguments();
+ }
+
+ static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ return identifier.IsEvalOrArguments();
+ }
+
+ static bool IsUndefined(PreParserIdentifier identifier) {
+ return identifier.IsUndefined();
+ }
+
+ static bool IsPrototype(PreParserIdentifier identifier) {
+ return identifier.IsPrototype();
+ }
+
+ static bool IsConstructor(PreParserIdentifier identifier) {
+ return identifier.IsConstructor();
+ }
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(PreParserExpression expression) {
+ return expression.IsThisProperty();
+ }
+
+ static bool IsIdentifier(PreParserExpression expression) {
+ return expression.IsIdentifier();
+ }
+
+ static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
+ return expression.AsIdentifier();
+ }
+
+ static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
+ return identifier.IsFutureStrictReserved();
+ }
+
+ static bool IsBoilerplateProperty(PreParserExpression property) {
+ // PreParser doesn't count boilerplate properties.
+ return false;
+ }
+
+ static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
+ return false;
+ }
+
+ static PreParserExpression GetPropertyValue(PreParserExpression property) {
+ return PreParserExpression::Default();
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void PushPropertyName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void InferFunctionName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ Scope* scope, PreParserExpression property, bool* has_function) {}
+
+ static void CheckAssigningFunctionLiteralToProperty(
+ PreParserExpression left, PreParserExpression right) {}
+
+ static PreParserExpression MarkExpressionAsAssigned(
+ PreParserExpression expression) {
+ // TODO(marja): To be able to produce the same errors, the preparser needs
+ // to start tracking which expressions are variables and which are assigned.
+ return expression;
+ }
+
+ bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op,
+ int pos,
+ PreParserFactory* factory) {
+ return false;
+ }
+
+ PreParserExpression BuildUnaryExpression(PreParserExpression expression,
+ Token::Value op, int pos,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
+ Handle<Object> arg, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
+ Handle<Object> arg, int pos) {
+ return PreParserExpression::Default();
+ }
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location location,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError);
+ void ReportMessageAt(int start_pos, int end_pos,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError);
+
+ // "null" return type creators.
+ static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserIdentifier EmptyIdentifierString() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyObjectLiteralProperty() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyFunctionLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpressionList NullExpressionList() {
+ return PreParserExpressionList();
+ }
+
+ // Odd-ball literal creators.
+ static PreParserExpression GetLiteralTheHole(int position,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Producing data during the recursive descent.
+ PreParserIdentifier GetSymbol(Scanner* scanner);
+ PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
+
+ static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
+ return PreParserIdentifier::Default();
+ }
+
+ static PreParserExpression ThisExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::This();
+ }
+
+ static PreParserExpression SuperPropertyReference(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression SuperCallReference(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::SuperCallReference();
+ }
+
+ static PreParserExpression NewTargetExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
+ int pos, int end_pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int start_position, int end_position,
+ Scope* scope, PreParserFactory* factory) {
+ return PreParserExpression::FromIdentifier(name);
+ }
+
+ PreParserExpression ExpressionFromString(int pos,
+ Scanner* scanner,
+ PreParserFactory* factory = NULL);
+
+ PreParserExpression GetIterator(PreParserExpression iterable,
+ PreParserFactory* factory, int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpressionList NewExpressionList(int size, Zone* zone) {
+ return PreParserExpressionList();
+ }
+
+ static PreParserStatementList NewStatementList(int size, Zone* zone) {
+ return PreParserStatementList();
+ }
+
+ static PreParserExpressionList NewPropertyList(int size, Zone* zone) {
+ return PreParserExpressionList();
+ }
+
+ static void AddParameterInitializationBlock(
+ const PreParserFormalParameters& parameters,
+ PreParserStatementList list, bool* ok) {}
+
+ V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
+ int* expected_property_count, bool* ok) {
+ UNREACHABLE();
+ }
+
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
+
+ V8_INLINE void ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters,
+ PreParserExpression expression, const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok);
+
+ void ReindexLiterals(const PreParserFormalParameters& parameters) {}
+
+ struct TemplateLiteralState {};
+
+ TemplateLiteralState OpenTemplateLiteral(int pos) {
+ return TemplateLiteralState();
+ }
+ void AddTemplateSpan(TemplateLiteralState*, bool) {}
+ void AddTemplateExpression(TemplateLiteralState*, PreParserExpression) {}
+ PreParserExpression CloseTemplateLiteral(TemplateLiteralState*, int,
+ PreParserExpression tag) {
+ if (IsTaggedTemplate(tag)) {
+ // Emulate generation of array literals for the tag callsite:
+ // the first is the array of cooked strings, the second the raw strings.
+ MaterializeTemplateCallsiteLiterals();
+ }
+ return EmptyExpression();
+ }
+ inline void MaterializeTemplateCallsiteLiterals();
+ PreParserExpression NoTemplateTag() {
+ return PreParserExpression::NoTemplateTag();
+ }
+ static bool IsTaggedTemplate(const PreParserExpression tag) {
+ return !tag.IsNoTemplateTag();
+ }
+
+ void AddFormalParameter(PreParserFormalParameters* parameters,
+ PreParserExpression pattern,
+ PreParserExpression initializer,
+ int initializer_end_position, bool is_rest) {
+ ++parameters->arity;
+ }
+ void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
+ ExpressionClassifier* classifier) {
+ if (!classifier->is_simple_parameter_list()) {
+ scope->SetHasNonSimpleParameters();
+ }
+ }
+
+ void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
+
+ // Temporary glue; these functions will move to ParserBase.
+ PreParserExpression ParseV8Intrinsic(bool* ok);
+ V8_INLINE PreParserExpression ParseDoExpression(bool* ok);
+ PreParserExpression ParseFunctionLiteral(
+ PreParserIdentifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
+
+ PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok);
+
+ PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
+ return list;
+ }
+
+ inline void MaterializeUnspreadArgumentsLiterals(int count);
+
+ inline PreParserExpression SpreadCall(PreParserExpression function,
+ PreParserExpressionList args, int pos);
+
+ inline PreParserExpression SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos);
+
+ inline void RewriteDestructuringAssignments() {}
+
+ inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
+
+ void SetFunctionNameFromPropertyName(PreParserExpression,
+ PreParserIdentifier) {}
+ void SetFunctionNameFromIdentifierRef(PreParserExpression,
+ PreParserExpression) {}
+
+ inline PreParserExpression RewriteNonPattern(
+ PreParserExpression expr, const ExpressionClassifier* classifier,
+ bool* ok);
+ inline PreParserExpression RewriteNonPatternArguments(
+ PreParserExpression args, const ExpressionClassifier* classifier,
+ bool* ok);
+ inline PreParserExpression RewriteNonPatternObjectLiteralProperty(
+ PreParserExpression property, const ExpressionClassifier* classifier,
+ bool* ok);
+
+ private:
+ PreParser* pre_parser_;
+};
+
+
+// Preparsing checks a JavaScript program and emits preparse data that
+// makes a later parse faster.
+// See preparse-data-format.h for the data format.
+
+// The PreParser checks that the syntax follows the grammar for JavaScript,
+// and collects some information about the program along the way.
+// The grammar check is performed only to understand the program well enough
+// to deduce information that can be used to speed up later parsing. Finding
+// errors is not the goal of pre-parsing; rather, the goal is to speed up
+// properly written, correct programs.
+// That means that contextual checks (like a label being declared where
+// it is used) are generally omitted.
+class PreParser : public ParserBase<PreParserTraits> {
+ public:
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserStatement Statement;
+
+ enum PreParseResult {
+ kPreParseStackOverflow,
+ kPreParseSuccess
+ };
+
+ PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
+ ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
+ ast_value_factory, log, this) {}
+
+ // Pre-parses the program from the character stream; returns
+ // kPreParseSuccess on success (even if parsing failed, the pre-parse data
+ // successfully captured the syntax error), and kPreParseStackOverflow if
+ // a stack overflow happened during parsing.
+ PreParseResult PreParseProgram(int* materialized_literals = 0) {
+ Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
+ PreParserFactory factory(NULL);
+ FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
+ &factory);
+ bool ok = true;
+ int start_position = scanner()->peek_location().beg_pos;
+ ParseStatementList(Token::EOS, &ok);
+ if (stack_overflow()) return kPreParseStackOverflow;
+ if (!ok) {
+ ReportUnexpectedToken(scanner()->current_token());
+ } else if (is_strict(scope_->language_mode())) {
+ CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
+ &ok);
+ }
+ if (materialized_literals) {
+ *materialized_literals = function_state_->materialized_literal_count();
+ }
+ return kPreParseSuccess;
+ }
+
+ // Parses a single function literal, from the opening parentheses before
+ // parameters to the closing brace after the body.
+ // Returns a FunctionEntry describing the body of the function in enough
+ // detail that it can be lazily compiled.
+ // The scanner is expected to have matched the "function" or "function*"
+ // keyword and parameters, and have consumed the initial '{'.
+ // At return, unless an error occurred, the scanner is positioned before the
+ // final '}'.
+ PreParseResult PreParseLazyFunction(
+ LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
+ ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
+
+ private:
+ friend class PreParserTraits;
+
+ static const int kLazyParseTrialLimit = 200;
+
+ // These types form an algebra over syntactic categories that is just
+ // rich enough to let us recognize and propagate the constructs that
+ // are either counted in the preparser data or are important for
+ // raising the correct syntax errors.
+
+ // All ParseXXX functions take as the last argument an *ok parameter
+ // which is set to false if parsing failed; it is unchanged otherwise.
+ // By making the 'exception handling' explicit, we are forced to check
+ // for failure at the call sites.
+ Statement ParseStatementListItem(bool* ok);
+ void ParseStatementList(int end_token, bool* ok,
+ Scanner::BookmarkScope* bookmark = nullptr);
+ Statement ParseStatement(bool* ok);
+ Statement ParseSubStatement(bool* ok);
+ Statement ParseFunctionDeclaration(bool* ok);
+ Statement ParseClassDeclaration(bool* ok);
+ Statement ParseBlock(bool* ok);
+ Statement ParseVariableStatement(VariableDeclarationContext var_context,
+ bool* ok);
+ Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+ int* num_decl, bool* is_lexical,
+ bool* is_binding_pattern,
+ Scanner::Location* first_initializer_loc,
+ Scanner::Location* bindings_loc,
+ bool* ok);
+ Statement ParseExpressionOrLabelledStatement(bool* ok);
+ Statement ParseIfStatement(bool* ok);
+ Statement ParseContinueStatement(bool* ok);
+ Statement ParseBreakStatement(bool* ok);
+ Statement ParseReturnStatement(bool* ok);
+ Statement ParseWithStatement(bool* ok);
+ Statement ParseSwitchStatement(bool* ok);
+ Statement ParseDoWhileStatement(bool* ok);
+ Statement ParseWhileStatement(bool* ok);
+ Statement ParseForStatement(bool* ok);
+ Statement ParseThrowStatement(bool* ok);
+ Statement ParseTryStatement(bool* ok);
+ Statement ParseDebuggerStatement(bool* ok);
+ Expression ParseConditionalExpression(bool accept_IN, bool* ok);
+ Expression ParseObjectLiteral(bool* ok);
+ Expression ParseV8Intrinsic(bool* ok);
+ Expression ParseDoExpression(bool* ok);
+
+ V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
+ int* expected_property_count, bool* ok);
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
+
+ Expression ParseFunctionLiteral(
+ Identifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
+ void ParseLazyFunctionLiteralBody(bool* ok,
+ Scanner::BookmarkScope* bookmark = nullptr);
+
+ PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok);
+};
+
+
+void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+}
+
+
+void PreParserTraits::MaterializeUnspreadArgumentsLiterals(int count) {
+ for (int i = 0; i < count; ++i) {
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+ }
+}
+
+
+PreParserExpression PreParserTraits::SpreadCall(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos) {
+ return pre_parser_->factory()->NewCall(function, args, pos);
+}
+
+PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos) {
+ return pre_parser_->factory()->NewCallNew(function, args, pos);
+}
+
+
+void PreParserTraits::ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters,
+ PreParserExpression params, const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok) {
+ // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
+ // lists that are too long.
+}
+
+
+PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
+ return pre_parser_->ParseDoExpression(ok);
+}
+
+
+PreParserExpression PreParserTraits::RewriteNonPattern(
+ PreParserExpression expr, const ExpressionClassifier* classifier,
+ bool* ok) {
+ pre_parser_->ValidateExpression(classifier, ok);
+ return expr;
+}
+
+
+PreParserExpression PreParserTraits::RewriteNonPatternArguments(
+ PreParserExpression args, const ExpressionClassifier* classifier,
+ bool* ok) {
+ pre_parser_->ValidateExpression(classifier, ok);
+ return args;
+}
+
+
+PreParserExpression PreParserTraits::RewriteNonPatternObjectLiteralProperty(
+ PreParserExpression property, const ExpressionClassifier* classifier,
+ bool* ok) {
+ pre_parser_->ValidateExpression(classifier, ok);
+ return property;
+}
+
+
+PreParserStatementList PreParser::ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+
+ ParseStatementList(Token::RBRACE, ok);
+ if (!*ok) return PreParserStatementList();
+
+ Expect(Token::RBRACE, ok);
+ return PreParserStatementList();
+}
+
+
+PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
+ return pre_parser_->ParseEagerFunctionBody(function_name, pos, parameters,
+ kind, function_type, ok);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PREPARSER_H
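
PreParserExpression in the new header packs all of its state into the single uint32_t code_ via V8's BitField template: each typedef claims a shift/size window, windows for the different Type interpretations deliberately overlap (several fields all start at TypeField::kNext), and ParenthesizedField sits alone in bit 31 so it survives regardless of which interpretation the low bits carry. A minimal sketch of the encode/decode/update mechanics under a simplified BitField; V8's real template has more helpers and static checks, so treat this as an illustration:

    #include <cstdint>
    #include <iostream>

    // Simplified BitField: a window of kSize bits at offset kShift in a
    // uint32_t. kNext lets the following field start right after this one.
    template <class T, int kShift, int kSize>
    struct BitField {
      static const int kNext = kShift + kSize;
      static const uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t code) {
        return static_cast<T>((code & kMask) >> kShift);
      }
      static uint32_t update(uint32_t code, T value) {
        return (code & ~kMask) | encode(value);
      }
    };

    enum Type { kExpression, kIdentifierExpression, kStringLiteralExpression };

    typedef BitField<Type, 0, 3> TypeField;            // low bits: Type tag
    typedef BitField<bool, 31, 1> ParenthesizedField;  // high bit: "( expr )"

    int main() {
      uint32_t code = TypeField::encode(kIdentifierExpression);
      code = ParenthesizedField::update(code, true);  // set_is_parenthesized()
      std::cout << TypeField::decode(code) << " "
                << ParenthesizedField::decode(code) << "\n";  // prints "1 1"
    }

Because bit 31 is outside every other window, is_parenthesized() can be set and read independently of whichever expression kind the low bits currently encode, which is exactly what the parenthesized-pattern checks in ParseVariableDeclarations rely on.
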
diff --git a/chromium/v8/src/parsing/rewriter.cc b/chromium/v8/src/parsing/rewriter.cc
new file mode 100644
index 00000000000..4da60aca186
--- /dev/null
+++ b/chromium/v8/src/parsing/rewriter.cc
@@ -0,0 +1,403 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/rewriter.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/parsing/parser.h"
+
+namespace v8 {
+namespace internal {
+
+class Processor: public AstVisitor {
+ public:
+ Processor(Isolate* isolate, Scope* scope, Variable* result,
+ AstValueFactory* ast_value_factory)
+ : result_(result),
+ result_assigned_(false),
+ replacement_(nullptr),
+ is_set_(false),
+ zone_(ast_value_factory->zone()),
+ scope_(scope),
+ factory_(ast_value_factory) {
+ InitializeAstVisitor(isolate);
+ }
+
+ Processor(Parser* parser, Scope* scope, Variable* result,
+ AstValueFactory* ast_value_factory)
+ : result_(result),
+ result_assigned_(false),
+ replacement_(nullptr),
+ is_set_(false),
+ scope_(scope),
+ factory_(ast_value_factory) {
+ InitializeAstVisitor(parser->stack_limit());
+ }
+
+ ~Processor() override {}
+
+ void Process(ZoneList<Statement*>* statements);
+ bool result_assigned() const { return result_assigned_; }
+
+ Zone* zone() { return zone_; }
+ Scope* scope() { return scope_; }
+ AstNodeFactory* factory() { return &factory_; }
+
+ // Returns ".result = value"
+ Expression* SetResult(Expression* value) {
+ result_assigned_ = true;
+ VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
+ return factory()->NewAssignment(Token::ASSIGN, result_proxy, value,
+ RelocInfo::kNoPosition);
+ }
+
+ // Inserts '.result = undefined' in front of the given statement.
+ Statement* AssignUndefinedBefore(Statement* s);
+
+ private:
+ Variable* result_;
+
+ // We are not tracking result usage via the result_'s use
+ // counts (we leave the accurate computation to the
+ // usage analyzer). Instead we simply remember if
+ // there was ever an assignment to result_.
+ bool result_assigned_;
+
+ // When visiting a node, we "return" a replacement for that node in
+ // [replacement_]. In many cases this will just be the original node.
+ Statement* replacement_;
+
+ // To avoid storing to .result all the time, we eliminate some of
+ // the stores by keeping track of whether or not we're sure .result
+ // will be overwritten anyway. This is a bit trickier than one
+ // might hope.
+ bool is_set_;
+
+ Zone* zone_;
+ Scope* scope_;
+ AstNodeFactory factory_;
+
+ // Node visitors.
+#define DEF_VISIT(type) void Visit##type(type* node) override;
+ AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ void VisitIterationStatement(IterationStatement* stmt);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+};
+
+
+Statement* Processor::AssignUndefinedBefore(Statement* s) {
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* undef = factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, undef, RelocInfo::kNoPosition);
+ Block* b = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+ b->statements()->Add(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
+ b->statements()->Add(s, zone());
+ return b;
+}
+
+
+void Processor::Process(ZoneList<Statement*>* statements) {
+ for (int i = statements->length() - 1; i >= 0; --i) {
+ Visit(statements->at(i));
+ statements->Set(i, replacement_);
+ }
+}
+
+
+void Processor::VisitBlock(Block* node) {
+ // An initializer block is the rewritten form of a variable declaration
+ // with initialization expressions. The initializer block contains the
+ // list of assignments corresponding to the initialization expressions.
+ // While unclear from the spec (ECMA-262, 3rd., 12.2), the value of
+ // a variable declaration with initialization expression is 'undefined'
+ // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
+ // returns 'undefined'. To obtain the same behavior with v8, we need
+ // to prevent rewriting in that case.
+ if (!node->ignore_completion_value()) Process(node->statements());
+ replacement_ = node;
+}
+
+
+void Processor::VisitExpressionStatement(ExpressionStatement* node) {
+ // Rewrite : <x>; -> .result = <x>;
+ if (!is_set_) {
+ node->set_expression(SetResult(node->expression()));
+ is_set_ = true;
+ }
+ replacement_ = node;
+}
+
+
+void Processor::VisitIfStatement(IfStatement* node) {
+ // Rewrite both branches.
+ bool set_after = is_set_;
+ Visit(node->then_statement());
+ node->set_then_statement(replacement_);
+ bool set_in_then = is_set_;
+ is_set_ = set_after;
+ Visit(node->else_statement());
+ node->set_else_statement(replacement_);
+ is_set_ = is_set_ && set_in_then;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
+}
+
+
+void Processor::VisitIterationStatement(IterationStatement* node) {
+ // Rewrite the body.
+ bool set_after = is_set_;
+ is_set_ = false; // We are in a loop, so we can't rely on [set_after].
+ Visit(node->body());
+ node->set_body(replacement_);
+ is_set_ = is_set_ && set_after;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
+}
+
+
+void Processor::VisitDoWhileStatement(DoWhileStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
+void Processor::VisitWhileStatement(WhileStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForStatement(ForStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForInStatement(ForInStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForOfStatement(ForOfStatement* node) {
+ VisitIterationStatement(node);
+}
+
+
+void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
+ // Rewrite both try and catch block.
+ bool set_after = is_set_;
+ Visit(node->try_block());
+ node->set_try_block(static_cast<Block*>(replacement_));
+ bool set_in_try = is_set_;
+ is_set_ = set_after;
+ Visit(node->catch_block());
+ node->set_catch_block(static_cast<Block*>(replacement_));
+ is_set_ = is_set_ && set_in_try;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
+}
+
+
+void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
+ // Rewrite both try and finally block (in reverse order).
+ bool set_after = is_set_;
+ is_set_ = true; // Don't normally need to assign in finally block.
+ Visit(node->finally_block());
+ node->set_finally_block(replacement_->AsBlock());
+ { // Save .result value at the beginning of the finally block and restore it
+ // at the end again: ".backup = .result; ...; .result = .backup"
+ // This is necessary because the finally block does not normally contribute
+ // to the completion value.
+ Variable* backup = scope()->NewTemporary(
+ factory()->ast_value_factory()->dot_result_string());
+ Expression* backup_proxy = factory()->NewVariableProxy(backup);
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* save = factory()->NewAssignment(
+ Token::ASSIGN, backup_proxy, result_proxy, RelocInfo::kNoPosition);
+ Expression* restore = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, backup_proxy, RelocInfo::kNoPosition);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(save, RelocInfo::kNoPosition),
+ zone());
+ node->finally_block()->statements()->Add(
+ factory()->NewExpressionStatement(restore, RelocInfo::kNoPosition),
+ zone());
+ }
+ is_set_ = set_after;
+ Visit(node->try_block());
+ node->set_try_block(replacement_->AsBlock());
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
+}
+
+
+void Processor::VisitSwitchStatement(SwitchStatement* node) {
+ // Rewrite statements in all case clauses (in reverse order).
+ ZoneList<CaseClause*>* clauses = node->cases();
+ bool set_after = is_set_;
+ for (int i = clauses->length() - 1; i >= 0; --i) {
+ CaseClause* clause = clauses->at(i);
+ Process(clause->statements());
+ }
+ is_set_ = is_set_ && set_after;
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
+}
+
+
+void Processor::VisitContinueStatement(ContinueStatement* node) {
+ is_set_ = false;
+ replacement_ = node;
+}
+
+
+void Processor::VisitBreakStatement(BreakStatement* node) {
+ is_set_ = false;
+ replacement_ = node;
+}
+
+
+void Processor::VisitWithStatement(WithStatement* node) {
+ Visit(node->statement());
+ node->set_statement(replacement_);
+ replacement_ = node;
+
+ if (FLAG_harmony_completion && !is_set_) {
+ is_set_ = true;
+ replacement_ = AssignUndefinedBefore(node);
+ }
+}
+
+
+void Processor::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ Visit(node->statement());
+ node->set_statement(replacement_);
+ replacement_ = node;
+}
+
+
+void Processor::VisitEmptyStatement(EmptyStatement* node) {
+ replacement_ = node;
+}
+
+
+void Processor::VisitReturnStatement(ReturnStatement* node) {
+ is_set_ = true;
+ replacement_ = node;
+}
+
+
+void Processor::VisitDebuggerStatement(DebuggerStatement* node) {
+ replacement_ = node;
+}
+
+
+// Expressions are never visited.
+#define DEF_VISIT(type) \
+ void Processor::Visit##type(type* expr) { UNREACHABLE(); }
+EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+
+// Declarations are never visited.
+#define DEF_VISIT(type) \
+ void Processor::Visit##type(type* expr) { UNREACHABLE(); }
+DECLARATION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+
+// Assumes code has been parsed. Mutates the AST, so the AST should not
+// continue to be used in the case of failure.
+bool Rewriter::Rewrite(ParseInfo* info) {
+ FunctionLiteral* function = info->literal();
+ DCHECK(function != NULL);
+ Scope* scope = function->scope();
+ DCHECK(scope != NULL);
+ if (!scope->is_script_scope() && !scope->is_eval_scope()) return true;
+
+ ZoneList<Statement*>* body = function->body();
+ if (!body->is_empty()) {
+ Variable* result =
+ scope->NewTemporary(info->ast_value_factory()->dot_result_string());
+ // The name string must be internalized at this point.
+ DCHECK(!result->name().is_null());
+ Processor processor(info->isolate(), scope, result,
+ info->ast_value_factory());
+ processor.Process(body);
+ if (processor.HasStackOverflow()) return false;
+
+ if (processor.result_assigned()) {
+ DCHECK(function->end_position() != RelocInfo::kNoPosition);
+ // Set the position of the assignment statement one character past the
+ // source code, such that it definitely is not in the source code range
+ // of an immediate inner scope. For example in
+ // eval('with ({x:1}) x = 1');
+ // the end position of the function generated for executing the eval code
+ // coincides with the end of the with scope which is the position of '1'.
+ int pos = function->end_position();
+ VariableProxy* result_proxy =
+ processor.factory()->NewVariableProxy(result, pos);
+ Statement* result_statement =
+ processor.factory()->NewReturnStatement(result_proxy, pos);
+ body->Add(result_statement, info->zone());
+ }
+ }
+
+ return true;
+}
+
+
+bool Rewriter::Rewrite(Parser* parser, DoExpression* expr,
+ AstValueFactory* factory) {
+ Block* block = expr->block();
+ Scope* scope = block->scope();
+ ZoneList<Statement*>* body = block->statements();
+ VariableProxy* result = expr->result();
+ Variable* result_var = result->var();
+
+ if (!body->is_empty()) {
+ Processor processor(parser, scope, result_var, factory);
+ processor.Process(body);
+ if (processor.HasStackOverflow()) return false;
+
+ if (!processor.result_assigned()) {
+ AstNodeFactory* node_factory = processor.factory();
+ Expression* undef =
+ node_factory->NewUndefinedLiteral(RelocInfo::kNoPosition);
+ Statement* completion = node_factory->NewExpressionStatement(
+ processor.SetResult(undef), expr->position());
+ body->Add(completion, factory->zone());
+ }
+ }
+ return true;
+}
+
+
+} // namespace internal
+} // namespace v8
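
The hunks above implement the completion-value bookkeeping: the Processor threads a single `.result` temporary through every statement, and `is_set_` records whether the statement just rewritten is guaranteed to have assigned it. A minimal sketch of the two merge rules, deliberately outside V8's AST types (names here are illustrative, not V8's):

    // Two-armed constructs (if/else, try/catch) only guarantee an assignment
    // to .result when *both* arms guarantee one.
    bool MergeBranches(bool set_in_then, bool set_in_else) {
      return set_in_then && set_in_else;
    }

    // A loop body may run zero times, so it guarantees nothing; the rewriter
    // then prepends ".result = undefined" via AssignUndefinedBefore().
    bool MergeLoop(bool /* set_in_body */) { return false; }

The try/finally case additionally saves and restores `.result` around the finally block, because a finally block runs for its side effects only and must not leak a completion value into the enclosing statement.
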
diff --git a/chromium/v8/src/rewriter.h b/chromium/v8/src/parsing/rewriter.h
index b283a55ce00..477644a7567 100644
--- a/chromium/v8/src/rewriter.h
+++ b/chromium/v8/src/parsing/rewriter.h
@@ -2,13 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REWRITER_H_
-#define V8_REWRITER_H_
+#ifndef V8_PARSING_REWRITER_H_
+#define V8_PARSING_REWRITER_H_
namespace v8 {
namespace internal {
+class AstValueFactory;
+class DoExpression;
class ParseInfo;
+class Parser;
class Rewriter {
public:
@@ -19,9 +22,15 @@ class Rewriter {
// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
static bool Rewrite(ParseInfo* info);
+
+ // Rewrite a list of statements, using the same rules as a top-level program,
+  // to ensure identical behaviour of the completion result.
+ static bool Rewrite(Parser* parser, DoExpression* expr,
+ AstValueFactory* factory);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_REWRITER_H_
+#endif // V8_PARSING_REWRITER_H_
diff --git a/chromium/v8/src/scanner-character-streams.cc b/chromium/v8/src/parsing/scanner-character-streams.cc
index a58f392c0cd..91ed54f7bea 100644
--- a/chromium/v8/src/scanner-character-streams.cc
+++ b/chromium/v8/src/parsing/scanner-character-streams.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/scanner-character-streams.h"
+#include "src/parsing/scanner-character-streams.h"
#include "include/v8.h"
#include "src/globals.h"
@@ -193,7 +193,8 @@ Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
size_t Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, size_t length,
const byte* src, size_t* src_pos,
size_t src_length) {
- static const unibrow::uchar kMaxUtf16Character = 0xffff;
+ static const unibrow::uchar kMaxUtf16Character =
+ unibrow::Utf16::kMaxNonSurrogateCharCode;
size_t i = 0;
// Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
// one character early (in the normal case), because we need to have at least
diff --git a/chromium/v8/src/scanner-character-streams.h b/chromium/v8/src/parsing/scanner-character-streams.h
index a26f50a8922..603db93d022 100644
--- a/chromium/v8/src/scanner-character-streams.h
+++ b/chromium/v8/src/parsing/scanner-character-streams.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SCANNER_CHARACTER_STREAMS_H_
-#define V8_SCANNER_CHARACTER_STREAMS_H_
+#ifndef V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
+#define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
#include "src/handles.h"
-#include "src/scanner.h"
+#include "src/parsing/scanner.h"
#include "src/vector.h"
namespace v8 {
@@ -21,16 +21,16 @@ class ExternalTwoByteString;
class BufferedUtf16CharacterStream: public Utf16CharacterStream {
public:
BufferedUtf16CharacterStream();
- virtual ~BufferedUtf16CharacterStream();
+ ~BufferedUtf16CharacterStream() override;
- virtual void PushBack(uc32 character);
+ void PushBack(uc32 character) override;
protected:
static const size_t kBufferSize = 512;
static const size_t kPushBackStepSize = 16;
- virtual size_t SlowSeekForward(size_t delta);
- virtual bool ReadBlock();
+ size_t SlowSeekForward(size_t delta) override;
+ bool ReadBlock() override;
virtual void SlowPushBack(uc16 character);
virtual size_t BufferSeekForward(size_t delta) = 0;
@@ -46,16 +46,16 @@ class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
GenericStringUtf16CharacterStream(Handle<String> data, size_t start_position,
size_t end_position);
- virtual ~GenericStringUtf16CharacterStream();
+ ~GenericStringUtf16CharacterStream() override;
- virtual bool SetBookmark();
- virtual void ResetToBookmark();
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
protected:
static const size_t kNoBookmark = -1;
- virtual size_t BufferSeekForward(size_t delta);
- virtual size_t FillBuffer(size_t position);
+ size_t BufferSeekForward(size_t delta) override;
+ size_t FillBuffer(size_t position) override;
Handle<String> string_;
size_t length_;
@@ -67,14 +67,14 @@ class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
public:
Utf8ToUtf16CharacterStream(const byte* data, size_t length);
- virtual ~Utf8ToUtf16CharacterStream();
+ ~Utf8ToUtf16CharacterStream() override;
static size_t CopyChars(uint16_t* dest, size_t length, const byte* src,
size_t* src_pos, size_t src_length);
protected:
- virtual size_t BufferSeekForward(size_t delta);
- virtual size_t FillBuffer(size_t char_position);
+ size_t BufferSeekForward(size_t delta) override;
+ size_t FillBuffer(size_t char_position) override;
void SetRawPosition(size_t char_position);
const byte* raw_data_;
@@ -103,7 +103,7 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
bookmark_data_offset_(0),
bookmark_utf8_split_char_buffer_length_(0) {}
- virtual ~ExternalStreamingStream() {
+ ~ExternalStreamingStream() override {
delete[] current_data_;
bookmark_buffer_.Dispose();
bookmark_data_.Dispose();
@@ -120,8 +120,8 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
size_t FillBuffer(size_t position) override;
- virtual bool SetBookmark() override;
- virtual void ResetToBookmark() override;
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
private:
void HandleUtf8SplitCharacters(size_t* data_in_buffer);
@@ -154,23 +154,23 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
int start_position,
int end_position);
- virtual ~ExternalTwoByteStringUtf16CharacterStream();
+ ~ExternalTwoByteStringUtf16CharacterStream() override;
- virtual void PushBack(uc32 character) {
+ void PushBack(uc32 character) override {
DCHECK(buffer_cursor_ > raw_data_);
buffer_cursor_--;
pos_--;
}
- virtual bool SetBookmark();
- virtual void ResetToBookmark();
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
protected:
- virtual size_t SlowSeekForward(size_t delta) {
+ size_t SlowSeekForward(size_t delta) override {
// Fast case always handles seeking.
return 0;
}
- virtual bool ReadBlock() {
+ bool ReadBlock() override {
// Entire string is read at start.
return false;
}
@@ -183,6 +183,7 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
size_t bookmark_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_SCANNER_CHARACTER_STREAMS_H_
+#endif // V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
diff --git a/chromium/v8/src/scanner.cc b/chromium/v8/src/parsing/scanner.cc
index de799033b92..19fab9355e0 100644
--- a/chromium/v8/src/scanner.cc
+++ b/chromium/v8/src/parsing/scanner.cc
@@ -4,17 +4,17 @@
// Features shared by parsing and pre-parsing scanners.
-#include "src/scanner.h"
+#include "src/parsing/scanner.h"
#include <stdint.h>
#include <cmath>
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
#include "src/list-inl.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -356,7 +356,7 @@ Token::Value Scanner::SkipSourceURLComment() {
void Scanner::TryToParseSourceURLComment() {
- // Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
+ // Magic comments are of the form: //[#]\s<name>=\s*<value>\s*.* and this
// function will just return if it cannot parse a magic comment.
if (c0_ < 0 || !unicode_cache_->IsWhiteSpace(c0_)) return;
Advance();
@@ -574,7 +574,7 @@ void Scanner::Scan() {
Advance();
if (c0_ == '/') {
Advance();
- if (c0_ == '@' || c0_ == '#') {
+ if (c0_ == '#') {
Advance();
token = SkipSourceURLComment();
} else {
@@ -1177,7 +1177,7 @@ uc32 Scanner::ScanUnicodeEscape() {
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
- int input_length) {
+ int input_length, bool escaped) {
DCHECK(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -1189,26 +1189,30 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
#define KEYWORD_GROUP_CASE(ch) \
break; \
case ch:
-#define KEYWORD(keyword, token) \
- { \
- /* 'keyword' is a char array, so sizeof(keyword) is */ \
- /* strlen(keyword) plus 1 for the NUL char. */ \
- const int keyword_length = sizeof(keyword) - 1; \
- STATIC_ASSERT(keyword_length >= kMinLength); \
- STATIC_ASSERT(keyword_length <= kMaxLength); \
- if (input_length == keyword_length && \
- input[1] == keyword[1] && \
- (keyword_length <= 2 || input[2] == keyword[2]) && \
- (keyword_length <= 3 || input[3] == keyword[3]) && \
- (keyword_length <= 4 || input[4] == keyword[4]) && \
- (keyword_length <= 5 || input[5] == keyword[5]) && \
- (keyword_length <= 6 || input[6] == keyword[6]) && \
- (keyword_length <= 7 || input[7] == keyword[7]) && \
- (keyword_length <= 8 || input[8] == keyword[8]) && \
- (keyword_length <= 9 || input[9] == keyword[9])) { \
- return token; \
- } \
- }
+#define KEYWORD(keyword, token) \
+ { \
+ /* 'keyword' is a char array, so sizeof(keyword) is */ \
+ /* strlen(keyword) plus 1 for the NUL char. */ \
+ const int keyword_length = sizeof(keyword) - 1; \
+ STATIC_ASSERT(keyword_length >= kMinLength); \
+ STATIC_ASSERT(keyword_length <= kMaxLength); \
+ if (input_length == keyword_length && input[1] == keyword[1] && \
+ (keyword_length <= 2 || input[2] == keyword[2]) && \
+ (keyword_length <= 3 || input[3] == keyword[3]) && \
+ (keyword_length <= 4 || input[4] == keyword[4]) && \
+ (keyword_length <= 5 || input[5] == keyword[5]) && \
+ (keyword_length <= 6 || input[6] == keyword[6]) && \
+ (keyword_length <= 7 || input[7] == keyword[7]) && \
+ (keyword_length <= 8 || input[8] == keyword[8]) && \
+ (keyword_length <= 9 || input[9] == keyword[9])) { \
+ if (escaped) { \
+ return token == Token::FUTURE_STRICT_RESERVED_WORD \
+ ? Token::ESCAPED_STRICT_RESERVED_WORD \
+ : Token::ESCAPED_KEYWORD; \
+ } \
+ return token; \
+ } \
+ }
KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
}
return Token::IDENTIFIER;
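
The `escaped` parameter threaded through KeywordOrIdentifierToken is what lets the scanner flag keywords spelled with unicode escapes (e.g. `\u0069f` for `if`). A sketch of the classification rule, with illustrative names:

    enum Token { IDENTIFIER, FUTURE_STRICT_RESERVED_WORD,
                 ESCAPED_STRICT_RESERVED_WORD, ESCAPED_KEYWORD };

    // Once a keyword match is found, an escape anywhere in the spelling
    // demotes it to one of the ESCAPED_* tokens so the parser can reject it
    // or special-case it, instead of silently treating it as the keyword.
    Token Classify(Token keyword_token, bool escaped) {
      if (!escaped) return keyword_token;
      return keyword_token == FUTURE_STRICT_RESERVED_WORD
                 ? ESCAPED_STRICT_RESERVED_WORD
                 : ESCAPED_KEYWORD;
    }
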
@@ -1224,7 +1228,7 @@ bool Scanner::IdentifierIsFutureStrictReserved(
return true;
}
return Token::FUTURE_STRICT_RESERVED_WORD ==
- KeywordOrIdentifierToken(string->raw_data(), string->length());
+ KeywordOrIdentifierToken(string->raw_data(), string->length(), false);
}
@@ -1257,7 +1261,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
// Only a-z+: could be a keyword or identifier.
literal.Complete();
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length());
+ return KeywordOrIdentifierToken(chars.start(), chars.length(), false);
}
HandleLeadSurrogate();
@@ -1284,7 +1288,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
return Token::ILLEGAL;
}
AddLiteralChar(c);
- return ScanIdentifierSuffix(&literal);
+ return ScanIdentifierSuffix(&literal, true);
} else {
uc32 first_char = c0_;
Advance();
@@ -1300,24 +1304,26 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
continue;
}
// Fallthrough if no longer able to complete keyword.
- return ScanIdentifierSuffix(&literal);
+ return ScanIdentifierSuffix(&literal, false);
}
literal.Complete();
if (next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length());
+ return KeywordOrIdentifierToken(chars.start(), chars.length(), false);
}
return Token::IDENTIFIER;
}
-Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal,
+ bool escaped) {
// Scan the rest of the identifier characters.
while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
+ escaped = true;
// Only allow legal identifier part characters.
if (c < 0 ||
c == '\\' ||
@@ -1332,6 +1338,10 @@ Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
}
literal->Complete();
+ if (escaped && next_.literal_chars->is_one_byte()) {
+ Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
+ return KeywordOrIdentifierToken(chars.start(), chars.length(), true);
+ }
return Token::IDENTIFIER;
}
@@ -1383,20 +1393,41 @@ bool Scanner::ScanRegExpPattern(bool seen_equal) {
}
-bool Scanner::ScanRegExpFlags() {
+Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
LiteralScope literal(this);
+ int flags = 0;
while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
- if (c0_ != '\\') {
- AddLiteralCharAdvance();
- } else {
- return false;
+ RegExp::Flags flag = RegExp::kNone;
+ switch (c0_) {
+ case 'g':
+ flag = RegExp::kGlobal;
+ break;
+ case 'i':
+ flag = RegExp::kIgnoreCase;
+ break;
+ case 'm':
+ flag = RegExp::kMultiline;
+ break;
+ case 'u':
+ if (!FLAG_harmony_unicode_regexps) return Nothing<RegExp::Flags>();
+ flag = RegExp::kUnicode;
+ break;
+ case 'y':
+ if (!FLAG_harmony_regexps) return Nothing<RegExp::Flags>();
+ flag = RegExp::kSticky;
+ break;
+ default:
+ return Nothing<RegExp::Flags>();
}
+ if (flags & flag) return Nothing<RegExp::Flags>();
+ AddLiteralCharAdvance();
+ flags |= flag;
}
literal.Complete();
- next_.location.end_pos = source_pos() - 1;
- return true;
+ next_.location.end_pos = source_pos();
+ return Just(RegExp::Flags(flags));
}
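
ScanRegExpFlags now validates and accumulates the flag bits in a single pass, rejecting unknown and duplicated flags at scan time rather than deferring to the parser. A self-contained sketch of the same loop, assuming C++17's std::optional in place of V8's Maybe<T>:

    #include <optional>

    enum Flags { kGlobal = 1, kIgnoreCase = 2, kMultiline = 4,
                 kSticky = 8, kUnicode = 16 };

    std::optional<int> ScanFlags(const char* p) {
      int flags = 0;
      for (; *p != '\0'; ++p) {
        int flag = 0;
        switch (*p) {
          case 'g': flag = kGlobal; break;
          case 'i': flag = kIgnoreCase; break;
          case 'm': flag = kMultiline; break;
          case 'y': flag = kSticky; break;
          case 'u': flag = kUnicode; break;
          default: return std::nullopt;         // unknown flag
        }
        if (flags & flag) return std::nullopt;  // duplicate, e.g. /x/gg
        flags |= flag;
      }
      return flags;
    }

The end-position change in the same hunk (source_pos() instead of source_pos() - 1) also corrects an off-by-one in the source location reported for the flags.
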
diff --git a/chromium/v8/src/scanner.h b/chromium/v8/src/parsing/scanner.h
index a86ed07ab9c..1d0aba0611a 100644
--- a/chromium/v8/src/scanner.h
+++ b/chromium/v8/src/parsing/scanner.h
@@ -4,8 +4,8 @@
// Features shared by parsing and pre-parsing scanners.
-#ifndef V8_SCANNER_H_
-#define V8_SCANNER_H_
+#ifndef V8_PARSING_SCANNER_H_
+#define V8_PARSING_SCANNER_H_
#include "src/allocation.h"
#include "src/base/logging.h"
@@ -13,7 +13,7 @@
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/token.h"
+#include "src/parsing/token.h"
#include "src/unicode.h"
#include "src/unicode-decoder.h"
#include "src/utils.h"
@@ -28,17 +28,6 @@ class ParserRecorder;
class UnicodeCache;
-// Returns the value (0 .. 15) of a hexadecimal character c.
-// If c is not a legal hexadecimal character, returns a value < 0.
-inline int HexValue(uc32 c) {
- c -= '0';
- if (static_cast<unsigned>(c) <= 9) return c;
- c = (c | 0x20) - ('a' - '0'); // detect 0x11..0x16 and 0x31..0x36.
- if (static_cast<unsigned>(c) <= 5) return c + 10;
- return -1;
-}
-
-
// ---------------------------------------------------------------------
// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
// A code unit is a 16 bit value representing either a 16 bit code point
@@ -166,11 +155,7 @@ class LiteralBuffer {
public:
LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
- ~LiteralBuffer() {
- if (backing_store_.length() > 0) {
- backing_store_.Dispose();
- }
- }
+ ~LiteralBuffer() { backing_store_.Dispose(); }
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
@@ -377,13 +362,10 @@ class Scanner {
Location peek_location() const { return next_.location; }
bool literal_contains_escapes() const {
- Location location = current_.location;
- int source_length = (location.end_pos - location.beg_pos);
- if (current_.token == Token::STRING) {
- // Subtract delimiters.
- source_length -= 2;
- }
- return current_.literal_chars->length() != source_length;
+ return LiteralContainsEscapes(current_);
+ }
+ bool next_literal_contains_escapes() const {
+ return LiteralContainsEscapes(next_);
}
bool is_literal_contextual_keyword(Vector<const char> keyword) {
DCHECK_NOT_NULL(current_.literal_chars);
@@ -452,9 +434,8 @@ class Scanner {
// Scans the input as a regular expression pattern, previous
// character(s) must be /(=). Returns true if a pattern is scanned.
bool ScanRegExpPattern(bool seen_equal);
- // Returns true if regexp flags are scanned (always since flags can
- // be empty).
- bool ScanRegExpFlags();
+ // Scans the input as regular expression flags. Returns the flags on success.
+ Maybe<RegExp::Flags> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateStart();
@@ -669,7 +650,7 @@ class Scanner {
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
- Token::Value ScanIdentifierSuffix(LiteralScope* literal);
+ Token::Value ScanIdentifierSuffix(LiteralScope* literal, bool escaped);
Token::Value ScanString();
@@ -693,6 +674,16 @@ class Scanner {
return static_cast<int>(source_->pos()) - kCharacterLookaheadBufferSize;
}
+ static bool LiteralContainsEscapes(const TokenDesc& token) {
+ Location location = token.location;
+ int source_length = (location.end_pos - location.beg_pos);
+ if (token.token == Token::STRING) {
+ // Subtract delimiters.
+ source_length -= 2;
+ }
+ return token.literal_chars->length() != source_length;
+ }
+
UnicodeCache* unicode_cache_;
// Buffers collecting literal strings, numbers, etc.
@@ -763,6 +754,7 @@ class Scanner {
bool has_multiline_comment_before_next_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_SCANNER_H_
+#endif // V8_PARSING_SCANNER_H_
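
LiteralContainsEscapes, factored out above so it can be asked of next_ as well as current_, detects escapes by a length comparison rather than by rescanning. A sketch of the idea, with illustrative names:

    #include <cstddef>

    // If the cooked literal is shorter than its source span (minus the two
    // quote characters for strings), some characters must have been written
    // as escape sequences.
    bool ContainsEscapes(size_t beg_pos, size_t end_pos,
                         size_t literal_length, bool is_string) {
      size_t source_length = end_pos - beg_pos;
      if (is_string) source_length -= 2;  // subtract the delimiters
      return literal_length != source_length;
    }
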
diff --git a/chromium/v8/src/token.cc b/chromium/v8/src/parsing/token.cc
index 73e883f4bdf..7edfefa8210 100644
--- a/chromium/v8/src/token.cc
+++ b/chromium/v8/src/parsing/token.cc
@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include <stdint.h>
-#include "src/token.h"
+
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/token.h b/chromium/v8/src/parsing/token.h
index db9092d21b2..fee1f7e85aa 100644
--- a/chromium/v8/src/token.h
+++ b/chromium/v8/src/parsing/token.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TOKEN_H_
-#define V8_TOKEN_H_
+#ifndef V8_PARSING_TOKEN_H_
+#define V8_PARSING_TOKEN_H_
#include "src/base/logging.h"
#include "src/globals.h"
@@ -50,10 +50,7 @@ namespace internal {
/* IsAssignmentOp() and Assignment::is_compound() relies on */ \
/* this block of enum values being contiguous and sorted in the */ \
/* same order! */ \
- T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
- T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
- T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */ \
+ T(INIT, "=init", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
T(ASSIGN_BIT_OR, "|=", 2) \
T(ASSIGN_BIT_XOR, "^=", 2) \
@@ -163,6 +160,8 @@ namespace internal {
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
+ T(ESCAPED_KEYWORD, NULL, 0) \
+ T(ESCAPED_STRICT_RESERVED_WORD, NULL, 0) \
\
/* Scanner-internal use only. */ \
T(WHITESPACE, NULL, 0) \
@@ -200,6 +199,7 @@ class Token {
switch (tok) {
case IDENTIFIER:
return true;
+ case ESCAPED_STRICT_RESERVED_WORD:
case FUTURE_STRICT_RESERVED_WORD:
case LET:
case STATIC:
@@ -214,7 +214,7 @@ class Token {
}
static bool IsAssignmentOp(Value tok) {
- return INIT_VAR <= tok && tok <= ASSIGN_MOD;
+ return INIT <= tok && tok <= ASSIGN_MOD;
}
static bool IsBinaryOp(Value op) {
@@ -318,6 +318,7 @@ class Token {
static const char token_type[NUM_TOKENS];
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_TOKEN_H_
+#endif // V8_PARSING_TOKEN_H_
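
Collapsing the four INIT_* tokens into a single INIT also preserves the contiguous-range trick that IsAssignmentOp relies on. In sketch form, assuming the enum order shown in the hunk:

    enum Value { INIT, ASSIGN, ASSIGN_BIT_OR, /* ... */ ASSIGN_MOD, LAST };

    // Because the assignment operators are declared consecutively, membership
    // is two integer comparisons instead of a switch.
    bool IsAssignmentOp(Value tok) { return INIT <= tok && tok <= ASSIGN_MOD; }
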
diff --git a/chromium/v8/src/ppc/assembler-ppc-inl.h b/chromium/v8/src/ppc/assembler-ppc-inl.h
index b1e28257519..b384d3f4f96 100644
--- a/chromium/v8/src/ppc/assembler-ppc-inl.h
+++ b/chromium/v8/src/ppc/assembler-ppc-inl.h
@@ -60,7 +60,7 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(pc_, host_, target + delta,
+ Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
SKIP_ICACHE_FLUSH);
}
}
@@ -136,7 +136,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -196,8 +197,9 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(
- pc_, host_, reinterpret_cast<Address>(target), icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -286,7 +288,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + kCodeAgingTargetDelta, host_,
+ Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(),
icache_flush_mode);
}
@@ -300,7 +302,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -319,9 +321,10 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(pc_, host_, NULL, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
+ SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -637,16 +640,16 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
Code* code = NULL;
- set_target_address_at(pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
@@ -654,8 +657,8 @@ void Assembler::deserialization_set_target_internal_reference_at(
// This code assumes the FIXED_SEQUENCE of lis/ori
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
@@ -698,7 +701,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, 5 * kInstrSize);
+ Assembler::FlushICache(isolate, p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -713,14 +716,14 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, 2 * kInstrSize);
+ Assembler::FlushICache(isolate, p, 2 * kInstrSize);
}
#endif
return;
}
UNREACHABLE();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_ASSEMBLER_PPC_INL_H_
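
The recurring mechanical change in this file — an Isolate* threaded into every set_target_address_at and FlushICache call — exists so the instruction-cache flush can reach per-isolate state (in simulator builds the simulated i-cache lives on the isolate) instead of a process-global. A sketch of the shape, with stand-in types rather than V8's:

    #include <cstdint>
    #include <cstring>

    struct Isolate { /* owns the (simulated) i-cache state */ };
    using Address = uint8_t*;

    // Stand-in for the isolate-aware flush; the real one consults the isolate
    // in simulator builds and flushes the hardware cache otherwise.
    void FlushICache(Isolate*, void*, size_t) { /* elided */ }

    void SetTargetAddressAt(Isolate* isolate, Address pc, Address target) {
      std::memcpy(pc, &target, sizeof(target));  // patch the mov sequence
      FlushICache(isolate, pc, sizeof(target));  // flush with the isolate in hand
    }
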
diff --git a/chromium/v8/src/ppc/assembler-ppc.cc b/chromium/v8/src/ppc/assembler-ppc.cc
index 6bbb53c4ba5..147fb59aaea 100644
--- a/chromium/v8/src/ppc/assembler-ppc.cc
+++ b/chromium/v8/src/ppc/assembler-ppc.cc
@@ -128,16 +128,6 @@ Register ToRegister(int num) {
}
-const char* DoubleRegister::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
- "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
- "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
- return names[index];
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -286,14 +276,14 @@ bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
Register Assembler::GetRA(Instr instr) {
Register reg;
- reg.code_ = Instruction::RAValue(instr);
+ reg.reg_code = Instruction::RAValue(instr);
return reg;
}
Register Assembler::GetRB(Instr instr) {
Register reg;
- reg.code_ = Instruction::RBValue(instr);
+ reg.reg_code = Instruction::RBValue(instr);
return reg;
}
@@ -463,7 +453,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_mov32(dst, offset);
break;
@@ -474,7 +464,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code((operands >> 21) & 0x1f);
Register base = Register::from_code((operands >> 16) & 0x1f);
int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_add32(dst, base, offset);
break;
@@ -482,7 +472,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundMovLabelAddrOpcode: {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
kMovInstructionsNoConstantPool,
CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
@@ -490,7 +480,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
break;
}
case kUnboundJumpTableEntryOpcode: {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
patcher.masm()->dp(target_pos);
@@ -747,6 +737,11 @@ void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
}
+void Assembler::popcntw(Register ra, Register rs) {
+ emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
+}
+
+
void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
@@ -1481,6 +1476,11 @@ void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
}
+void Assembler::popcntd(Register ra, Register rs) {
+ emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
+}
+
+
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
@@ -1844,7 +1844,10 @@ void Assembler::mtxer(Register src) {
}
-void Assembler::mcrfs(int bf, int bfa) {
+void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bf = cr.code();
+ int bfa = bit / CRWIDTH;
emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}
@@ -2163,6 +2166,24 @@ void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
@@ -2175,6 +2196,18 @@ void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc) {
@@ -2189,6 +2222,20 @@ void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bt = bit;
+ emit(EXT4 | MTFSB0 | bt * B21 | rc);
+}
+
+
+void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bt = bit;
+ emit(EXT4 | MTFSB1 | bt * B21 | rc);
+}
+
+
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
@@ -2293,6 +2340,7 @@ void Assembler::GrowBuffer(int needed) {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -2371,7 +2419,7 @@ void Assembler::EmitRelocations() {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
Code* code = NULL;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2381,7 +2429,8 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(pc, code, buffer_ + pos, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate(), pc, code, buffer_ + pos,
+ SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
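
The new popcntw/popcntd emitters follow the fixed-field X-form encoding used throughout this file. A sketch of the bit layout, assuming the Power ISA extended opcodes 378 (popcntw) and 506 (popcntd) under primary opcode 31 — worth checking against constants-ppc.h:

    #include <cstdint>

    // X-form: primary opcode | RS << 21 | RA << 16 | extended opcode << 1.
    uint32_t EncodePopcnt(unsigned ra, unsigned rs, bool dword) {
      const uint32_t kExt2 = 31u << 26;                 // assumed primary opcode
      const uint32_t kXo = (dword ? 506u : 378u) << 1;  // assumed POPCNTD/POPCNTW
      return kExt2 | rs << 21 | ra << 16 | kXo;
    }
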
diff --git a/chromium/v8/src/ppc/assembler-ppc.h b/chromium/v8/src/ppc/assembler-ppc.h
index a1c08ad0ea3..e84d6952519 100644
--- a/chromium/v8/src/ppc/assembler-ppc.h
+++ b/chromium/v8/src/ppc/assembler-ppc.h
@@ -44,7 +44,6 @@
#include <vector>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/ppc/constants-ppc.h"
#define ABI_USES_FUNCTION_DESCRIPTORS \
@@ -57,13 +56,16 @@
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
(!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
-#define ABI_TOC_ADDRESSABILITY_VIA_IP \
- (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#define ABI_CALL_VIA_IP 1
+#else
+#define ABI_CALL_VIA_IP 0
+#endif
#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER kRegister_r2_Code
+#define ABI_TOC_REGISTER Register::kCode_r2
#else
-#define ABI_TOC_REGISTER kRegister_r13_Code
+#define ABI_TOC_REGISTER Register::kCode_r13
#endif
#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
@@ -71,6 +73,40 @@
namespace v8 {
namespace internal {
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
+
+#if V8_EMBEDDED_CONSTANT_POOL
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r30)
+#else
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(r3) V(r4) V(r5) V(r6) V(r7) \
+ V(r8) V(r9) V(r10) V(r14) V(r15) \
+ V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
+ V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
+#endif
+
+#define DOUBLE_REGISTERS(V) \
+ V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
+ V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
+ V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+// clang-format on
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -92,310 +128,112 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
-// Core register
struct Register {
- static const int kNumRegisters = 32;
- static const int kSizeInBytes = kPointerSize;
-
-#if V8_TARGET_LITTLE_ENDIAN
- static const int kMantissaOffset = 0;
- static const int kExponentOffset = 4;
-#else
- static const int kMantissaOffset = 4;
- static const int kExponentOffset = 0;
-#endif
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static const int kAllocatableLowRangeBegin = 3;
- static const int kAllocatableLowRangeEnd = 10;
- static const int kAllocatableHighRangeBegin = 14;
- static const int kAllocatableHighRangeEnd =
- FLAG_enable_embedded_constant_pool ? 27 : 28;
- static const int kAllocatableContext = 30;
-
- static const int kNumAllocatableLow =
- kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
- static const int kNumAllocatableHigh =
- kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
- static const int kMaxNumAllocatableRegisters =
- kNumAllocatableLow + kNumAllocatableHigh + 1; // cp
-
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- static int ToAllocationIndex(Register reg) {
- int index;
- int code = reg.code();
- if (code == kAllocatableContext) {
- // Context is the last index
- index = NumAllocatableRegisters() - 1;
- } else if (code <= kAllocatableLowRangeEnd) {
- // low range
- index = code - kAllocatableLowRangeBegin;
- } else {
- // high range
- index = code - kAllocatableHighRangeBegin + kNumAllocatableLow;
- }
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return index;
- }
+ static const int kNumRegisters = Code::kAfterLast;
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- // Last index is always the 'cp' register.
- if (index == kMaxNumAllocatableRegisters - 1) {
- return from_code(kAllocatableContext);
- }
- return (index < kNumAllocatableLow)
- ? from_code(index + kAllocatableLowRangeBegin)
- : from_code(index - kNumAllocatableLow +
- kAllocatableHighRangeBegin);
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- "r8",
- "r9",
- "r10",
- "r14",
- "r15",
- "r16",
- "r17",
- "r18",
- "r19",
- "r20",
- "r21",
- "r22",
- "r23",
- "r24",
- "r25",
- "r26",
- "r27",
- "r28",
- "cp",
- };
- if (FLAG_enable_embedded_constant_pool &&
- (index == kMaxNumAllocatableRegisters - 2)) {
- return names[index + 1];
- }
- return names[index];
- }
+#define REGISTER_COUNT(R) 1 +
+ static const int kNumAllocatable =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
+#undef REGISTER_COUNT
+#define REGISTER_BIT(R) 1 << kCode_##R |
static const RegList kAllocatable =
- 1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
- 1 << 14 | 1 << 15 | 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 | 1 << 20 |
- 1 << 21 | 1 << 22 | 1 << 23 | 1 << 24 | 1 << 25 | 1 << 26 | 1 << 27 |
- (FLAG_enable_embedded_constant_pool ? 0 : 1 << 28) | 1 << 30;
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT)0;
+#undef REGISTER_BIT
static Register from_code(int code) {
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
Register r = {code};
return r;
}
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
-
void set_code(int code) {
- code_ = code;
+ reg_code = code;
DCHECK(is_valid());
}
+#if V8_TARGET_LITTLE_ENDIAN
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#else
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#endif
+
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0; // general scratch
-const int kRegister_sp_Code = 1; // stack pointer
-const int kRegister_r2_Code = 2; // special on PowerPC
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11; // lithium scratch
-const int kRegister_ip_Code = 12; // ip (general scratch)
-const int kRegister_r13_Code = 13; // special on PowerPC
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-
-const int kRegister_r16_Code = 16;
-const int kRegister_r17_Code = 17;
-const int kRegister_r18_Code = 18;
-const int kRegister_r19_Code = 19;
-const int kRegister_r20_Code = 20;
-const int kRegister_r21_Code = 21;
-const int kRegister_r22_Code = 22;
-const int kRegister_r23_Code = 23;
-const int kRegister_r24_Code = 24;
-const int kRegister_r25_Code = 25;
-const int kRegister_r26_Code = 26;
-const int kRegister_r27_Code = 27;
-const int kRegister_r28_Code = 28; // constant pool pointer
-const int kRegister_r29_Code = 29; // roots array pointer
-const int kRegister_r30_Code = 30; // context pointer
-const int kRegister_fp_Code = 31; // frame pointer
-
-const Register no_reg = {kRegister_no_reg_Code};
-
-const Register r0 = {kRegister_r0_Code};
-const Register sp = {kRegister_sp_Code};
-const Register r2 = {kRegister_r2_Code};
-const Register r3 = {kRegister_r3_Code};
-const Register r4 = {kRegister_r4_Code};
-const Register r5 = {kRegister_r5_Code};
-const Register r6 = {kRegister_r6_Code};
-const Register r7 = {kRegister_r7_Code};
-const Register r8 = {kRegister_r8_Code};
-const Register r9 = {kRegister_r9_Code};
-const Register r10 = {kRegister_r10_Code};
-const Register r11 = {kRegister_r11_Code};
-const Register ip = {kRegister_ip_Code};
-const Register r13 = {kRegister_r13_Code};
-const Register r14 = {kRegister_r14_Code};
-const Register r15 = {kRegister_r15_Code};
-
-const Register r16 = {kRegister_r16_Code};
-const Register r17 = {kRegister_r17_Code};
-const Register r18 = {kRegister_r18_Code};
-const Register r19 = {kRegister_r19_Code};
-const Register r20 = {kRegister_r20_Code};
-const Register r21 = {kRegister_r21_Code};
-const Register r22 = {kRegister_r22_Code};
-const Register r23 = {kRegister_r23_Code};
-const Register r24 = {kRegister_r24_Code};
-const Register r25 = {kRegister_r25_Code};
-const Register r26 = {kRegister_r26_Code};
-const Register r27 = {kRegister_r27_Code};
-const Register r28 = {kRegister_r28_Code};
-const Register r29 = {kRegister_r29_Code};
-const Register r30 = {kRegister_r30_Code};
-const Register fp = {kRegister_fp_Code};
-
-// Give alias names to registers
-const Register cp = {kRegister_r30_Code}; // JavaScript context pointer
-const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer.
-const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
+
+// Aliases
+const Register kLithiumScratch = r11; // lithium scratch.
+const Register kConstantPoolRegister = r28; // Constant pool.
+const Register kRootRegister = r29; // Roots array pointer.
+const Register cp = r30; // JavaScript context pointer.
// Double word FP register.
struct DoubleRegister {
- static const int kNumRegisters = 32;
- static const int kMaxNumRegisters = kNumRegisters;
- static const int kNumVolatileRegisters = 14; // d0-d13
- static const int kSizeInBytes = 8;
-
- static const int kAllocatableLowRangeBegin = 1;
- static const int kAllocatableLowRangeEnd = 12;
- static const int kAllocatableHighRangeBegin = 15;
- static const int kAllocatableHighRangeEnd = 31;
-
- static const int kNumAllocatableLow =
- kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
- static const int kNumAllocatableHigh =
- kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
- static const int kMaxNumAllocatableRegisters =
- kNumAllocatableLow + kNumAllocatableHigh;
- static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
-
- // TODO(turbofan)
- inline static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
- static int ToAllocationIndex(DoubleRegister reg) {
- int code = reg.code();
- int index = (code <= kAllocatableLowRangeEnd)
- ? code - kAllocatableLowRangeBegin
- : code - kAllocatableHighRangeBegin + kNumAllocatableLow;
- DCHECK(index < kMaxNumAllocatableRegisters);
- return index;
- }
-
- static DoubleRegister FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index < kNumAllocatableLow)
- ? from_code(index + kAllocatableLowRangeBegin)
- : from_code(index - kNumAllocatableLow +
- kAllocatableHighRangeBegin);
- }
-
- static const char* AllocationIndexToString(int index);
-
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
- return r;
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return code_ == reg.code_; }
+ static const int kNumRegisters = Code::kAfterLast;
+ static const int kMaxNumRegisters = kNumRegisters;
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
- void split_code(int* vm, int* m) const {
- DCHECK(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister r = {code};
+ return r;
}
- int code_;
+ int reg_code;
};
-
-const DoubleRegister no_dreg = {-1};
-const DoubleRegister d0 = {0};
-const DoubleRegister d1 = {1};
-const DoubleRegister d2 = {2};
-const DoubleRegister d3 = {3};
-const DoubleRegister d4 = {4};
-const DoubleRegister d5 = {5};
-const DoubleRegister d6 = {6};
-const DoubleRegister d7 = {7};
-const DoubleRegister d8 = {8};
-const DoubleRegister d9 = {9};
-const DoubleRegister d10 = {10};
-const DoubleRegister d11 = {11};
-const DoubleRegister d12 = {12};
-const DoubleRegister d13 = {13};
-const DoubleRegister d14 = {14};
-const DoubleRegister d15 = {15};
-const DoubleRegister d16 = {16};
-const DoubleRegister d17 = {17};
-const DoubleRegister d18 = {18};
-const DoubleRegister d19 = {19};
-const DoubleRegister d20 = {20};
-const DoubleRegister d21 = {21};
-const DoubleRegister d22 = {22};
-const DoubleRegister d23 = {23};
-const DoubleRegister d24 = {24};
-const DoubleRegister d25 = {25};
-const DoubleRegister d26 = {26};
-const DoubleRegister d27 = {27};
-const DoubleRegister d28 = {28};
-const DoubleRegister d29 = {29};
-const DoubleRegister d30 = {30};
-const DoubleRegister d31 = {31};
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_dreg = {DoubleRegister::kCode_no_reg};
// Aliases for double registers. Defined using #define instead of
// "static const DoubleRegister&" because Clang complains otherwise when a
@@ -409,19 +247,19 @@ Register ToRegister(int num);
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+ bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
@@ -622,17 +460,18 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
- Address pc, Code* code, Address target,
+ Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -646,11 +485,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -1072,6 +912,7 @@ class Assembler : public AssemblerBase {
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
+ void popcntd(Register dst, Register src);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
@@ -1101,6 +942,7 @@ class Assembler : public AssemblerBase {
void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC);
+ void popcntw(Register dst, Register src);
void subi(Register dst, Register src1, const Operand& src2);
@@ -1145,7 +987,7 @@ class Assembler : public AssemblerBase {
void mtlr(Register src);
void mtctr(Register src);
void mtxer(Register src);
- void mcrfs(int bf, int bfa);
+ void mcrfs(CRegister cr, FPSCRBit bit);
void mfcr(Register dst);
#if V8_TARGET_ARCH_PPC64
void mffprd(Register dst, DoubleRegister src);
@@ -1213,15 +1055,27 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fcfids(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fctidz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fctidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fneg(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
+ void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
@@ -1325,7 +1179,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1471,7 +1325,10 @@ class Assembler : public AssemblerBase {
}
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
- void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; }
+ void EndBlockTrampolinePool() {
+ int count = --trampoline_pool_blocked_nesting_;
+ if (count == 0) CheckTrampolinePoolQuick();
+ }
bool is_trampoline_pool_blocked() const {
return trampoline_pool_blocked_nesting_ > 0;
}
@@ -1612,7 +1469,7 @@ class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_ASSEMBLER_PPC_H_
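
The register rewrite above replaces hand-maintained code constants and allocation-index tables with X-macro lists, so the enum, the register count, the allocatable bit mask, and the named constants are all generated from one source of truth. A compilable miniature of the pattern (list shortened for illustration):

    // One list drives everything; add a register here and every derived
    // definition updates itself.
    #define GENERAL_REGISTERS(V) V(r0) V(sp) V(r2) V(r3)

    struct Register {
      enum Code {
    #define REGISTER_CODE(R) kCode_##R,
        GENERAL_REGISTERS(REGISTER_CODE)
    #undef REGISTER_CODE
        kAfterLast,
        kCode_no_reg = -1
      };
      static const int kNumRegisters = Code::kAfterLast;
      int reg_code;
    };

    #define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
    GENERAL_REGISTERS(DECLARE_REGISTER)
    #undef DECLARE_REGISTER
    const Register no_reg = {Register::kCode_no_reg};
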
diff --git a/chromium/v8/src/ppc/builtins-ppc.cc b/chromium/v8/src/ppc/builtins-ppc.cc
index e08c865e4e9..0476cd27e1d 100644
--- a/chromium/v8/src/ppc/builtins-ppc.cc
+++ b/chromium/v8/src/ppc/builtins-ppc.cc
@@ -21,11 +21,11 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r3 : number of arguments excluding receiver
- // -- r4 : called function (only guaranteed when
- // extra_args requires it)
+ // -- r4 : target
+ // -- r6 : new.target
// -- sp[0] : last argument
// -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(r4);
@@ -34,21 +34,31 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r4);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(r4);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(r6);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(r4, r6);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
- // JumpToExternalReference expects r0 to contain the number of arguments
+ // JumpToExternalReference expects r3 to contain the number of arguments
// including the receiver and the extra arguments.
__ addi(r3, r3, Operand(num_extra_args + 1));
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
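
As the hunk shows, the old NEEDS_CALLED_FUNCTION boolean becomes a four-way enum. A rough stand-alone illustration of the resulting argument-count bookkeeping (the enumerators follow the diff; everything else is invented):

    #include <cstdio>

    enum class BuiltinExtraArguments { kNone, kTarget, kNewTarget, kTargetAndNewTarget };

    int ExtraArgCount(BuiltinExtraArguments extra) {
      switch (extra) {
        case BuiltinExtraArguments::kTarget:
        case BuiltinExtraArguments::kNewTarget:
          return 1;
        case BuiltinExtraArguments::kTargetAndNewTarget:
          return 2;
        case BuiltinExtraArguments::kNone:
          return 0;
      }
      return 0;
    }

    int main() {
      int argc = 2;  // arguments excluding the receiver
      // r3 ends up holding argc + extra arguments + 1 for the receiver.
      std::printf("%d\n",
                  argc + ExtraArgCount(BuiltinExtraArguments::kTargetAndNewTarget) + 1);
      return 0;
    }
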
@@ -56,29 +66,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ LoadP(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ LoadP(result,
- MemOperand(result, Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ // Load the InternalArray function from the current native context.
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ LoadP(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ LoadP(
- result,
- MemOperand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ // Load the Array function from the current native context.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -140,6 +136,110 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ Ret(1);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- r6 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r5 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r5, MemOperand(sp, r5));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r5, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r5 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(r5, &done_convert);
+ __ CompareObjectType(r5, r7, r7, HEAP_NUMBER_TYPE);
+ __ beq(&done_convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6);
+ __ mr(r3, r5);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r5, r3);
+ __ Pop(r4, r6);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r4, r6);
+ __ bne(&new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r5, r4, r6); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r5);
+ }
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
+}
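
Steps 4-6 of the stub in schematic form. The helpers below are invented stand-ins for AllocateJSValue and the Runtime::kNewObject call; only the control flow is taken from the code above:

    struct HeapObject { double value_slot; };

    HeapObject* MakeNumberWrapper(double number, bool new_target_is_constructor,
                                  HeapObject* (*inline_alloc)(double),
                                  HeapObject* (*runtime_new_object)()) {
      if (new_target_is_constructor) {
        // Fast path: allocate the JSValue wrapper inline (may fail and fall through).
        if (HeapObject* obj = inline_alloc(number)) return obj;
      }
      // Slow path: generic object creation, then store the wrapped value.
      HeapObject* obj = runtime_new_object();
      obj->value_slot = number;
      return obj;
    }
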
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -191,7 +291,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(r3);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -201,92 +301,88 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
+ // -- r6 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r3 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r5 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
__ cmpi(r3, Operand::Zero());
__ beq(&no_arguments);
__ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ LoadPUX(r3, MemOperand(sp, r3));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r5, MemOperand(sp, r5));
__ Drop(2);
__ b(&done);
__ bind(&no_arguments);
- __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ LoadRoot(r5, Heap::kempty_stringRootIndex);
__ Drop(1);
__ bind(&done);
}
- // 2. Make sure r3 is a string.
+ // 3. Make sure r5 is a string.
{
Label convert, done_convert;
- __ JumpIfSmi(r3, &convert);
- __ CompareObjectType(r3, r5, r5, FIRST_NONSTRING_TYPE);
+ __ JumpIfSmi(r5, &convert);
+ __ CompareObjectType(r5, r7, r7, FIRST_NONSTRING_TYPE);
__ blt(&done_convert);
__ bind(&convert);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
- __ push(r4);
+ __ Push(r4, r6);
+ __ mr(r3, r5);
__ CallStub(&stub);
- __ pop(r4);
+ __ mr(r5, r3);
+ __ Pop(r4, r6);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- r3 : the first argument
- // -- r4 : constructor function
- // -- lr : return address
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r4, r6);
+ __ bne(&new_object);
- Label allocate, done_allocate;
- __ mr(r5, r3);
- __ Allocate(JSValue::kSize, r3, r6, r7, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in r3.
- __ LoadGlobalFunctionInitialMap(r4, r6, r7);
- __ StoreP(r6, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ LoadSmiLiteral(r6, Smi::FromInt(JSValue::kSize));
- __ Push(r4, r5, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(r4, r5);
- }
- __ b(&done_allocate);
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r5, r4, r6); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r5);
}
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r4 : target function (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -----------------------------------
+
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ Push(r4, r4);
+ __ Push(r4, r6, r4);
__ CallRuntime(function_id, 1);
- // Restore reciever.
- __ Pop(r4);
+ // Restore target function and new target.
+ __ Pop(r4, r6);
}
@@ -324,12 +420,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
- // -- r6 : original constructor
+ // -- r6 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -342,183 +439,175 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r5, r7);
- __ SmiTag(r3);
- __ Push(r5, r3, r4, r6);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r5, Operand(debug_step_in_fp));
- __ LoadP(r5, MemOperand(r5));
- __ cmpi(r5, Operand::Zero());
- __ bne(&rt_call);
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r4, r6);
- __ bne(&rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r4: constructor function
- __ LoadP(r5,
- FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r5, &rt_call);
- __ CompareObjectType(r5, r8, r7, MAP_TYPE);
- __ bne(&rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r4: constructor function
- // r5: initial map
- __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
- __ beq(&rt_call);
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwz(r7, bit_field3);
- __ DecodeField<Map::Counter>(r11, r7);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&allocate);
- // Decrease generous allocation count.
- __ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
- __ stw(r7, bit_field3);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ bne(&allocate);
-
- __ push(r4);
-
- __ Push(r5, r4); // r4 = constructor
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(r4, r5);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r4: constructor function
- // r5: initial map
- Label rt_call_reload_new_target;
- __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
- __ Allocate(r6, r7, r8, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r4: constructor function
- // r5: initial map
- // r6: object size
- // r7: JSObject (not tagged)
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ mr(r8, r7);
- __ StoreP(r5, MemOperand(r8, JSObject::kMapOffset));
- __ StoreP(r9, MemOperand(r8, JSObject::kPropertiesOffset));
- __ StoreP(r9, MemOperand(r8, JSObject::kElementsOffset));
- __ addi(r8, r8, Operand(JSObject::kElementsOffset + kPointerSize));
-
- __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2));
- __ add(r9, r7, r9); // End of object.
-
- // Fill all the in-object properties with the appropriate filler.
- // r4: constructor function
- // r5: initial map
- // r6: object size
- // r7: JSObject (not tagged)
- // r8: First in-object property of JSObject (not tagged)
- // r9: End of object
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ lbz(
- r3,
- FieldMemOperand(
- r5, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbz(r5, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- __ sub(r3, r3, r5);
- if (FLAG_debug_code) {
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ add(r0, r8, r0);
- // r0: offset of first field after pre-allocated fields
- __ cmp(r0, r9);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- {
- Label done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&done);
- __ InitializeNFieldsWithFiller(r8, r3, r10);
- __ bind(&done);
+ if (!create_implicit_receiver) {
+ __ SmiTag(r7, r3, SetRC);
+ __ Push(r5, r7);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else {
+ __ SmiTag(r3);
+ __ Push(r5, r3);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
+ __ bne(&rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // r6: new target
+ __ LoadP(r5,
+ FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r5, &rt_call);
+ __ CompareObjectType(r5, r8, r7, MAP_TYPE);
+ __ bne(&rt_call);
+
+ // Fall back to runtime if the expected base constructor and the actual
+ // base constructor differ.
+ __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r4, r8);
+ __ bne(&rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc), in which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
+ __ beq(&rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+
+ __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ // r7: JSObject (not HeapObject tagged - the actual address).
+ // r10: start of next object
+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
+ __ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
+ __ StoreP(r9, MemOperand(r7, JSObject::kElementsOffset));
+ __ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ addi(r7, r7, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r7: JSObject (tagged)
+ // r8: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwz(r3, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(r11, r3);
+ // r11: slack tracking counter
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&no_inobject_slack_tracking);
+ // Decrease generous allocation count.
+ __ Add(r3, r3, -(1 << Map::ConstructionCounter::kShift), r0);
+ __ stw(r3, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ sub(r3, r10, r3);
+ // r3: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r8, r3);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(r8, r3, r9);
+
+ // To allow truncation, fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r8, r10, r9);
+
+ // r11: slack tracking counter value before decreasing.
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ bne(&allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(r4, r6, r7, r5);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r4, r6, r7);
+
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r6: new target
+ // r7: JSObject
+ __ b(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- // To allow for truncation.
- __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
+ __ InitializeFieldsWithFiller(r8, r10, r9);
+
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r6: new target
+ // r7: JSObject
+ __ b(&allocated);
}
- __ InitializeFieldsWithFiller(r8, r9, r10);
+ // Allocate the new receiver object using the runtime call.
+ // r4: constructor function
+ // r6: new target
+ __ bind(&rt_call);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ addi(r7, r7, Operand(kHeapObjectTag));
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(r4, r6, r4, r6);
+ __ CallRuntime(Runtime::kNewObject);
+ __ mr(r7, r3);
+ __ Pop(r4, r6);
- // Continue with JSObject being successfully allocated
+ // Receiver for constructor call allocated.
+ // r4: constructor function
+ // r6: new target
// r7: JSObject
- __ b(&allocated);
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ LoadP(r6, MemOperand(sp, 0 * kPointerSize));
- }
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r3, MemOperand(sp));
+ __ SmiUntag(r3, SetRC);
- // Allocate the new receiver object using the runtime call.
- // r4: constructor function
- // r6: original constructor
- __ bind(&rt_call);
- __ Push(r4, r6);
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mr(r7, r3);
-
- // Receiver for constructor call allocated.
- // r7: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(r4, ip);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ LoadP(r6, MemOperand(sp));
-
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ Push(ip, r7, r7);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r7, r7);
+ }
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
+ // r3: number of arguments
// r4: constructor function
// r5: address of last argument (caller sp)
- // r6: number of arguments (smi-tagged)
+ // r6: new target
+ // cr0: condition indicating whether r3 is zero
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
- __ SmiUntag(r3, r6, SetRC);
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ sub(sp, sp, ip);
@@ -533,57 +622,60 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
// r3: number of arguments
// r4: constructor function
+ // r6: new target
if (is_api_function) {
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r3: result
// sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r3: result
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r3, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r3, r4, r6, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ LoadP(r3, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r3: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r3: result
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r3, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r3, r4, r6, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r3, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r3: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ LoadP(r4, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -591,104 +683,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ }
__ blr();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- r5 : allocation site or undefined
- // -- r6 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(r5, r7);
-
- // Smi-tagged arguments count.
- __ mr(r7, r3);
- __ SmiTag(r7, SetRC);
-
- // receiver is the hole.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-
- // allocation site, smi arguments count, new.target, receiver
- __ Push(r5, r7, r6, ip);
-
- // Set up pointer to last argument.
- __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // r3: number of arguments
- // r4: constructor function
- // r5: address of last argument (caller sp)
- // r7: number of arguments (smi-tagged)
- // cr0: compare against zero of arguments
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, no_args;
- __ beq(&no_args, cr0);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ mtctr(r3);
- __ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r5, ip));
- __ push(r0);
- __ bdnz(&loop);
- __ bind(&no_args);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ mov(r5, Operand(debug_step_in_fp));
- __ LoadP(r5, MemOperand(r5));
- __ and_(r0, r5, r5, SetRC);
- __ beq(&skip_step_in, cr0);
-
- __ Push(r3, r4, r4);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(r3, r4);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // r3: number of arguments
- // r4: constructor function
- ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // r3: result
- // sp[0]: number of arguments (smi-tagged)
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Get arguments count, skipping over new.target.
- __ LoadP(r4, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame.
- }
- __ SmiToPtrArrayOffset(r4, r4);
- __ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kPointerSize));
- __ blr();
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -717,7 +737,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bgt(&okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -819,6 +839,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o r4: the JS function object being called.
+// o r6: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
@@ -836,6 +857,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r4);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ push(r6);
+
+ // Push zero for bytecode array offset.
+ __ li(r3, Operand::Zero());
+ __ push(r3);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -864,7 +890,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
__ cmpl(r6, r0);
__ bge(&ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -885,21 +911,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -907,16 +919,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kStackLimitRootIndex);
__ cmp(sp, r0);
__ bge(&ok);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ subi(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ addi(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -933,6 +946,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
+ __ bkpt(0); // Does not return here.
}
@@ -956,36 +970,159 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
+ Register count, Register scratch) {
+ Label loop;
+ __ addi(index, index, Operand(kPointerSize)); // Bias up for LoadPU
+ __ mtctr(count);
+ __ bind(&loop);
+ __ LoadPU(scratch, MemOperand(index, -kPointerSize));
+ __ push(scratch);
+ __ bdnz(&loop);
}
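
A rough C++ analogue of this push loop: the source pointer is biased up one slot so each iteration can use a single load-with-update (LoadPU) that adjusts the address before loading.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void PushArgs(const std::intptr_t* index, std::size_t count,
                  std::vector<std::intptr_t>* stack) {
      const std::intptr_t* p = index + 1;  // bias up, as in addi(index, index, kPointerSize)
      while (count-- > 0) {
        --p;                    // LoadPU: update the address first, then load from it
        stack->push_back(*p);   // push(scratch)
      }
    }
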
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(r4, r4);
- // Whether to compile in a background thread.
- __ LoadRoot(
- r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r5 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r4 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Calculate number of arguments (add one for receiver).
+ __ addi(r6, r3, Operand(1));
+
+ // Push the arguments.
+ Generate_InterpreterPushArgs(masm, r5, r6, r7);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (not including receiver)
+ // -- r6 : new target
+ // -- r4 : constructor to call
+ // -- r5 : address of the first argument
+ // -----------------------------------
+
+ // Push a slot for the receiver to be constructed.
+ __ li(r0, Operand::Zero());
__ push(r0);
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(r4);
+ // Push the arguments (skip if none).
+ Label skip;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&skip);
+ Generate_InterpreterPushArgs(masm, r5, r3, r7);
+ __ bind(&skip);
+
+ // Call the constructor with r3, r4, and r6 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save accumulator register and pass the deoptimization type to
+ // the runtime system.
+ __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
+ __ Push(kInterpreterAccumulatorRegister, r4);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ addi(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ LoadP(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ LoadP(r4,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ LoadP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
+ __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
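
The tail of this helper re-enters the bytecode dispatch loop. In outline (invented types; the real code jumps through ip rather than making a C++ call):

    using Handler = void (*)();

    // lbzx (fetch the bytecode) + LoadPX (index the dispatch table) + Jump.
    void Dispatch(const unsigned char* bytecode_array, unsigned offset,
                  Handler* dispatch_table) {
      dispatch_table[bytecode_array[offset]]();
    }
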
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1004,15 +1141,16 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
+ // r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
- __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
__ Jump(ip);
@@ -1045,16 +1183,17 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
+ // r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
- __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
- __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
@@ -1088,7 +1227,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1114,7 +1253,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ push(r3);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> r9.
@@ -1154,6 +1293,111 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers registers {r7, r8, r9, r10}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = r7;
+ Register map = r8;
+ Register constructor = r9;
+ Register scratch = r10;
+
+ // If there is no signature, return the holder.
+ __ LoadP(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ __ cmpi(scratch, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ bne(&next_prototype);
+ Register type = constructor;
+ __ LoadP(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(type,
+ FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(signature, type);
+ __ beq(&receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+ __ bne(&next_prototype);
+
+ // Otherwise load the parent function template and iterate.
+ __ LoadP(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ b(&function_template_loop);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch, SetRC);
+ __ beq(receiver_check_failed, cr0);
+ // Iterate.
+ __ b(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
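
A schematic of the walk above, using an invented object model rather than V8's maps and templates: search each prototype's chain of inheriting function templates for the signature, and only continue the walk through hidden prototypes.

    struct TemplateInfo { const TemplateInfo* parent; };

    struct ObjectModel {
      const TemplateInfo* constructor_template;  // null unless the constructor is a JSFunction
      const ObjectModel* prototype;              // null at the end of the chain
      bool hidden_prototype;
    };

    bool IsCompatibleReceiver(const ObjectModel* receiver, const TemplateInfo* signature) {
      if (signature == nullptr) return true;  // no signature: every receiver passes
      for (const ObjectModel* o = receiver; ;) {
        // Walk the chain of inheriting function templates on this prototype.
        for (const TemplateInfo* t = o->constructor_template; t != nullptr; t = t->parent) {
          if (t == signature) return true;
        }
        o = o->prototype;
        // The walk only continues through hidden prototypes.
        if (o == nullptr || !o->hidden_prototype) return false;
      }
    }
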
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments excluding receiver
+ // -- r4 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+
+ // Load the FunctionTemplateInfo.
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ ShiftLeftImm(r11, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r5, MemOperand(sp, r11));
+ CompatibleReceiverCheck(masm, r5, r6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ LoadP(r7, FieldMemOperand(r6, FunctionTemplateInfo::kCallCodeOffset));
+ __ LoadP(r7, FieldMemOperand(r7, CallHandlerInfo::kFastHandlerOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver).
+ __ addi(r11, r11, Operand(kPointerSize));
+ __ add(sp, sp, r11);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1161,7 +1405,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r3);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1209,7 +1453,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ bge(&ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1220,7 +1464,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into r3 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(r3);
+ __ JumpIfSmi(r3, &receiver_not_date);
+ __ CompareObjectType(r3, r4, r5, JS_DATE_TYPE);
+ __ bne(&receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ LoadP(r3, FieldMemOperand(r3, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(r4, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ LoadP(r4, MemOperand(r4));
+ __ LoadP(ip, FieldMemOperand(r3, JSDate::kCacheStampOffset));
+ __ cmp(r4, ip);
+ __ bne(&stamp_mismatch);
+ __ LoadP(r3, FieldMemOperand(
+ r3, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ bind(&stamp_mismatch);
+ }
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, r4);
+ __ LoadSmiLiteral(r4, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
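
The fast path above hinges on a cache stamp: cached date fields stay valid only while the JSDate's stamp matches the isolate-wide one. A sketch with invented names:

    #include <cstdint>

    struct DateCacheStamp { std::int64_t stamp; };

    struct JSDateLike {
      std::int64_t cache_stamp;  // copy of the global stamp when fields were cached
      std::int64_t fields[10];   // value plus cached fields (year, month, day, ...)
    };

    std::int64_t GetDateField(const JSDateLike& d, const DateCacheStamp& cache,
                              int field_index,
                              std::int64_t (*slow_path)(const JSDateLike&, int)) {
      if (d.cache_stamp == cache.stamp) {
        return d.fields[field_index];  // fast path: cache is still valid
      }
      return slow_path(d, field_index);  // stamp mismatch: recompute in the runtime
    }
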
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into r4, argArray into r3 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r6;
+ Register scratch = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ mr(scratch, r3);
+ __ LoadP(r4, MemOperand(new_sp, 0)); // receiver
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ bind(&skip);
+ __ mr(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- r3 : argArray
+ // -- r4 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(r4, &receiver_not_callable);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&receiver_not_callable, cr0);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(r3, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ li(r3, Operand::Zero());
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
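
Step 1 of the builtin rewrites the stack in place. A host-side sketch of that shuffle, with invented types (a plain int stands in for tagged values):

    #include <cstddef>
    #include <vector>

    constexpr int kUndefined = -1;  // stand-in for the undefined value

    struct ApplyInputs { int receiver; int this_arg; int arg_array; };

    // Collapse [receiver, thisArg?, argArray?] to just [thisArg], returning the
    // values needed by the subsequent Apply/Call; argc excludes the receiver.
    ApplyInputs ShuffleForApply(std::vector<int>* stack, int argc) {
      ApplyInputs in{kUndefined, kUndefined, kUndefined};
      std::size_t base = stack->size() - 1 - argc;  // receiver slot (deepest)
      in.receiver = (*stack)[base];
      if (argc >= 1) in.this_arg = (*stack)[base + 1];
      if (argc >= 2) in.arg_array = (*stack)[base + 2];
      stack->resize(base);            // drop all arguments and the receiver
      stack->push_back(in.this_arg);  // thisArg becomes the new receiver slot
      return in;
    }
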
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r3: actual number of arguments
{
@@ -1265,185 +1629,144 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ LoadP(key, MemOperand(fp, indexOffset));
- __ b(&entry);
- __ bind(&loop);
- __ LoadP(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- __ LoadP(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ push(r3);
-
- // Update the index on the stack and in register key.
- __ LoadP(key, MemOperand(fp, indexOffset));
- __ AddSmiLiteral(key, key, Smi::FromInt(1), r0);
- __ StoreP(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ LoadP(r0, MemOperand(fp, limitOffset));
- __ cmp(key, r0);
- __ bne(&loop);
-
- // On exit, the pushed arguments count is in r3, untagged
- __ SmiUntag(r3, key);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(r4);
-
- __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ LoadP(r4, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ Push(r3, r4);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r6;
+ Register scratch = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mr(scratch, r4);
+ __ mr(r3, r4);
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ beq(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
+ __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ bind(&skip);
+ __ mr(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ li(r4, Operand::Zero());
- __ LoadP(r5, MemOperand(fp, kReceiverOffset));
- __ Push(r3, r4, r5); // limit, initial index and receiver.
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r4 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(r4, &target_not_callable);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&target_not_callable, cr0);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ addi(sp, sp, Operand(kStackSize * kPointerSize));
- __ blr();
}
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // new.target into r6 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(r4);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bne(&validate_arguments);
- __ LoadP(r3, MemOperand(fp, kFunctionOffset));
- __ StoreP(r3, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(r3);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ li(r4, Operand::Zero());
- __ Push(r3, r4); // limit and initial index.
- // Push the constructor function as callee
- __ LoadP(r3, MemOperand(fp, kFunctionOffset));
- __ push(r3);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ LoadP(r7, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mr(r3, r4);
+ __ mr(r6, r4);
+ __ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ mr(r6, r4); // new.target defaults to target
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
+ __ bind(&skip);
+ __ mr(sp, new_sp);
}
- __ addi(sp, sp, Operand(kStackSize * kPointerSize));
- __ blr();
-}
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r6 : new.target
+ // -- r4 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(r4, &target_not_constructor);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsConstructor, r0);
+ __ beq(&target_not_constructor, cr0);
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(r6, &new_target_not_constructor);
+ __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsConstructor, r0);
+ __ beq(&new_target_not_constructor, cr0);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ StoreP(r6, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
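
[editor's note] Note the defaulting subtlety above: new.target (r6) is first seeded with target and only overwritten when a third argument is present, and both values must then pass the constructor check. A toy sketch of those Reflect.construct operand rules (illustrative types, not V8 code):

    #include <cstddef>
    #include <stdexcept>

    struct Value { const char* tag; bool is_constructor; };  // toy stand-in
    static const Value kUndefined{"undefined", false};

    struct ConstructArgs { Value target, arguments_list, new_target; };

    ConstructArgs UnpackReflectConstruct(const Value* args, size_t argc) {
      ConstructArgs c{kUndefined, kUndefined, kUndefined};
      if (argc >= 1) { c.target = args[0]; c.new_target = args[0]; }  // default
      if (argc >= 2) c.arguments_list = args[1];
      if (argc >= 3) c.new_target = args[2];  // explicit new.target wins
      if (!c.target.is_constructor || !c.new_target.is_constructor)
        throw std::runtime_error("TypeError: not a constructor");
      return c;
    }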
@@ -1453,6 +1776,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- r3 : actual number of arguments
// -- r4 : function (passed through to callee)
// -- r5 : expected number of arguments
+ // -- r6 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1499,71 +1823,205 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r4 : target
+ // -- r6 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(r3, &create_runtime);
+
+ // Load the map of argumentsList into r5.
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+
+ // Load native context into r7.
+ __ LoadP(r7, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ LoadP(ip, ContextMemOperand(r7, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r5);
+ __ beq(&create_arguments);
+ __ LoadP(ip, ContextMemOperand(r7, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r5);
+ __ beq(&create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(r5, ip, JS_ARRAY_TYPE);
+ __ beq(&create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6, r3);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(r4, r6);
+ __ LoadP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ SmiUntag(r5);
+ }
+ __ b(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ LoadP(r5, FieldMemOperand(
+ r3, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
+ __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
+ __ cmp(r5, ip);
+ __ bne(&create_runtime);
+ __ SmiUntag(r5);
+ __ mr(r3, r7);
+ __ b(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmpi(r5, Operand(FAST_ELEMENTS));
+ __ bgt(&create_runtime);
+ __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ beq(&create_runtime);
+ __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ SmiUntag(r5);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ sub(ip, sp, ip);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+ __ cmp(ip, r0); // Signed comparison.
+ __ bgt(&done);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r4 : target
+ // -- r3 : args (a FixedArray built from argumentsList)
+ // -- r5 : len (number of elements to push from args)
+ // -- r6 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label loop, no_args;
+ __ cmpi(r5, Operand::Zero());
+ __ beq(&no_args);
+ __ addi(r3, r3,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ mtctr(r5);
+ __ bind(&loop);
+ __ LoadPU(r0, MemOperand(r3, kPointerSize));
+ __ push(r0);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+ __ mr(r3, r5);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
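
[editor's note] Two details in the Apply builtin above are worth calling out: the stack check computes sp minus the real stack limit as a signed quantity (the stack may already be past the limit), and the final dispatch picks Call or Construct purely on whether new.target is undefined. A sketch of the headroom test, assuming 8-byte pointers as on ppc64:

    #include <cstdint>

    // Signed headroom test: true when len more pointer-sized slots still fit.
    bool ArgumentsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                             uint64_t len) {
      int64_t headroom = static_cast<int64_t>(sp) -
                         static_cast<int64_t>(real_stack_limit);
      return headroom > static_cast<int64_t>(len * 8);  // len * kPointerSize
    }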
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(r4);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r6, SharedFunctionInfo::kClassConstructorBits, r0);
+ __ bne(&class_constructor, cr0);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
- SharedFunctionInfo::kStrictModeByteOffset);
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
- __ lbz(r6, FieldMemOperand(r5, SharedFunctionInfo::kNativeByteOffset));
- __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
- (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ Label done_convert;
+ __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+ (1 << SharedFunctionInfo::kNativeBit)));
__ bne(&done_convert, cr0);
{
- __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r6, MemOperand(sp, r6));
-
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
// -- r5 : the shared function info.
- // -- r6 : the receiver
// -- cp : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(r6, &convert_to_object);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
- __ bge(&done_convert);
- __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
- __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(r6);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r6, MemOperand(sp, r6));
+ __ JumpIfSmi(r6, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy);
+ __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r6);
+ }
+ __ b(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r3, r4);
+ __ mr(r3, r6);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r6, r3);
+ __ Pop(r3, r4);
+ __ SmiUntag(r3);
+ }
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ b(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r3);
- __ Push(r3, r4);
- __ mr(r3, r6);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mr(r6, r3);
- __ Pop(r3, r4);
- __ SmiUntag(r3);
- }
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
__ StorePX(r6, MemOperand(sp, r7));
}
@@ -1581,15 +2039,133 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
#if !V8_TARGET_ARCH_PPC64
__ SmiUntag(r5);
#endif
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
ParameterCount actual(r3);
ParameterCount expected(r5);
- __ InvokeCode(r6, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(r4, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
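
[editor's note] The receiver conversion above implements ES6 OrdinaryCallBindThis: strict-mode and native functions take the receiver as-is, null/undefined become the global proxy, and primitives are wrapped via ToObject. A self-contained sketch, ignoring the ConvertReceiverMode specializations (which merely let callers skip checks already proven) and using toy types rather than V8's:

    #include <string>

    struct Value { std::string kind; };  // "undefined", "null", "object", "number", ...
    static Value GlobalProxy() { return {"global_proxy"}; }
    static Value ToObject(const Value& v) { return {"wrapper(" + v.kind + ")"}; }

    Value ConvertReceiver(const Value& receiver, bool strict_or_native) {
      if (strict_or_native) return receiver;           // no conversion
      if (receiver.kind == "undefined" || receiver.kind == "null")
        return GlobalProxy();                          // patch to global proxy
      if (receiver.kind == "object") return receiver;  // already a JSReceiver
      return ToObject(receiver);                       // wrap primitives
    }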
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : target (checked to be a JSBoundFunction)
+ // -- r6 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into r5 and length of that into r7.
+ Label no_bound_arguments;
+ __ LoadP(r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
+ __ LoadP(r7, FieldMemOperand(r5, FixedArray::kLengthOffset));
+ __ SmiUntag(r7, SetRC);
+ __ beq(&no_bound_arguments, cr0);
+ {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : target (checked to be a JSBoundFunction)
+ // -- r5 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- r6 : new.target (only in case of [[Construct]])
+ // -- r7 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ mr(r9, sp); // preserve previous stack pointer
+ __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, r10);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ bgt(&done); // Signed comparison.
+ // Restore the stack pointer.
+ __ mr(sp, r9);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r9 : the previous stack pointer
+ // -- r10: the size of the [[BoundArguments]]
+ {
+ Label skip, loop;
+ __ li(r8, Operand::Zero());
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&skip);
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ LoadPX(r0, MemOperand(r9, r8));
+ __ StorePX(r0, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ bdnz(&loop);
+ __ bind(&skip);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, r10);
+ __ mtctr(r7);
+ __ bind(&loop);
+ __ LoadPU(r0, MemOperand(r5, -kPointerSize));
+ __ StorePX(r0, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ bdnz(&loop);
+ __ add(r3, r3, r7);
+ }
+ }
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
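
[editor's note] The two copy loops above first slide the call-site arguments down by the size of [[BoundArguments]] and then fill the gap, so the bound arguments end up closest to the receiver, i.e. first in argument order. In container terms (toy model, not V8 code):

    #include <vector>

    // Final argument order for a bound-function call: bound arguments first.
    template <typename T>
    std::vector<T> PrependBoundArguments(const std::vector<T>& bound_args,
                                         const std::vector<T>& call_args) {
      std::vector<T> out(bound_args);  // [[BoundArguments]], in order
      out.insert(out.end(), call_args.begin(), call_args.end());
      return out;
    }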
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(r4);
+
+ // Patch the receiver to [[BoundThis]].
+ __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(ip, MemOperand(sp, r0));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ LoadP(r4,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
@@ -1599,16 +2175,22 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
- eq);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(r4);
- __ b(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(r4);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ addi(r3, r3, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1621,15 +2203,17 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
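
[editor's note] Generate_Call above is the universal [[Call]] dispatcher, and its order matters: plain JSFunctions first, then bound functions, then proxies (via the runtime), then any other object with the callable map bit set (via the call-as-function delegate), and finally a TypeError. As a toy decision table (V8 really dispatches on instance type and map bits):

    enum class TargetKind { kSmi, kJSFunction, kJSBoundFunction, kJSProxy,
                            kOtherCallable, kNotCallable };
    enum class CallPath { kCallFunction, kCallBoundFunction, kProxyRuntimeCall,
                          kCallDelegate, kThrowTypeError };

    CallPath DispatchCall(TargetKind kind) {
      switch (kind) {
        case TargetKind::kJSFunction:      return CallPath::kCallFunction;
        case TargetKind::kJSBoundFunction: return CallPath::kCallBoundFunction;
        case TargetKind::kJSProxy:         return CallPath::kProxyRuntimeCall;
        case TargetKind::kOtherCallable:   return CallPath::kCallDelegate;
        default:                           return CallPath::kThrowTypeError;
      }
    }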
@@ -1639,10 +2223,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (checked to be a JSFunction)
- // -- r6 : the original constructor (checked to be a JSFunction)
+ // -- r6 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(r4);
- __ AssertFunction(r6);
// Calling convention for function specific ConstructStubs require
// r5 to contain either an AllocationSite or undefined.
@@ -1658,17 +2241,51 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSBoundFunction)
+ // -- r6 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(r4);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ Label skip;
+ __ cmp(r4, r6);
+ __ bne(&skip);
+ __ LoadP(r6,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip);
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ LoadP(r4,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (checked to be a JSFunctionProxy)
- // -- r6 : the original constructor (either the same as the constructor or
+ // -- r4 : the constructor to call (checked to be a JSProxy)
+ // -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(r4, r6);
+ // Include the pushed new_target, constructor and the receiver.
+ __ addi(r3, r3, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
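
[editor's note] A small accounting note on the +3 above: by the time the runtime is entered, the stack holds the original argc arguments, the receiver that was already there, and the constructor and new target pushed here. As a one-line sketch (hypothetical helper name):

    // Arguments visible to Runtime::kJSProxyConstruct: original argc plus the
    // receiver already on the stack plus the two values pushed above.
    inline int ProxyConstructRuntimeArgc(int argc) { return argc + 3; }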
@@ -1677,23 +2294,32 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (can be any Object)
- // -- r6 : the original constructor (either the same as the constructor or
+ // -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(r4, &non_constructor);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Construct]] internal method.
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::kIsConstructor, r0);
__ beq(&non_constructor, cr0);
- // Dispatch based on instance type.
- __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET, eq);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ cmpi(r8, Operand(JS_PROXY_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1703,7 +2329,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1711,37 +2337,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
-}
-
-
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : the number of arguments (not including the receiver)
- // -- r5 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- r4 : the target to call (can be any Object).
-
- // Calculate number of arguments (add one for receiver).
- __ addi(r6, r3, Operand(1));
-
- // Push the arguments.
- Label loop;
- __ addi(r5, r5, Operand(kPointerSize)); // Bias up for LoadPU
- __ mtctr(r6);
- __ bind(&loop);
- __ LoadPU(r6, MemOperand(r5, -kPointerSize));
- __ push(r6);
- __ bdnz(&loop);
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
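
[editor's note] Generate_Construct mirrors the call dispatcher, with one twist visible above: a JSFunction short-circuits immediately, while every other target must pass the Map::kIsConstructor bit before being routed further. A toy decision table under that assumption:

    enum class CtorKind { kJSFunction, kBoundConstructor, kProxyConstructor,
                          kOtherConstructor, kNotConstructor };
    enum class CtorPath { kConstructFunction, kConstructBoundFunction,
                          kConstructProxy, kConstructDelegate, kThrow };

    CtorPath DispatchConstruct(CtorKind kind) {
      switch (kind) {
        case CtorKind::kJSFunction:       return CtorPath::kConstructFunction;
        case CtorKind::kBoundConstructor: return CtorPath::kConstructBoundFunction;
        case CtorKind::kProxyConstructor: return CtorPath::kConstructProxy;
        case CtorKind::kOtherConstructor: return CtorPath::kConstructDelegate;
        default:                          return CtorPath::kThrow;
      }
    }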
@@ -1750,11 +2347,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r3 : actual number of arguments
// -- r4 : function (passed through to callee)
// -- r5 : expected number of arguments
+ // -- r6 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
@@ -1766,31 +2362,34 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
- // Calculate copy start address into r3 and copy end address into r6.
+ // Calculate copy start address into r3 and copy end address into r7.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
- __ sub(r6, r3, r6);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, r3, r7);
// Copy the arguments (including the receiver) to the new stack frame.
// r3: copy start address
// r4: function
// r5: expected number of arguments
- // r6: copy end address
+ // r6: new target (passed through to callee)
+ // r7: copy end address
// ip: code entry to call
Label copy;
__ bind(&copy);
__ LoadP(r0, MemOperand(r3, 0));
__ push(r0);
- __ cmp(r3, r6); // Compare before moving to next argument.
+ __ cmp(r3, r7); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kPointerSize));
__ bne(&copy);
@@ -1804,13 +2403,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label no_strong_error;
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r8, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r8,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrongModeFunction,
-#else
- SharedFunctionInfo::kStrongModeFunction + kSmiTagSize,
-#endif
- r0);
+ __ TestBit(r8, SharedFunctionInfo::kStrongModeBit, r0);
__ beq(&no_strong_error, cr0);
// What we really care about is the required number of arguments.
@@ -1827,16 +2420,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
@@ -1845,6 +2440,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: copy start address
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
Label copy;
__ bind(&copy);
@@ -1858,18 +2454,19 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
- __ sub(r6, fp, r6);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, fp, r7);
// Adjust for frame.
- __ subi(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(r0);
- __ cmp(sp, r6);
+ __ cmp(sp, r7);
__ bne(&fill);
}
@@ -1878,6 +2475,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mr(r3, r5);
// r3 : expected number of arguments
// r4 : function (passed through to callee)
+ // r6 : new target (passed through to callee)
__ CallJSEntry(ip);
// Store offset of return address for deoptimizer.
@@ -1897,8 +2495,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0);
}
}
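
[editor's note] The adaptor trampoline hunks above implement the usual arity bridge: from the callee's perspective, the actual arguments are either cut down or padded with undefined until they match the expected count. A toy model (hedged; the real code also keeps surplus arguments reachable in the adaptor frame):

    #include <vector>

    template <typename T>
    std::vector<T> AdaptArguments(std::vector<T> actual, size_t expected,
                                  const T& undefined_value) {
      actual.resize(expected, undefined_value);  // pads or truncates
      return actual;
    }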
diff --git a/chromium/v8/src/ppc/code-stubs-ppc.cc b/chromium/v8/src/ppc/code-stubs-ppc.cc
index 290159a3e75..26fbe98cf9d 100644
--- a/chromium/v8/src/ppc/code-stubs-ppc.cc
+++ b/chromium/v8/src/ppc/code-stubs-ppc.cc
@@ -260,7 +260,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
- __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
__ bge(slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
@@ -281,7 +281,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ beq(&heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
__ bge(slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
@@ -456,11 +456,11 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into r5 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
+ // FIRST_JS_RECEIVER_TYPE.
+ __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
__ blt(&first_non_object);
// Return non-zero (r3 is not zero)
@@ -473,7 +473,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
__ cmpi(r5, Operand(ODDBALL_TYPE));
__ beq(&return_not_equal);
- __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
__ bge(&return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -536,9 +536,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
- __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
__ blt(not_both_strings);
- __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
__ blt(not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -708,8 +708,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if (cc == lt || cc == le) {
@@ -723,9 +722,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -948,7 +946,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1034,15 +1032,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
-
+ //
+ // If argv_in_register():
+ // r5: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
__ mr(r15, r4);
- // Compute the argv pointer.
- __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
- __ add(r4, r4, sp);
- __ subi(r4, r4, Operand(kPointerSize));
+ if (argv_in_register()) {
+ // Move argv into the correct register.
+ __ mr(r4, r5);
+ } else {
+ // Compute the argv pointer.
+ __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+ __ add(r4, r4, sp);
+ __ subi(r4, r4, Operand(kPointerSize));
+ }
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1086,16 +1091,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Call C built-in.
__ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+ Register target = r15;
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// Native AIX/PPC64 Linux use a function descriptor.
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
__ LoadP(ip, MemOperand(r15, 0)); // Instruction address
- Register target = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ target = ip;
+#elif ABI_CALL_VIA_IP
__ Move(ip, r15);
- Register target = ip;
-#else
- Register target = r15;
+ target = ip;
#endif
// To let the GC traverse the return address of the exit frames, we need to
@@ -1141,8 +1145,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r3:r4: result
// sp: stack pointer
// fp: frame pointer
- // r14: still holds argc (callee-saved).
- __ LeaveExitFrame(save_doubles(), r14, true);
+ Register argc;
+ if (argv_in_register()) {
+ // We don't want to pop arguments so set argc to no_reg.
+ argc = no_reg;
+ } else {
+ // r14: still holds argc (callee-saved).
+ argc = r14;
+ }
+ __ LeaveExitFrame(save_doubles(), argc, true);
__ blr();
// Handling of exception.
@@ -1410,21 +1421,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
__ bne(&slow_case, cr0);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ LoadP(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(scratch, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kBoundFunction,
-#else
- SharedFunctionInfo::kBoundFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&slow_case, cr0);
-
// Get the "prototype" (or initial map) of the {function}.
__ LoadP(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1449,29 +1445,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ Register const result = r3;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ LoadP(object_prototype,
- FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
+ __ bne(&fast_runtime_fallback, cr0);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ beq(&fast_runtime_fallback);
+
+ __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ beq(&done);
- __ cmp(object_prototype, null);
- __ LoadP(object_map,
- FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ cmp(object, null);
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ bne(&loop);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ LoadSmiLiteral(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
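
[editor's note] The rewritten loop above walks the prototype chain in place instead of keeping a separate object_prototype register, bailing to Runtime::kHasInPrototypeChain whenever it meets a proxy or an access-checked object (after invalidating the instanceof cache). A toy rendition of the walk:

    struct Obj {
      const Obj* prototype;     // nullptr plays the role of JS null
      bool needs_access_check;
      bool is_proxy;
    };
    enum class Result { kTrue, kFalse, kRuntimeFallback };

    Result HasInPrototypeChain(const Obj* object, const Obj* function_prototype) {
      for (const Obj* o = object; o != nullptr; o = o->prototype) {
        if (o->needs_access_check || o->is_proxy) return Result::kRuntimeFallback;
        if (o->prototype == function_prototype) return Result::kTrue;
      }
      return Result::kFalse;
    }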
@@ -1576,7 +1590,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r4);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1604,7 +1618,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1613,8 +1627,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 : number of parameters (tagged)
// r6 : parameters pointer
// Registers used over whole function:
- // r8 : arguments count (tagged)
- // r9 : mapped parameter count (tagged)
+ // r8 : arguments count (tagged)
+ // r9 : mapped parameter count (tagged)
DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
@@ -1685,7 +1699,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r7, r11, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
@@ -1695,9 +1709,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ LoadP(r7,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+ __ LoadP(r7, NativeContextMemOperand());
__ cmpi(r9, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadP(r11, MemOperand(r7, kNormalOffset));
@@ -1848,7 +1860,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r8 = argument count (tagged)
__ bind(&runtime);
__ Push(r4, r6, r8);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1867,7 +1879,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1912,12 +1924,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ LoadP(r7,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
- __ LoadP(
- r7,
- MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
__ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
@@ -1964,7 +1971,30 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+ // r7 : rest parameter index (tagged)
+
+ Label runtime;
+ __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r0, r5);
+ __ add(r6, r8, r0);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ Push(r5, r6, r7);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1973,7 +2003,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2268,7 +2298,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ beq(&runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2357,7 +2387,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2400,35 +2430,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
- // r7 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
- if (is_super) {
- __ Push(r6, r5, r4, r3, r7);
- } else {
- __ Push(r6, r5, r4, r3);
- }
+ __ Push(r6, r5, r4, r3);
__ CallStub(stub);
- if (is_super) {
- __ Pop(r6, r5, r4, r3, r7);
- } else {
- __ Pop(r6, r5, r4, r3);
- }
+ __ Pop(r6, r5, r4, r3);
__ SmiUntag(r3);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2436,7 +2456,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
- // r7 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2478,7 +2497,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bne(&miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
__ b(&done);
@@ -2502,7 +2521,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&not_array_function);
@@ -2510,127 +2529,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions and natives.
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r7,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(cont, cr0);
-
- // Do not transform the receiver for native.
- __ TestBit(r7,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
- __ bne(cont, cr0);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, int argc) {
- __ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- {
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(r4);
- __ mr(r3, r6);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(r4);
- }
- __ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0);
- __ b(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
- bool needs_checks, bool call_as_method) {
- // r4 : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function is really a JavaScript function.
- // r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
- __ bne(&slow);
- }
-
- // Fast-case: Invoke the function now.
- // r4: pushed function
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Compute the receiver in sloppy mode.
- __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
-
- if (needs_checks) {
- __ JumpIfSmi(r6, &wrap);
- __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
- __ blt(&wrap);
- } else {
- __ b(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// r3 : number of arguments
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2639,35 +2553,29 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
__ bne(&non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
-
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
- // Put the AllocationSite from the feedback vector into r5, or undefined.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
- __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ isel(eq, r5, r5, r8);
- } else {
- Label feedback_register_initialized;
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ GenerateRecordCallTarget(masm);
- __ AssertUndefinedOrAllocationSite(r5, r8);
- }
-
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mr(r6, r7);
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
+ // Put the AllocationSite from the feedback vector into r5, or undefined.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+ __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
} else {
- __ mr(r6, r4);
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
}
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+
+ // Pass function as new target.
+ __ mr(r6, r4);
+
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -2686,7 +2594,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r6 - slot id
// r5 - vector
// r7 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(miss);
@@ -2711,13 +2619,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id (Smi)
// r5 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2754,34 +2656,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
__ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
- // Compute the receiver in sloppy mode.
- __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
-
- __ JumpIfSmi(r6, &wrap);
- __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
- __ blt(&wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call_function);
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
- __ beq(&slow_start);
+ __ beq(&call);
// Verify that r7 contains an AllocationSite
__ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
@@ -2809,14 +2693,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
- // We have to update statistics for runtime profiling.
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
- __ LoadP(r7, FieldMemOperand(r5, generic_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
- __ b(&slow_start);
+
+ __ bind(&call);
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2829,14 +2710,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ cmp(r4, r7);
__ beq(&miss);
- // Update stats.
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ // Make sure the function belongs to the same native context.
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(ip, NativeContextMemOperand());
+ __ cmp(r7, ip);
+ __ bne(&miss);
// Initialize the call counter.
__ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
@@ -2854,23 +2737,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r4);
}
- __ b(&have_js_function);
+ __ b(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that the function is really a JavaScript function.
- // r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &slow);
-
- // Goto slow case if we do not have a function.
- __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
- __ bne(&slow);
- __ b(&have_js_function);
+ __ b(&call);
}
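
Both fast paths above funnel into the Call/CallFunction builtins after bumping the Smi-tagged call count in the feedback vector. A minimal sketch of that tagged increment, assuming a one-bit Smi tag (real V8's tagging details vary by platform):

    #include <cstdint>

    // Assumed Smi encoding: value << 1, with tag bit 0 == 0.
    constexpr intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << 1;
    }

    // AddSmiLiteral-style bump: the sum of two tagged Smis is the tagged
    // sum, so the hot path never untags the count.
    constexpr intptr_t BumpCallCount(intptr_t tagged_count, int increment) {
      return tagged_count + SmiFromInt(increment);
    }

    static_assert(BumpCallCount(SmiFromInt(7), 1) == SmiFromInt(8),
                  "result stays a valid Smi");
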
@@ -2881,7 +2755,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r5, r6);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
@@ -2940,11 +2814,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2971,7 +2845,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -3011,7 +2885,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -3265,7 +3139,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// r3: original string
@@ -3305,7 +3179,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ blr();
__ bind(&slow_string);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3316,7 +3190,29 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in r3.
+ Label not_smi;
+ __ JumpIfNotSmi(r3, &not_smi);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ cmpi(r3, Operand::Zero());
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(lt, r3, r0, r3);
+ } else {
+ Label positive;
+ __ bgt(&positive);
+ __ li(r3, Operand::Zero());
+ __ bind(&positive);
+ }
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kToLength);
}
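
The new ToLengthStub only fast-paths Smis: negatives clamp to zero (branchlessly via isel where the CPU supports it) and everything else defers to Runtime::kToLength. For context, the full ToLength operation behaves roughly like this sketch (not V8's actual runtime code):

    #include <algorithm>
    #include <cmath>

    // ES2015 ToLength over an already-converted number: NaN and negatives
    // clamp to 0, large values clamp to 2^53 - 1.
    double ToLength(double number) {
      if (std::isnan(number) || number <= 0) return 0;
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      return std::min(std::floor(number), kMaxSafeInteger);
    }
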
@@ -3346,7 +3242,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3501,7 +3397,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// tagged as a small integer.
__ bind(&runtime);
__ Push(r4, r3);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
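
This hunk shows a change repeated throughout the file: CallRuntime and TailCallRuntime lose their explicit argument-count (and result-size) operands because the arity can now be read off the runtime function's declaration. A hedged model of the new contract, with a stand-in table entry:

    // Stand-in for V8's runtime-function table entry; the real one also
    // carries an entry address and result size.
    struct RuntimeFunction {
      const char* name;
      int nargs;  // arity recorded once, at the declaration
    };

    constexpr RuntimeFunction kStringCompare{"StringCompare", 2};

    // The assembler reads the arity itself instead of trusting a count
    // repeated at every call site.
    constexpr int ArgumentCount(const RuntimeFunction& f) { return f.nargs; }

    static_assert(ArgumentCount(kStringCompare) == 2,
                  "arity comes from the declaration");
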
@@ -3543,7 +3439,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
@@ -3827,9 +3723,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3837,16 +3733,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ and_(r5, r4, r3);
__ JumpIfSmi(r5, &miss);
- __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
- __ bne(&miss);
- __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE);
- __ bne(&miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
+ __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
DCHECK(GetCondition() == eq);
__ sub(r3, r3, r4);
@@ -3857,7 +3754,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
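
The OBJECT-to-RECEIVER rename works because JS receiver instance types sit at the top of the type range (the STATIC_ASSERT above), so a single lower-bound comparison per operand replaces the exact JS_OBJECT_TYPE checks. A sketch with hypothetical enum values; only the ordering matters:

    // Hypothetical instance-type values, chosen only to mirror the real
    // ordering: receiver types occupy the top of the range.
    enum InstanceType {
      HEAP_NUMBER_TYPE = 130,
      FIRST_JS_RECEIVER_TYPE = 230,
      JS_OBJECT_TYPE = 231,
      LAST_TYPE = 255,
    };

    // One compare per operand: anything >= FIRST_JS_RECEIVER_TYPE is a
    // receiver (proxy, object, function, ...).
    constexpr bool BothReceivers(InstanceType a, InstanceType b) {
      return a >= FIRST_JS_RECEIVER_TYPE && b >= FIRST_JS_RECEIVER_TYPE;
    }

    static_assert(BothReceivers(JS_OBJECT_TYPE, LAST_TYPE), "both in range");
    static_assert(!BothReceivers(HEAP_NUMBER_TYPE, JS_OBJECT_TYPE),
                  "a number is not a receiver");
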
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r5, r4, r3);
@@ -3874,7 +3771,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(r3, r3, r4);
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
@@ -3882,7 +3779,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ LoadSmiLiteral(r5, Smi::FromInt(LESS));
}
__ Push(r4, r3, r5);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3898,7 +3795,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));
__ push(r0);
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -3929,7 +3826,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
__ LoadP(ip, MemOperand(target, 0)); // Instruction address
#else
// ip needs to be set for DirectCEentryStub::Generate, and also
- // for ABI_TOC_ADDRESSABILITY_VIA_IP.
+ // for ABI_CALL_VIA_IP.
__ Move(ip, target);
#endif
@@ -4346,11 +4243,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4370,75 +4267,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
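
The EnsureNotWhite-to-JumpIfWhite change narrows the helper to a pure test-and-branch on the mark bits, leaving the fix-up to the slow path. A sketch of the test, assuming the conventional two-mark-bits-per-object bitmap in which white is 00 (an assumption, not a quote of V8's marking code):

    #include <cstdint>

    // Assumed layout: 00 = white, 10 = grey, 11 = black.
    constexpr bool IsWhite(uint32_t cell, unsigned bit_index) {
      return ((cell >> bit_index) & 0x3u) == 0;
    }

    static_assert(IsWhite(0x0u, 0), "untouched object is white");
    static_assert(!IsWhite(0x3u, 0), "black object is not white");
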
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : element value to store
- // -- r6 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers r3, r5, r7
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ LoadP(r7, MemOperand(sp, 0 * kPointerSize));
- __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset));
-
- __ CheckFastElements(r5, r8, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r3, &smi_element);
- __ CheckFastSmiElements(r5, r8, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r4, r6, r3);
- __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset));
- __ Push(r8, r7);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r8, r9);
-#if V8_TARGET_ARCH_PPC64
- // add due to offset alignment requirements of StorePU
- __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ StoreP(r3, MemOperand(r9));
-#else
- __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag));
-#endif
- // Update the write barrier for the array store.
- __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r8, r9);
- __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4969,7 +4797,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Function descriptor
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
__ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif ABI_CALL_VIA_IP
// ip set above, so nothing to do.
#endif
@@ -5178,7 +5006,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r3 : argc (only if argument_count() == ANY)
// -- r4 : constructor
// -- r5 : AllocationSite or undefined
- // -- r6 : original constructor
+ // -- r6 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -5199,6 +5027,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r5, r7);
}
+ // Enter the context of the Array function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
Label subclassing;
__ cmp(r6, r4);
__ bne(&subclassing);
@@ -5218,25 +5049,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- __ push(r4);
- __ push(r6);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ addi(r3, r3, Operand(2));
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r0));
+ __ addi(r3, r3, Operand(3));
break;
case NONE:
- __ li(r3, Operand(2));
+ __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
+ __ li(r3, Operand(3));
break;
case ONE:
- __ li(r3, Operand(3));
+ __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ li(r3, Operand(4));
break;
}
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(r6, r5);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
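
On the subclassing path the stub now rewrites the stack in place instead of calling a dedicated runtime entry: the constructor overwrites the receiver slot, new.target and the AllocationSite are pushed, and argc grows by three before the tail call to Runtime::kNewArray. A container-based model of the reshuffle (names and ordering are illustrative):

    #include <string>
    #include <vector>

    // args arrives as [arg0 .. argN-1, receiver]; argc counted only the
    // N arguments. After the rewrite the runtime sees argc + 3 slots.
    std::vector<std::string> NewArrayArgs(std::vector<std::string> args,
                                          const std::string& constructor,
                                          const std::string& new_target,
                                          const std::string& site) {
      args.back() = constructor;   // StorePX r4 into the receiver slot
      args.push_back(new_target);  // Push(r6, ...)
      args.push_back(site);        // ..., r5)
      return args;
    }
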
@@ -5320,14 +5151,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ LoadP(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
__ add(result, context, r0);
- __ LoadP(result, ContextOperand(result));
+ __ LoadP(result, ContextMemOperand(result));
__ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
@@ -5337,7 +5168,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Fallback to runtime.
__ SmiTag(slot);
__ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
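
Renames aside (ContextOperand becomes ContextMemOperand), the stub is a plain pointer walk. In miniature, with stand-in types for the tagged structures it loads:

    // Simplified stand-ins for the tagged structures involved.
    struct PropertyCell { void* value; };
    struct Context {
      Context* previous;        // Context::PREVIOUS_INDEX
      PropertyCell* slots[16];  // script-context slots; size is arbitrary
    };

    // Hop `depth` contexts up, then read the PropertyCell at `slot`; the
    // hole check and runtime fallback are elided.
    void* LoadGlobalViaContext(Context* context, int depth, int slot) {
      for (int i = 0; i < depth; ++i) context = context->previous;
      return context->slots[slot]->value;
    }
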
@@ -5363,14 +5194,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
- __ LoadP(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
__ add(cell, context, r0);
- __ LoadP(cell, ContextOperand(cell));
+ __ LoadP(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
@@ -5465,8 +5296,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5602,7 +5432,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/chromium/v8/src/ppc/code-stubs-ppc.h b/chromium/v8/src/ppc/code-stubs-ppc.h
index bc6c26b2172..d394171d896 100644
--- a/chromium/v8/src/ppc/code-stubs-ppc.h
+++ b/chromium/v8/src/ppc/code-stubs-ppc.h
@@ -127,8 +127,8 @@ class RecordWriteStub : public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL, stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
@@ -322,7 +322,7 @@ class NameDictionaryLookupStub : public PlatformCodeStub {
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_CODE_STUBS_PPC_H_
diff --git a/chromium/v8/src/ppc/codegen-ppc.cc b/chromium/v8/src/ppc/codegen-ppc.cc
index b313d11bb31..2bf8b4ee83f 100644
--- a/chromium/v8/src/ppc/codegen-ppc.cc
+++ b/chromium/v8/src/ppc/codegen-ppc.cc
@@ -18,23 +18,23 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_ppc_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())
+byte* fast_exp_ppc_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)
->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = d1;
@@ -62,11 +62,11 @@ UnaryMathFunction CreateExpFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_ppc_machine_code = buffer;
return &fast_exp_simulator;
@@ -74,16 +74,17 @@ UnaryMathFunction CreateExpFunction() {
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// Called from C
__ function_descriptor();
@@ -99,9 +100,9 @@ UnaryMathFunction CreateSqrtFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
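
Both factories now return nullptr, rather than a <cmath> function pointer, when they cannot emit code (simulator builds, buffer allocation failure), pushing the fallback decision to the caller. The implied caller-side pattern, sketched under that assumption:

    #include <cmath>

    class Isolate;  // opaque here
    using UnaryMathFunctionWithIsolate = double (*)(double, Isolate*);

    // Callers keep the libm routine as the backup now that the factory can
    // signal "no fast path" with nullptr.
    double FastSqrt(UnaryMathFunctionWithIsolate fast, Isolate* isolate,
                    double x) {
      return fast != nullptr ? fast(x, isolate) : std::sqrt(x);
    }
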
@@ -607,15 +608,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the ARM simulator ICache is set up.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r4);
patcher->masm()->addi(fp, sp,
@@ -664,7 +667,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// Don't use Call -- we need to preserve ip and lr.
diff --git a/chromium/v8/src/ppc/codegen-ppc.h b/chromium/v8/src/ppc/codegen-ppc.h
index f8da74eaa61..c3cd9b39a0e 100644
--- a/chromium/v8/src/ppc/codegen-ppc.h
+++ b/chromium/v8/src/ppc/codegen-ppc.h
@@ -5,7 +5,7 @@
#ifndef V8_PPC_CODEGEN_PPC_H_
#define V8_PPC_CODEGEN_PPC_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -35,7 +35,7 @@ class MathExpGenerator : public AllStatic {
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_CODEGEN_PPC_H_
diff --git a/chromium/v8/src/ppc/constants-ppc.cc b/chromium/v8/src/ppc/constants-ppc.cc
index 56147b3c484..e6eec643f4d 100644
--- a/chromium/v8/src/ppc/constants-ppc.cc
+++ b/chromium/v8/src/ppc/constants-ppc.cc
@@ -14,45 +14,18 @@ namespace internal {
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
"r0", "sp", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
+ "r11", "ip", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
"r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "fp"};
-// List of alias names which can be used when referring to PPC registers.
-const Registers::RegisterAlias Registers::aliases_[] = {{10, "sl"},
- {11, "r11"},
- {12, "r12"},
- {13, "r13"},
- {14, "r14"},
- {15, "r15"},
- {kNoRegister, NULL}};
-
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-const char* FPRegisters::names_[kNumFPRegisters] = {
+const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
"d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
"d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
-const char* FPRegisters::Name(int reg) {
- DCHECK((0 <= reg) && (reg < kNumFPRegisters));
- return names_[reg];
-}
-
-
-int FPRegisters::Number(const char* name) {
- for (int i = 0; i < kNumFPRegisters; i++) {
+int DoubleRegisters::Number(const char* name) {
+ for (int i = 0; i < kNumDoubleRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
@@ -71,15 +44,6 @@ int Registers::Number(const char* name) {
}
}
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kNoRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
// No register with the requested name found.
return kNoRegister;
}
diff --git a/chromium/v8/src/ppc/constants-ppc.h b/chromium/v8/src/ppc/constants-ppc.h
index b304bad7ceb..4c404ae911f 100644
--- a/chromium/v8/src/ppc/constants-ppc.h
+++ b/chromium/v8/src/ppc/constants-ppc.h
@@ -18,8 +18,7 @@ namespace internal {
const int kNumRegisters = 32;
// FP support.
-const int kNumFPDoubleRegisters = 32;
-const int kNumFPRegisters = kNumFPDoubleRegisters;
+const int kNumDoubleRegisters = 32;
const int kNoRegister = -1;
@@ -229,6 +228,7 @@ enum OpcodeExt2 {
LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
XORX = 316 << 1, // Exclusive OR
MFSPR = 339 << 1, // Move from Special-Purpose-Register
+ POPCNTW = 378 << 1, // Population Count Words
STHX = 407 << 1, // store half-word w/ x-form
ORC = 412 << 1, // Or with Complement
STHUX = 439 << 1, // store half-word w/ update x-form
@@ -238,6 +238,7 @@ enum OpcodeExt2 {
MTSPR = 467 << 1, // Move to Special-Purpose-Register
DIVD = 489 << 1, // Divide Double Word
DIVW = 491 << 1, // Divide Word
+ POPCNTD = 506 << 1, // Population Count Doubleword
// Below represent bits 10-1 (any value >= 512)
LFSX = 535 << 1, // load float-single w/ x-form
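
POPCNTW and POPCNTD are the Power ISA population-count instructions the port starts using; each counts the set bits in its source register. A portable reference implementation for comparison:

    #include <cstdint>

    // Kernighan's method: each step clears the lowest set bit.
    constexpr int PopulationCount(uint64_t x) {
      return x == 0 ? 0 : PopulationCount(x & (x - 1)) + 1;
    }

    static_assert(PopulationCount(0x0u) == 0, "no bits set");
    static_assert(PopulationCount(0xF0F0u) == 8, "eight bits set");
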
@@ -274,24 +275,29 @@ enum OpcodeExt4 {
FMADD = 29 << 1, // Floating Multiply-Add
// Bits 10-1
- FCMPU = 0 << 1, // Floating Compare Unordered
- FRSP = 12 << 1, // Floating-Point Rounding
- FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
- FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
- FNEG = 40 << 1, // Floating Negate
- MCRFS = 64 << 1, // Move to Condition Register from FPSCR
- FMR = 72 << 1, // Floating Move Register
- MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
- FABS = 264 << 1, // Floating Absolute Value
- FRIN = 392 << 1, // Floating Round to Integer Nearest
- FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
- FRIP = 456 << 1, // Floating Round to Integer Plus
- FRIM = 488 << 1, // Floating Round to Integer Minus
- MFFS = 583 << 1, // move from FPSCR x-form
- MTFSF = 711 << 1, // move to FPSCR fields XFL-form
- FCFID = 846 << 1, // Floating convert from integer doubleword
- FCTID = 814 << 1, // Floating convert from integer doubleword
- FCTIDZ = 815 << 1 // Floating convert from integer doubleword
+ FCMPU = 0 << 1, // Floating Compare Unordered
+ FRSP = 12 << 1, // Floating-Point Rounding
+ FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
+ FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
+ MTFSB1 = 38 << 1, // Move to FPSCR Bit 1
+ FNEG = 40 << 1, // Floating Negate
+ MCRFS = 64 << 1, // Move to Condition Register from FPSCR
+ MTFSB0 = 70 << 1, // Move to FPSCR Bit 0
+ FMR = 72 << 1, // Floating Move Register
+ MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
+ FABS = 264 << 1, // Floating Absolute Value
+ FRIN = 392 << 1, // Floating Round to Integer Nearest
+ FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
+ FRIP = 456 << 1, // Floating Round to Integer Plus
+ FRIM = 488 << 1, // Floating Round to Integer Minus
+ MFFS = 583 << 1, // move from FPSCR x-form
+ MTFSF = 711 << 1, // move to FPSCR fields XFL-form
+ FCTID = 814 << 1, // Floating convert to integer doubleword
+ FCTIDZ = 815 << 1, // ^^^ with round toward zero
+ FCFID = 846 << 1, // Floating convert from integer doubleword
+ FCTIDU = 942 << 1, // Floating convert to integer doubleword unsigned
+ FCTIDUZ = 943 << 1, // ^^^ with round toward zero
+ FCFIDU = 974 << 1 // Floating convert from integer doubleword unsigned
};
enum OpcodeExt5 {
@@ -398,6 +404,13 @@ enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
#define CRWIDTH 4
+// These are the documented bit positions, biased down by 32.
+enum FPSCRBit {
+ VXSOFT = 21, // 53: Software-Defined Condition
+ VXSQRT = 22, // 54: Invalid Square Root
+ VXCVI = 23 // 55: Invalid Integer Convert
+};
+
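
The bias in the comment comes from indexing only the low word of the 64-bit FPSCR image: documented bit 53 lands at enum value 21, and so on. Spelled out as compile-time checks:

    // Documented (64-bit) FPSCR bit number -> position in the 32-bit word
    // these enums index.
    constexpr int kFpscrWordBias = 32;
    constexpr int FpscrBit(int documented_bit) {
      return documented_bit - kFpscrWordBias;
    }

    static_assert(FpscrBit(53) == 21, "VXSOFT");
    static_assert(FpscrBit(54) == 22, "VXSQRT");
    static_assert(FpscrBit(55) == 23, "VXCVI");
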
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@@ -564,35 +577,23 @@ class Instruction {
// Helper functions for converting between register numbers and names.
class Registers {
public:
- // Return the name of the register.
- static const char* Name(int reg);
-
// Lookup the register number for the name provided.
static int Number(const char* name);
- struct RegisterAlias {
- int reg;
- const char* name;
- };
-
private:
static const char* names_[kNumRegisters];
- static const RegisterAlias aliases_[];
};
// Helper functions for converting between FP register numbers and names.
-class FPRegisters {
+class DoubleRegisters {
public:
- // Return the name of the register.
- static const char* Name(int reg);
-
// Lookup the register number for the name provided.
static int Number(const char* name);
private:
- static const char* names_[kNumFPRegisters];
+ static const char* names_[kNumDoubleRegisters];
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_CONSTANTS_PPC_H_
diff --git a/chromium/v8/src/ppc/deoptimizer-ppc.cc b/chromium/v8/src/ppc/deoptimizer-ppc.cc
index 3e4511f78f2..4232342b930 100644
--- a/chromium/v8/src/ppc/deoptimizer-ppc.cc
+++ b/chromium/v8/src/ppc/deoptimizer-ppc.cc
@@ -5,6 +5,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -44,14 +45,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->bkpt(0);
}
}
@@ -74,7 +76,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
@@ -99,7 +101,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -123,7 +125,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -150,15 +152,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit();
- const int kDoubleRegsSize =
- kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
- // Save all FPU registers before messing with them.
+ // Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
- DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ stfd(fpu_reg, MemOperand(sp, offset));
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ stfd(dreg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -215,11 +219,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ // Copy double registers to
+ // double_registers_[DoubleRegister::kNumRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ lfd(d0, MemOperand(sp, src_offset));
__ stfd(d0, MemOperand(r4, dst_offset));
}
@@ -291,9 +296,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ blt(&outer_push_loop);
__ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
- const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister dreg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
__ lfd(dreg, MemOperand(r4, src_offset));
}
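
The loops above stop assuming allocatable double registers are densely numbered 0..N-1: they walk RegisterConfiguration's allocatable codes and key each stack offset on the register code rather than the loop index, so holes in the allocatable set keep their architectural slots. A sketch with simplified types:

    // Simplified stand-in for v8::internal::RegisterConfiguration.
    struct RegisterConfiguration {
      int num_allocatable_double_registers;
      const int* allocatable_double_codes;  // e.g. {0, 1, 2, 5, ...}
    };

    constexpr int kDoubleSize = 8;

    // d5 always lives at 5 * kDoubleSize, even if d3 and d4 are not
    // allocatable and the loop never visits them.
    void FillSaveOffsets(const RegisterConfiguration& config, int* offsets) {
      for (int i = 0; i < config.num_allocatable_double_registers; ++i) {
        const int code = config.allocatable_double_codes[i];
        offsets[i] = code * kDoubleSize;
      }
    }
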
diff --git a/chromium/v8/src/ppc/disasm-ppc.cc b/chromium/v8/src/ppc/disasm-ppc.cc
index 5d7de8a0b42..d9450f8a422 100644
--- a/chromium/v8/src/ppc/disasm-ppc.cc
+++ b/chromium/v8/src/ppc/disasm-ppc.cc
@@ -78,6 +78,7 @@ class Decoder {
void DecodeExt1(Instruction* instr);
void DecodeExt2(Instruction* instr);
+ void DecodeExt3(Instruction* instr);
void DecodeExt4(Instruction* instr);
void DecodeExt5(Instruction* instr);
@@ -116,7 +117,9 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
-void Decoder::PrintDRegister(int reg) { Print(FPRegisters::Name(reg)); }
+void Decoder::PrintDRegister(int reg) {
+ Print(DoubleRegister::from_code(reg).ToString());
+}
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
@@ -607,6 +610,16 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "stfdux 'rs, 'ra, 'rb");
return;
}
+ case POPCNTW: {
+ Format(instr, "popcntw 'ra, 'rs");
+ return;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case POPCNTD: {
+ Format(instr, "popcntd 'ra, 'rs");
+ return;
+ }
+#endif
}
switch (instr->Bits(10, 2) << 2) {
@@ -870,6 +883,23 @@ void Decoder::DecodeExt2(Instruction* instr) {
}
+void Decoder::DecodeExt3(Instruction* instr) {
+ switch (instr->Bits(10, 1) << 1) {
+ case FCFID: {
+ Format(instr, "fcfids'. 'Dt, 'Db");
+ break;
+ }
+ case FCFIDU: {
+ Format(instr, "fcfidus'.'Dt, 'Db");
+ break;
+ }
+ default: {
+ Unknown(instr); // not used by V8
+ }
+ }
+}
+
+
void Decoder::DecodeExt4(Instruction* instr) {
switch (instr->Bits(5, 1) << 1) {
case FDIV: {
@@ -919,6 +949,10 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fcfid'. 'Dt, 'Db");
break;
}
+ case FCFIDU: {
+ Format(instr, "fcfidu'. 'Dt, 'Db");
+ break;
+ }
case FCTID: {
Format(instr, "fctid 'Dt, 'Db");
break;
@@ -927,6 +961,14 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fctidz 'Dt, 'Db");
break;
}
+ case FCTIDU: {
+ Format(instr, "fctidu 'Dt, 'Db");
+ break;
+ }
+ case FCTIDUZ: {
+ Format(instr, "fctiduz 'Dt, 'Db");
+ break;
+ }
case FCTIW: {
Format(instr, "fctiw'. 'Dt, 'Db");
break;
@@ -975,6 +1017,18 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fneg'. 'Dt, 'Db");
break;
}
+ case MCRFS: {
+ Format(instr, "mcrfs ?,?");
+ break;
+ }
+ case MTFSB0: {
+ Format(instr, "mtfsb0'. ?");
+ break;
+ }
+ case MTFSB1: {
+ Format(instr, "mtfsb1'. ?");
+ break;
+ }
default: {
Unknown(instr); // not used by V8
}
@@ -1287,7 +1341,10 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
Format(instr, "stfdu 'Dt, 'int16('ra)");
break;
}
- case EXT3:
+ case EXT3: {
+ DecodeExt3(instr);
+ break;
+ }
case EXT4: {
DecodeExt4(instr);
break;
@@ -1349,7 +1406,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
+ return v8::internal::Register::from_code(reg).ToString();
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
diff --git a/chromium/v8/src/ppc/frames-ppc.h b/chromium/v8/src/ppc/frames-ppc.h
index d5b6d3caa96..b1de9f50ffb 100644
--- a/chromium/v8/src/ppc/frames-ppc.h
+++ b/chromium/v8/src/ppc/frames-ppc.h
@@ -70,6 +70,8 @@ const RegList kCallerSavedDoubles = 1 << 0 | // d0
1 << 12 | // d12
1 << 13; // d13
+const int kNumCallerSavedDoubles = 14;
+
const RegList kCalleeSavedDoubles = 1 << 14 | // d14
1 << 15 | // d15
1 << 16 | // d16
@@ -185,7 +187,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_FRAMES_PPC_H_
diff --git a/chromium/v8/src/ppc/interface-descriptors-ppc.cc b/chromium/v8/src/ppc/interface-descriptors-ppc.cc
index c123e7c602d..b649f71ea35 100644
--- a/chromium/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/chromium/v8/src/ppc/interface-descriptors-ppc.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
+const Register RestParamAccessDescriptor::parameter_count() { return r5; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return r6; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return r7; }
+
+
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -78,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5};
@@ -108,6 +105,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return r3; }
@@ -129,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r6, r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r6, r5, r4};
@@ -191,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : original constructor (for IsSuperConstructorCall)
+ // r7 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r3, r4, r7, r5};
@@ -208,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : allocation site or undefined
+ Register registers[] = {r4, r6, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ Register registers[] = {r4, r6, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
@@ -228,6 +257,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -338,6 +374,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // JSFunction
+ r6, // the new target
r3, // actual number of arguments
r5, // expected number of arguments
};
@@ -370,33 +407,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- r4, // math rounding function
- r6, // vector slot id
+ r3, // argument count (not including receiver)
+ r5, // address of first argument
+ r4 // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- r4, // math rounding function
- r6, // vector slot id
- r7, // type vector
+ r3, // argument count (not including receiver)
+ r6, // new target
+ r4, // constructor to call
+ r5 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- r3, // argument count (including receiver)
- r5, // address of first argument
- r4 // the target callable to be call
+ r3, // argument count (argc)
+ r5, // address of first argument (argv)
+ r4 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/chromium/v8/src/ppc/macro-assembler-ppc.cc b/chromium/v8/src/ppc/macro-assembler-ppc.cc
index e973471572e..9cd35ab01c3 100644
--- a/chromium/v8/src/ppc/macro-assembler-ppc.cc
+++ b/chromium/v8/src/ppc/macro-assembler-ppc.cc
@@ -12,19 +12,22 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
-#include "src/ppc/macro-assembler-ppc.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
- Handle<Object>(isolate()->heap()->undefined_value(), isolate());
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -564,7 +567,7 @@ void MacroAssembler::PopFixedFrame(Register marker_reg) {
const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
- Register::kMaxNumAllocatableRegisters;
+ Register::kNumAllocatable;
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
@@ -623,7 +626,9 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -654,9 +659,38 @@ void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
const Register src,
const Register int_scratch) {
MovIntToDouble(dst, src, int_scratch);
- fcfid(dst, dst);
- frsp(dst, dst);
+ fcfids(dst, dst);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::ConvertInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfid(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfidus(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfidu(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfids(double_dst, double_dst);
}
+#endif
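
The new ConvertInt64To*/ConvertUnsignedInt64To* helpers wrap fcfid, fcfidu, fcfids and fcfidus; the signed/unsigned split lies purely in how the 64-bit payload is interpreted before conversion, and the -s variants additionally round to single precision. In portable terms:

    #include <cstdint>

    // fcfid: signed 64-bit integer -> double.
    double Int64ToDouble(int64_t bits) { return static_cast<double>(bits); }

    // fcfidu: same payload read as unsigned, hence the separate instruction.
    double UnsignedInt64ToDouble(uint64_t bits) {
      return static_cast<double>(bits);
    }

    // fcfids: as fcfid, but the result is rounded to single precision.
    float Int64ToFloat(int64_t bits) { return static_cast<float>(bits); }
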
void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
@@ -681,6 +715,22 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
dst, double_dst);
}
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+ if (rounding_mode == kRoundToZero) {
+ fctiduz(double_dst, double_input);
+ } else {
+ SetRoundingMode(rounding_mode);
+ fctidu(double_dst, double_input);
+ ResetRoundingMode();
+ }
+
+ MovDoubleToInt64(dst, double_dst);
+}
+#endif
+
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@@ -703,20 +753,26 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
}
-void MacroAssembler::StubPrologue(int prologue_offset) {
+void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
PushFixedFrame(r11);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_embedded_constant_pool) {
- // ip contains prologue address
- LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ if (!base.is(no_reg)) {
+ // base contains prologue address
+ LoadConstantPoolPointerRegister(base, -prologue_offset);
+ } else {
+ LoadConstantPoolPointerRegister();
+ }
set_constant_pool_available(true);
}
}
-void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
+void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+ int prologue_offset) {
+ DCHECK(!base.is(no_reg));
{
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
@@ -746,8 +802,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
}
if (FLAG_enable_embedded_constant_pool) {
- // ip contains prologue address
- LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ // base contains prologue address
+ LoadConstantPoolPointerRegister(base, -prologue_offset);
set_constant_pool_available(true);
}
}
@@ -863,7 +919,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
MultiPushDoubles(kCallerSavedDoubles);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
- // kNumVolatileRegisters * kDoubleSize,
+ // kNumCallerSavedDoubles * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
@@ -922,7 +978,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
- const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
+ const int kNumRegs = kNumCallerSavedDoubles;
const int offset =
(ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
addi(r6, fp, Operand(-offset));
@@ -967,9 +1023,7 @@ void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg, Label* done,
+ const ParameterCount& actual, Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -990,8 +1044,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// ARM has some sanity checks as per below; consider adding them for PPC
// DCHECK(actual.is_immediate() || actual.reg().is(r3));
// DCHECK(expected.is_immediate() || expected.reg().is(r5));
- // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
- // || code_reg.is(r6));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1023,11 +1075,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r6, Operand(code_constant));
- addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
@@ -1044,17 +1091,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ mov(r7, Operand(step_in_enabled));
+ lbz(r7, MemOperand(r7));
+ cmpi(r7, Operand::Zero());
+ beq(&skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun, fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(r4));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
- &definitely_mismatches, flag, call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = ip;
+ LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
CallJSEntry(code);
@@ -1071,7 +1179,8 @@ void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
}
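
InvokeFunctionCode now loads the code entry out of the function object at the call, which is what the inline comment relies on: updating the function's code field retargets every future call without patching callers. In miniature, with hypothetical stand-in types:

    // Hypothetical stand-in for a JSFunction's code-entry field.
    struct JSFunction {
      void (*code_entry)();  // JSFunction::kCodeEntryOffset in the real layout
    };

    // The field is read at call time, so recompilation (storing a new
    // code_entry) takes effect immediately for every caller.
    void InvokeFunctionCode(JSFunction* function) { function->code_entry(); }
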
-void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
+ const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
@@ -1081,20 +1190,19 @@ void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
DCHECK(fun.is(r4));
Register expected_reg = r5;
- Register code_reg = ip;
+ Register temp_reg = r7;
- LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadWordArith(expected_reg,
FieldMemOperand(
- code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+ temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
#if !defined(V8_TARGET_ARCH_PPC64)
SmiUntag(expected_reg);
#endif
- LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}
@@ -1112,11 +1220,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- InvokeCode(ip, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
}
@@ -1203,10 +1307,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- LoadP(scratch, FieldMemOperand(scratch, offset));
- LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1396,11 +1497,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1419,26 +1516,26 @@ void MacroAssembler::Allocate(int object_size, Register result,
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address register.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top), r0);
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1448,15 +1545,15 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
@@ -1464,17 +1561,17 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if (is_int16(object_size)) {
cmpi(r0, Operand(object_size));
blt(gc_required);
- addi(scratch2, result, Operand(object_size));
+ addi(result_end, result, Operand(object_size));
} else {
- Cmpi(r0, Operand(object_size), scratch2);
+ Cmpi(r0, Operand(object_size), result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
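// --- Editor sketch (illustrative, not part of the patch). The fast path
// above is bump-pointer allocation: load top and limit, bail to the GC label
// when the request does not fit, otherwise advance top. The same logic over
// a plain top/limit pair:
#include <cstddef>
#include <cstdint>
static void* BumpAllocateSketch(uintptr_t* top, uintptr_t limit,
                                size_t object_size) {
  if (limit - *top < object_size) return nullptr;  // caller goes to gc_required
  void* result = reinterpret_cast<void*>(*top);
  *top += object_size;  // StoreP(result_end, MemOperand(top_address)) above
  return result;
}
// --- end sketch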
@@ -1484,28 +1581,24 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
- Register scratch1, Register scratch2,
+ Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, Operand(0x7091));
- li(scratch1, Operand(0x7191));
- li(scratch2, Operand(0x7291));
+ li(scratch, Operand(0x7191));
+ li(result_end, Operand(0x7291));
}
b(gc_required);
return;
}
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(ip));
- DCHECK(!result.is(ip));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
ExternalReference allocation_top =
@@ -1516,27 +1609,26 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1546,15 +1638,15 @@ void MacroAssembler::Allocate(Register object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
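// --- Editor sketch (illustrative, not part of the patch). On 32-bit
// targets DOUBLE_ALIGNMENT is achieved by planting a one-word filler object
// whenever the allocation top is only pointer-aligned, then bumping past it;
// assuming top is a writable address:
#include <cstdint>
static uintptr_t AlignForDoubleSketch(uintptr_t top, uintptr_t filler_map,
                                      unsigned pointer_size) {
  const uintptr_t double_alignment_mask = 2 * pointer_size - 1;
  if (top & double_alignment_mask) {
    *reinterpret_cast<uintptr_t*>(top) = filler_map;  // one_pointer_filler_map
    top += pointer_size;  // kDoubleSize / 2 on these targets
  }
  return top;
}
// --- end sketch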
@@ -1563,24 +1655,24 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if ((flags & SIZE_IN_WORDS) != 0) {
- ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
- cmp(r0, scratch2);
+ ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
+ cmp(r0, result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
} else {
cmp(r0, object_size);
blt(gc_required);
- add(scratch2, result, object_size);
+ add(result_end, result, object_size);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- andi(r0, scratch2, Operand(kObjectAlignmentMask));
+ andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1749,6 +1841,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register value_reg, Register key_reg, Register elements_reg,
Register scratch1, DoubleRegister double_scratch, Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
@@ -2225,22 +2318,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r3, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ mov(r3, Operand(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
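// --- Editor sketch (illustrative, not part of the patch). The new
// TailCallRuntime() derives the argument count from the runtime function
// table instead of taking it as a parameter; variadic entries (nargs < 0)
// keep whatever count the caller already placed in r3:
struct RuntimeFunctionSketch { int nargs; int result_size; };
static void TailCallRuntimeSketch(const RuntimeFunctionSketch& f,
                                  int* r3_arg_count) {
  // The real code also DCHECKs that result_size == 1.
  if (f.nargs >= 0) *r3_arg_count = f.nargs;  // fixed arity: materialize count
  // ...then tail-jump to the runtime entry (JumpToExternalReference).
}
// --- end sketch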
@@ -2256,35 +2340,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(ip, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(ip));
- CallJSEntry(ip);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpToJSEntry(ip);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- LoadP(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- LoadP(target, ContextOperand(target, native_context_index), r0);
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(r4));
- GetBuiltinFunction(r4, native_context_index);
- // Load the code entry point from the builtins object.
- LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, r4);
+ InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
}
@@ -2406,43 +2465,27 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- LoadP(dst, GlobalObjectOperand());
- LoadP(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind, ElementsKind transitioned_kind,
Register map_in_out, Register scratch, Label* no_map_match) {
- // Load the global or builtins object from the current context.
- LoadP(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- LoadP(scratch,
- MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(ip, FieldMemOperand(scratch, offset));
+ LoadP(scratch, NativeContextMemOperand());
+ LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
bne(no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(map_in_out, FieldMemOperand(scratch, offset));
+ LoadP(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- LoadP(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- LoadP(function,
- FieldMemOperand(function, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ LoadP(dst, NativeContextMemOperand());
+ LoadP(dst, ContextMemOperand(dst, index));
}
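// --- Editor sketch (illustrative, not part of the patch). With the native
// context reachable directly from any context, helpers like
// LoadNativeContextSlot() and the rewritten InvokeBuiltin() reduce to two
// dependent slot loads. Modeled with a hypothetical slot-array context:
#include <cstddef>
using ContextSketch = void**;  // hypothetical: a context as an array of slots
static void* LoadNativeContextSlotSketch(ContextSketch current_context,
                                         size_t native_context_index,
                                         size_t slot_index) {
  ContextSketch native =
      static_cast<ContextSketch>(current_context[native_context_index]);
  return native[slot_index];  // e.g. a builtin function or cached array map
}
// --- end sketch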
@@ -2601,6 +2644,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
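// --- Editor sketch (illustrative, not part of the patch). The Assert*
// helpers share one shape: reject smis first (with kSmiTag == 0 a smi has a
// clear low bit), then compare the heap object's instance type. The smi
// test, over a hypothetical tagged word:
#include <cassert>
#include <cstdint>
static void AssertNotSmiSketch(uintptr_t tagged_value) {
  assert((tagged_value & 1) != 0 && "operand is a smi, not a heap object");
  // ...followed by CompareObjectType(..., JS_BOUND_FUNCTION_TYPE) above.
}
// --- end sketch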
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2711,29 +2767,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
- int field_count) {
- // At least one bit set in the first 15 registers.
- DCHECK((temps & ((1 << 15) - 1)) != 0);
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
- StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
+ StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
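// --- Editor sketch (illustrative, not part of the patch). AllocateJSValue()
// replaces the removed generic CopyFields(): it allocates JSValue::kSize
// bytes and fills the four fields explicitly. As a struct-shaped model in
// the order of the offsets stored above:
struct JSValueSketch {
  void* map;         // constructor's initial map
  void* properties;  // empty_fixed_array
  void* elements;    // empty_fixed_array
  void* value;       // the wrapped primitive; JSValue::kSize == 4 words
};
static void InitializeJSValueSketch(JSValueSketch* result, void* initial_map,
                                    void* empty_fixed_array, void* value) {
  result->map = initial_map;
  result->properties = empty_fixed_array;
  result->elements = empty_fixed_array;
  result->value = value;
}
// --- end sketch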
@@ -2844,25 +2896,25 @@ void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
}
-void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
+void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {
Label loop;
mtctr(count);
bind(&loop);
- StoreP(filler, MemOperand(start_offset));
- addi(start_offset, start_offset, Operand(kPointerSize));
+ StoreP(filler, MemOperand(current_address));
+ addi(current_address, current_address, Operand(kPointerSize));
bdnz(&loop);
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label done;
- sub(r0, end_offset, start_offset, LeaveOE, SetRC);
+ sub(r0, end_address, current_address, LeaveOE, SetRC);
beq(&done, cr0);
ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
- InitializeNFieldsWithFiller(start_offset, r0, filler);
+ InitializeNFieldsWithFiller(current_address, r0, filler);
bind(&done);
}
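// --- Editor sketch (illustrative, not part of the patch). Both filler
// helpers are a counted store loop; the CTR register (mtctr/bdnz) plays the
// loop counter. The equivalent in C++:
#include <cstdint>
static void InitializeFieldsWithFillerSketch(uintptr_t* current_address,
                                             uintptr_t* end_address,
                                             uintptr_t filler) {
  while (current_address < end_address) {
    *current_address++ = filler;  // one StoreP + addi per iteration
  }
}
// --- end sketch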
@@ -3038,17 +3090,16 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ Register dest = function;
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// AIX uses a function descriptor. When calling C code be aware
// of this descriptor and pick up values from it
LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
LoadP(ip, MemOperand(function, 0));
- Register dest = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ dest = ip;
+#elif ABI_CALL_VIA_IP
Move(ip, function);
- Register dest = ip;
-#else
- Register dest = function;
+ dest = ip;
#endif
Call(dest);
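// --- Editor sketch (illustrative, not part of the patch). On ABIs that use
// function descriptors (AIX), a C function "pointer" is a record holding the
// real entry address and a TOC base; the block above dereferences it before
// the call:
#include <cstdint>
struct FunctionDescriptorSketch {
  uintptr_t entry;  // code address -> ends up in ip
  uintptr_t toc;    // table-of-contents base -> ends up in the TOC register
};
static uintptr_t ResolveCallTargetSketch(const FunctionDescriptorSketch* fn,
                                         uintptr_t* toc_register) {
  *toc_register = fn->toc;  // LoadP(ToRegister(ABI_TOC_REGISTER), ...)
  return fn->entry;         // LoadP(ip, MemOperand(function, 0))
}
// --- end sketch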
@@ -3150,8 +3201,8 @@ void MacroAssembler::CheckPageFlag(
void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
Register scratch1, Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -3184,27 +3235,6 @@ void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- beq(&is_data_object);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
- andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- bne(not_data_object, cr0);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg) {
DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
@@ -3221,117 +3251,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
}
-void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(r0, mask_scratch, load_scratch, SetRC);
- bne(&done, cr0);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- slwi(r0, mask_scratch, Operand(1));
- and_(r0, load_scratch, r0, SetRC);
- beq(&ok, cr0);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object, maybe_string_object, is_string_object, is_encoded;
-#if V8_TARGET_ARCH_PPC64
- Label length_computed;
-#endif
-
-
- // Check for heap-number
- LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- bne(&maybe_string_object);
- li(length, Operand(HeapNumber::kSize));
- b(&is_data_object);
- bind(&maybe_string_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- bne(value_is_white_and_not_data, cr0);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- andi(r0, instance_type, Operand(kExternalStringTag));
- beq(&is_string_object, cr0);
- li(length, Operand(ExternalString::kSize));
- b(&is_data_object);
- bind(&is_string_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we untag the smi to get the length.
- // For UC16 (char-size of 2):
- // - (32-bit) we just leave the smi tag in place, thereby getting
- // the length multiplied by 2.
- // - (64-bit) we compute the offset in the 2-byte array
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
- andi(r0, instance_type, Operand(kStringEncodingMask));
- beq(&is_encoded, cr0);
- SmiUntag(ip);
-#if V8_TARGET_ARCH_PPC64
- b(&length_computed);
-#endif
- bind(&is_encoded);
-#if V8_TARGET_ARCH_PPC64
- SmiToShortArrayOffset(ip, ip);
- bind(&length_computed);
-#else
- DCHECK(kSmiShift == 1);
-#endif
- addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- li(r0, Operand(~kObjectAlignmentMask));
- and_(length, length, r0);
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orx(ip, ip, mask_scratch);
- stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- mov(ip, Operand(~Page::kPageAlignmentMask));
- and_(bitmap_scratch, bitmap_scratch, ip);
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, length);
- stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ beq(value_is_white, cr0);
}
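// --- Editor sketch (illustrative, not part of the patch). With the new
// patterns (white 00, grey 10, black 11) both live colors have the first
// mark bit set, so "is white" is a single masked test on the bitmap cell,
// which is why the long EnsureNotWhite() body shrinks to JumpIfWhite():
#include <cstdint>
static bool IsWhiteSketch(uint32_t bitmap_cell, uint32_t first_bit_mask) {
  return (bitmap_cell & first_bit_mask) == 0;  // clear first bit -> white
}
// --- end sketch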
@@ -4250,8 +4186,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
@@ -4299,10 +4238,12 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8) {
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -4313,6 +4254,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
@@ -4320,11 +4263,11 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
#endif
-CodePatcher::CodePatcher(byte* address, int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
@@ -4336,7 +4279,7 @@ CodePatcher::CodePatcher(byte* address, int instructions,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
diff --git a/chromium/v8/src/ppc/macro-assembler-ppc.h b/chromium/v8/src/ppc/macro-assembler-ppc.h
index f87c563e725..78de89aa5c6 100644
--- a/chromium/v8/src/ppc/macro-assembler-ppc.h
+++ b/chromium/v8/src/ppc/macro-assembler-ppc.h
@@ -14,17 +14,19 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_r3_Code};
-const Register kReturnRegister1 = {kRegister_r4_Code};
-const Register kJSFunctionRegister = {kRegister_r4_Code};
-const Register kContextRegister = {kRegister_r30_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_r3_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_r14_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_r15_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_r16_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_r17_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_r4_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_r3_Code};
+const Register kReturnRegister0 = {Register::kCode_r3};
+const Register kReturnRegister1 = {Register::kCode_r4};
+const Register kJSFunctionRegister = {Register::kCode_r4};
+const Register kContextRegister = {Register::kCode_r30};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r14};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
// ----------------------------------------------------------------------------
// Static helper functions
@@ -64,7 +66,8 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg);
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
#endif
// These exist to provide portability between 32 and 64bit
@@ -108,11 +111,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Returns the size of a call in instructions. Note, the value returned is
@@ -216,18 +216,10 @@ class MacroAssembler : public Assembler {
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
- Register scratch3, Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value, Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -385,6 +377,13 @@ class MacroAssembler : public Assembler {
void ConvertIntToFloat(const DoubleRegister dst, const Register src,
const Register int_scratch);
+#if V8_TARGET_ARCH_PPC64
+ void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
+#endif
+
// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToInt64(const DoubleRegister double_input,
@@ -394,9 +393,18 @@ class MacroAssembler : public Assembler {
const Register dst, const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
+#if V8_TARGET_ARCH_PPC64
+ // Converts the double_input to an unsigned integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+#endif
+
// Generates function and stub prologue code.
- void StubPrologue(int prologue_offset = 0);
- void Prologue(bool code_pre_aging, int prologue_offset = 0);
+ void StubPrologue(Register base = no_reg, int prologue_offset = 0);
+ void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
@@ -415,8 +423,15 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -428,7 +443,7 @@ class MacroAssembler : public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -536,14 +551,20 @@ class MacroAssembler : public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function, const ParameterCount& actual,
- InvokeFlag flag, const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@@ -635,8 +656,8 @@ class MacroAssembler : public Assembler {
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
- void Allocate(Register object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
@@ -669,8 +690,11 @@ class MacroAssembler : public Assembler {
Register heap_number_map,
Label* gc_required);
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -678,17 +702,17 @@ class MacroAssembler : public Assembler {
void CopyBytes(Register src, Register dst, Register length, Register scratch);
// Initialize fields with filler values. |count| fields starting at
- // |start_offset| are overwritten with the value in |filler|. At the end the
- // loop, |start_offset| points at the next uninitialized field. |count| is
- // assumed to be non-zero.
- void InitializeNFieldsWithFiller(Register start_offset, Register count,
+ // |current_address| are overwritten with the value in |filler|. At the end
+ // of the loop, |current_address| points at the next uninitialized field.
+ // |count| is assumed to be non-zero.
+ void InitializeNFieldsWithFiller(Register current_address, Register count,
Register filler);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -913,29 +937,29 @@ class MacroAssembler : public Assembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -984,13 +1008,6 @@ class MacroAssembler : public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1311,6 +1328,10 @@ class MacroAssembler : public Assembler {
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1477,8 +1498,7 @@ class MacroAssembler : public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual, Handle<Code> code_constant,
- Register code_reg, Label* done,
+ const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1525,7 +1545,8 @@ class CodePatcher {
public:
enum FlushICache { FLUSH, DONT_FLUSH };
- CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
+ FlushICache flush_cache = FLUSH);
~CodePatcher();
// Macro assembler to emit code.
@@ -1549,13 +1570,13 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index = 0) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -1569,7 +1590,7 @@ inline MemOperand GlobalObjectOperand() {
#else
#define ACCESS_MASM(masm) masm->
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
diff --git a/chromium/v8/src/ppc/simulator-ppc.cc b/chromium/v8/src/ppc/simulator-ppc.cc
index 518f8fae758..0efa6605d58 100644
--- a/chromium/v8/src/ppc/simulator-ppc.cc
+++ b/chromium/v8/src/ppc/simulator-ppc.cc
@@ -164,7 +164,7 @@ bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
- int regnum = FPRegisters::Number(desc);
+ int regnum = DoubleRegisters::Number(desc);
if (regnum != kNoRegister) {
*value = sim_->get_double_from_d_register(regnum);
return true;
@@ -313,7 +313,8 @@ void PPCDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF(" %3s: %08" V8PRIxPTR, Registers::Name(i), value);
+ PrintF(" %3s: %08" V8PRIxPTR,
+ Register::from_code(i).ToString(), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -332,7 +333,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- Registers::Name(i), value, value);
+ Register::from_code(i).ToString(), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -351,7 +352,8 @@ void PPCDebugger::Debug() {
for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n", FPRegisters::Name(i), dvalue,
+ PrintF("%3s: %f 0x%08x %08x\n",
+ DoubleRegister::from_code(i).ToString(), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
}
@@ -443,7 +445,7 @@ void PPCDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
intptr_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -842,12 +844,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->FlushICache(
isolate->simulator_i_cache(),
@@ -862,9 +864,8 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -872,7 +873,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -917,9 +918,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -1847,6 +1849,36 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
}
break;
}
+ case POPCNTW: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t count = 0;
+ int n = 0;
+ uintptr_t bit = 0x80000000;
+ for (; n < 32; n++) {
+ if (bit & rs_val) count++;
+ bit >>= 1;
+ }
+ set_register(ra, count);
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case POPCNTD: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uintptr_t rs_val = get_register(rs);
+ uintptr_t count = 0;
+ int n = 0;
+ uintptr_t bit = 0x8000000000000000UL;
+ for (; n < 64; n++) {
+ if (bit & rs_val) count++;
+ bit >>= 1;
+ }
+ set_register(ra, count);
+ break;
+ }
+#endif
case SYNC: {
// todo - simulate sync
break;
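// --- Editor sketch (illustrative, not part of the patch). The POPCNTW /
// POPCNTD cases above count set bits by scanning with a moving mask; the
// same result, portably:
#include <cstdint>
static int PopcountSketch(uint64_t value) {
  int count = 0;
  while (value) {
    value &= value - 1;  // clear the lowest set bit
    ++count;
  }
  return count;
}
// --- end sketch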
@@ -2663,6 +2695,32 @@ void Simulator::ExecuteExt2(Instruction* instr) {
}
+void Simulator::ExecuteExt3(Instruction* instr) {
+ int opcode = instr->Bits(10, 1) << 1;
+ switch (opcode) {
+ case FCFID: {
+ // fcfids
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ int64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<float>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCFIDU: {
+ // fcfidus
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ uint64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<float>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ }
+ UNIMPLEMENTED(); // Not used by V8.
+}
+
+
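// --- Editor sketch (illustrative, not part of the patch). fcfids/fcfidus
// produce single-precision results; the simulator models that by rounding
// through float before widening back to the double register format:
#include <cstdint>
static double Int64ToSingleSketch(int64_t value) {
  return static_cast<double>(static_cast<float>(value));  // round to float
}
// --- end sketch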
void Simulator::ExecuteExt4(Instruction* instr) {
switch (instr->Bits(5, 1) << 1) {
case FDIV: {
@@ -2696,10 +2754,11 @@ void Simulator::ExecuteExt4(Instruction* instr) {
return;
}
case FSQRT: {
+ lazily_initialize_fast_sqrt(isolate_);
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = fast_sqrt(frb_val);
+ double frt_val = fast_sqrt(frb_val, isolate_);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -2836,64 +2895,107 @@ void Simulator::ExecuteExt4(Instruction* instr) {
case FCFID: {
int frt = instr->RTValue();
int frb = instr->RBValue();
- double t_val = get_double_from_d_register(frb);
- int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
- double frt_val = static_cast<double>(*frb_val_p);
+ int64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<double>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCFIDU: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ uint64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<double>(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
- case FCTID: {
+ case FCTID:
+ case FCTIDZ: {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
+ int mode = (opcode == FCTIDZ) ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
int64_t frt_val;
int64_t one = 1; // work-around gcc
- int64_t kMinLongLong = (one << 63);
- int64_t kMaxLongLong = kMinLongLong - 1;
+ int64_t kMinVal = (one << 63);
+ int64_t kMaxVal = kMinVal - 1;
+ bool invalid_convert = false;
- if (frb_val > kMaxLongLong) {
- frt_val = kMaxLongLong;
- } else if (frb_val < kMinLongLong) {
- frt_val = kMinLongLong;
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
} else {
- switch (fp_condition_reg_ & kFPRoundingModeMask) {
+ switch (mode) {
case kRoundToZero:
- frt_val = (int64_t)frb_val;
+ frb_val = std::trunc(frb_val);
break;
case kRoundToPlusInf:
- frt_val = (int64_t)std::ceil(frb_val);
+ frb_val = std::ceil(frb_val);
break;
case kRoundToMinusInf:
- frt_val = (int64_t)std::floor(frb_val);
+ frb_val = std::floor(frb_val);
break;
default:
- frt_val = (int64_t)frb_val;
UNIMPLEMENTED(); // Not used by V8.
break;
}
+ if (frb_val < static_cast<double>(kMinVal)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
+ } else if (frb_val >= static_cast<double>(kMaxVal)) {
+ frt_val = kMaxVal;
+ invalid_convert = true;
+ } else {
+ frt_val = (int64_t)frb_val;
+ }
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
+ if (invalid_convert) SetFPSCR(VXCVI);
return;
}
- case FCTIDZ: {
+ case FCTIDU:
+ case FCTIDUZ: {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- int64_t frt_val;
- int64_t one = 1; // work-around gcc
- int64_t kMinLongLong = (one << 63);
- int64_t kMaxLongLong = kMinLongLong - 1;
-
- if (frb_val > kMaxLongLong) {
- frt_val = kMaxLongLong;
- } else if (frb_val < kMinLongLong) {
- frt_val = kMinLongLong;
+ int mode = (opcode == FCTIDUZ)
+ ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
+ uint64_t frt_val;
+ uint64_t kMinVal = 0;
+ uint64_t kMaxVal = kMinVal - 1;
+ bool invalid_convert = false;
+
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
} else {
- frt_val = (int64_t)frb_val;
+ switch (mode) {
+ case kRoundToZero:
+ frb_val = std::trunc(frb_val);
+ break;
+ case kRoundToPlusInf:
+ frb_val = std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frb_val = std::floor(frb_val);
+ break;
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ if (frb_val < static_cast<double>(kMinVal)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
+ } else if (frb_val >= static_cast<double>(kMaxVal)) {
+ frt_val = kMaxVal;
+ invalid_convert = true;
+ } else {
+ frt_val = (uint64_t)frb_val;
+ }
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
+ if (invalid_convert) SetFPSCR(VXCVI);
return;
}
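// --- Editor sketch (illustrative, not part of the patch). The rewritten
// FCTID/FCTIDU handling rounds first, then saturates, and raises VXCVI for
// NaN or out-of-range inputs. The signed variant in portable C++, using
// std::nearbyint (host rounding mode) to stand in for the FPSCR mode:
#include <cmath>
#include <cstdint>
static int64_t FctidSketch(double input, bool round_to_zero, bool* invalid) {
  *invalid = false;
  if (std::isnan(input)) { *invalid = true; return INT64_MIN; }
  double r = round_to_zero ? std::trunc(input) : std::nearbyint(input);
  if (r < static_cast<double>(INT64_MIN)) { *invalid = true; return INT64_MIN; }
  if (r >= static_cast<double>(INT64_MAX)) { *invalid = true; return INT64_MAX; }
  return static_cast<int64_t>(r);
}
// --- end sketch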
case FCTIW:
@@ -2901,44 +3003,47 @@ void Simulator::ExecuteExt4(Instruction* instr) {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
+ int mode = (opcode == FCTIWZ) ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
int64_t frt_val;
- if (frb_val > kMaxInt) {
- frt_val = kMaxInt;
- } else if (frb_val < kMinInt) {
- frt_val = kMinInt;
- } else {
- if (opcode == FCTIWZ) {
- frt_val = (int64_t)frb_val;
- } else {
- switch (fp_condition_reg_ & kFPRoundingModeMask) {
- case kRoundToZero:
- frt_val = (int64_t)frb_val;
- break;
- case kRoundToPlusInf:
- frt_val = (int64_t)std::ceil(frb_val);
- break;
- case kRoundToMinusInf:
- frt_val = (int64_t)std::floor(frb_val);
- break;
- case kRoundToNearest:
- frt_val = (int64_t)lround(frb_val);
-
- // Round to even if exactly halfway. (lround rounds up)
- if (std::fabs(static_cast<double>(frt_val) - frb_val) == 0.5 &&
- (frt_val % 2)) {
- frt_val += ((frt_val > 0) ? -1 : 1);
- }
+ int64_t kMinVal = kMinInt;
+ int64_t kMaxVal = kMaxInt;
- break;
- default:
- DCHECK(false);
- frt_val = (int64_t)frb_val;
- break;
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ } else {
+ switch (mode) {
+ case kRoundToZero:
+ frb_val = std::trunc(frb_val);
+ break;
+ case kRoundToPlusInf:
+ frb_val = std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frb_val = std::floor(frb_val);
+ break;
+ case kRoundToNearest: {
+ double orig = frb_val;
+ frb_val = lround(frb_val);
+ // Round to even if exactly halfway. (lround rounds up)
+ if (std::fabs(frb_val - orig) == 0.5 && ((int64_t)frb_val % 2)) {
+ frb_val += ((frb_val > 0) ? -1.0 : 1.0);
+ }
+ break;
}
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ if (frb_val < kMinVal) {
+ frt_val = kMinVal;
+ } else if (frb_val > kMaxVal) {
+ frt_val = kMaxVal;
+ } else {
+ frt_val = (int64_t)frb_val;
}
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
return;
}
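// --- Editor sketch (illustrative, not part of the patch). FCTIW's
// kRoundToNearest case corrects std::lround (which rounds halfway cases away
// from zero) to round-half-to-even:
#include <cmath>
#include <cstdint>
static int64_t RoundHalfToEvenSketch(double value) {
  int64_t rounded = std::lround(value);
  if (std::fabs(static_cast<double>(rounded) - value) == 0.5 &&
      (rounded % 2) != 0) {
    rounded += (rounded > 0) ? -1 : 1;  // step back to the even neighbour
  }
  return rounded;
}
// --- end sketch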
case FNEG: {
@@ -2991,6 +3096,43 @@ void Simulator::ExecuteExt4(Instruction* instr) {
set_d_register(frt, lval);
return;
}
+ case MCRFS: {
+ int bf = instr->Bits(25, 23);
+ int bfa = instr->Bits(20, 18);
+ int cr_shift = (7 - bf) * CRWIDTH;
+ int fp_shift = (7 - bfa) * CRWIDTH;
+ int field_val = (fp_condition_reg_ >> fp_shift) & 0xf;
+ condition_reg_ &= ~(0x0f << cr_shift);
+ condition_reg_ |= (field_val << cr_shift);
+ // Clear copied exception bits
+ switch (bfa) {
+ case 5:
+ ClearFPSCR(VXSOFT);
+ ClearFPSCR(VXSQRT);
+ ClearFPSCR(VXCVI);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return;
+ }
+ case MTFSB0: {
+ int bt = instr->Bits(25, 21);
+ ClearFPSCR(bt);
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ }
+ return;
+ }
+ case MTFSB1: {
+ int bt = instr->Bits(25, 21);
+ SetFPSCR(bt);
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ }
+ return;
+ }
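// --- Editor sketch (illustrative, not part of the patch). MCRFS copies one
// 4-bit FPSCR field into a CR field; PowerPC numbers bits from the most
// significant end, hence the (7 - field) * 4 shifts here and the
// (31 - bit) shifts in SetFPSCR/ClearFPSCR in the header below:
#include <cstdint>
static uint32_t McrfsSketch(uint32_t cr, uint32_t fpscr, int bf, int bfa) {
  const int kFieldWidth = 4;  // CRWIDTH in the simulator
  int cr_shift = (7 - bf) * kFieldWidth;
  int fp_shift = (7 - bfa) * kFieldWidth;
  uint32_t field_val = (fpscr >> fp_shift) & 0xf;
  cr &= ~(0xfu << cr_shift);
  cr |= field_val << cr_shift;
  return cr;
}
// --- end sketch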
case FABS: {
int frt = instr->RTValue();
int frb = instr->RBValue();
@@ -3578,8 +3720,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
- case EXT3:
- UNIMPLEMENTED();
+ case EXT3: {
+ ExecuteExt3(instr);
+ break;
+ }
case EXT4: {
ExecuteExt4(instr);
break;
@@ -3717,6 +3861,9 @@ void Simulator::CallInternal(byte* entry) {
set_pc(reinterpret_cast<intptr_t>(entry));
#endif
+ // Put target address in ip (for JS prologue).
+ set_register(r12, get_pc());
+
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
// the LR the simulation stops when returning to this call point.
diff --git a/chromium/v8/src/ppc/simulator-ppc.h b/chromium/v8/src/ppc/simulator-ppc.h
index 042b2ada2c6..a3b03dc5062 100644
--- a/chromium/v8/src/ppc/simulator-ppc.h
+++ b/chromium/v8/src/ppc/simulator-ppc.h
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
@@ -33,8 +33,9 @@ typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
// should act as a function matching the type ppc_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
// The stack limit beyond which we will throw stack overflow errors in
@@ -48,14 +49,18 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() {}
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
@@ -311,12 +316,16 @@ class Simulator {
bool ExecuteExt2_9bit_part2(Instruction* instr);
void ExecuteExt2_5bit(Instruction* instr);
void ExecuteExt2(Instruction* instr);
+ void ExecuteExt3(Instruction* instr);
void ExecuteExt4(Instruction* instr);
#if V8_TARGET_ARCH_PPC64
void ExecuteExt5(Instruction* instr);
#endif
void ExecuteGeneric(Instruction* instr);
+ void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
+ void ClearFPSCR(int bit) { fp_condition_reg_ &= ~(1 << (31 - bit)); }
+
// Executes one instruction.
void ExecuteInstruction(Instruction* instr);
@@ -328,7 +337,8 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(
- void* external_function, v8::internal::ExternalReference::Type type);
+ Isolate* isolate, void* external_function,
+ v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
@@ -390,16 +400,17 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
+ FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
(intptr_t)p3, (intptr_t)p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, \
- (intptr_t)p7, (intptr_t)NULL, (intptr_t)p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
+ (intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
+ (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
+ (intptr_t)NULL, (intptr_t)p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -413,17 +424,18 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_PPC_SIMULATOR_PPC_H_
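The recurring change in this file is that functions which used to call Isolate::Current() — a thread-local lookup — now receive the isolate as an explicit parameter, with the non-simulator stubs marking it via USE(isolate) to silence unused-parameter warnings. A hedged sketch of the shape of that change (Isolate here is a stand-in, not the real v8::internal::Isolate):

    #include <cstdint>

    struct Isolate;  // stand-in for v8::internal::Isolate

    // Before: uintptr_t RegisterCTryCatch(uintptr_t addr) fetched its context
    // with Isolate::Current() internally. After: the caller passes it in.
    static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
                                              uintptr_t try_catch_address) {
      (void)isolate;  // plays the role of USE(isolate) in the patch
      return try_catch_address;
    }

    int main() {
      Isolate* isolate = nullptr;  // without a simulator the isolate is unused
      return static_cast<int>(RegisterCTryCatch(isolate, 0));
    }

Passing the isolate explicitly keeps hot paths free of TLS lookups and makes the dependency visible at every call site.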
diff --git a/chromium/v8/src/profiler/allocation-tracker.h b/chromium/v8/src/profiler/allocation-tracker.h
index ebda3dba83a..03802a5c668 100644
--- a/chromium/v8/src/profiler/allocation-tracker.h
+++ b/chromium/v8/src/profiler/allocation-tracker.h
@@ -152,6 +152,7 @@ class AllocationTracker {
DISALLOW_COPY_AND_ASSIGN(AllocationTracker);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ALLOCATION_TRACKER_H_
diff --git a/chromium/v8/src/profiler/circular-queue-inl.h b/chromium/v8/src/profiler/circular-queue-inl.h
index 66b4af5b4a4..428945a2eed 100644
--- a/chromium/v8/src/profiler/circular-queue-inl.h
+++ b/chromium/v8/src/profiler/circular-queue-inl.h
@@ -64,6 +64,7 @@ typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
return next;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CIRCULAR_QUEUE_INL_H_
diff --git a/chromium/v8/src/profiler/circular-queue.h b/chromium/v8/src/profiler/circular-queue.h
index 3508b371c46..272843bb2df 100644
--- a/chromium/v8/src/profiler/circular-queue.h
+++ b/chromium/v8/src/profiler/circular-queue.h
@@ -63,6 +63,7 @@ class SamplingCircularQueue {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CIRCULAR_QUEUE_H_
diff --git a/chromium/v8/src/profiler/cpu-profiler-inl.h b/chromium/v8/src/profiler/cpu-profiler-inl.h
index df727ae7cb8..45e4ccf1368 100644
--- a/chromium/v8/src/profiler/cpu-profiler-inl.h
+++ b/chromium/v8/src/profiler/cpu-profiler-inl.h
@@ -65,7 +65,7 @@ TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == NULL) return NULL;
TickSampleEventRecord* evt =
- new(address) TickSampleEventRecord(last_code_event_id_);
+ new (address) TickSampleEventRecord(last_code_event_id_.Value());
return &evt->sample;
}
@@ -74,6 +74,7 @@ void ProfilerEventsProcessor::FinishTickSample() {
ticks_buffer_.FinishEnqueue();
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CPU_PROFILER_INL_H_
diff --git a/chromium/v8/src/profiler/cpu-profiler.cc b/chromium/v8/src/profiler/cpu-profiler.cc
index c4216ed4785..bbddc873b1c 100644
--- a/chromium/v8/src/profiler/cpu-profiler.cc
+++ b/chromium/v8/src/profiler/cpu-profiler.cc
@@ -4,10 +4,10 @@
#include "src/profiler/cpu-profiler.h"
-#include "src/compiler.h"
+#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/hashmap.h"
+#include "src/locked-queue-inl.h"
#include "src/log-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
@@ -36,14 +36,14 @@ ProfilerEventsProcessor::~ProfilerEventsProcessor() {}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
- event.generic.order = ++last_code_event_id_;
+ event.generic.order = last_code_event_id_.Increment(1);
events_buffer_.Enqueue(event);
}
void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
int fp_to_sp_delta) {
- TickSampleEventRecord record(last_code_event_id_);
+ TickSampleEventRecord record(last_code_event_id_.Value());
RegisterState regs;
Address fp = isolate->c_entry_fp(isolate->thread_local_top());
regs.sp = fp - fp_to_sp_delta;
@@ -55,7 +55,7 @@ void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
- TickSampleEventRecord record(last_code_event_id_);
+ TickSampleEventRecord record(last_code_event_id_.Value());
RegisterState regs;
StackFrameIterator it(isolate);
if (!it.done()) {
@@ -97,9 +97,9 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
ProfilerEventsProcessor::SampleProcessingResult
ProfilerEventsProcessor::ProcessOneSample() {
- if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order ==
- last_processed_code_event_id_) {
+ TickSampleEventRecord record1;
+ if (ticks_from_vm_buffer_.Peek(&record1) &&
+ (record1.order == last_processed_code_event_id_)) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
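The rewritten ProcessOneSample check reflects the different Peek contract of the queue types swapped in this patch: the old UnboundQueue::Peek() returned a pointer into the queue (guarded by a separate IsEmpty() call), while LockedQueue::Peek(&out) copies the head element out and reports whether one existed in a single call. A sketch of that consumer-side API with a mutex-guarded std::queue standing in for LockedQueue:

    #include <mutex>
    #include <queue>

    template <typename Record>
    class LockedQueueSketch {
     public:
      void Enqueue(const Record& record) {
        std::lock_guard<std::mutex> guard(mutex_);
        queue_.push(record);
      }
      // Copies the head into *out and reports whether the queue was
      // non-empty; callers never hold a pointer into the queue's storage.
      bool Peek(Record* out) {
        std::lock_guard<std::mutex> guard(mutex_);
        if (queue_.empty()) return false;
        *out = queue_.front();
        return true;
      }
      bool Dequeue(Record* out) {
        std::lock_guard<std::mutex> guard(mutex_);
        if (queue_.empty()) return false;
        *out = queue_.front();
        queue_.pop();
        return true;
      }

     private:
      std::mutex mutex_;
      std::queue<Record> queue_;
    };

    int main() {
      LockedQueueSketch<int> q;
      q.Enqueue(42);
      int head = 0;
      if (q.Peek(&head) && head == 42) q.Dequeue(&head);
      return 0;
    }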
@@ -254,7 +254,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
NULL, code->instruction_start());
if (info) {
- rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
rec->entry->FillFunctionInfo(shared);
@@ -291,7 +290,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name), line,
column, line_table, code->instruction_start());
if (info) {
- rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
rec->entry->FillFunctionInfo(shared);
@@ -441,6 +439,7 @@ void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
void CpuProfiler::StartProfiling(String* title, bool record_samples) {
StartProfiling(profiles_->GetName(title), record_samples);
+ isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
}
diff --git a/chromium/v8/src/profiler/cpu-profiler.h b/chromium/v8/src/profiler/cpu-profiler.h
index 2d6732725a8..e5ef0ac7c46 100644
--- a/chromium/v8/src/profiler/cpu-profiler.h
+++ b/chromium/v8/src/profiler/cpu-profiler.h
@@ -6,12 +6,13 @@
#define V8_PROFILER_CPU_PROFILER_H_
#include "src/allocation.h"
+#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
+#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/sampler.h"
-#include "src/profiler/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -169,14 +170,14 @@ class ProfilerEventsProcessor : public base::Thread {
base::Atomic32 running_;
// Sampling period in microseconds.
const base::TimeDelta period_;
- UnboundQueue<CodeEventsContainer> events_buffer_;
+ LockedQueue<CodeEventsContainer> events_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
kTickSampleBufferSize / sizeof(TickSampleEventRecord);
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
- UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- unsigned last_code_event_id_;
+ LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
+ AtomicNumber<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
};
@@ -270,7 +271,8 @@ class CpuProfiler : public CodeEventListener {
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_CPU_PROFILER_H_
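last_code_event_id_ was a plain unsigned incremented on the VM thread and read on the profiler thread, which is a data race; the patch wraps it in AtomicNumber (from the newly included src/atomic-utils.h) with Increment/Value accessors. A sketch of such a wrapper over std::atomic, assuming Increment returns the new value as the event-ordering use above requires:

    #include <atomic>

    template <typename T>
    class AtomicNumberSketch {
     public:
      // Returns the incremented value, matching its use as a sequence
      // number: event.generic.order = last_code_event_id_.Increment(1);
      T Increment(T delta) {
        return value_.fetch_add(delta, std::memory_order_seq_cst) + delta;
      }
      T Value() const { return value_.load(std::memory_order_seq_cst); }

     private:
      std::atomic<T> value_{0};
    };

    int main() {
      AtomicNumberSketch<unsigned> last_code_event_id;
      unsigned order = last_code_event_id.Increment(1);  // first event gets 1
      return order == last_code_event_id.Value() ? 0 : 1;
    }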
diff --git a/chromium/v8/src/profiler/heap-profiler.cc b/chromium/v8/src/profiler/heap-profiler.cc
index 3f776e05a81..4403e5d6c9c 100644
--- a/chromium/v8/src/profiler/heap-profiler.cc
+++ b/chromium/v8/src/profiler/heap-profiler.cc
@@ -5,6 +5,7 @@
#include "src/profiler/heap-profiler.h"
#include "src/api.h"
+#include "src/debug/debug.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -75,6 +76,10 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
ids_->RemoveDeadEntries();
is_tracking_object_moves_ = true;
+
+ heap()->isolate()->debug()->feature_tracker()->Track(
+ DebugFeatureTracker::kHeapSnapshot);
+
return result;
}
@@ -86,6 +91,8 @@ void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
if (track_allocations) {
allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
heap()->DisableInlineAllocation();
+ heap()->isolate()->debug()->feature_tracker()->Track(
+ DebugFeatureTracker::kAllocationTracking);
}
}
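The profiler entry points now report to the debugger's DebugFeatureTracker (kProfiler in cpu-profiler.cc, kHeapSnapshot and kAllocationTracking above), letting the debugger record which diagnostic features a session has exercised. A hedged sketch of a one-shot tracker of that shape (the enum values are taken from this patch; the rest is illustrative):

    #include <bitset>
    #include <cstdio>

    class FeatureTrackerSketch {
     public:
      enum Feature { kProfiler = 0, kHeapSnapshot, kAllocationTracking, kCount };

      // Records the first use of each feature; repeated calls are no-ops.
      void Track(Feature feature) {
        if (seen_.test(feature)) return;
        seen_.set(feature);
        std::printf("feature %d used for the first time\n", feature);
      }

     private:
      std::bitset<kCount> seen_;
    };

    int main() {
      FeatureTrackerSketch tracker;
      tracker.Track(FeatureTrackerSketch::kHeapSnapshot);
      tracker.Track(FeatureTrackerSketch::kHeapSnapshot);  // no-op
      return 0;
    }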
diff --git a/chromium/v8/src/profiler/heap-profiler.h b/chromium/v8/src/profiler/heap-profiler.h
index b304f388ffa..9a04e83af4b 100644
--- a/chromium/v8/src/profiler/heap-profiler.h
+++ b/chromium/v8/src/profiler/heap-profiler.h
@@ -66,6 +66,8 @@ class HeapProfiler {
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
void ClearHeapObjectMap();
+ Isolate* isolate() const { return heap()->isolate(); }
+
private:
Heap* heap() const;
@@ -79,6 +81,7 @@ class HeapProfiler {
base::Mutex profiler_mutex_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_HEAP_PROFILER_H_
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator-inl.h b/chromium/v8/src/profiler/heap-snapshot-generator-inl.h
index 12e37f5e60d..169ab569e82 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/chromium/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -16,6 +16,11 @@ HeapEntry* HeapGraphEdge::from() const {
}
+Isolate* HeapGraphEdge::isolate() const {
+ return snapshot()->profiler()->isolate();
+}
+
+
HeapSnapshot* HeapGraphEdge::snapshot() const {
return to_entry_->snapshot();
}
@@ -43,6 +48,9 @@ HeapGraphEdge** HeapEntry::children_arr() {
}
-} } // namespace v8::internal
+Isolate* HeapEntry::isolate() const { return snapshot_->profiler()->isolate(); }
+
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_INL_H_
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.cc b/chromium/v8/src/profiler/heap-snapshot-generator.cc
index c7bb3c950ac..69ed5e6f293 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.cc
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.cc
@@ -7,6 +7,7 @@
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
+#include "src/objects-body-descriptors.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -804,9 +805,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
- const char* name = shared->bound() ? "native_bind" :
- names_->GetName(String::cast(shared->name()));
+ const char* name = names_->GetName(String::cast(shared->name()));
return AddEntry(object, HeapEntry::kClosure, name);
+ } else if (object->IsJSBoundFunction()) {
+ return AddEntry(object, HeapEntry::kClosure, "native_bind");
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
@@ -836,7 +838,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
HeapEntry::kString,
names_->GetName(String::cast(object)));
} else if (object->IsSymbol()) {
- return AddEntry(object, HeapEntry::kSymbol, "symbol");
+ if (Symbol::cast(object)->is_private())
+ return AddEntry(object, HeapEntry::kHidden, "private symbol");
+ else
+ return AddEntry(object, HeapEntry::kSymbol, "symbol");
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
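Private symbols are VM-internal property keys, so the snapshot now classifies them as kHidden rather than kSymbol entries. A trivial sketch of that classification (types are stand-ins):

    enum class EntryType { kHidden, kSymbol };

    struct SymbolSketch { bool is_private; };

    // Private symbols are implementation details, so hide them by default.
    static EntryType Classify(const SymbolSketch& s) {
      return s.is_private ? EntryType::kHidden : EntryType::kSymbol;
    }

    int main() {
      return Classify(SymbolSketch{true}) == EntryType::kHidden ? 0 : 1;
    }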
@@ -984,55 +989,39 @@ int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
class IndexedReferencesExtractor : public ObjectVisitor {
public:
- IndexedReferencesExtractor(V8HeapExplorer* generator,
- HeapObject* parent_obj,
+ IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject* parent_obj,
int parent)
: generator_(generator),
parent_obj_(parent_obj),
+ parent_start_(HeapObject::RawField(parent_obj_, 0)),
+ parent_end_(HeapObject::RawField(parent_obj_, parent_obj_->Size())),
parent_(parent),
- next_index_(0) {
- }
- void VisitCodeEntry(Address entry_address) {
+ next_index_(0) {}
+ void VisitCodeEntry(Address entry_address) override {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
generator_->SetInternalReference(parent_obj_, parent_, "code", code);
generator_->TagCodeObject(code);
}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
+ intptr_t index =
+ static_cast<intptr_t>(p - HeapObject::RawField(parent_obj_, 0));
++next_index_;
- if (CheckVisitedAndUnmark(p)) continue;
+ // |p| could be outside of the object, e.g., while visiting RelocInfo of
+ // code objects.
+ if (p >= parent_start_ && p < parent_end_ && generator_->marks_[index]) {
+ generator_->marks_[index] = false;
+ continue;
+ }
generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p);
}
}
- static void MarkVisitedField(HeapObject* obj, int offset) {
- if (offset < 0) return;
- Address field = obj->address() + offset;
- DCHECK(Memory::Object_at(field)->IsHeapObject());
- intptr_t p = reinterpret_cast<intptr_t>(Memory::Object_at(field));
- DCHECK(!IsMarked(p));
- intptr_t p_tagged = p | kTag;
- Memory::Object_at(field) = reinterpret_cast<Object*>(p_tagged);
- }
private:
- bool CheckVisitedAndUnmark(Object** field) {
- intptr_t p = reinterpret_cast<intptr_t>(*field);
- if (IsMarked(p)) {
- intptr_t p_untagged = (p & ~kTaggingMask) | kHeapObjectTag;
- *field = reinterpret_cast<Object*>(p_untagged);
- DCHECK((*field)->IsHeapObject());
- return true;
- }
- return false;
- }
-
- static const intptr_t kTaggingMask = 3;
- static const intptr_t kTag = 3;
-
- static bool IsMarked(intptr_t p) { return (p & kTaggingMask) == kTag; }
-
V8HeapExplorer* generator_;
HeapObject* parent_obj_;
+ Object** parent_start_;
+ Object** parent_end_;
int parent_;
int next_index_;
};
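The old extractor marked already-visited fields by tagging the pointer values in place (setting the low bits and untagging on read), which mutates heap memory mid-snapshot. The replacement keeps the visited bits outside the object in a std::vector<bool> marks_, indexed by pointer-sized slot and grown to the largest object seen (see the resize logic added to IterateAndExtractSinglePass further down). A simplified sketch of that bookkeeping:

    #include <cassert>
    #include <vector>

    static const int kPointerSize = static_cast<int>(sizeof(void*));

    class VisitedMarksSketch {
     public:
      // Grow-only: bits are consumed (cleared) as fields are visited, so
      // the vector is all-false again before the next object is scanned.
      void EnsureCapacityFor(int object_size_in_bytes) {
        size_t slots = static_cast<size_t>(object_size_in_bytes / kPointerSize);
        if (slots > marks_.size()) {
          std::vector<bool>().swap(marks_);  // drop the old bits
          marks_.resize(slots, false);       // reallocate to the new size
        }
      }
      void MarkVisitedField(int offset) {
        if (offset < 0) return;
        int index = offset / kPointerSize;
        assert(!marks_[index]);
        marks_[index] = true;
      }
      // Returns true (and clears the bit) if the field was already reported
      // through a named reference, mirroring the check in VisitPointers.
      bool CheckAndClear(int index) {
        if (!marks_[index]) return false;
        marks_[index] = false;
        return true;
      }

     private:
      std::vector<bool> marks_;
    };

    int main() {
      VisitedMarksSketch marks;
      marks.EnsureCapacityFor(4 * kPointerSize);
      marks.MarkVisitedField(2 * kPointerSize);
      bool named = marks.CheckAndClear(2);    // already recorded: skip it
      bool hidden = !marks.CheckAndClear(2);  // would now surface as hidden
      return (named && hidden) ? 0 : 1;
    }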
@@ -1110,13 +1099,29 @@ void V8HeapExplorer::ExtractJSGlobalProxyReferences(
void V8HeapExplorer::ExtractJSObjectReferences(
int entry, JSObject* js_obj) {
HeapObject* obj = js_obj;
- ExtractClosureReferences(js_obj, entry);
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
PrototypeIterator iter(heap_->isolate(), js_obj);
SetPropertyReference(obj, entry, heap_->proto_string(), iter.GetCurrent());
- if (obj->IsJSFunction()) {
+ if (obj->IsJSBoundFunction()) {
+ JSBoundFunction* js_fun = JSBoundFunction::cast(obj);
+ TagObject(js_fun->bound_arguments(), "(bound arguments)");
+ SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
+ JSBoundFunction::kBoundArgumentsOffset);
+ TagObject(js_fun->creation_context(), "(creation context)");
+ SetInternalReference(js_fun, entry, "creation_context",
+ js_fun->creation_context(),
+ JSBoundFunction::kCreationContextOffset);
+ SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
+ SetNativeBindReference(js_obj, entry, "bound_function",
+ js_fun->bound_target_function());
+ FixedArray* bindings = js_fun->bound_arguments();
+ for (int i = 0; i < bindings->length(); i++) {
+ const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
+ SetNativeBindReference(js_obj, entry, reference_name, bindings->get(i));
+ }
+ } else if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
if (!proto_or_map->IsTheHole()) {
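Bound-function state moves off JSFunction (where it shared the literals_or_bindings slot, unpacked by the now-deleted ExtractClosureReferences further down) onto the dedicated JSBoundFunction type with its own bound_this, bound_target_function and bound_arguments fields; the explorer names each argument bound_argument_0, bound_argument_1, and so on. A sketch of that naming loop over stand-in types:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-in for JSBoundFunction, e.g. the result of f.bind(receiver, 1, 2).
    struct BoundFunctionSketch {
      std::string bound_this = "(receiver)";
      std::string bound_target_function = "f";
      std::vector<std::string> bound_arguments{"1", "2"};
    };

    int main() {
      BoundFunctionSketch fn;
      std::printf("bound_this -> %s\n", fn.bound_this.c_str());
      std::printf("bound_function -> %s\n", fn.bound_target_function.c_str());
      for (size_t i = 0; i < fn.bound_arguments.size(); i++) {
        // Mirrors names_->GetFormatted("bound_argument_%d", i) above.
        std::printf("bound_argument_%zu -> %s\n", i,
                    fn.bound_arguments[i].c_str());
      }
      return 0;
    }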
@@ -1136,13 +1141,8 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
- // JSFunction has either bindings or literals and never both.
- bool bound = shared_info->bound();
- TagObject(js_fun->literals_or_bindings(),
- bound ? "(function bindings)" : "(function literals)");
- SetInternalReference(js_fun, entry,
- bound ? "bindings" : "literals",
- js_fun->literals_or_bindings(),
+ TagObject(js_fun->literals(), "(function literals)");
+ SetInternalReference(js_fun, entry, "literals", js_fun->literals(),
JSFunction::kLiteralsOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(js_fun, entry,
@@ -1155,23 +1155,23 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetWeakReference(js_fun, entry,
"next_function_link", js_fun->next_function_link(),
JSFunction::kNextFunctionLinkOffset);
- STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset
- == JSFunction::kNonWeakFieldsEndOffset);
+ // Ensure no new weak references appeared in JSFunction.
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset ==
+ JSFunction::kNonWeakFieldsEndOffset);
+ STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+ JSFunction::kNextFunctionLinkOffset);
STATIC_ASSERT(JSFunction::kNextFunctionLinkOffset + kPointerSize
== JSFunction::kSize);
- } else if (obj->IsGlobalObject()) {
- GlobalObject* global_obj = GlobalObject::cast(obj);
- SetInternalReference(global_obj, entry,
- "builtins", global_obj->builtins(),
- GlobalObject::kBuiltinsOffset);
- SetInternalReference(global_obj, entry,
- "native_context", global_obj->native_context(),
- GlobalObject::kNativeContextOffset);
- SetInternalReference(global_obj, entry,
- "global_proxy", global_obj->global_proxy(),
- GlobalObject::kGlobalProxyOffset);
- STATIC_ASSERT(GlobalObject::kHeaderSize - JSObject::kHeaderSize ==
- 3 * kPointerSize);
+ } else if (obj->IsJSGlobalObject()) {
+ JSGlobalObject* global_obj = JSGlobalObject::cast(obj);
+ SetInternalReference(global_obj, entry, "native_context",
+ global_obj->native_context(),
+ JSGlobalObject::kNativeContextOffset);
+ SetInternalReference(global_obj, entry, "global_proxy",
+ global_obj->global_proxy(),
+ JSGlobalObject::kGlobalProxyOffset);
+ STATIC_ASSERT(JSGlobalObject::kSize - JSObject::kHeaderSize ==
+ 2 * kPointerSize);
} else if (obj->IsJSArrayBufferView()) {
JSArrayBufferView* view = JSArrayBufferView::cast(obj);
SetInternalReference(view, entry, "buffer", view->buffer(),
@@ -1259,11 +1259,10 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
- EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
+ EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, HeapObject, extension);
+ EXTRACT_CONTEXT_FIELD(NATIVE_CONTEXT_INDEX, Context, native_context);
if (context->IsNativeContext()) {
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->embedder_data(), "(context data)");
NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD)
EXTRACT_CONTEXT_FIELD(OPTIMIZED_FUNCTIONS_LIST, unused,
@@ -1539,7 +1538,7 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
// Do not visit weak_next as it is not visited by the StaticVisitor,
// and we're not very interested in weak_next field here.
STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
- AllocationSite::BodyDescriptor::kEndOffset);
+ AllocationSite::BodyDescriptor::kEndOffset);
}
@@ -1588,28 +1587,6 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
}
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
- if (!js_obj->IsJSFunction()) return;
-
- JSFunction* func = JSFunction::cast(js_obj);
- if (func->shared()->bound()) {
- FixedArray* bindings = func->function_bindings();
- SetNativeBindReference(js_obj, entry, "bound_this",
- bindings->get(JSFunction::kBoundThisIndex));
- SetNativeBindReference(js_obj, entry, "bound_function",
- bindings->get(JSFunction::kBoundFunctionIndex));
- for (int i = JSFunction::kBoundArgumentsStartIndex;
- i < bindings->length(); i++) {
- const char* reference_name = names_->GetFormatted(
- "bound_argument_%d",
- i - JSFunction::kBoundArgumentsStartIndex);
- SetNativeBindReference(js_obj, entry, reference_name,
- bindings->get(i));
- }
- }
-}
-
-
void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
@@ -1644,7 +1621,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
break;
}
}
- } else if (js_obj->IsGlobalObject()) {
+ } else if (js_obj->IsJSGlobalObject()) {
// We assume that global objects can only have slow properties.
GlobalDictionary* dictionary = js_obj->global_dictionary();
int length = dictionary->Capacity();
@@ -1740,14 +1717,11 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
String* V8HeapExplorer::GetConstructorName(JSObject* object) {
- Heap* heap = object->GetHeap();
- if (object->IsJSFunction()) return heap->closure_string();
- String* constructor_name = object->constructor_name();
- if (constructor_name == heap->Object_string()) {
- // TODO(verwaest): Try to get object.constructor.name in this case.
- // This requires handlification of the V8HeapExplorer.
- }
- return object->constructor_name();
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsJSFunction()) return isolate->heap()->closure_string();
+ DisallowHeapAllocation no_gc;
+ HandleScope scope(isolate);
+ return *JSReceiver::GetConstructorName(handle(object, isolate));
}
@@ -1773,7 +1747,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
heap_(heap) {
}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
if (collecting_all_references_) {
for (Object** p = start; p < end; p++) all_references_.Add(*p);
} else {
@@ -1806,7 +1780,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
}
}
- void Synchronize(VisitorSynchronization::SyncTag tag) {
+ void Synchronize(VisitorSynchronization::SyncTag tag) override {
if (collecting_all_references_ &&
previous_reference_count_ != all_references_.length()) {
previous_reference_count_ = all_references_.length();
@@ -1871,6 +1845,14 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
obj = iterator.next(), progress_->ProgressStep()) {
if (interrupted) continue;
+ size_t max_pointer = obj->Size() / kPointerSize;
+ if (max_pointer > marks_.size()) {
+ // Clear the current bits.
+ std::vector<bool>().swap(marks_);
+ // Reallocate to the right size.
+ marks_.resize(max_pointer, false);
+ }
+
HeapEntry* heap_entry = GetEntry(obj);
int entry = heap_entry->index();
if ((this->*extractor)(entry, obj)) {
@@ -1915,11 +1897,19 @@ void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
parent_entry,
names_->GetName(reference_name),
child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
}
+void V8HeapExplorer::MarkVisitedField(HeapObject* obj, int offset) {
+ if (offset < 0) return;
+ int index = offset / kPointerSize;
+ DCHECK(!marks_[index]);
+ marks_[index] = true;
+}
+
+
void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
int parent_entry,
const char* reference_name,
@@ -1964,7 +1954,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
reference_name,
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -1982,7 +1972,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
names_->GetName(index),
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -2015,7 +2005,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
reference_name,
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -2033,7 +2023,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
names_->GetFormatted("%d", index),
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -2074,7 +2064,7 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
parent_entry,
name,
child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
}
@@ -2133,7 +2123,7 @@ void V8HeapExplorer::SetGcSubrootReference(
// Add a shortcut to JS global object reference at snapshot root.
if (child_obj->IsNativeContext()) {
Context* context = Context::cast(child_obj);
- GlobalObject* global = context->global_object();
+ JSGlobalObject* global = context->global_object();
if (global->IsJSGlobalObject()) {
bool is_debug_object = false;
is_debug_object = heap_->isolate()->debug()->IsDebugGlobal(global);
@@ -2164,6 +2154,7 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
#undef SYMBOL_NAME
#define SYMBOL_NAME(name, description) NAME_ENTRY(name)
PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
+ WELL_KNOWN_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
#undef NAME_ENTRY
CHECK(!strong_gc_subroot_names_.is_empty());
@@ -2191,7 +2182,7 @@ void V8HeapExplorer::MarkAsWeakContainer(Object* object) {
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if ((*p)->IsNativeContext()) {
Context* context = Context::cast(*p);
@@ -2244,11 +2235,9 @@ class GlobalHandlesExtractor : public ObjectVisitor {
public:
explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
: explorer_(explorer) {}
- virtual ~GlobalHandlesExtractor() {}
- virtual void VisitPointers(Object** start, Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
+ ~GlobalHandlesExtractor() override {}
+ void VisitPointers(Object** start, Object** end) override { UNREACHABLE(); }
+ void VisitEmbedderReference(Object** p, uint16_t class_id) override {
explorer_->VisitSubtreeWrapper(p, class_id);
}
private:
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.h b/chromium/v8/src/profiler/heap-snapshot-generator.h
index 1baebeee9e7..857f2401bf0 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.h
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.h
@@ -8,7 +8,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
-#include "src/strings-storage.h"
+#include "src/profiler/strings-storage.h"
namespace v8 {
namespace internal {
@@ -50,6 +50,8 @@ class HeapGraphEdge BASE_EMBEDDED {
INLINE(HeapEntry* from() const);
HeapEntry* to() const { return to_entry_; }
+ INLINE(Isolate* isolate() const);
+
private:
INLINE(HeapSnapshot* snapshot() const);
int from_index() const { return FromIndexField::decode(bit_field_); }
@@ -115,6 +117,7 @@ class HeapEntry BASE_EMBEDDED {
}
Vector<HeapGraphEdge*> children() {
return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
+ INLINE(Isolate* isolate() const);
void SetIndexedReference(
HeapGraphEdge::Type type, int index, HeapEntry* entry);
@@ -351,6 +354,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
HeapObject* object);
+ void MarkVisitedField(HeapObject* obj, int offset);
+
HeapEntry* AddEntry(HeapObject* object);
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
@@ -385,7 +390,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractFixedArrayReferences(int entry, FixedArray* array);
- void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
void ExtractAccessorPairProperty(JSObject* js_obj, int entry, Name* key,
Object* callback_obj, int field_offset = -1);
@@ -465,6 +469,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet weak_containers_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
+ std::vector<bool> marks_;
+
friend class IndexedReferencesExtractor;
friend class RootsReferencesExtractor;
@@ -616,6 +622,7 @@ class HeapSnapshotJSONSerializer {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
diff --git a/chromium/v8/src/profiler/profile-generator-inl.h b/chromium/v8/src/profiler/profile-generator-inl.h
index c2e98cc4c80..85edce2663b 100644
--- a/chromium/v8/src/profiler/profile-generator-inl.h
+++ b/chromium/v8/src/profiler/profile-generator-inl.h
@@ -23,7 +23,6 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
column_number_(column_number),
script_id_(v8::UnboundScript::kNoScriptId),
position_(0),
- no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
deopt_position_(SourcePosition::Unknown()),
@@ -43,6 +42,11 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
inline unsigned ProfileNode::function_id() const {
return tree_->GetFunctionId(this);
}
-} } // namespace v8::internal
+
+
+inline Isolate* ProfileNode::isolate() const { return tree_->isolate(); }
+
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_PROFILE_GENERATOR_INL_H_
diff --git a/chromium/v8/src/profiler/profile-generator.cc b/chromium/v8/src/profiler/profile-generator.cc
index f3592bba654..890f341e895 100644
--- a/chromium/v8/src/profiler/profile-generator.cc
+++ b/chromium/v8/src/profiler/profile-generator.cc
@@ -4,13 +4,12 @@
#include "src/profiler/profile-generator.h"
-#include "src/compiler.h"
+#include "src/ast/scopeinfo.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/sampler.h"
-#include "src/scopeinfo.h"
#include "src/splay-tree-inl.h"
#include "src/unicode.h"
@@ -50,7 +49,6 @@ const char* const CodeEntry::kNoDeoptReason = "";
CodeEntry::~CodeEntry() {
- delete no_frame_ranges_;
delete line_info_;
}
@@ -251,10 +249,11 @@ class DeleteNodesCallback {
};
-ProfileTree::ProfileTree()
+ProfileTree::ProfileTree(Isolate* isolate)
: root_entry_(Logger::FUNCTION_TAG, "(root)"),
next_node_id_(1),
root_(new ProfileNode(this, &root_entry_)),
+ isolate_(isolate),
next_function_id_(1),
function_ids_(ProfileNode::CodeEntriesMatch) {}
@@ -349,11 +348,11 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-CpuProfile::CpuProfile(const char* title, bool record_samples)
+CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
: title_(title),
record_samples_(record_samples),
- start_time_(base::TimeTicks::HighResolutionNow()) {
-}
+ start_time_(base::TimeTicks::HighResolutionNow()),
+ top_down_(isolate) {}
void CpuProfile::AddPath(base::TimeTicks timestamp,
@@ -442,8 +441,8 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
: function_and_resource_names_(heap),
- current_profiles_semaphore_(1) {
-}
+ isolate_(heap->isolate()),
+ current_profiles_semaphore_(1) {}
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
@@ -478,7 +477,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.Add(new CpuProfile(title, record_samples));
+ current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -611,17 +610,8 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// ebp contains return address of the current function and skips caller's
// frame. Check for this case and just skip such samples.
if (pc_entry) {
- List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
int pc_offset =
static_cast<int>(sample.pc - pc_entry->instruction_start());
- if (ranges) {
- for (int i = 0; i < ranges->length(); i++) {
- OffsetRange& range = ranges->at(i);
- if (range.from <= pc_offset && pc_offset < range.to) {
- return;
- }
- }
- }
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
@@ -629,11 +619,12 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
src_line_not_found = false;
*entry++ = pc_entry;
- if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
- pc_entry->builtin_id() == Builtins::kFunctionApply) {
- // When current function is FunctionCall or FunctionApply builtin the
- // top frame is either frame of the calling JS function or internal
- // frame. In the latter case we know the caller for sure but in the
+ if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
+ pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
+ // When the current function is either the Function.prototype.apply or
+ // the Function.prototype.call builtin, the top frame is either the
+ // frame of the calling JS function or an internal frame.
+ // In the latter case we know the caller for sure, but in the
// former case we don't so we simply replace the frame with
// 'unresolved' entry.
if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
diff --git a/chromium/v8/src/profiler/profile-generator.h b/chromium/v8/src/profiler/profile-generator.h
index ce58d70ae9c..47a73f191a1 100644
--- a/chromium/v8/src/profiler/profile-generator.h
+++ b/chromium/v8/src/profiler/profile-generator.h
@@ -10,13 +10,11 @@
#include "src/allocation.h"
#include "src/compiler.h"
#include "src/hashmap.h"
-#include "src/strings-storage.h"
+#include "src/profiler/strings-storage.h"
namespace v8 {
namespace internal {
-struct OffsetRange;
-
// Provides a mapping from the offsets within generated code to
// the source line.
class JITLineInfoTable : public Malloced {
@@ -83,10 +81,6 @@ class CodeEntry {
void FillFunctionInfo(SharedFunctionInfo* shared);
- List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
- void set_no_frame_ranges(List<OffsetRange>* ranges) {
- no_frame_ranges_ = ranges;
- }
void set_inlined_function_infos(
const std::vector<InlinedFunctionInfo>& infos) {
inlined_function_infos_ = infos;
@@ -125,7 +119,6 @@ class CodeEntry {
int column_number_;
int script_id_;
int position_;
- List<OffsetRange>* no_frame_ranges_;
const char* bailout_reason_;
const char* deopt_reason_;
SourcePosition deopt_position_;
@@ -163,6 +156,7 @@ class ProfileNode {
const std::vector<CpuProfileDeoptInfo>& deopt_infos() const {
return deopt_infos_;
}
+ Isolate* isolate() const;
void Print(int indent);
@@ -193,7 +187,7 @@ class ProfileNode {
class ProfileTree {
public:
- ProfileTree();
+ explicit ProfileTree(Isolate* isolate);
~ProfileTree();
ProfileNode* AddPathFromEnd(
@@ -207,6 +201,8 @@ class ProfileTree {
root_->Print(0);
}
+ Isolate* isolate() const { return isolate_; }
+
private:
template <typename Callback>
void TraverseDepthFirst(Callback* callback);
@@ -214,6 +210,7 @@ class ProfileTree {
CodeEntry root_entry_;
unsigned next_node_id_;
ProfileNode* root_;
+ Isolate* isolate_;
unsigned next_function_id_;
HashMap function_ids_;
@@ -224,7 +221,7 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(const char* title, bool record_samples);
+ CpuProfile(Isolate* isolate, const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
@@ -346,6 +343,8 @@ class CpuProfilesCollection {
List<CodeEntry*> code_entries_;
List<CpuProfile*> finished_profiles_;
+ Isolate* isolate_;
+
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
base::Semaphore current_profiles_semaphore_;
@@ -383,6 +382,7 @@ class ProfileGenerator {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_PROFILE_GENERATOR_H_
diff --git a/chromium/v8/src/profiler/sampler.h b/chromium/v8/src/profiler/sampler.h
index ed932c03542..354e935e31b 100644
--- a/chromium/v8/src/profiler/sampler.h
+++ b/chromium/v8/src/profiler/sampler.h
@@ -130,6 +130,7 @@ class Sampler {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_SAMPLER_H_
diff --git a/chromium/v8/src/strings-storage.cc b/chromium/v8/src/profiler/strings-storage.cc
index 52cc00f852f..9f095b88668 100644
--- a/chromium/v8/src/strings-storage.cc
+++ b/chromium/v8/src/profiler/strings-storage.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/strings-storage.h"
+#include "src/profiler/strings-storage.h"
#include "src/base/smart-pointers.h"
#include "src/objects-inl.h"
diff --git a/chromium/v8/src/strings-storage.h b/chromium/v8/src/profiler/strings-storage.h
index 8fd9da7d3c9..7164caef637 100644
--- a/chromium/v8/src/strings-storage.h
+++ b/chromium/v8/src/profiler/strings-storage.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRINGS_STORAGE_H_
-#define V8_STRINGS_STORAGE_H_
+#ifndef V8_PROFILER_STRINGS_STORAGE_H_
+#define V8_PROFILER_STRINGS_STORAGE_H_
#include "src/allocation.h"
#include "src/hashmap.h"
@@ -11,8 +11,6 @@
namespace v8 {
namespace internal {
-struct OffsetRange;
-
// Provides a storage of strings allocated in C++ heap, to hold them
// forever, even if they disappear from JS heap or external storage.
class StringsStorage {
@@ -41,7 +39,7 @@ class StringsStorage {
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
-#endif // V8_STRINGS_STORAGE_H_
+#endif // V8_PROFILER_STRINGS_STORAGE_H_
diff --git a/chromium/v8/src/profiler/unbound-queue-inl.h b/chromium/v8/src/profiler/unbound-queue-inl.h
index fef7bec8d3c..8c45d098612 100644
--- a/chromium/v8/src/profiler/unbound-queue-inl.h
+++ b/chromium/v8/src/profiler/unbound-queue-inl.h
@@ -77,6 +77,7 @@ Record* UnboundQueue<Record>::Peek() const {
return &next->value;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_UNBOUND_QUEUE_INL_H_
diff --git a/chromium/v8/src/profiler/unbound-queue.h b/chromium/v8/src/profiler/unbound-queue.h
index a63c327d103..c53b35a8edb 100644
--- a/chromium/v8/src/profiler/unbound-queue.h
+++ b/chromium/v8/src/profiler/unbound-queue.h
@@ -42,6 +42,7 @@ class UnboundQueue BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROFILER_UNBOUND_QUEUE_
diff --git a/chromium/v8/src/property-descriptor.cc b/chromium/v8/src/property-descriptor.cc
new file mode 100644
index 00000000000..243a9faac3a
--- /dev/null
+++ b/chromium/v8/src/property-descriptor.cc
@@ -0,0 +1,300 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/property-descriptor.h"
+
+#include "src/bootstrapper.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Helper function for ToPropertyDescriptor. Comments describe steps for
+// "enumerable", other properties are handled the same way.
+// Returns false if an exception was thrown.
+bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
+ Handle<Object>* value) {
+ LookupIterator it(obj, name);
+ // 4. Let hasEnumerable be HasProperty(Obj, "enumerable").
+ Maybe<bool> has_property = JSReceiver::HasProperty(&it);
+ // 5. ReturnIfAbrupt(hasEnumerable).
+ if (has_property.IsNothing()) return false;
+ // 6. If hasEnumerable is true, then
+ if (has_property.FromJust()) {
+ // 6a. Let enum be ToBoolean(Get(Obj, "enumerable")).
+ // 6b. ReturnIfAbrupt(enum).
+ if (!JSObject::GetProperty(&it).ToHandle(value)) return false;
+ }
+ return true;
+}
+
+
+// Helper function for ToPropertyDescriptor. Handles the case of "simple"
+// objects: nothing on the prototype chain, just own fast data properties.
+// Must not have observable side effects, because the slow path will restart
+// the entire conversion!
+bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
+ PropertyDescriptor* desc) {
+ if (!obj->IsJSObject()) return false;
+ Map* map = Handle<JSObject>::cast(obj)->map();
+ if (map->instance_type() != JS_OBJECT_TYPE) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->prototype() != *isolate->initial_object_prototype()) return false;
+ // During bootstrapping, the object_function_prototype_map hasn't been
+ // set up yet.
+ if (isolate->bootstrapper()->IsActive()) return false;
+ if (JSObject::cast(map->prototype())->map() !=
+ isolate->native_context()->object_function_prototype_map()) {
+ return false;
+ }
+ // TODO(jkummerow): support dictionary properties?
+ if (map->is_dictionary_map()) return false;
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map->instance_descriptors());
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Name* key = descs->GetKey(i);
+ Handle<Object> value;
+ switch (details.type()) {
+ case DATA:
+ value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
+ details.representation(),
+ FieldIndex::ForDescriptor(map, i));
+ break;
+ case DATA_CONSTANT:
+ value = handle(descs->GetConstant(i), isolate);
+ break;
+ case ACCESSOR:
+ case ACCESSOR_CONSTANT:
+ // Bail out to slow path.
+ return false;
+ }
+ Heap* heap = isolate->heap();
+ if (key == heap->enumerable_string()) {
+ desc->set_enumerable(value->BooleanValue());
+ } else if (key == heap->configurable_string()) {
+ desc->set_configurable(value->BooleanValue());
+ } else if (key == heap->value_string()) {
+ desc->set_value(value);
+ } else if (key == heap->writable_string()) {
+ desc->set_writable(value->BooleanValue());
+ } else if (key == heap->get_string()) {
+ // Bail out to slow path to throw an exception if necessary.
+ if (!value->IsCallable()) return false;
+ desc->set_get(value);
+ } else if (key == heap->set_string()) {
+ // Bail out to slow path to throw an exception if necessary.
+ if (!value->IsCallable()) return false;
+ desc->set_set(value);
+ }
+ }
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ // Bail out to slow path to throw an exception.
+ return false;
+ }
+ return true;
+}
+
+
+static void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<String> name, Handle<Object> value) {
+ LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
+ CHECK(result.IsJust() && result.FromJust());
+}
+
+
+// ES6 6.2.4.4 "FromPropertyDescriptor"
+Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
+ DCHECK(!(PropertyDescriptor::IsAccessorDescriptor(this) &&
+ PropertyDescriptor::IsDataDescriptor(this)));
+ Factory* factory = isolate->factory();
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ if (has_value()) {
+ CreateDataProperty(isolate, result, factory->value_string(), value());
+ }
+ if (has_writable()) {
+ CreateDataProperty(isolate, result, factory->writable_string(),
+ factory->ToBoolean(writable()));
+ }
+ if (has_get()) {
+ CreateDataProperty(isolate, result, factory->get_string(), get());
+ }
+ if (has_set()) {
+ CreateDataProperty(isolate, result, factory->set_string(), set());
+ }
+ if (has_enumerable()) {
+ CreateDataProperty(isolate, result, factory->enumerable_string(),
+ factory->ToBoolean(enumerable()));
+ }
+ if (has_configurable()) {
+ CreateDataProperty(isolate, result, factory->configurable_string(),
+ factory->ToBoolean(configurable()));
+ }
+ return result;
+}
+
+
+// ES6 6.2.4.5
+// Returns false in case of exception.
+// static
+bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
+ Handle<Object> obj,
+ PropertyDescriptor* desc) {
+ // 1. ReturnIfAbrupt(Obj).
+ // 2. If Type(Obj) is not Object, throw a TypeError exception.
+ if (!obj->IsJSReceiver()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kPropertyDescObject, obj));
+ return false;
+ }
+ // 3. Let desc be a new Property Descriptor that initially has no fields.
+ DCHECK(desc->is_empty());
+
+ if (ToPropertyDescriptorFastPath(isolate, obj, desc)) {
+ return true;
+ }
+
+ // enumerable?
+ Handle<Object> enumerable;
+ // 4 through 6b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->enumerable_string(),
+ &enumerable)) {
+ return false;
+ }
+ // 6c. Set the [[Enumerable]] field of desc to enum.
+ if (!enumerable.is_null()) {
+ desc->set_enumerable(enumerable->BooleanValue());
+ }
+
+ // configurable?
+ Handle<Object> configurable;
+ // 7 through 9b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->configurable_string(),
+ &configurable)) {
+ return false;
+ }
+ // 9c. Set the [[Configurable]] field of desc to conf.
+ if (!configurable.is_null()) {
+ desc->set_configurable(configurable->BooleanValue());
+ }
+
+ // value?
+ Handle<Object> value;
+ // 10 through 12b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->value_string(), &value)) {
+ return false;
+ }
+ // 12c. Set the [[Value]] field of desc to value.
+ if (!value.is_null()) desc->set_value(value);
+
+ // writable?
+ Handle<Object> writable;
+ // 13 through 15b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->writable_string(),
+ &writable)) {
+ return false;
+ }
+ // 15c. Set the [[Writable]] field of desc to writable.
+ if (!writable.is_null()) desc->set_writable(writable->BooleanValue());
+
+ // getter?
+ Handle<Object> getter;
+ // 16 through 18b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->get_string(), &getter)) {
+ return false;
+ }
+ if (!getter.is_null()) {
+ // 18c. If IsCallable(getter) is false and getter is not undefined,
+ // throw a TypeError exception.
+ if (!getter->IsCallable() && !getter->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectGetterCallable, getter));
+ return false;
+ }
+ // 18d. Set the [[Get]] field of desc to getter.
+ desc->set_get(getter);
+ }
+ // setter?
+ Handle<Object> setter;
+ // 19 through 21b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->set_string(), &setter)) {
+ return false;
+ }
+ if (!setter.is_null()) {
+ // 21c. If IsCallable(setter) is false and setter is not undefined,
+ // throw a TypeError exception.
+ if (!setter->IsCallable() && !setter->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectSetterCallable, setter));
+ return false;
+ }
+ // 21d. Set the [[Set]] field of desc to setter.
+ desc->set_set(setter);
+ }
+
+ // 22. If either desc.[[Get]] or desc.[[Set]] is present, then
+ // 22a. If either desc.[[Value]] or desc.[[Writable]] is present,
+ // throw a TypeError exception.
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kValueAndAccessor, obj));
+ return false;
+ }
+
+ // 23. Return desc.
+ return true;
+}
+
+
+// ES6 6.2.4.6
+// static
+void PropertyDescriptor::CompletePropertyDescriptor(Isolate* isolate,
+ PropertyDescriptor* desc) {
+ // 1. ReturnIfAbrupt(Desc).
+ // 2. Assert: Desc is a Property Descriptor.
+ // 3. Let like be Record{
+ // [[Value]]: undefined, [[Writable]]: false,
+ // [[Get]]: undefined, [[Set]]: undefined,
+ // [[Enumerable]]: false, [[Configurable]]: false}.
+ // 4. If either IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true,
+ // then:
+ if (!IsAccessorDescriptor(desc)) {
+ // 4a. If Desc does not have a [[Value]] field, set Desc.[[Value]] to
+ // like.[[Value]].
+ if (!desc->has_value()) {
+ desc->set_value(isolate->factory()->undefined_value());
+ }
+ // 4b. If Desc does not have a [[Writable]] field, set Desc.[[Writable]]
+ // to like.[[Writable]].
+ if (!desc->has_writable()) desc->set_writable(false);
+ } else {
+ // 5. Else,
+ // 5a. If Desc does not have a [[Get]] field, set Desc.[[Get]] to
+ // like.[[Get]].
+ if (!desc->has_get()) {
+ desc->set_get(isolate->factory()->undefined_value());
+ }
+ // 5b. If Desc does not have a [[Set]] field, set Desc.[[Set]] to
+ // like.[[Set]].
+ if (!desc->has_set()) {
+ desc->set_set(isolate->factory()->undefined_value());
+ }
+ }
+ // 6. If Desc does not have an [[Enumerable]] field, set
+ // Desc.[[Enumerable]] to like.[[Enumerable]].
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ // 7. If Desc does not have a [[Configurable]] field, set
+ // Desc.[[Configurable]] to like.[[Configurable]].
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ // 8. Return Desc.
+}
+
+} // namespace internal
+} // namespace v8
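The new file mirrors the spec's tri-state property descriptors: each field is either absent or present with a value, ToPropertyDescriptor fills only the fields found on the object, and CompletePropertyDescriptor (ES6 6.2.4.6) supplies the defaults — [[Value]]/[[Writable]] for generic and data descriptors, [[Get]]/[[Set]] for accessor descriptors, and false for [[Enumerable]]/[[Configurable]] in every case. A self-contained sketch of the completion step, using std::optional for the tri-state where the real code uses has_* flags and null handles:

    #include <optional>
    #include <string>

    struct DescriptorSketch {
      std::optional<bool> enumerable, configurable, writable;
      std::optional<std::string> value, get, set;  // strings stand in for JS values

      bool IsAccessor() const { return get.has_value() || set.has_value(); }
    };

    // ES6 6.2.4.6 CompletePropertyDescriptor over the sketch type.
    static void Complete(DescriptorSketch* desc) {
      if (!desc->IsAccessor()) {
        if (!desc->value) desc->value = "undefined";
        if (!desc->writable) desc->writable = false;
      } else {
        if (!desc->get) desc->get = "undefined";
        if (!desc->set) desc->set = "undefined";
      }
      if (!desc->enumerable) desc->enumerable = false;
      if (!desc->configurable) desc->configurable = false;
    }

    int main() {
      DescriptorSketch d;
      d.value = "42";  // a bare data descriptor, as from {value: 42}
      Complete(&d);
      // After completion, writable/enumerable/configurable all default to false.
      return (*d.writable || *d.enumerable || *d.configurable) ? 1 : 0;
    }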
diff --git a/chromium/v8/src/property-descriptor.h b/chromium/v8/src/property-descriptor.h
new file mode 100644
index 00000000000..5fbbfa36ec2
--- /dev/null
+++ b/chromium/v8/src/property-descriptor.h
@@ -0,0 +1,123 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROPERTY_DESCRIPTOR_H_
+#define V8_PROPERTY_DESCRIPTOR_H_
+
+
+#include "src/handles.h"
+#include "src/property-details.h"
+
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Object;
+
+class PropertyDescriptor {
+ public:
+ PropertyDescriptor()
+ : enumerable_(false),
+ has_enumerable_(false),
+ configurable_(false),
+ has_configurable_(false),
+ writable_(false),
+ has_writable_(false) {}
+
+ // ES6 6.2.4.1
+ static bool IsAccessorDescriptor(PropertyDescriptor* desc) {
+ return desc->has_get() || desc->has_set();
+ }
+
+ // ES6 6.2.4.2
+ static bool IsDataDescriptor(PropertyDescriptor* desc) {
+ return desc->has_value() || desc->has_writable();
+ }
+
+ // ES6 6.2.4.3
+ static bool IsGenericDescriptor(PropertyDescriptor* desc) {
+ return !IsAccessorDescriptor(desc) && !IsDataDescriptor(desc);
+ }
+
+ // ES6 6.2.4.4
+ Handle<Object> ToObject(Isolate* isolate);
+
+ // ES6 6.2.4.5
+ static bool ToPropertyDescriptor(Isolate* isolate, Handle<Object> obj,
+ PropertyDescriptor* desc);
+
+ // ES6 6.2.4.6
+ static void CompletePropertyDescriptor(Isolate* isolate,
+ PropertyDescriptor* desc);
+
+ bool is_empty() const {
+ return !has_enumerable() && !has_configurable() && !has_writable() &&
+ !has_value() && !has_get() && !has_set();
+ }
+
+ bool enumerable() const { return enumerable_; }
+ void set_enumerable(bool enumerable) {
+ enumerable_ = enumerable;
+ has_enumerable_ = true;
+ }
+ bool has_enumerable() const { return has_enumerable_; }
+
+ bool configurable() const { return configurable_; }
+ void set_configurable(bool configurable) {
+ configurable_ = configurable;
+ has_configurable_ = true;
+ }
+ bool has_configurable() const { return has_configurable_; }
+
+ Handle<Object> value() const { return value_; }
+ void set_value(Handle<Object> value) { value_ = value; }
+ bool has_value() const { return !value_.is_null(); }
+
+ bool writable() const { return writable_; }
+ void set_writable(bool writable) {
+ writable_ = writable;
+ has_writable_ = true;
+ }
+ bool has_writable() const { return has_writable_; }
+
+ Handle<Object> get() const { return get_; }
+ void set_get(Handle<Object> get) { get_ = get; }
+ bool has_get() const { return !get_.is_null(); }
+
+ Handle<Object> set() const { return set_; }
+ void set_set(Handle<Object> set) { set_ = set; }
+ bool has_set() const { return !set_.is_null(); }
+
+ Handle<Object> name() const { return name_; }
+ void set_name(Handle<Object> name) { name_ = name; }
+
+ PropertyAttributes ToAttributes() {
+ return static_cast<PropertyAttributes>(
+ (has_enumerable() && !enumerable() ? DONT_ENUM : NONE) |
+ (has_configurable() && !configurable() ? DONT_DELETE : NONE) |
+ (has_writable() && !writable() ? READ_ONLY : NONE));
+ }
+
+ private:
+ bool enumerable_ : 1;
+ bool has_enumerable_ : 1;
+ bool configurable_ : 1;
+ bool has_configurable_ : 1;
+ bool writable_ : 1;
+ bool has_writable_ : 1;
+ Handle<Object> value_;
+ Handle<Object> get_;
+ Handle<Object> set_;
+ Handle<Object> name_;
+
+ // Some compilers (Xcode 5.1, ARM GCC 4.9) insist on having a copy
+ // constructor for std::vector<PropertyDescriptor>, so we can't
+ // DISALLOW_COPY_AND_ASSIGN(PropertyDescriptor); here.
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROPERTY_DESCRIPTOR_H_
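ToAttributes collapses the tri-state fields into V8's attribute bits: a restrictive bit is set only when the corresponding field is present and false, so absent fields contribute NONE. A sketch of just that mapping (enum values as in property-details.h below):

    enum Attr { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

    struct Flags {
      bool has_enumerable, enumerable;
      bool has_configurable, configurable;
      bool has_writable, writable;
    };

    // Mirrors PropertyDescriptor::ToAttributes: only present-and-false
    // fields set the restrictive bit.
    static int ToAttributes(const Flags& f) {
      return (f.has_enumerable && !f.enumerable ? DONT_ENUM : NONE) |
             (f.has_configurable && !f.configurable ? DONT_DELETE : NONE) |
             (f.has_writable && !f.writable ? READ_ONLY : NONE);
    }

    int main() {
      // {writable: false} alone yields READ_ONLY; absent fields add nothing.
      Flags f = {false, false, false, false, true, false};
      return ToAttributes(f) == READ_ONLY ? 0 : 1;
    }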
diff --git a/chromium/v8/src/property-details.h b/chromium/v8/src/property-details.h
index 33d3b8d7efd..44f32cbc935 100644
--- a/chromium/v8/src/property-details.h
+++ b/chromium/v8/src/property-details.h
@@ -9,30 +9,51 @@
#include "src/allocation.h"
#include "src/utils.h"
-// Ecma-262 3rd 8.6.1
+namespace v8 {
+namespace internal {
+
+// ES6 6.1.7.1
enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
+ NONE = ::v8::None,
+ READ_ONLY = ::v8::ReadOnly,
+ DONT_ENUM = ::v8::DontEnum,
+ DONT_DELETE = ::v8::DontDelete,
- SEALED = DONT_DELETE,
- FROZEN = SEALED | READ_ONLY,
+ ALL_ATTRIBUTES_MASK = READ_ONLY | DONT_ENUM | DONT_DELETE,
- STRING = 8, // Used to filter symbols and string names
- SYMBOLIC = 16,
- PRIVATE_SYMBOL = 32,
+ SEALED = DONT_DELETE,
+ FROZEN = SEALED | READ_ONLY,
- DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL,
- ABSENT = 64 // Used in runtime to indicate a property is absent.
+ ABSENT = 64, // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
// a non-existent property.
+
+ // When creating a property, EVAL_DECLARED is used to indicate that the
+ // property came from a sloppy-mode direct eval, and certain checks need
+ // to be done.
+ // Cannot be stored in or returned from a descriptor's attributes bitfield.
+ EVAL_DECLARED = 128
};
-namespace v8 {
-namespace internal {
+enum PropertyFilter {
+ ALL_PROPERTIES = 0,
+ ONLY_WRITABLE = 1,
+ ONLY_ENUMERABLE = 2,
+ ONLY_CONFIGURABLE = 4,
+ SKIP_STRINGS = 8,
+ SKIP_SYMBOLS = 16,
+ ONLY_ALL_CAN_READ = 32,
+ ENUMERABLE_STRINGS = ONLY_ENUMERABLE | SKIP_SYMBOLS,
+};
+// Enable fast comparisons of PropertyAttributes against PropertyFilters.
+STATIC_ASSERT(ALL_PROPERTIES == static_cast<PropertyFilter>(NONE));
+STATIC_ASSERT(ONLY_WRITABLE == static_cast<PropertyFilter>(READ_ONLY));
+STATIC_ASSERT(ONLY_ENUMERABLE == static_cast<PropertyFilter>(DONT_ENUM));
+STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(DONT_DELETE));
+STATIC_ASSERT(((SKIP_STRINGS | SKIP_SYMBOLS | ONLY_ALL_CAN_READ) &
+ ALL_ATTRIBUTES_MASK) == 0);
+
class Smi;
template<class> class TypeImpl;
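The STATIC_ASSERTs pin each PropertyFilter value to the PropertyAttributes bit it excludes, which presumably allows filtering with a single bitwise AND instead of a translation table. A sketch of that correspondence:

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2,
                              DONT_DELETE = 4 };
    enum PropertyFilter { ALL_PROPERTIES = 0, ONLY_WRITABLE = 1,
                          ONLY_ENUMERABLE = 2, ONLY_CONFIGURABLE = 4 };

    // Each filter bit equals the attribute bit it excludes.
    static_assert(ONLY_WRITABLE == static_cast<int>(READ_ONLY), "bit match");
    static_assert(ONLY_ENUMERABLE == static_cast<int>(DONT_ENUM), "bit match");
    static_assert(ONLY_CONFIGURABLE == static_cast<int>(DONT_DELETE),
                  "bit match");

    // A property survives a filter when none of the filtered bits are set
    // in its attributes.
    static bool PassesFilter(int attributes, int filter) {
      return (attributes & filter) == 0;
    }

    int main() {
      // A DONT_ENUM property is dropped by ONLY_ENUMERABLE, kept otherwise.
      return PassesFilter(DONT_ENUM, ONLY_ENUMERABLE) ? 1 : 0;
    }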
@@ -371,6 +392,7 @@ class PropertyDetails BASE_EMBEDDED {
std::ostream& operator<<(std::ostream& os,
const PropertyAttributes& attributes);
std::ostream& operator<<(std::ostream& os, const PropertyDetails& details);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROPERTY_DETAILS_H_
diff --git a/chromium/v8/src/property.h b/chromium/v8/src/property.h
index 09ec5f207fb..b58c9c6acba 100644
--- a/chromium/v8/src/property.h
+++ b/chromium/v8/src/property.h
@@ -107,6 +107,7 @@ class AccessorConstantDescriptor final : public Descriptor {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_PROPERTY_H_
diff --git a/chromium/v8/src/prototype.h b/chromium/v8/src/prototype.h
index 07277498535..3253791f907 100644
--- a/chromium/v8/src/prototype.h
+++ b/chromium/v8/src/prototype.h
@@ -28,12 +28,15 @@ class PrototypeIterator {
enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
+ const int kProxyPrototypeLimit = 100 * 1000;
+
PrototypeIterator(Isolate* isolate, Handle<Object> receiver,
WhereToStart where_to_start = START_AT_PROTOTYPE)
: did_jump_to_prototype_chain_(false),
object_(NULL),
handle_(receiver),
- isolate_(isolate) {
+ isolate_(isolate),
+ seen_proxies_(0) {
CHECK(!handle_.is_null());
if (where_to_start == START_AT_PROTOTYPE) {
Advance();
@@ -44,7 +47,8 @@ class PrototypeIterator {
WhereToStart where_to_start = START_AT_PROTOTYPE)
: did_jump_to_prototype_chain_(false),
object_(receiver),
- isolate_(isolate) {
+ isolate_(isolate),
+ seen_proxies_(0) {
if (where_to_start == START_AT_PROTOTYPE) {
Advance();
}
@@ -63,6 +67,17 @@ class PrototypeIterator {
~PrototypeIterator() {}
+ bool HasAccess() const {
+ // We can only perform access checks in the handlified version of the
+ // PrototypeIterator.
+ DCHECK(!handle_.is_null());
+ if (handle_->IsAccessCheckNeeded()) {
+ return isolate_->MayAccess(handle(isolate_->context()),
+ Handle<JSObject>::cast(handle_));
+ }
+ return true;
+ }
+
template <typename T = Object>
T* GetCurrent() const {
DCHECK(handle_.is_null());
@@ -72,6 +87,7 @@ class PrototypeIterator {
template <typename T = Object>
static Handle<T> GetCurrent(const PrototypeIterator& iterator) {
DCHECK(!iterator.handle_.is_null());
+ DCHECK(iterator.object_ == NULL);
return Handle<T>::cast(iterator.handle_);
}
@@ -106,6 +122,33 @@ class PrototypeIterator {
}
}
+ // Returns false iff a call to JSProxy::GetPrototype throws.
+ // TODO(neis): This should probably replace Advance().
+ bool AdvanceFollowingProxies() {
+ DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
+ if (!HasAccess()) {
+ // Abort the lookup if we do not have access to the current object.
+ handle_ = isolate_->factory()->null_value();
+ return true;
+ }
+ if (handle_.is_null() || !handle_->IsJSProxy()) {
+ AdvanceIgnoringProxies();
+ return true;
+ }
+ // Due to possible __proto__ recursion, limit the number of Proxies
+ // we visit to an arbitrarily chosen large number.
+ seen_proxies_++;
+ if (seen_proxies_ > kProxyPrototypeLimit) {
+ isolate_->Throw(
+ *isolate_->factory()->NewRangeError(MessageTemplate::kStackOverflow));
+ return false;
+ }
+ did_jump_to_prototype_chain_ = true;
+ MaybeHandle<Object> proto =
+ JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
+ return proto.ToHandle(&handle_);
+ }
+
bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const {
if (handle_.is_null()) {
return object_->IsNull() ||
@@ -135,6 +178,7 @@ class PrototypeIterator {
Object* object_;
Handle<Object> handle_;
Isolate* isolate_;
+ int seen_proxies_;
DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
};
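
The seen_proxies_ counter and kProxyPrototypeLimit exist because a proxy's getPrototypeOf trap can manufacture an unbounded prototype chain. A sketch of the pathological input in plain JavaScript (the limit and the reuse of the kStackOverflow message are taken from the code above):

// Every prototype query yields a fresh proxy, so the chain never ends;
// AdvanceFollowingProxies throws a RangeError after 100 * 1000 proxies
// instead of walking forever.
const endless = {
  getPrototypeOf(target) { return new Proxy({}, endless); }
};
const p = new Proxy({}, endless);
try {
  Object.prototype.isPrototypeOf.call({}, p);  // walks p's prototype chain
} catch (e) {
  console.log(e instanceof RangeError);  // true, once the limit is hit
}
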
diff --git a/chromium/v8/src/proxy.js b/chromium/v8/src/proxy.js
deleted file mode 100644
index cc45b32b3df..00000000000
--- a/chromium/v8/src/proxy.js
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// ----------------------------------------------------------------------------
-// Imports
-
-var GlobalFunction = global.Function;
-var GlobalObject = global.Object;
-
-var ToNameArray;
-
-utils.Import(function(from) {
- ToNameArray = from.ToNameArray;
-});
-
-//----------------------------------------------------------------------------
-
-function ProxyCreate(handler, proto) {
- if (!IS_SPEC_OBJECT(handler))
- throw MakeTypeError(kProxyHandlerNonObject, "create")
- if (IS_UNDEFINED(proto))
- proto = null
- else if (!(IS_SPEC_OBJECT(proto) || IS_NULL(proto)))
- throw MakeTypeError(kProxyProtoNonObject)
- return %CreateJSProxy(handler, proto)
-}
-
-function ProxyCreateFunction(handler, callTrap, constructTrap) {
- if (!IS_SPEC_OBJECT(handler))
- throw MakeTypeError(kProxyHandlerNonObject, "createFunction")
- if (!IS_CALLABLE(callTrap))
- throw MakeTypeError(kProxyTrapFunctionExpected, "call")
- if (IS_UNDEFINED(constructTrap)) {
- constructTrap = DerivedConstructTrap(callTrap)
- } else if (IS_CALLABLE(constructTrap)) {
- // Make sure the trap receives 'undefined' as this.
- var construct = constructTrap
- constructTrap = function() {
- return %Apply(construct, UNDEFINED, arguments, 0, %_ArgumentsLength());
- }
- } else {
- throw MakeTypeError(kProxyTrapFunctionExpected, "construct")
- }
- return %CreateJSFunctionProxy(
- handler, callTrap, constructTrap, GlobalFunction.prototype)
-}
-
-// -------------------------------------------------------------------
-// Proxy Builtins
-
-function DerivedConstructTrap(callTrap) {
- return function() {
- var proto = this.prototype
- if (!IS_SPEC_OBJECT(proto)) proto = GlobalObject.prototype
- var obj = { __proto__: proto };
- var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
- return IS_SPEC_OBJECT(result) ? result : obj
- }
-}
-
-function DelegateCallAndConstruct(callTrap, constructTrap) {
- return function() {
- return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
- this, arguments, 0, %_ArgumentsLength())
- }
-}
-
-function DerivedGetTrap(receiver, name) {
- var desc = this.getPropertyDescriptor(name)
- if (IS_UNDEFINED(desc)) { return desc }
- if ('value' in desc) {
- return desc.value
- } else {
- if (IS_UNDEFINED(desc.get)) { return desc.get }
- // The proposal says: desc.get.call(receiver)
- return %_CallFunction(receiver, desc.get)
- }
-}
-
-function DerivedSetTrap(receiver, name, val) {
- var desc = this.getOwnPropertyDescriptor(name)
- if (desc) {
- if ('writable' in desc) {
- if (desc.writable) {
- desc.value = val
- this.defineProperty(name, desc)
- return true
- } else {
- return false
- }
- } else { // accessor
- if (desc.set) {
- // The proposal says: desc.set.call(receiver, val)
- %_CallFunction(receiver, val, desc.set)
- return true
- } else {
- return false
- }
- }
- }
- desc = this.getPropertyDescriptor(name)
- if (desc) {
- if ('writable' in desc) {
- if (desc.writable) {
- // fall through
- } else {
- return false
- }
- } else { // accessor
- if (desc.set) {
- // The proposal says: desc.set.call(receiver, val)
- %_CallFunction(receiver, val, desc.set)
- return true
- } else {
- return false
- }
- }
- }
- this.defineProperty(name, {
- value: val,
- writable: true,
- enumerable: true,
- configurable: true});
- return true;
-}
-
-function DerivedHasTrap(name) {
- return !!this.getPropertyDescriptor(name)
-}
-
-function DerivedHasOwnTrap(name) {
- return !!this.getOwnPropertyDescriptor(name)
-}
-
-function DerivedKeysTrap() {
- var names = this.getOwnPropertyNames()
- var enumerableNames = []
- for (var i = 0, count = 0; i < names.length; ++i) {
- var name = names[i]
- if (IS_SYMBOL(name)) continue
- var desc = this.getOwnPropertyDescriptor(TO_STRING(name))
- if (!IS_UNDEFINED(desc) && desc.enumerable) {
- enumerableNames[count++] = names[i]
- }
- }
- return enumerableNames
-}
-
-function DerivedEnumerateTrap() {
- var names = this.getPropertyNames()
- var enumerableNames = []
- for (var i = 0, count = 0; i < names.length; ++i) {
- var name = names[i]
- if (IS_SYMBOL(name)) continue
- var desc = this.getPropertyDescriptor(TO_STRING(name))
- if (!IS_UNDEFINED(desc)) {
- if (!desc.configurable) {
- throw MakeTypeError(kProxyPropNotConfigurable,
- this, name, "getPropertyDescriptor")
- }
- if (desc.enumerable) enumerableNames[count++] = names[i]
- }
- }
- return enumerableNames
-}
-
-function ProxyEnumerate(proxy) {
- var handler = %GetHandler(proxy)
- if (IS_UNDEFINED(handler.enumerate)) {
- return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
- } else {
- return ToNameArray(handler.enumerate(), "enumerate", false)
- }
-}
-
-//-------------------------------------------------------------------
-
-var Proxy = new GlobalObject();
-%AddNamedProperty(global, "Proxy", Proxy, DONT_ENUM);
-
-// Set up non-enumerable properties of the Proxy object.
-utils.InstallFunctions(Proxy, DONT_ENUM, [
- "create", ProxyCreate,
- "createFunction", ProxyCreateFunction
-])
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.ProxyDelegateCallAndConstruct = DelegateCallAndConstruct;
- to.ProxyDerivedHasOwnTrap = DerivedHasOwnTrap;
- to.ProxyDerivedKeysTrap = DerivedKeysTrap;
-});
-
-%InstallToContext([
- "derived_get_trap", DerivedGetTrap,
- "derived_has_trap", DerivedHasTrap,
- "derived_set_trap", DerivedSetTrap,
- "proxy_enumerate", ProxyEnumerate,
-]);
-
-})
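
The deleted file self-hosted the pre-ES6 proxy API (Proxy.create and Proxy.createFunction). Its replacement is the standard ES6 Proxy constructor, implemented natively rather than in JavaScript. A rough before/after sketch (the handler shown is illustrative):

// Removed API:
//   var p = Proxy.create(handler, proto);
//   var f = Proxy.createFunction(handler, callTrap, constructTrap);
// ES6 API, one constructor for object and function proxies alike:
const f = new Proxy(function () {}, {
  get(target, name) { return name in target ? target[name] : 'absent'; },
  apply(target, thisArg, args) { return args.length; }  // the old callTrap role
});
f.missing;   // 'absent'
f(1, 2, 3);  // 3
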
diff --git a/chromium/v8/src/regexp.js b/chromium/v8/src/regexp.js
deleted file mode 100644
index e19a813483e..00000000000
--- a/chromium/v8/src/regexp.js
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-var $regexpLastMatchInfoOverride;
-
-(function(global, utils) {
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var FLAG_harmony_regexps;
-var FLAG_harmony_unicode_regexps;
-var GlobalRegExp = global.RegExp;
-var InternalPackedArray = utils.InternalPackedArray;
-var ToNumber;
-
-utils.Import(function(from) {
- ToNumber = from.ToNumber;
-});
-
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_regexps = from.FLAG_harmony_regexps;
- FLAG_harmony_unicode_regexps = from.FLAG_harmony_unicode_regexps;
-});
-
-// -------------------------------------------------------------------
-
-// Property of the builtins object for recording the result of the last
-// regexp match. The property RegExpLastMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings); the invariant is
-// that there are at least two capture indices. The array also contains
-// the subject string for the last successful match.
-var RegExpLastMatchInfo = new InternalPackedArray(
- 2, // REGEXP_NUMBER_OF_CAPTURES
- "", // Last subject.
- UNDEFINED, // Last input - settable with RegExpSetInput.
- 0, // REGEXP_FIRST_CAPTURE + 0
- 0 // REGEXP_FIRST_CAPTURE + 1
-);
-
-// Override last match info with an array of actual substrings.
-// Used internally by replace regexp with function.
-// The array has the format of an "apply" argument for a replacement
-// function.
-$regexpLastMatchInfoOverride = null;
-
-// -------------------------------------------------------------------
-
-// A recursive descent parser for Patterns according to the grammar of
-// ECMA-262 15.10.1, with deviations noted below.
-function DoConstructRegExp(object, pattern, flags) {
- // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
- if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
- flags = (pattern.global ? 'g' : '')
- + (pattern.ignoreCase ? 'i' : '')
- + (pattern.multiline ? 'm' : '');
- if (FLAG_harmony_unicode_regexps)
- flags += (pattern.unicode ? 'u' : '');
- if (FLAG_harmony_regexps)
- flags += (pattern.sticky ? 'y' : '');
- pattern = pattern.source;
- }
-
- pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
- flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
-
- %RegExpInitializeAndCompile(object, pattern, flags);
-}
-
-
-function RegExpConstructor(pattern, flags) {
- if (%_IsConstructCall()) {
- DoConstructRegExp(this, pattern, flags);
- } else {
- // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
- if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
- return pattern;
- }
- return new GlobalRegExp(pattern, flags);
- }
-}
-
-// Deprecated RegExp.prototype.compile method. We behave like the constructor
-// were called again. In SpiderMonkey, this method returns the regexp object.
-// In JSC, it returns undefined. For compatibility with JSC, we match their
-// behavior.
-function RegExpCompileJS(pattern, flags) {
- // Both JSC and SpiderMonkey treat a missing pattern argument as the
- // empty pattern string, and an actual undefined value passed as the
- // pattern as the string 'undefined'. Note that JSC is inconsistent
- // here, treating undefined values differently in
- // RegExp.prototype.compile and in the constructor, where they are
- // the empty string. For compatibility with JSC, we match their
- // behavior.
- if (this == GlobalRegExp.prototype) {
- // We don't allow recompiling RegExp.prototype.
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'RegExp.prototype.compile', this);
- }
- if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
- DoConstructRegExp(this, 'undefined', flags);
- } else {
- DoConstructRegExp(this, pattern, flags);
- }
-}
-
-
-function DoRegExpExec(regexp, string, index) {
- var result = %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
- if (result !== null) $regexpLastMatchInfoOverride = null;
- return result;
-}
-
-
-// This is kind of performance sensitive, so we want to avoid unnecessary
-// type checks on inputs. But we also don't want to inline it several times
-// manually, so we use a macro :-)
-macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING)
- var numResults = NUMBER_OF_CAPTURES(MATCHINFO) >> 1;
- var start = MATCHINFO[CAPTURE0];
- var end = MATCHINFO[CAPTURE1];
- // Calculate the substring of the first match before creating the result array
- // to avoid an unnecessary write barrier storing the first result.
- var first = %_SubString(STRING, start, end);
- var result = %_RegExpConstructResult(numResults, start, STRING);
- result[0] = first;
- if (numResults == 1) return result;
- var j = REGEXP_FIRST_CAPTURE + 2;
- for (var i = 1; i < numResults; i++) {
- start = MATCHINFO[j++];
- if (start != -1) {
- end = MATCHINFO[j];
- result[i] = %_SubString(STRING, start, end);
- }
- j++;
- }
- return result;
-endmacro
-
-
-function RegExpExecNoTests(regexp, string, start) {
- // Must be called with a RegExp, a string and a non-negative integer.
- var matchInfo = %_RegExpExec(regexp, string, start, RegExpLastMatchInfo);
- if (matchInfo !== null) {
- $regexpLastMatchInfoOverride = null;
- RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
- }
- regexp.lastIndex = 0;
- return null;
-}
-
-
-function RegExpExecJS(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'RegExp.prototype.exec', this);
- }
-
- string = TO_STRING(string);
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
-
- var updateLastIndex = this.global || (FLAG_harmony_regexps && this.sticky);
- if (updateLastIndex) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return null;
- }
- } else {
- i = 0;
- }
-
- // matchIndices is either null or the RegExpLastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
-
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return null;
- }
-
- // Successful match.
- $regexpLastMatchInfoOverride = null;
- if (updateLastIndex) {
- this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
- }
- RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
-}
-
-
-// One-element cache for the simplified test regexp.
-var regexp_key;
-var regexp_val;
-
-// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be
-// that test is defined in terms of RegExp.prototype.exec. However, it probably
-// means the original value of RegExp.prototype.exec, which is what everybody
-// else implements.
-function RegExpTest(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'RegExp.prototype.test', this);
- }
- string = TO_STRING(string);
-
- var lastIndex = this.lastIndex;
-
- // Conversion is required by the ES5 specification (RegExp.prototype.exec
- // algorithm, step 5) even if the value is discarded for non-global RegExps.
- var i = TO_INTEGER(lastIndex);
-
- if (this.global || (FLAG_harmony_regexps && this.sticky)) {
- if (i < 0 || i > string.length) {
- this.lastIndex = 0;
- return false;
- }
- // matchIndices is either null or the RegExpLastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, RegExpLastMatchInfo);
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return false;
- }
- $regexpLastMatchInfoOverride = null;
- this.lastIndex = RegExpLastMatchInfo[CAPTURE1];
- return true;
- } else {
- // Non-global, non-sticky regexp.
- // Remove an irrelevant preceding '.*' in a test regexp. The expression
- // checks whether this.source starts with '.*' and whether the third char is
- // not a '?'. But see https://code.google.com/p/v8/issues/detail?id=3560
- var regexp = this;
- if (regexp.source.length >= 3 &&
- %_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
- %_StringCharCodeAt(regexp.source, 1) == 42 && // '*'
- %_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
- regexp = TrimRegExp(regexp);
- }
- // matchIndices is either null or the RegExpLastMatchInfo array.
- var matchIndices = %_RegExpExec(regexp, string, 0, RegExpLastMatchInfo);
- if (IS_NULL(matchIndices)) {
- this.lastIndex = 0;
- return false;
- }
- $regexpLastMatchInfoOverride = null;
- return true;
- }
-}
-
-function TrimRegExp(regexp) {
- if (!%_ObjectEquals(regexp_key, regexp)) {
- regexp_key = regexp;
- regexp_val =
- new GlobalRegExp(%_SubString(regexp.source, 2, regexp.source.length),
- (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
- : regexp.multiline ? "m" : ""));
- }
- return regexp_val;
-}
-
-
-function RegExpToString() {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'RegExp.prototype.toString', this);
- }
- var result = '/' + this.source + '/';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- if (FLAG_harmony_unicode_regexps && this.unicode) result += 'u';
- if (FLAG_harmony_regexps && this.sticky) result += 'y';
- return result;
-}
-
-
-// Getters for the static properties lastMatch, lastParen, leftContext, and
-// rightContext of the RegExp constructor. The properties are computed based
-// on the captures array of the last successful match and the subject string
-// of the last successful match.
-function RegExpGetLastMatch() {
- if ($regexpLastMatchInfoOverride !== null) {
- return OVERRIDE_MATCH($regexpLastMatchInfoOverride);
- }
- var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
- return %_SubString(regExpSubject,
- RegExpLastMatchInfo[CAPTURE0],
- RegExpLastMatchInfo[CAPTURE1]);
-}
-
-
-function RegExpGetLastParen() {
- if ($regexpLastMatchInfoOverride) {
- var override = $regexpLastMatchInfoOverride;
- if (override.length <= 3) return '';
- return override[override.length - 3];
- }
- var length = NUMBER_OF_CAPTURES(RegExpLastMatchInfo);
- if (length <= 2) return ''; // There were no captures.
- // We match the SpiderMonkey behavior: return the substring defined by the
- // last pair (after the first pair) of elements of the capture array even if
- // it is empty.
- var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
- var start = RegExpLastMatchInfo[CAPTURE(length - 2)];
- var end = RegExpLastMatchInfo[CAPTURE(length - 1)];
- if (start != -1 && end != -1) {
- return %_SubString(regExpSubject, start, end);
- }
- return "";
-}
-
-
-function RegExpGetLeftContext() {
- var start_index;
- var subject;
- if (!$regexpLastMatchInfoOverride) {
- start_index = RegExpLastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT(RegExpLastMatchInfo);
- } else {
- var override = $regexpLastMatchInfoOverride;
- start_index = OVERRIDE_POS(override);
- subject = OVERRIDE_SUBJECT(override);
- }
- return %_SubString(subject, 0, start_index);
-}
-
-
-function RegExpGetRightContext() {
- var start_index;
- var subject;
- if (!$regexpLastMatchInfoOverride) {
- start_index = RegExpLastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT(RegExpLastMatchInfo);
- } else {
- var override = $regexpLastMatchInfoOverride;
- subject = OVERRIDE_SUBJECT(override);
- var match = OVERRIDE_MATCH(override);
- start_index = OVERRIDE_POS(override) + match.length;
- }
- return %_SubString(subject, start_index, subject.length);
-}
-
-
-// The properties $1..$9 are the first nine capturing substrings of the last
-// successful match, or ''. The function RegExpMakeCaptureGetter will be
-// called with indices from 1 to 9.
-function RegExpMakeCaptureGetter(n) {
- return function foo() {
- if ($regexpLastMatchInfoOverride) {
- if (n < $regexpLastMatchInfoOverride.length - 2) {
- return OVERRIDE_CAPTURE($regexpLastMatchInfoOverride, n);
- }
- return '';
- }
- var index = n * 2;
- if (index >= NUMBER_OF_CAPTURES(RegExpLastMatchInfo)) return '';
- var matchStart = RegExpLastMatchInfo[CAPTURE(index)];
- var matchEnd = RegExpLastMatchInfo[CAPTURE(index + 1)];
- if (matchStart == -1 || matchEnd == -1) return '';
- return %_SubString(LAST_SUBJECT(RegExpLastMatchInfo), matchStart, matchEnd);
- };
-}
-
-// -------------------------------------------------------------------
-
-%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
-%AddNamedProperty(
- GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
-%SetCode(GlobalRegExp, RegExpConstructor);
-
-utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
- "exec", RegExpExecJS,
- "test", RegExpTest,
- "toString", RegExpToString,
- "compile", RegExpCompileJS
-]);
-
-// The length of compile is 1 in SpiderMonkey.
-%FunctionSetLength(GlobalRegExp.prototype.compile, 1);
-
-// The properties `input` and `$_` are aliases for each other. When this
-// value is set, the assigned value is coerced to a string.
-// Getter and setter for the input.
-var RegExpGetInput = function() {
- var regExpInput = LAST_INPUT(RegExpLastMatchInfo);
- return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
-};
-var RegExpSetInput = function(string) {
- LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
-};
-
-%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'input', RegExpGetInput,
- RegExpSetInput, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$_', RegExpGetInput,
- RegExpSetInput, DONT_ENUM | DONT_DELETE);
-
-// The properties multiline and $* are aliases for each other. When this
-// value is set in SpiderMonkey, the assigned value is coerced to a
-// boolean. We mimic that behavior with a slight difference: in SpiderMonkey
-// the value of the expression 'RegExp.multiline = null' (for instance) is the
-// boolean false (i.e., the value after coercion), while in V8 it is the value
-// null (i.e., the value before coercion).
-
-// Getter and setter for multiline.
-var multiline = false;
-var RegExpGetMultiline = function() { return multiline; };
-var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
-
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'multiline', RegExpGetMultiline,
- RegExpSetMultiline, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$*', RegExpGetMultiline,
- RegExpSetMultiline,
- DONT_ENUM | DONT_DELETE);
-
-
-var NoOpSetter = function(ignored) {};
-
-
-// Static properties set by a successful match.
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
- NoOpSetter, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$&', RegExpGetLastMatch,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'lastParen', RegExpGetLastParen,
- NoOpSetter, DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$+', RegExpGetLastParen,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'leftContext',
- RegExpGetLeftContext, NoOpSetter,
- DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, '$`', RegExpGetLeftContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, 'rightContext',
- RegExpGetRightContext, NoOpSetter,
- DONT_DELETE);
-%DefineAccessorPropertyUnchecked(GlobalRegExp, "$'", RegExpGetRightContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-
-for (var i = 1; i < 10; ++i) {
- %DefineAccessorPropertyUnchecked(GlobalRegExp, '$' + i,
- RegExpMakeCaptureGetter(i), NoOpSetter,
- DONT_DELETE);
-}
-%ToFastProperties(GlobalRegExp);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.RegExpExec = DoRegExpExec;
- to.RegExpExecNoTests = RegExpExecNoTests;
- to.RegExpLastMatchInfo = RegExpLastMatchInfo;
- to.RegExpTest = RegExpTest;
-});
-
-})
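
With the move to native implementations, the deleted file's observable behavior is unchanged: exec and test still do the same lastIndex bookkeeping, and the nonstandard RegExp statics it installed (lastMatch, $1..$9, leftContext, ...) survive. For reference, a sketch of that contract:

const re = /a./g;   // only global (or sticky) regexps update lastIndex
re.exec('abacad');  // ['ab'], re.lastIndex === 2
re.exec('abacad');  // ['ac'], re.lastIndex === 4
re.lastIndex = 99;
re.exec('abacad');  // null, and lastIndex is reset to 0

/(\w+) (\w+)/.exec('hello world');
RegExp.lastMatch;    // 'hello world' (alias: RegExp['$&'])
RegExp.$2;           // 'world'
RegExp.leftContext;  // ''
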
diff --git a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index d296d90e7df..6fafdfb4ad4 100644
--- a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -58,7 +58,7 @@ namespace internal {
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
+ * string start - 1). Used to initialize capture registers to a
* non-position.
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
@@ -98,7 +98,8 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -176,29 +177,18 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r0, current_input_offset(), Operand(-char_size()));
__ cmp(r0, r1);
BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+void RegExpMacroAssemblerARM::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
__ cmp(r0, r1);
BranchOrBacktrack(ne, on_not_at_start);
}
@@ -220,20 +210,27 @@ void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ ldr(r0, register_location(start_reg)); // Index of start of capture
__ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
__ sub(r1, r1, r0, SetCC); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ b(eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r3, r3, r1);
+ __ cmp(current_input_offset(), r3);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -242,9 +239,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// r0 - offset of start of capture
// r1 - length of capture
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r0, Operand(r1));
+ __ add(r0, r0, end_of_input_address());
+ __ add(r2, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r2, r2, r1); // Offset by length when matching backwards.
+ }
+ __ add(r1, r0, r1);
// r0 - Address of start of capture.
// r1 - Address of end of capture
@@ -283,6 +283,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r2, end_of_input_address());
+ if (read_backward) {
+ __ ldr(r0, register_location(start_reg)); // Index of start of capture
+ __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r0);
+ __ sub(current_input_offset(), current_input_offset(), r1);
+ }
} else {
DCHECK(mode_ == UC16);
int argument_count = 4;
@@ -305,7 +311,10 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Save length in callee-save register for use on return.
__ mov(r4, Operand(r1));
// Address of current input position.
- __ add(r1, current_input_offset(), Operand(end_of_input_address()));
+ __ add(r1, current_input_offset(), end_of_input_address());
+ if (read_backward) {
+ __ sub(r1, r1, r4);
+ }
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
@@ -319,17 +328,22 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), Operand(r4));
+
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(current_input_offset(), current_input_offset(), r4);
+ } else {
+ __ add(current_input_offset(), current_input_offset(), r4);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerARM::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerARM::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
@@ -337,17 +351,31 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
__ ldr(r0, register_location(start_reg));
__ ldr(r1, register_location(start_reg + 1));
__ sub(r1, r1, r0, SetCC); // Length to check.
- // Succeed on empty capture (including no capture).
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ b(eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r3, r3, r1);
+ __ cmp(current_input_offset(), r3);
+ BranchOrBacktrack(lt, on_no_match);
+ } else {
+ __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match);
+ }
- // Compute pointers to match string and capture string
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r1, Operand(r0));
+ // r0 - offset of start of capture
+ // r1 - length of capture
+ __ add(r0, r0, end_of_input_address());
+ __ add(r2, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r2, r2, r1); // Offset by length when matching backwards.
+ }
+ __ add(r1, r0, r1);
Label loop;
__ bind(&loop);
@@ -366,6 +394,13 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
// Move current character position to position after match.
__ sub(current_input_offset(), r2, end_of_input_address());
+ if (read_backward) {
+ __ ldr(r0, register_location(start_reg)); // Index of start of capture
+ __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r0);
+ __ sub(current_input_offset(), current_input_offset(), r1);
+ }
+
__ bind(&fallthrough);
}
@@ -603,7 +638,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(r0, Operand::Zero());
__ push(r0); // Make room for success counter and initialize it to 0.
- __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -647,7 +682,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
// Store this value in a local variable, for use when clearing
// position registers.
- __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ str(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -751,7 +786,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -892,10 +927,13 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -983,7 +1021,7 @@ void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ str(r0, register_location(reg));
}
@@ -1069,8 +1107,15 @@ MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
- BranchOrBacktrack(ge, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
+ BranchOrBacktrack(ge, on_outside_input);
+ } else {
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ cmp(r0, r1);
+ BranchOrBacktrack(le, on_outside_input);
+ }
}
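
Taken together, the ARM changes let the matcher address positions before the current one: the lookbehind support they serve (experimental in this version) reads input right to left, which is why LoadCurrentCharacter drops its old cp_offset >= -1 floor and CheckPosition grows a negative-offset branch. A sketch of what this enables at the JavaScript level (lookbehind syntax, gated behind a harmony flag at the time):

// '^' inside a lookbehind forces an at-start check several characters
// behind the current position, expressed against "string start - 1".
/(?<=^foo)bar/.exec('foobar');   // ['bar']
/(?<=^foo)bar/.exec('xfoobar');  // null
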
diff --git a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 123a95711ef..233a98f7615 100644
--- a/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/chromium/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -34,9 +34,11 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -119,9 +121,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -214,6 +216,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index d440879e26f..9948597ca0e 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -113,7 +113,8 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -210,23 +211,17 @@ void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
+ __ Add(w10, current_input_offset(), Operand(-char_size()));
+ __ Cmp(w10, string_start_minus_one());
BranchOrBacktrack(eq, on_at_start);
- __ Bind(&not_at_start);
}
-void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
+void RegExpMacroAssemblerARM64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ Add(w10, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ __ Cmp(w10, string_start_minus_one());
BranchOrBacktrack(ne, on_not_at_start);
}
@@ -277,9 +272,9 @@ void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(eq, on_equal);
}
+
void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
Register capture_start_offset = w10;
@@ -297,12 +292,21 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
}
__ Sub(capture_length, w11, capture_start_offset); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ CompareAndBranch(capture_length, Operand(0), eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ Add(w12, string_start_minus_one(), capture_length);
+ __ Cmp(current_input_offset(), w12);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -322,6 +326,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Add(current_position_address,
input_end(),
Operand(current_input_offset(), SXTW));
+ if (read_backward) {
+ // Offset by length when matching backwards.
+ __ Sub(current_position_address, current_position_address,
+ Operand(capture_length, SXTW));
+ }
Label loop;
__ Bind(&loop);
@@ -355,6 +364,10 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Bind(&success);
// Compute new value of character position after the matched part.
__ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (read_backward) {
+ __ Sub(current_input_offset().X(), current_input_offset().X(),
+ Operand(capture_length, SXTW));
+ }
if (masm_->emit_debug_code()) {
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
@@ -383,6 +396,9 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Mov(w2, capture_length);
// Address of current input position.
__ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+ if (read_backward) {
+ __ Sub(x1, x1, Operand(capture_length, SXTW));
+ }
// Isolate.
__ Mov(x3, ExternalReference::isolate_address(isolate()));
@@ -400,16 +416,20 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ PopCPURegList(cached_registers);
BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ Add(current_input_offset(), current_input_offset(), capture_length);
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ Sub(current_input_offset(), current_input_offset(), capture_length);
+ } else {
+ __ Add(current_input_offset(), current_input_offset(), capture_length);
+ }
}
__ Bind(&fallthrough);
}
-void RegExpMacroAssemblerARM64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerARM64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Register capture_start_address = x12;
@@ -426,12 +446,21 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
__ Ldp(w11, w10, capture_location(start_reg, x10));
}
__ Sub(capture_length, w11, w10); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ CompareAndBranch(capture_length, Operand(0), eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ Add(w12, string_start_minus_one(), capture_length);
+ __ Cmp(current_input_offset(), w12);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+ }
// Compute pointers to match string and capture string
__ Add(capture_start_address, input_end(), Operand(w10, SXTW));
@@ -441,6 +470,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
__ Add(current_position_address,
input_end(),
Operand(current_input_offset(), SXTW));
+ if (read_backward) {
+ // Offset by length when matching backwards.
+ __ Sub(current_position_address, current_position_address,
+ Operand(capture_length, SXTW));
+ }
Label loop;
__ Bind(&loop);
@@ -459,6 +493,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
// Move current character position to position after match.
__ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (read_backward) {
+ __ Sub(current_input_offset().X(), current_input_offset().X(),
+ Operand(capture_length, SXTW));
+ }
+
if (masm_->emit_debug_code()) {
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
@@ -758,14 +797,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// The non-position value is used as a clearing value for the
// capture registers; it corresponds to the position of the first character
// minus one.
- __ Sub(non_position_value(), current_input_offset(), char_size());
- __ Sub(non_position_value(), non_position_value(),
+ __ Sub(string_start_minus_one(), current_input_offset(), char_size());
+ __ Sub(string_start_minus_one(), string_start_minus_one(),
Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
// We can store this value twice in an X register for initializing
// on-stack registers later.
- __ Orr(twice_non_position_value(),
- non_position_value().X(),
- Operand(non_position_value().X(), LSL, kWRegSizeInBits));
+ __ Orr(twice_non_position_value(), string_start_minus_one().X(),
+ Operand(string_start_minus_one().X(), LSL, kWRegSizeInBits));
// Initialize code pointer register.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -1081,11 +1119,14 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
int characters) {
// TODO(pielan): Make sure long strings are caught before this, and not
// just asserted in debug mode.
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
// Be sane! (And ensure that an int32_t can be used to index the string)
DCHECK(cp_offset < (1<<30));
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1210,7 +1251,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
// If the first capture register is cached in a hardware register but not
// aligned on a 64-bit one, we need to clear the first one specifically.
if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
- StoreRegister(reg_from, non_position_value());
+ StoreRegister(reg_from, string_start_minus_one());
num_registers--;
reg_from++;
}
@@ -1224,7 +1265,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
}
if ((num_registers % 2) == 1) {
- StoreRegister(reg_from, non_position_value());
+ StoreRegister(reg_from, string_start_minus_one());
num_registers--;
reg_from++;
}
@@ -1301,10 +1342,14 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(
void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
Label* on_outside_input) {
- CompareAndBranchOrBacktrack(current_input_offset(),
- -cp_offset * char_size(),
- ge,
- on_outside_input);
+ if (cp_offset >= 0) {
+ CompareAndBranchOrBacktrack(current_input_offset(),
+ -cp_offset * char_size(), ge, on_outside_input);
+ } else {
+ __ Add(w12, current_input_offset(), Operand(cp_offset * char_size()));
+ __ Cmp(w12, string_start_minus_one());
+ BranchOrBacktrack(le, on_outside_input);
+ }
}
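
The arm64 port gets the same read_backward plumbing. One observable consequence: inside a lookbehind, input is consumed right to left, so a successful back reference subtracts the capture length from the position instead of adding it, and a back reference may usefully appear to the left of the group it refers to. A sketch (lookbehind was behind a harmony flag in this version; the result shown is what the right-to-left semantics imply):

// Matched right to left: (o) captures first, then 'd' is checked, then \1
// reuses the capture — even though \1 is written before the group.
/(?<=\1d(o))r/.exec('hodor');  // ['r', 'o']
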
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index a48291a4211..d71f063d00a 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -39,9 +39,11 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -190,7 +192,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
- Register non_position_value() { return w24; }
+ Register string_start_minus_one() { return w24; }
// The top 32 bit of this register is used to store this value
// twice. This is used for clearing more than one register at a time.
Register twice_non_position_value() { return x24; }
@@ -289,6 +291,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
diff --git a/chromium/v8/src/regexp/bytecodes-irregexp.h b/chromium/v8/src/regexp/bytecodes-irregexp.h
index 27691422f37..2dbfbc0b82c 100644
--- a/chromium/v8/src/regexp/bytecodes-irregexp.h
+++ b/chromium/v8/src/regexp/bytecodes-irregexp.h
@@ -57,15 +57,17 @@ V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
-V(CHECK_REGISTER_LT, 40, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \
-V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */
+V(CHECK_NOT_BACK_REF_BACKWARD, 39, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_NOT_REGS_EQUAL, 41, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
+V(CHECK_REGISTER_LT, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
+V(CHECK_REGISTER_GE, 43, 12) /* bc8 reg_idx24 value32 addr32 */ \
+V(CHECK_REGISTER_EQ_POS, 44, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
+V(CHECK_NOT_AT_START, 46, 8) /* bc8 offset24 addr32 */ \
+V(CHECK_GREEDY, 47, 8) /* bc8 pad24 addr32 */ \
+V(ADVANCE_CP_AND_GOTO, 48, 8) /* bc8 offset24 addr32 */ \
+V(SET_CURRENT_POSITION_FROM_END, 49, 4) /* bc8 idx24 */
#define DECLARE_BYTECODES(name, code, length) \
static const int BC_##name = code;
@@ -76,6 +78,8 @@ BYTECODE_ITERATOR(DECLARE_BYTECODES)
static const int BC_##name##_LENGTH = length;
BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
#undef DECLARE_BYTECODE_LENGTH
-} }
+
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
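
The interpreter needs the same capabilities as the native assemblers: two backward back-reference bytecodes are inserted (renumbering the opcodes after them), and CHECK_NOT_AT_START's payload changes from pad24 to offset24 so at-start checks can run behind the cursor. A word boundary inside a lookbehind is a simple JavaScript-level trigger (hedged sketch; lookbehind was experimental here):

/(?<=\bfoo)bar/.exec('foobar');   // ['bar'], boundary checked before 'foo'
/(?<=\bfoo)bar/.exec('xfoobar');  // null, 'x' and 'f' are both word chars
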
diff --git a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 9e50a105747..6ef0f5fff6d 100644
--- a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -53,7 +53,8 @@ namespace internal {
* - backup of caller ebx
* - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
* - register 0 ebp[-4] (only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -80,7 +81,8 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -156,25 +158,16 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+ __ lea(eax, Operand(edi, -char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+void RegExpMacroAssemblerIA32::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -196,26 +189,28 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
__ sub(ebx, edx); // Length of capture.
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or at a point earlier than
- // the start of the capture.
- BranchOrBacktrack(less, on_no_match);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ add(eax, ebx);
+ __ cmp(edi, eax);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -228,6 +223,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(edx, esi); // Start of capture
__ add(edi, esi); // Start of text to match against capture.
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ add(ebx, edi); // End of text to match against capture.
Label loop;
@@ -278,6 +276,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(esp, Immediate(kPointerSize));
// Compute new value of character position after the matched part.
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
} else {
DCHECK(mode_ == UC16);
// Save registers before calling C function.
@@ -304,6 +307,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Found by adding negative string-end offset of current position (edi)
// to end of string.
__ add(edi, esi);
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
@@ -325,16 +331,20 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ or_(eax, eax);
BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, ebx);
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(edi, ebx);
+ } else {
+ __ add(edi, ebx);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerIA32::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerIA32::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
Label fail;
@@ -343,22 +353,33 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
__ sub(eax, edx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(ebx, Operand(ebp, kStringStartMinusOne));
+ __ add(ebx, eax);
+ __ cmp(edi, ebx);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(ebx, edi);
+ __ add(ebx, eax);
+ BranchOrBacktrack(greater, on_no_match);
+ }
// Save register to make it available below.
__ push(backtrack_stackpointer());
// Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
__ add(edx, esi); // Start of capture.
+ __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
+ if (read_backward) {
+ __ sub(ebx, eax); // Offset by length when matching backwards.
+ }
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
@@ -389,6 +410,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Move current character position to position after match.
__ mov(edi, ecx);
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
@@ -634,7 +660,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -684,7 +710,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
+ __ mov(Operand(ebp, kStringStartMinusOne), eax);
#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
@@ -767,7 +793,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
if (global()) {
- // Restart matching if the regular expression is flagged as global.
+ // Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ inc(Operand(ebp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
@@ -784,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Immediate(num_saved_registers_ * kPointerSize));
// Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -944,10 +970,13 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1031,7 +1060,7 @@ void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ mov(register_location(reg), eax);
}
@@ -1100,8 +1129,14 @@ Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmp(edi, -cp_offset * char_size());
+ BranchOrBacktrack(greater_equal, on_outside_input);
+ } else {
+ __ lea(eax, Operand(edi, cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ BranchOrBacktrack(less_equal, on_outside_input);
+ }
}
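
The reworked CheckPosition is the key enabler for negative cp_offsets: positive offsets are still validated against the string end, while negative ones (lookbehind loads) are now validated against the string start. An index-based sketch of the intent, under the same hypothetical naming as above:

    // Two-sided position check; the string occupies [start, end).
    bool PositionInsideInput(int current, int cp_offset, int start, int end) {
      int pos = current + cp_offset;
      return cp_offset >= 0 ? pos < end     // forward loads check the end
                            : pos >= start; // backward loads check the start
    }
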
diff --git a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 8ec0a9e543d..1ef87eef387 100644
--- a/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/chromium/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -116,9 +118,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -196,6 +198,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
};
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
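
The rename from kInputStartMinusOne to kStringStartMinusOne is more than cosmetic: the slot now serves as the canonical "one before the string" value that both ClearRegisters and the new backward bounds checks read. A sketch of why the same value doubles as the cleared-capture sentinel (assumed semantics, not V8 code):

    // Cleared capture registers are both set to "string start - 1", so their
    // difference is zero and the back-reference checks fall through at once.
    void ClearCaptureSketch(int* capture_start, int* capture_end,
                            int string_start_minus_one) {
      *capture_start = string_start_minus_one;
      *capture_end = string_start_minus_one;
      // (*capture_end - *capture_start) == 0  ->  "empty or cleared"
    }
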
diff --git a/chromium/v8/src/regexp/interpreter-irregexp.cc b/chromium/v8/src/regexp/interpreter-irregexp.cc
index afc31a3d57b..ea748e4e55d 100644
--- a/chromium/v8/src/regexp/interpreter-irregexp.cc
+++ b/chromium/v8/src/regexp/interpreter-irregexp.cc
@@ -6,7 +6,7 @@
#include "src/regexp/interpreter-irregexp.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -270,7 +270,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
BYTECODE(LOAD_CURRENT_CHAR) {
int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos >= subject.length()) {
+ if (pos >= subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
current_char = subject[pos];
@@ -286,7 +286,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
}
BYTECODE(LOAD_2_CURRENT_CHARS) {
int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 2 > subject.length()) {
+ if (pos + 2 > subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
Char next = subject[pos + 1];
@@ -306,7 +306,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
BYTECODE(LOAD_4_CURRENT_CHARS) {
DCHECK(sizeof(Char) == 1);
int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 4 > subject.length()) {
+ if (pos + 4 > subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
Char next1 = subject[pos + 1];
@@ -497,46 +497,59 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
BYTECODE(CHECK_NOT_BACK_REF) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_LENGTH;
- break;
- }
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- int i;
- for (i = 0; i < len; i++) {
- if (subject[from + i] != subject[current + i]) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- }
+ if (from >= 0 && len > 0) {
+ if (current + len > subject.length() ||
+ CompareChars(&subject[from], &subject[current], len) != 0) {
+ pc = code_base + Load32Aligned(pc + 4);
+ break;
}
- if (i < len) break;
current += len;
}
pc += BC_CHECK_NOT_BACK_REF_LENGTH;
break;
}
+ BYTECODE(CHECK_NOT_BACK_REF_BACKWARD) {
+ int from = registers[insn >> BYTECODE_SHIFT];
+ int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ if (from >= 0 && len > 0) {
+ if (current - len < 0 ||
+ CompareChars(&subject[from], &subject[current - len], len) != 0) {
+ pc = code_base + Load32Aligned(pc + 4);
+ break;
+ }
+ current -= len;
+ }
+ pc += BC_CHECK_NOT_BACK_REF_BACKWARD_LENGTH;
+ break;
+ }
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- break;
+ if (from >= 0 && len > 0) {
+ if (current + len > subject.length() ||
+ !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
+ from, current, len, subject)) {
+ pc = code_base + Load32Aligned(pc + 4);
+ break;
+ }
+ current += len;
}
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- if (BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
- from, current, len, subject)) {
- current += len;
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- } else {
+ pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
+ break;
+ }
+ BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
+ int from = registers[insn >> BYTECODE_SHIFT];
+ int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ if (from >= 0 && len > 0) {
+ if (current - len < 0 ||
+ !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
+ from, current - len, len, subject)) {
pc = code_base + Load32Aligned(pc + 4);
+ break;
}
+ current -= len;
}
+ pc += BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD_LENGTH;
break;
}
BYTECODE(CHECK_AT_START)
@@ -547,7 +560,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
}
break;
BYTECODE(CHECK_NOT_AT_START)
- if (current == 0) {
+ if (current + (insn >> BYTECODE_SHIFT) == 0) {
pc += BC_CHECK_NOT_AT_START_LENGTH;
} else {
pc = code_base + Load32Aligned(pc + 4);
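
A self-contained model of what the new CHECK_NOT_BACK_REF_BACKWARD bytecode does may help; here `subject` stands for the flattened input and `current` for the interpreter's cursor (hypothetical helper, simplified from the loop above):

    #include <cstring>
    #include <string>

    // Returns false when the bytecode should branch to its failure target;
    // on success the cursor is moved backwards over the matched capture.
    bool CheckNotBackRefBackward(const std::string& subject, int from, int len,
                                 int* current) {
      if (from < 0 || len <= 0) return true;  // unset capture: trivially fine
      if (*current - len < 0) return false;   // would run past the string start
      if (std::memcmp(subject.data() + from, subject.data() + *current - len,
                      len) != 0) {
        return false;                          // capture text does not match
      }
      *current -= len;                         // consume the match backwards
      return true;
    }
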
diff --git a/chromium/v8/src/regexp/interpreter-irregexp.h b/chromium/v8/src/regexp/interpreter-irregexp.h
index d97d3b0f17f..244af990914 100644
--- a/chromium/v8/src/regexp/interpreter-irregexp.h
+++ b/chromium/v8/src/regexp/interpreter-irregexp.h
@@ -23,6 +23,7 @@ class IrregexpInterpreter {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
diff --git a/chromium/v8/src/regexp/jsregexp-inl.h b/chromium/v8/src/regexp/jsregexp-inl.h
index 118f3dba9ca..3eb7c3c1704 100644
--- a/chromium/v8/src/regexp/jsregexp-inl.h
+++ b/chromium/v8/src/regexp/jsregexp-inl.h
@@ -78,6 +78,7 @@ int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_JSREGEXP_INL_H_
diff --git a/chromium/v8/src/regexp/jsregexp.cc b/chromium/v8/src/regexp/jsregexp.cc
index 438d1b1368c..34d20fe781d 100644
--- a/chromium/v8/src/regexp/jsregexp.cc
+++ b/chromium/v8/src/regexp/jsregexp.cc
@@ -4,7 +4,7 @@
#include "src/regexp/jsregexp.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
@@ -13,12 +13,12 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/ostreams.h"
-#include "src/parser.h"
#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/regexp/regexp-macro-assembler-tracer.h"
+#include "src/regexp/regexp-parser.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/splay-tree-inl.h"
@@ -51,16 +51,6 @@
namespace v8 {
namespace internal {
-MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral(
- Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags) {
- // Call the construct code with 2 arguments.
- Handle<Object> argv[] = { pattern, flags };
- return Execution::New(constructor, arraysize(argv), argv);
-}
-
-
MUST_USE_RESULT
static inline MaybeHandle<Object> ThrowRegExpException(
Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text) {
@@ -156,25 +146,21 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader,
- flags.is_multiline(), flags.is_unicode(),
- &parse_result)) {
+ flags & JSRegExp::kMultiline,
+ flags & JSRegExp::kUnicode, &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return ThrowRegExpException(re, pattern, parse_result.error);
}
bool has_been_compiled = false;
- if (parse_result.simple &&
- !flags.is_ignore_case() &&
- !flags.is_sticky() &&
- !HasFewDifferentCharacters(pattern)) {
+ if (parse_result.simple && !(flags & JSRegExp::kIgnoreCase) &&
+ !(flags & JSRegExp::kSticky) && !HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
has_been_compiled = true;
- } else if (parse_result.tree->IsAtom() &&
- !flags.is_ignore_case() &&
- !flags.is_sticky() &&
- parse_result.capture_count == 0) {
+ } else if (parse_result.tree->IsAtom() && !(flags & JSRegExp::kIgnoreCase) &&
+ !(flags & JSRegExp::kSticky) && parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
Handle<String> atom_string;
@@ -385,17 +371,18 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
pattern = String::Flatten(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags.is_multiline(),
- flags.is_unicode(), &compile_data)) {
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader,
+ flags & JSRegExp::kMultiline,
+ flags & JSRegExp::kUnicode, &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(ThrowRegExpException(re, pattern, compile_data.error));
return false;
}
RegExpEngine::CompilationResult result = RegExpEngine::Compile(
- isolate, &zone, &compile_data, flags.is_ignore_case(), flags.is_global(),
- flags.is_multiline(), flags.is_sticky(), pattern, sample_subject,
- is_one_byte);
+ isolate, &zone, &compile_data, flags & JSRegExp::kIgnoreCase,
+ flags & JSRegExp::kGlobal, flags & JSRegExp::kMultiline,
+ flags & JSRegExp::kSticky, pattern, sample_subject, is_one_byte);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
@@ -1002,6 +989,8 @@ class RegExpCompiler {
inline void set_limiting_recursion(bool value) {
limiting_recursion_ = value;
}
+ bool read_backward() { return read_backward_; }
+ void set_read_backward(bool value) { read_backward_ = value; }
FrequencyCollator* frequency_collator() { return &frequency_collator_; }
int current_expansion_factor() { return current_expansion_factor_; }
@@ -1025,6 +1014,7 @@ class RegExpCompiler {
bool reg_exp_too_big_;
bool limiting_recursion_;
bool optimize_;
+ bool read_backward_;
int current_expansion_factor_;
FrequencyCollator frequency_collator_;
Isolate* isolate_;
@@ -1060,6 +1050,7 @@ RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
reg_exp_too_big_(false),
limiting_recursion_(false),
optimize_(FLAG_regexp_optimization),
+ read_backward_(false),
current_expansion_factor_(1),
frequency_collator_(),
isolate_(isolate),
@@ -1224,7 +1215,8 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
int value = 0;
bool absolute = false;
bool clear = false;
- int store_position = -1;
+ static const int kNoStore = kMinInt;
+ int store_position = kNoStore;
// This is a little tricky because we are scanning the actions in reverse
// historical order (newest first).
for (DeferredAction* action = actions_;
@@ -1245,7 +1237,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// we can set undo_action to IGNORE if we know there is no value to
// restore.
undo_action = RESTORE;
- DCHECK_EQ(store_position, -1);
+ DCHECK_EQ(store_position, kNoStore);
DCHECK(!clear);
break;
}
@@ -1253,14 +1245,14 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
if (!absolute) {
value++;
}
- DCHECK_EQ(store_position, -1);
+ DCHECK_EQ(store_position, kNoStore);
DCHECK(!clear);
undo_action = RESTORE;
break;
case ActionNode::STORE_POSITION: {
Trace::DeferredCapture* pc =
static_cast<Trace::DeferredCapture*>(action);
- if (!clear && store_position == -1) {
+ if (!clear && store_position == kNoStore) {
store_position = pc->cp_offset();
}
@@ -1284,7 +1276,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// Since we're scanning in reverse order, if we've already
// set the position we have to ignore historically earlier
// clearing operations.
- if (store_position == -1) {
+ if (store_position == kNoStore) {
clear = true;
}
undo_action = RESTORE;
@@ -1315,7 +1307,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
}
// Perform the chronologically last action (or accumulated increment)
// for the register.
- if (store_position != -1) {
+ if (store_position != kNoStore) {
assembler->WriteCurrentPositionToRegister(reg, store_position);
} else if (clear) {
assembler->ClearRegisters(reg, reg);
@@ -2313,6 +2305,7 @@ void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
int BackReferenceNode::EatsAtLeast(int still_to_find,
int budget,
bool not_at_start) {
+ if (read_backward()) return 0;
if (budget <= 0) return 0;
return on_success()->EatsAtLeast(still_to_find,
budget - 1,
@@ -2323,6 +2316,7 @@ int BackReferenceNode::EatsAtLeast(int still_to_find,
int TextNode::EatsAtLeast(int still_to_find,
int budget,
bool not_at_start) {
+ if (read_backward()) return 0;
int answer = Length();
if (answer >= still_to_find) return answer;
if (budget <= 0) return answer;
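
The early returns added above keep EatsAtLeast conservative: a node that reads backwards consumes characters to the left of the cursor, so it cannot promise any forward progress to the analysis. Schematically (illustrative only, not the actual V8 signature):

    // Lower bound on characters a node consumes *forwards*.
    int EatsAtLeastSketch(bool read_backward, int forward_lower_bound) {
      return read_backward ? 0 : forward_lower_bound;
    }
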
@@ -2333,9 +2327,8 @@ int TextNode::EatsAtLeast(int still_to_find,
}
-int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
+int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
if (budget <= 0) return 0;
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
@@ -2344,10 +2337,8 @@ int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
}
-void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
- QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
+void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
+ QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
bool not_at_start) {
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
@@ -2517,6 +2508,9 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
+ // Do not collect any quick check details if the text node reads backward,
+  // since it reads in the opposite direction from the one quick checks use.
+ if (read_backward()) return;
Isolate* isolate = compiler->macro_assembler()->isolate();
DCHECK(characters_filled_in < details->characters());
int characters = details->characters();
@@ -2526,8 +2520,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
- for (int k = 0; k < elms_->length(); k++) {
- TextElement elm = elms_->at(k);
+ for (int k = 0; k < elements()->length(); k++) {
+ TextElement elm = elements()->at(k);
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int i = 0; i < characters && i < quarks.length(); i++) {
@@ -2678,11 +2672,13 @@ void QuickCheckDetails::Clear() {
void QuickCheckDetails::Advance(int by, bool one_byte) {
- DCHECK(by >= 0);
- if (by >= characters_) {
+ if (by >= characters_ || by < 0) {
+ DCHECK_IMPLIES(by < 0, characters_ == 0);
Clear();
return;
}
+ DCHECK_LE(characters_ - by, 4);
+ DCHECK_LE(characters_, 4);
for (int i = 0; i < characters_ - by; i++) {
positions_[i] = positions_[by + i];
}
@@ -2780,9 +2776,9 @@ RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
if (depth < 0) return this;
DCHECK(!info()->visited);
VisitMarker marker(info());
- int element_count = elms_->length();
+ int element_count = elements()->length();
for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
+ TextElement elm = elements()->at(i);
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
@@ -2898,8 +2894,8 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
}
-RegExpNode* NegativeLookaheadChoiceNode::FilterOneByte(int depth,
- bool ignore_case) {
+RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
+ bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -3146,9 +3142,9 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
return;
}
if (trace->at_start() == Trace::UNKNOWN) {
- assembler->CheckNotAtStart(trace->backtrack());
+ assembler->CheckNotAtStart(trace->cp_offset(), trace->backtrack());
Trace at_start_trace = *trace;
- at_start_trace.set_at_start(true);
+ at_start_trace.set_at_start(Trace::TRUE_VALUE);
on_success()->Emit(compiler, &at_start_trace);
return;
}
@@ -3221,10 +3217,11 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
bool one_byte = compiler->one_byte();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
- int element_count = elms_->length();
+ int element_count = elements()->length();
+ int backward_offset = read_backward() ? -Length() : 0;
for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
- TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset();
+ TextElement elm = elements()->at(i);
+ int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
@@ -3252,13 +3249,10 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
break;
}
if (emit_function != NULL) {
- bool bound_checked = emit_function(isolate,
- compiler,
- quarks[j],
- backtrack,
- cp_offset + j,
- *checked_up_to < cp_offset + j,
- preloaded);
+ bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
+ bool bound_checked =
+ emit_function(isolate, compiler, quarks[j], backtrack,
+ cp_offset + j, bounds_check, preloaded);
if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
}
}
@@ -3268,8 +3262,9 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (first_element_checked && i == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
RegExpCharacterClass* cc = elm.char_class();
+ bool bounds_check = *checked_up_to < cp_offset || read_backward();
EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
- *checked_up_to < cp_offset, preloaded, zone());
+ bounds_check, preloaded, zone());
UpdateBoundsCheck(cp_offset, checked_up_to);
}
}
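
The backward_offset adjustment above is the heart of backward text emission: element offsets inside a node are laid out 0..Length()-1, so a node read right-to-left shifts every load by its own length. As a one-liner (illustrative):

    // Effective load offset for one element of a text node.
    int EffectiveCpOffset(int trace_cp_offset, int elm_cp_offset,
                          int node_length, bool read_backward) {
      return trace_cp_offset + elm_cp_offset +
             (read_backward ? -node_length : 0);
    }
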
@@ -3278,7 +3273,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
- TextElement elm = elms_->last();
+ TextElement elm = elements()->last();
DCHECK(elm.cp_offset() >= 0);
return elm.cp_offset() + elm.length();
}
@@ -3347,8 +3342,11 @@ void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
}
Trace successor_trace(*trace);
- successor_trace.set_at_start(false);
- successor_trace.AdvanceCurrentPositionInTrace(Length(), compiler);
+ // If we advance backward, we may end up at the start.
+ successor_trace.AdvanceCurrentPositionInTrace(
+ read_backward() ? -Length() : Length(), compiler);
+ successor_trace.set_at_start(read_backward() ? Trace::UNKNOWN
+ : Trace::FALSE_VALUE);
RecursionCheck rc(compiler);
on_success()->Emit(compiler, &successor_trace);
}
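
The successor-trace update deserves a note: advancing by -Length() can land the cursor back at the string start, which is why at_start becomes UNKNOWN rather than FALSE_VALUE. A sketch of the bookkeeping (assumed simplification, not the Trace class itself):

    enum TriBool { TRUE_VALUE, FALSE_VALUE, UNKNOWN };
    struct TraceSketch { int cp_offset; TriBool at_start; };

    TraceSketch AfterTextNode(TraceSketch t, int length, bool read_backward) {
      t.cp_offset += read_backward ? -length : length;
      t.at_start = read_backward ? UNKNOWN : FALSE_VALUE;
      return t;
    }
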
@@ -3360,7 +3358,6 @@ void Trace::InvalidateCurrentCharacter() {
void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
- DCHECK(by > 0);
// We don't have an instruction for shifting the current character register
// down or for using a shifted value for anything so lets just forget that
// we preloaded any characters into it.
@@ -3379,9 +3376,9 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
- int element_count = elms_->length();
+ int element_count = elements()->length();
for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
+ TextElement elm = elements()->at(i);
if (elm.text_type() == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.char_class();
// None of the standard character classes is different in the case
@@ -3397,16 +3394,14 @@ void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
}
-int TextNode::GreedyLoopTextLength() {
- TextElement elm = elms_->at(elms_->length() - 1);
- return elm.cp_offset() + elm.length();
-}
+int TextNode::GreedyLoopTextLength() { return Length(); }
RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
- if (elms_->length() != 1) return NULL;
- TextElement elm = elms_->at(0);
+ if (read_backward()) return NULL;
+ if (elements()->length() != 1) return NULL;
+ TextElement elm = elements()->at(0);
if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
@@ -3450,7 +3445,7 @@ int ChoiceNode::GreedyLoopTextLengthForAlternative(
SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
node = seq_node->on_success();
}
- return length;
+ return read_backward() ? -length : length;
}
@@ -3881,7 +3876,7 @@ void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
GreedyLoopState::GreedyLoopState(bool not_at_start) {
counter_backtrack_trace_.set_backtrack(&label_);
- if (not_at_start) counter_backtrack_trace_.set_at_start(false);
+ if (not_at_start) counter_backtrack_trace_.set_at_start(Trace::FALSE_VALUE);
}
@@ -4008,7 +4003,7 @@ Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
macro_assembler->PushCurrentPosition();
Label greedy_match_failed;
Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(false);
+ if (not_at_start()) greedy_match_trace.set_at_start(Trace::FALSE_VALUE);
greedy_match_trace.set_backtrack(&greedy_match_failed);
Label loop_label;
macro_assembler->Bind(&loop_label);
@@ -4354,11 +4349,14 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
DCHECK_EQ(start_reg_ + 1, end_reg_);
if (compiler->ignore_case()) {
- assembler->CheckNotBackReferenceIgnoreCase(start_reg_,
+ assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
trace->backtrack());
} else {
- assembler->CheckNotBackReference(start_reg_, trace->backtrack());
+ assembler->CheckNotBackReference(start_reg_, read_backward(),
+ trace->backtrack());
}
+ // We are going to advance backward, so we may end up at the start.
+ if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
on_success()->Emit(compiler, trace);
}
@@ -4719,13 +4717,15 @@ RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
ZoneList<TextElement>* elms =
new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
elms->Add(TextElement::Atom(this), compiler->zone());
- return new(compiler->zone()) TextNode(elms, on_success);
+ return new (compiler->zone())
+ TextNode(elms, compiler->read_backward(), on_success);
}
RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new(compiler->zone()) TextNode(elements(), on_success);
+ return new (compiler->zone())
+ TextNode(elements(), compiler->read_backward(), on_success);
}
@@ -4822,7 +4822,8 @@ bool RegExpCharacterClass::is_standard(Zone* zone) {
RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new(compiler->zone()) TextNode(this, on_success);
+ return new (compiler->zone())
+ TextNode(this, compiler->read_backward(), on_success);
}
@@ -5204,7 +5205,9 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
GuardedAlternative(body->ToNode(compiler, answer)));
}
answer = alternation;
- if (not_at_start) alternation->set_not_at_start();
+ if (not_at_start && !compiler->read_backward()) {
+ alternation->set_not_at_start();
+ }
}
return answer;
}
@@ -5216,9 +5219,9 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
int reg_ctr = needs_counter
? compiler->AllocateRegister()
: RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new(zone) LoopChoiceNode(body->min_match() == 0,
- zone);
- if (not_at_start) center->set_not_at_start();
+ LoopChoiceNode* center = new (zone)
+ LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
+ if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
RegExpNode* loop_return = needs_counter
? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
: static_cast<RegExpNode*>(center);
@@ -5294,14 +5297,13 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
ZoneList<CharacterRange>* newline_ranges =
new(zone) ZoneList<CharacterRange>(3, zone);
CharacterRange::AddClassEscape('n', newline_ranges, zone);
- RegExpCharacterClass* newline_atom = new(zone) RegExpCharacterClass('n');
- TextNode* newline_matcher = new(zone) TextNode(
- newline_atom,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- 0, // No captures inside.
- -1, // Ignored if no captures.
- on_success));
+ RegExpCharacterClass* newline_atom = new (zone) RegExpCharacterClass('n');
+ TextNode* newline_matcher = new (zone) TextNode(
+ newline_atom, false, ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register,
+ 0, // No captures inside.
+ -1, // Ignored if no captures.
+ on_success));
// Create an end-of-input matcher.
RegExpNode* end_of_line = ActionNode::BeginSubmatch(
stack_pointer_register,
@@ -5323,10 +5325,10 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new(compiler->zone())
+ return new (compiler->zone())
BackReferenceNode(RegExpCapture::StartRegister(index()),
RegExpCapture::EndRegister(index()),
- on_success);
+ compiler->read_backward(), on_success);
}
@@ -5336,8 +5338,8 @@ RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
}
-RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
+RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
int stack_pointer_register = compiler->AllocateRegister();
int position_register = compiler->AllocateRegister();
@@ -5347,19 +5349,16 @@ RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
int register_start =
register_of_first_capture + capture_from_ * registers_per_capture;
- RegExpNode* success;
+ RegExpNode* result;
+ bool was_reading_backward = compiler->read_backward();
+ compiler->set_read_backward(type() == LOOKBEHIND);
if (is_positive()) {
- RegExpNode* node = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- body()->ToNode(
- compiler,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- on_success)));
- return node;
+ result = ActionNode::BeginSubmatch(
+ stack_pointer_register, position_register,
+ body()->ToNode(compiler,
+ ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register,
+ register_count, register_start, on_success)));
} else {
// We use a ChoiceNode for a negative lookahead because it has most of
// the characteristics we need. It has the body of the lookahead as its
@@ -5374,21 +5373,16 @@ RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
Zone* zone = compiler->zone();
GuardedAlternative body_alt(
- body()->ToNode(
- compiler,
- success = new(zone) NegativeSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- zone)));
- ChoiceNode* choice_node =
- new(zone) NegativeLookaheadChoiceNode(body_alt,
- GuardedAlternative(on_success),
- zone);
- return ActionNode::BeginSubmatch(stack_pointer_register,
- position_register,
- choice_node);
- }
+ body()->ToNode(compiler, new (zone) NegativeSubmatchSuccess(
+ stack_pointer_register, position_register,
+ register_count, register_start, zone)));
+ ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
+ body_alt, GuardedAlternative(on_success), zone);
+ result = ActionNode::BeginSubmatch(stack_pointer_register,
+ position_register, choice_node);
+ }
+ compiler->set_read_backward(was_reading_backward);
+ return result;
}
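
Note how the body of a lookaround is compiled with the direction flag flipped and then restored, so nested lookarounds each get the direction their own type dictates. The same save/restore pattern could be written as a small RAII guard (hypothetical helper, not part of the patch; it relies only on the read_backward accessors added to RegExpCompiler above):

    // Saves the compiler's read_backward flag and restores it on scope exit.
    class ReadBackwardScope {
     public:
      ReadBackwardScope(RegExpCompiler* compiler, bool backward)
          : compiler_(compiler), saved_(compiler->read_backward()) {
        compiler_->set_read_backward(backward);
      }
      ~ReadBackwardScope() { compiler_->set_read_backward(saved_); }

     private:
      RegExpCompiler* compiler_;
      bool saved_;
    };
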
@@ -5402,8 +5396,10 @@ RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
RegExpNode* on_success) {
+ DCHECK_NOT_NULL(body);
int start_reg = RegExpCapture::StartRegister(index);
int end_reg = RegExpCapture::EndRegister(index);
+ if (compiler->read_backward()) std::swap(start_reg, end_reg);
RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
RegExpNode* body_node = body->ToNode(compiler, store_end);
return ActionNode::StorePosition(start_reg, true, body_node);
@@ -5414,8 +5410,14 @@ RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
ZoneList<RegExpTree*>* children = nodes();
RegExpNode* current = on_success;
- for (int i = children->length() - 1; i >= 0; i--) {
- current = children->at(i)->ToNode(compiler, current);
+ if (compiler->read_backward()) {
+ for (int i = 0; i < children->length(); i++) {
+ current = children->at(i)->ToNode(compiler, current);
+ }
+ } else {
+ for (int i = children->length() - 1; i >= 0; i--) {
+ current = children->at(i)->ToNode(compiler, current);
+ }
}
return current;
}
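
The direction-dependent iteration order above falls out of continuation-passing: each child wraps the current success continuation, so the child processed last executes first. A standalone illustration of that inversion (plain C++, nothing V8-specific):

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> children = {"a", "b", "c"};
      std::function<void()> current = [] { std::cout << "success\n"; };
      // Forward iteration wraps "c" last, so "c" runs first: exactly the
      // right-to-left execution order a lookbehind needs.
      for (const auto& child : children) {
        auto next = current;
        current = [child, next] { std::cout << child << ' '; next(); };
      }
      current();  // prints: c b a success
      return 0;
    }
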
@@ -6291,22 +6293,17 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (!is_start_anchored && !is_sticky) {
// Add a .*? at the beginning, outside the body capture, unless
// this expression is anchored at the beginning or sticky.
- RegExpNode* loop_node =
- RegExpQuantifier::ToNode(0,
- RegExpTree::kInfinity,
- false,
- new(zone) RegExpCharacterClass('*'),
- &compiler,
- captured_body,
- data->contains_anchor);
+ RegExpNode* loop_node = RegExpQuantifier::ToNode(
+ 0, RegExpTree::kInfinity, false, new (zone) RegExpCharacterClass('*'),
+ &compiler, captured_body, data->contains_anchor);
if (data->contains_anchor) {
// Unroll loop once, to take care of the case that might start
// at the start of input.
ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(
- new(zone) TextNode(new(zone) RegExpCharacterClass('*'), loop_node)));
+ first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
+ new (zone) RegExpCharacterClass('*'), false, loop_node)));
node = first_step_node;
} else {
node = loop_node;
@@ -6410,7 +6407,9 @@ bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
- Object* key_pattern, ResultsCacheType type) {
+ Object* key_pattern,
+ FixedArray** last_match_cache,
+ ResultsCacheType type) {
FixedArray* cache;
if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
if (type == STRING_SPLIT_SUBSTRINGS) {
@@ -6426,23 +6425,25 @@ Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
uint32_t hash = key_string->Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
- }
- index =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
+ if (cache->get(index + kStringOffset) != key_string ||
+ cache->get(index + kPatternOffset) != key_pattern) {
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index + kStringOffset) != key_string ||
+ cache->get(index + kPatternOffset) != key_pattern) {
+ return Smi::FromInt(0);
+ }
}
- return Smi::FromInt(0);
+
+ *last_match_cache = FixedArray::cast(cache->get(index + kLastMatchOffset));
+ return cache->get(index + kArrayOffset);
}
void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern,
Handle<FixedArray> value_array,
+ Handle<FixedArray> last_match_cache,
ResultsCacheType type) {
Factory* factory = isolate->factory();
Handle<FixedArray> cache;
@@ -6464,6 +6465,7 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
} else {
uint32_t index2 =
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
@@ -6471,13 +6473,16 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
cache->set(index2 + kStringOffset, *key_string);
cache->set(index2 + kPatternOffset, *key_pattern);
cache->set(index2 + kArrayOffset, *value_array);
+ cache->set(index2 + kLastMatchOffset, *last_match_cache);
} else {
cache->set(index2 + kStringOffset, Smi::FromInt(0));
cache->set(index2 + kPatternOffset, Smi::FromInt(0));
cache->set(index2 + kArrayOffset, Smi::FromInt(0));
+ cache->set(index2 + kLastMatchOffset, Smi::FromInt(0));
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
}
}
// If the array is a reasonably short list of substrings, convert it into a
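
The Lookup rewrite inverts the two comparisons so that both cache ways share a single success path, which is what lets the new last-match array be read alongside the result. A simplified, self-contained model of the two-way probe (hypothetical flat layout mirroring the kStringOffset/kPatternOffset/kArrayOffset/kLastMatchOffset slots):

    #include <cstdint>

    struct Entry {
      const void* string;
      const void* pattern;
      const void* array;
      const void* last_match;
    };

    // Probe the primary way, then the secondary; only a hit reaches the
    // shared tail that hands back both cached arrays.
    const void* Lookup(const Entry* cache, uint32_t ways_mask, uint32_t hash,
                       const void* key_string, const void* key_pattern,
                       const void** last_match_out) {
      uint32_t index = hash & ways_mask;
      if (cache[index].string != key_string ||
          cache[index].pattern != key_pattern) {
        index = (index + 1) & ways_mask;
        if (cache[index].string != key_string ||
            cache[index].pattern != key_pattern) {
          return nullptr;  // miss
        }
      }
      *last_match_out = cache[index].last_match;
      return cache[index].array;
    }
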
diff --git a/chromium/v8/src/regexp/jsregexp.h b/chromium/v8/src/regexp/jsregexp.h
index 760d37862b3..0ad4b79c873 100644
--- a/chromium/v8/src/regexp/jsregexp.h
+++ b/chromium/v8/src/regexp/jsregexp.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
+#include "src/regexp/regexp-ast.h"
namespace v8 {
namespace internal {
@@ -29,13 +30,6 @@ class RegExpImpl {
#endif
}
- // Creates a regular expression literal in the old space.
- // This function calls the garbage collector if necessary.
- MUST_USE_RESULT static MaybeHandle<Object> CreateRegExpLiteral(
- Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags);
-
// Returns a string representation of a regular expression.
// Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
// This function calls the garbage collector if necessary.
@@ -233,63 +227,6 @@ enum ElementInSetsRelation {
};
-// Represents code units in the range from from_ to to_, both ends are
-// inclusive.
-class CharacterRange {
- public:
- CharacterRange() : from_(0), to_(0) { }
- // For compatibility with the CHECK_OK macro
- CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT
- CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
- static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
- Zone* zone);
- static Vector<const int> GetWordBounds();
- static inline CharacterRange Singleton(uc16 value) {
- return CharacterRange(value, value);
- }
- static inline CharacterRange Range(uc16 from, uc16 to) {
- DCHECK(from <= to);
- return CharacterRange(from, to);
- }
- static inline CharacterRange Everything() {
- return CharacterRange(0, 0xFFFF);
- }
- bool Contains(uc16 i) { return from_ <= i && i <= to_; }
- uc16 from() const { return from_; }
- void set_from(uc16 value) { from_ = value; }
- uc16 to() const { return to_; }
- void set_to(uc16 value) { to_ = value; }
- bool is_valid() { return from_ <= to_; }
- bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
- bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(Isolate* isolate, Zone* zone,
- ZoneList<CharacterRange>* ranges, bool is_one_byte);
- static void Split(ZoneList<CharacterRange>* base,
- Vector<const int> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone);
- // Whether a range list is in canonical form: Ranges ordered by from value,
- // and ranges non-overlapping and non-adjacent.
- static bool IsCanonical(ZoneList<CharacterRange>* ranges);
- // Convert range list to canonical form. The characters covered by the ranges
- // will still be the same, but no character is in more than one range, and
- // adjacent ranges are merged. The resulting list may be shorter than the
- // original, but cannot be longer.
- static void Canonicalize(ZoneList<CharacterRange>* ranges);
- // Negate the contents of a character range in canonical form.
- static void Negate(ZoneList<CharacterRange>* src,
- ZoneList<CharacterRange>* dst,
- Zone* zone);
- static const int kStartMarker = (1 << 24);
- static const int kPayloadMask = (1 << 24) - 1;
-
- private:
- uc16 from_;
- uc16 to_;
-};
-
-
// A set of unsigned integers that behaves especially well on small
// integers (< 32). May do zone-allocation.
class OutSet: public ZoneObject {
@@ -387,63 +324,6 @@ class DispatchTable : public ZoneObject {
VISIT(Text)
-#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT) \
- VISIT(Disjunction) \
- VISIT(Alternative) \
- VISIT(Assertion) \
- VISIT(CharacterClass) \
- VISIT(Atom) \
- VISIT(Quantifier) \
- VISIT(Capture) \
- VISIT(Lookahead) \
- VISIT(BackReference) \
- VISIT(Empty) \
- VISIT(Text)
-
-
-#define FORWARD_DECLARE(Name) class RegExp##Name;
-FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
-#undef FORWARD_DECLARE
-
-
-class TextElement final BASE_EMBEDDED {
- public:
- enum TextType {
- ATOM,
- CHAR_CLASS
- };
-
- static TextElement Atom(RegExpAtom* atom);
- static TextElement CharClass(RegExpCharacterClass* char_class);
-
- int cp_offset() const { return cp_offset_; }
- void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
- int length() const;
-
- TextType text_type() const { return text_type_; }
-
- RegExpTree* tree() const { return tree_; }
-
- RegExpAtom* atom() const {
- DCHECK(text_type() == ATOM);
- return reinterpret_cast<RegExpAtom*>(tree());
- }
-
- RegExpCharacterClass* char_class() const {
- DCHECK(text_type() == CHAR_CLASS);
- return reinterpret_cast<RegExpCharacterClass*>(tree());
- }
-
- private:
- TextElement(TextType text_type, RegExpTree* tree)
- : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
-
- int cp_offset_;
- TextType text_type_;
- RegExpTree* tree_;
-};
-
-
class Trace;
struct PreloadState;
class GreedyLoopState;
@@ -603,7 +483,7 @@ class RegExpNode: public ZoneObject {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) = 0;
- static const int kNodeIsTooComplexForGreedyLoops = -1;
+ static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
// Only returns the successor for a text node of length 1 that matches any
// character and that has no guards on it.
@@ -695,33 +575,6 @@ class RegExpNode: public ZoneObject {
};
-// A simple closed interval.
-class Interval {
- public:
- Interval() : from_(kNone), to_(kNone) { }
- Interval(int from, int to) : from_(from), to_(to) { }
- Interval Union(Interval that) {
- if (that.from_ == kNone)
- return *this;
- else if (from_ == kNone)
- return that;
- else
- return Interval(Min(from_, that.from_), Max(to_, that.to_));
- }
- bool Contains(int value) {
- return (from_ <= value) && (value <= to_);
- }
- bool is_empty() { return from_ == kNone; }
- int from() const { return from_; }
- int to() const { return to_; }
- static Interval Empty() { return Interval(); }
- static const int kNone = -1;
- private:
- int from_;
- int to_;
-};
-
-
class SeqRegExpNode: public RegExpNode {
public:
explicit SeqRegExpNode(RegExpNode* on_success)
@@ -827,14 +680,14 @@ class ActionNode: public SeqRegExpNode {
class TextNode: public SeqRegExpNode {
public:
- TextNode(ZoneList<TextElement>* elms,
+ TextNode(ZoneList<TextElement>* elms, bool read_backward,
RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(elms) { }
- TextNode(RegExpCharacterClass* that,
+ : SeqRegExpNode(on_success), elms_(elms), read_backward_(read_backward) {}
+ TextNode(RegExpCharacterClass* that, bool read_backward,
RegExpNode* on_success)
: SeqRegExpNode(on_success),
- elms_(new(zone()) ZoneList<TextElement>(1, zone())) {
+ elms_(new (zone()) ZoneList<TextElement>(1, zone())),
+ read_backward_(read_backward) {
elms_->Add(TextElement::CharClass(that), zone());
}
virtual void Accept(NodeVisitor* visitor);
@@ -845,6 +698,7 @@ class TextNode: public SeqRegExpNode {
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
+ bool read_backward() { return read_backward_; }
void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
@@ -873,6 +727,7 @@ class TextNode: public SeqRegExpNode {
int* checked_up_to);
int Length();
ZoneList<TextElement>* elms_;
+ bool read_backward_;
};
@@ -925,15 +780,16 @@ class AssertionNode: public SeqRegExpNode {
class BackReferenceNode: public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg,
- int end_reg,
+ BackReferenceNode(int start_reg, int end_reg, bool read_backward,
RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
- end_reg_(end_reg) { }
+ end_reg_(end_reg),
+ read_backward_(read_backward) {}
virtual void Accept(NodeVisitor* visitor);
int start_register() { return start_reg_; }
int end_register() { return end_reg_; }
+ bool read_backward() { return read_backward_; }
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
virtual int EatsAtLeast(int still_to_find,
int recursion_depth,
@@ -950,6 +806,7 @@ class BackReferenceNode: public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
+ bool read_backward_;
};
@@ -1074,6 +931,7 @@ class ChoiceNode: public RegExpNode {
return true;
}
virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual bool read_backward() { return false; }
protected:
int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1116,11 +974,11 @@ class ChoiceNode: public RegExpNode {
};
-class NegativeLookaheadChoiceNode: public ChoiceNode {
+class NegativeLookaroundChoiceNode : public ChoiceNode {
public:
- explicit NegativeLookaheadChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this,
- Zone* zone)
+ explicit NegativeLookaroundChoiceNode(GuardedAlternative this_must_fail,
+ GuardedAlternative then_do_this,
+ Zone* zone)
: ChoiceNode(2, zone) {
AddAlternative(this_must_fail);
AddAlternative(then_do_this);
@@ -1150,12 +1008,12 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
class LoopChoiceNode: public ChoiceNode {
public:
- explicit LoopChoiceNode(bool body_can_be_zero_length, Zone* zone)
+ LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
: ChoiceNode(2, zone),
loop_node_(NULL),
continue_node_(NULL),
- body_can_be_zero_length_(body_can_be_zero_length)
- { }
+ body_can_be_zero_length_(body_can_be_zero_length),
+ read_backward_(read_backward) {}
void AddLoopAlternative(GuardedAlternative alt);
void AddContinueAlternative(GuardedAlternative alt);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -1169,6 +1027,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
+ virtual bool read_backward() { return read_backward_; }
virtual void Accept(NodeVisitor* visitor);
virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
@@ -1183,6 +1042,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* loop_node_;
RegExpNode* continue_node_;
bool body_can_be_zero_length_;
+ bool read_backward_;
};
@@ -1438,9 +1298,7 @@ class Trace {
at_start_ == UNKNOWN;
}
TriBool at_start() { return at_start_; }
- void set_at_start(bool at_start) {
- at_start_ = at_start ? TRUE_VALUE : FALSE_VALUE;
- }
+ void set_at_start(TriBool at_start) { at_start_ = at_start; }
Label* backtrack() { return backtrack_; }
Label* loop_label() { return loop_label_; }
RegExpNode* stop_node() { return stop_node_; }
@@ -1666,12 +1524,12 @@ class RegExpResultsCache : public AllStatic {
// Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
// On success, the returned result is guaranteed to be a COW-array.
static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
- ResultsCacheType type);
+ FixedArray** last_match_out, ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
static void Enter(Isolate* isolate, Handle<String> key_string,
Handle<Object> key_pattern, Handle<FixedArray> value_array,
- ResultsCacheType type);
+ Handle<FixedArray> last_match_cache, ResultsCacheType type);
static void Clear(FixedArray* cache);
static const int kRegExpResultsCacheSize = 0x100;
@@ -1680,6 +1538,7 @@ class RegExpResultsCache : public AllStatic {
static const int kStringOffset = 0;
static const int kPatternOffset = 1;
static const int kArrayOffset = 2;
+ static const int kLastMatchOffset = 3;
};
} // namespace internal
diff --git a/chromium/v8/src/regexp/mips/OWNERS b/chromium/v8/src/regexp/mips/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/regexp/mips/OWNERS
+++ b/chromium/v8/src/regexp/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 77f09917c06..9c59328ed12 100644
--- a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -97,7 +97,8 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -181,26 +182,17 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
-
- // If we did, are we still at the start of the input?
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(a0, current_input_offset(), Operand(-char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
- // If we did, are we still at the start of the input?
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
}
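
Both MIPS helpers now reduce "is position current + cp_offset at the string start?" to one comparison against the stored sentinel. In index terms (hypothetical helper; char_size scaling omitted):

    // Position current + cp_offset is at the string start exactly when
    // (current + cp_offset) - 1 equals the "string start - 1" sentinel.
    bool AtStart(int current, int cp_offset, int string_start_minus_one) {
      return current + cp_offset - 1 == string_start_minus_one;
    }
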
@@ -223,20 +215,26 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ lw(a0, register_location(start_reg)); // Index of start of capture.
__ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
__ Subu(a1, a1, a0); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Addu(t5, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ if (read_backward) {
+ __ lw(t0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(t0, t0, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t0));
+ } else {
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ }
if (mode_ == LATIN1) {
Label success;
@@ -247,6 +245,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a1 - length of capture.
__ Addu(a0, a0, Operand(end_of_input_address()));
__ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Subu(a2, a2, Operand(a1));
+ }
__ Addu(a1, a0, Operand(a1));
// a0 - Address of start of capture.
@@ -285,6 +286,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ Subu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ lw(t0, register_location(start_reg)); // Index of start of capture.
+ __ lw(t5, register_location(start_reg + 1)); // Index of end of capture.
+ __ Addu(current_input_offset(), current_input_offset(), Operand(t0));
+ __ Subu(current_input_offset(), current_input_offset(), Operand(t5));
+ }
} else {
DCHECK(mode_ == UC16);
// Put regexp engine registers on stack.
@@ -313,6 +320,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ mov(s3, a1);
// Address of current input position.
__ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Subu(a1, a1, Operand(s3));
+ }
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
@@ -330,17 +340,21 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
- // On success, increment position by length of capture.
- __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ Subu(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerMIPS::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
@@ -348,17 +362,35 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
__ lw(a0, register_location(start_reg));
__ lw(a1, register_location(start_reg + 1));
__ Subu(a1, a1, a0); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Addu(t5, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, le, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ lw(t0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(t0, t0, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t0));
+ } else {
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ }
- // Compute pointers to match string and capture string.
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
__ Addu(a0, a0, Operand(end_of_input_address()));
__ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
- __ Addu(a1, a1, Operand(a0));
+ if (read_backward) {
+ __ Subu(a2, a2, Operand(a1));
+ }
+ __ Addu(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
Label loop;
__ bind(&loop);
@@ -379,6 +411,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
// Move current character position to position after match.
__ Subu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ lw(t0, register_location(start_reg)); // Index of start of capture.
+ __ lw(t5, register_location(start_reg + 1)); // Index of end of capture.
+ __ Addu(current_input_offset(), current_input_offset(), Operand(t0));
+ __ Subu(current_input_offset(), current_input_offset(), Operand(t5));
+ }
__ bind(&fallthrough);
}
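For read_backward back references the two checks above invert their direction: the bounds test asks whether capture-length characters exist behind the cursor rather than ahead of it, and on success the cursor is rewound by the capture length (start minus end is exactly -length). A sketch of both computations with assumed values, mirroring the register usage in the hunks above:

    #include <cassert>

    int main() {
      const int string_start_minus_one = -11;    // 10-character LATIN1 string
      int current_input_offset = -4;             // six characters consumed
      int capture_start = -10, capture_end = -7;
      int length = capture_end - capture_start;  // 3

      // Backward bounds check: fail when fewer than `length` characters
      // precede the cursor, i.e. offset <= kStringStartMinusOne + length.
      bool no_match = current_input_offset <= string_start_minus_one + length;
      assert(!no_match);  // -4 > -11 + 3

      // After a successful backward match, rewind past the matched text.
      current_input_offset += capture_start - capture_end;
      assert(current_input_offset == -7);
      return 0;
    }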
@@ -599,7 +637,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
- __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -642,7 +680,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Subu(a0, a0, t5);
// Store this value in a local variable, for use when clearing
// position registers.
- __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ sw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -751,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -905,10 +943,13 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1016,7 +1057,7 @@ void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ sw(a0, register_location(reg));
}
@@ -1129,10 +1170,14 @@ MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
Label* on_outside_input) {
- BranchOrBacktrack(on_outside_input,
- ge,
- current_input_offset(),
- Operand(-cp_offset * char_size()));
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
}
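LoadCurrentCharacter and CheckPosition now accept negative cp_offset values: for a backward read the character at cp_offset itself is the farthest out of range, so it is the one checked, and the out-of-bounds test compares against the cached string start instead of offset zero. A sketch of the two CheckPosition branches as plain arithmetic (assumed offsets, same convention as above):

    #include <cassert>

    // Returns true when the branch to on_outside_input would be taken.
    bool OutsideInput(int cp_offset, int current_input_offset,
                      int string_start_minus_one, int char_size) {
      if (cp_offset >= 0) {
        // Forward: out of range once the cursor has fewer than
        // cp_offset + 1 characters left.
        return current_input_offset >= -cp_offset * char_size;
      }
      // Backward: out of range when stepping cp_offset characters back
      // lands at or before the character preceding the string.
      return current_input_offset + cp_offset * char_size <=
             string_start_minus_one;
    }

    int main() {
      assert(!OutsideInput(1, -4, -11, 1));  // 4 characters still ahead
      assert(OutsideInput(-7, -4, -11, 1));  // only 6 characters behind
      return 0;
    }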
diff --git a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 36fd4b15641..902e2208fe8 100644
--- a/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/chromium/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -120,9 +122,9 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -220,6 +222,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/chromium/v8/src/regexp/mips64/OWNERS b/chromium/v8/src/regexp/mips64/OWNERS
index 5508ba626f3..89455a4fbd7 100644
--- a/chromium/v8/src/regexp/mips64/OWNERS
+++ b/chromium/v8/src/regexp/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 869cbc4f2e2..5153bd018b7 100644
--- a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -61,7 +61,7 @@ namespace internal {
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
+ * string start - 1). Used to initialize capture registers to a
* non-position.
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
@@ -91,7 +91,7 @@ namespace internal {
* - fp[-56] start index (character index of start). kStartIndex
* - fp[-64] void* input_string (location of a handle containing the string). kInputString
* - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-80] Offset of location before start of input (effectively character kInputStartMinusOne
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
- *           position -1). Used to initialize capture registers to a
+ *           string start - 1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
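The rename from kInputStartMinusOne to kStringStartMinusOne in these frame layouts is substantive, not cosmetic: a lookbehind can read characters before the position the match started from, so the sentinel written into cleared capture registers must lie before the start of the string, where no read can ever land, rather than merely before the start of the match. A small sketch of why the old sentinel breaks once reads go backwards (values assumed for illustration):

    #include <cassert>

    int main() {
      const int char_size = 1;
      const int string_start = -10;
      const int match_start = -6;  // match begins partway into the string

      // Old sentinel: one character before the *match* -- but with
      // lookbehind that offset is a perfectly valid character position.
      int old_sentinel = match_start - char_size;  // -7
      assert(old_sentinel >= string_start);        // collides with real data

      // New sentinel: one character before the *string* is never readable.
      int new_sentinel = string_start - char_size;  // -11
      assert(new_sentinel < string_start);
      return 0;
    }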
@@ -133,7 +133,8 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -217,26 +218,17 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
-
- // If we did, are we still at the start of the input?
- __ ld(a1, MemOperand(frame_pointer(), kInputStart));
- __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+ __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(a0, current_input_offset(), Operand(-char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
- // If we did, are we still at the start of the input?
- __ ld(a1, MemOperand(frame_pointer(), kInputStart));
- __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
}
@@ -259,20 +251,26 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ ld(a0, register_location(start_reg)); // Index of start of capture.
__ ld(a1, register_location(start_reg + 1)); // Index of end of capture.
__ Dsubu(a1, a1, a0); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Daddu(t1, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ if (read_backward) {
+ __ ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Daddu(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
if (mode_ == LATIN1) {
Label success;
@@ -283,6 +281,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a1 - length of capture.
__ Daddu(a0, a0, Operand(end_of_input_address()));
__ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Dsubu(a2, a2, Operand(a1));
+ }
__ Daddu(a1, a0, Operand(a1));
// a0 - Address of start of capture.
@@ -321,6 +322,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ Dsubu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Dsubu(current_input_offset(), current_input_offset(), Operand(a2));
+ }
} else {
DCHECK(mode_ == UC16);
// Put regexp engine registers on stack.
@@ -349,6 +356,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ mov(s3, a1);
// Address of current input position.
__ Daddu(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Dsubu(a1, a1, Operand(s3));
+ }
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
@@ -367,16 +377,20 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
- // On success, increment position by length of capture.
+ // On success, advance position by length of capture.
- __ Daddu(current_input_offset(), current_input_offset(), Operand(s3));
+ if (read_backward) {
+ __ Dsubu(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerMIPS::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
@@ -384,16 +398,28 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
__ ld(a0, register_location(start_reg));
__ ld(a1, register_location(start_reg + 1));
__ Dsubu(a1, a1, a0); // Length to check.
- // Succeed on empty capture (including no capture).
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Daddu(t1, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ if (read_backward) {
+ __ ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Daddu(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
// Compute pointers to match string and capture string.
__ Daddu(a0, a0, Operand(end_of_input_address()));
__ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Dsubu(a2, a2, Operand(a1));
+ }
__ Daddu(a1, a1, Operand(a0));
Label loop;
@@ -415,6 +441,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
// Move current character position to position after match.
__ Dsubu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Dsubu(current_input_offset(), current_input_offset(), Operand(a2));
+ }
__ bind(&fallthrough);
}
@@ -644,7 +676,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Daddu(frame_pointer(), sp, Operand(8 * kPointerSize));
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
- __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -687,7 +719,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Dsubu(a0, a0, t1);
// Store this value in a local variable, for use when clearing
// position registers.
- __ sd(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ sd(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -797,7 +829,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
- __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -951,10 +983,13 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1062,7 +1097,7 @@ void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ sd(a0, register_location(reg));
}
@@ -1175,10 +1210,14 @@ MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
Label* on_outside_input) {
- BranchOrBacktrack(on_outside_input,
- ge,
- current_input_offset(),
- Operand(-cp_offset * char_size()));
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
}
diff --git a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 84c85affbe3..9a8ca179d57 100644
--- a/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/chromium/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -125,9 +127,9 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
#elif defined(MIPS_ABI_O32)
// Offsets from frame_pointer() of function parameters and stored registers.
@@ -158,9 +160,9 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
#else
# error "undefined MIPS ABI"
@@ -262,6 +264,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 03f9741147e..f3ddf7bf986 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -60,7 +60,7 @@ namespace internal {
* - fp[-32] void* input_string (location of a handle containing the string).
* - fp[-36] success counter (only for global regexps to count matches).
* - fp[-40] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
+ * string start - 1). Used to initialize capture registers to a
* non-position.
* - fp[-44] At start (if 1, we are starting at the start of the
* string, otherwise 0)
@@ -100,7 +100,8 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -189,30 +190,18 @@ void RegExpMacroAssemblerPPC::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerPPC::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
- __ cmpi(r3, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
- __ mr(r0, current_input_offset());
- __ add(r3, end_of_input_address(), r0);
- __ cmp(r4, r3);
+ __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ addi(r3, current_input_offset(), Operand(-char_size()));
+ __ cmp(r3, r4);
BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerPPC::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
- __ cmpi(r3, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
- __ add(r3, end_of_input_address(), current_input_offset());
+void RegExpMacroAssemblerPPC::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ addi(r3, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
__ cmp(r3, r4);
BranchOrBacktrack(ne, on_not_at_start);
}
@@ -238,20 +227,27 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
- int start_reg, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
__ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
__ sub(r4, r4, r3, LeaveOE, SetRC); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ beq(&fallthrough, cr0);
// Check that there are enough characters left in the input.
- __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
- // __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match, cr0);
+ if (read_backward) {
+ __ LoadP(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r6, r6, r4);
+ __ cmp(current_input_offset(), r6);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+ BranchOrBacktrack(gt, on_no_match, cr0);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -262,6 +258,9 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// r4 - length of capture
__ add(r3, r3, end_of_input_address());
__ add(r5, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r5, r5, r4); // Offset by length when matching backwards.
+ }
__ add(r4, r3, r4);
// r3 - Address of start of capture.
@@ -303,6 +302,13 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r5, end_of_input_address());
+ if (read_backward) {
+ __ LoadP(r3, register_location(start_reg)); // Index of start of capture
+ __ LoadP(r4,
+ register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r3);
+ __ sub(current_input_offset(), current_input_offset(), r4);
+ }
} else {
DCHECK(mode_ == UC16);
int argument_count = 4;
@@ -326,6 +332,9 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ mr(r25, r4);
// Address of current input position.
__ add(r4, current_input_offset(), end_of_input_address());
+ if (read_backward) {
+ __ sub(r4, r4, r25);
+ }
// Isolate.
__ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
@@ -339,8 +348,13 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ cmpi(r3, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), r25);
+
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(current_input_offset(), current_input_offset(), r25);
+ } else {
+ __ add(current_input_offset(), current_input_offset(), r25);
+ }
}
__ bind(&fallthrough);
@@ -348,6 +362,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
+ bool read_backward,
Label* on_no_match) {
Label fallthrough;
Label success;
@@ -356,16 +371,30 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
__ LoadP(r3, register_location(start_reg), r0);
__ LoadP(r4, register_location(start_reg + 1), r0);
__ sub(r4, r4, r3, LeaveOE, SetRC); // Length to check.
- // Succeed on empty capture (including no capture).
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ beq(&fallthrough, cr0);
// Check that there are enough characters left in the input.
- __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
- BranchOrBacktrack(gt, on_no_match, cr0);
+ if (read_backward) {
+ __ LoadP(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r6, r6, r4);
+ __ cmp(current_input_offset(), r6);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+ BranchOrBacktrack(gt, on_no_match, cr0);
+ }
- // Compute pointers to match string and capture string
+ // r3 - offset of start of capture
+ // r4 - length of capture
__ add(r3, r3, end_of_input_address());
__ add(r5, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r5, r5, r4); // Offset by length when matching backwards.
+ }
__ add(r4, r4, r3);
Label loop;
@@ -389,6 +418,13 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
// Move current character position to position after match.
__ sub(current_input_offset(), r5, end_of_input_address());
+ if (read_backward) {
+ __ LoadP(r3, register_location(start_reg)); // Index of start of capture
+ __ LoadP(r4, register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r3);
+ __ sub(current_input_offset(), current_input_offset(), r4);
+ }
+
__ bind(&fallthrough);
}
@@ -639,7 +675,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ addi(frame_pointer(), sp, Operand(8 * kPointerSize));
__ li(r3, Operand::Zero());
__ push(r3); // Make room for success counter and initialize it to 0.
- __ push(r3); // Make room for "position - 1" constant (value is irrelevant)
+ __ push(r3); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -688,7 +724,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ StoreP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ StoreP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -797,7 +833,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ StoreP(r5, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r3 to initialize registers with its value in the next run.
- __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -936,10 +972,13 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1028,7 +1067,7 @@ void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreP(r3, register_location(reg), r0);
}
@@ -1132,8 +1171,15 @@ MemOperand RegExpMacroAssemblerPPC::register_location(int register_index) {
void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
- BranchOrBacktrack(ge, on_outside_input);
+ if (cp_offset >= 0) {
+ __ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
+ BranchOrBacktrack(ge, on_outside_input);
+ } else {
+ __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ addi(r3, current_input_offset(), Operand(cp_offset * char_size()));
+ __ cmp(r3, r4);
+ BranchOrBacktrack(le, on_outside_input);
+ }
}
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 2dd339eb8d3..4d1836fc717 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -34,9 +34,11 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
@@ -112,9 +114,9 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -208,7 +210,7 @@ const RegList kRegExpCalleeSaved =
1 << 25 | 1 << 26 | 1 << 27 | 1 << 28 | 1 << 29 | 1 << 30 | 1 << 31;
#endif // V8_INTERPRETED_REGEXP
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
diff --git a/chromium/v8/src/regexp/regexp-ast.cc b/chromium/v8/src/regexp/regexp-ast.cc
new file mode 100644
index 00000000000..31c93b114f1
--- /dev/null
+++ b/chromium/v8/src/regexp/regexp-ast.cc
@@ -0,0 +1,337 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ostreams.h"
+#include "src/regexp/regexp-ast.h"
+
+namespace v8 {
+namespace internal {
+
+#define MAKE_ACCEPT(Name) \
+ void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) { \
+ return visitor->Visit##Name(this, data); \
+ }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
+#undef MAKE_ACCEPT
+
+#define MAKE_TYPE_CASE(Name) \
+ RegExp##Name* RegExpTree::As##Name() { return NULL; } \
+ bool RegExpTree::Is##Name() { return false; }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
+#undef MAKE_TYPE_CASE
+
+#define MAKE_TYPE_CASE(Name) \
+ RegExp##Name* RegExp##Name::As##Name() { return this; } \
+ bool RegExp##Name::Is##Name() { return true; }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
+#undef MAKE_TYPE_CASE
+
+
+static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
+ Interval result = Interval::Empty();
+ for (int i = 0; i < children->length(); i++)
+ result = result.Union(children->at(i)->CaptureRegisters());
+ return result;
+}
+
+
+Interval RegExpAlternative::CaptureRegisters() {
+ return ListCaptureRegisters(nodes());
+}
+
+
+Interval RegExpDisjunction::CaptureRegisters() {
+ return ListCaptureRegisters(alternatives());
+}
+
+
+Interval RegExpLookaround::CaptureRegisters() {
+ return body()->CaptureRegisters();
+}
+
+
+Interval RegExpCapture::CaptureRegisters() {
+ Interval self(StartRegister(index()), EndRegister(index()));
+ return self.Union(body()->CaptureRegisters());
+}
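CaptureRegisters folds, bottom-up, the closed interval of registers a subtree can write; a capture node unions its own two registers with whatever its body claims. A hedged worked example using only StartRegister(i) == 2 * i and EndRegister(i) == 2 * i + 1 from the header further down, and assuming the usual left-to-right capture numbering from 1:

    #include <algorithm>
    #include <cassert>

    int main() {
      // For /(a(b))/: outer capture index 1 -> registers 2..3,
      // inner capture index 2 -> registers 4..5.
      auto start_reg = [](int i) { return i * 2; };
      auto end_reg = [](int i) { return i * 2 + 1; };
      int from = std::min(start_reg(1), start_reg(2));  // 2
      int to = std::max(end_reg(1), end_reg(2));        // 5
      assert(from == 2 && to == 5);  // the outer node's CaptureRegisters()
      return 0;
    }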
+
+
+Interval RegExpQuantifier::CaptureRegisters() {
+ return body()->CaptureRegisters();
+}
+
+
+bool RegExpAssertion::IsAnchoredAtStart() {
+ return assertion_type() == RegExpAssertion::START_OF_INPUT;
+}
+
+
+bool RegExpAssertion::IsAnchoredAtEnd() {
+ return assertion_type() == RegExpAssertion::END_OF_INPUT;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtStart() {
+ ZoneList<RegExpTree*>* nodes = this->nodes();
+ for (int i = 0; i < nodes->length(); i++) {
+ RegExpTree* node = nodes->at(i);
+ if (node->IsAnchoredAtStart()) {
+ return true;
+ }
+ if (node->max_match() > 0) {
+ return false;
+ }
+ }
+ return false;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtEnd() {
+ ZoneList<RegExpTree*>* nodes = this->nodes();
+ for (int i = nodes->length() - 1; i >= 0; i--) {
+ RegExpTree* node = nodes->at(i);
+ if (node->IsAnchoredAtEnd()) {
+ return true;
+ }
+ if (node->max_match() > 0) {
+ return false;
+ }
+ }
+ return false;
+}
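Both scans stop at the first node that can consume input (max_match() > 0): an anchor only pins the alternative if nothing before it (for the start case) or after it (for the end case, scanned in reverse) can eat characters. A sketch of the start-direction scan over toy nodes, where (anchored, max_match) pairs stand in for real RegExpTree children:

    #include <cassert>
    #include <vector>

    struct Node { bool anchored_at_start; int max_match; };

    bool IsAnchoredAtStart(const std::vector<Node>& nodes) {
      for (const Node& node : nodes) {
        if (node.anchored_at_start) return true;
        if (node.max_match > 0) return false;  // may consume before anchor
      }
      return false;
    }

    int main() {
      // /\b^a/ without /m: \b is zero-width, so the ^ still anchors.
      assert(IsAnchoredAtStart({{false, 0}, {true, 0}, {false, 1}}));
      // /a^/: 'a' can consume a character before the anchor is reached.
      assert(!IsAnchoredAtStart({{false, 1}, {true, 0}}));
      return 0;
    }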
+
+
+bool RegExpDisjunction::IsAnchoredAtStart() {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ for (int i = 0; i < alternatives->length(); i++) {
+ if (!alternatives->at(i)->IsAnchoredAtStart()) return false;
+ }
+ return true;
+}
+
+
+bool RegExpDisjunction::IsAnchoredAtEnd() {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ for (int i = 0; i < alternatives->length(); i++) {
+ if (!alternatives->at(i)->IsAnchoredAtEnd()) return false;
+ }
+ return true;
+}
+
+
+bool RegExpLookaround::IsAnchoredAtStart() {
+ return is_positive() && type() == LOOKAHEAD && body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtStart() { return body()->IsAnchoredAtStart(); }
+
+
+bool RegExpCapture::IsAnchoredAtEnd() { return body()->IsAnchoredAtEnd(); }
+
+
+// Convert regular expression trees to a simple sexp representation.
+// This representation should be different from the input grammar
+// in as many cases as possible, to make it more difficult for incorrect
+// parses to look like correct ones, which is likely if the input and
+// output formats are alike.
+class RegExpUnparser final : public RegExpVisitor {
+ public:
+ RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
+ void VisitCharacterRange(CharacterRange that);
+#define MAKE_CASE(Name) void* Visit##Name(RegExp##Name*, void* data) override;
+ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
+#undef MAKE_CASE
+ private:
+ std::ostream& os_;
+ Zone* zone_;
+};
+
+
+void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
+ os_ << "(|";
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ os_ << " ";
+ that->alternatives()->at(i)->Accept(this, data);
+ }
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
+ os_ << "(:";
+ for (int i = 0; i < that->nodes()->length(); i++) {
+ os_ << " ";
+ that->nodes()->at(i)->Accept(this, data);
+ }
+ os_ << ")";
+ return NULL;
+}
+
+
+void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
+ os_ << AsUC16(that.from());
+ if (!that.IsSingleton()) {
+ os_ << "-" << AsUC16(that.to());
+ }
+}
+
+
+void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
+ void* data) {
+ if (that->is_negated()) os_ << "^";
+ os_ << "[";
+ for (int i = 0; i < that->ranges(zone_)->length(); i++) {
+ if (i > 0) os_ << " ";
+ VisitCharacterRange(that->ranges(zone_)->at(i));
+ }
+ os_ << "]";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
+ switch (that->assertion_type()) {
+ case RegExpAssertion::START_OF_INPUT:
+ os_ << "@^i";
+ break;
+ case RegExpAssertion::END_OF_INPUT:
+ os_ << "@$i";
+ break;
+ case RegExpAssertion::START_OF_LINE:
+ os_ << "@^l";
+ break;
+ case RegExpAssertion::END_OF_LINE:
+ os_ << "@$l";
+ break;
+ case RegExpAssertion::BOUNDARY:
+ os_ << "@b";
+ break;
+ case RegExpAssertion::NON_BOUNDARY:
+ os_ << "@B";
+ break;
+ }
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
+ os_ << "'";
+ Vector<const uc16> chardata = that->data();
+ for (int i = 0; i < chardata.length(); i++) {
+ os_ << AsUC16(chardata[i]);
+ }
+ os_ << "'";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
+ if (that->elements()->length() == 1) {
+ that->elements()->at(0).tree()->Accept(this, data);
+ } else {
+ os_ << "(!";
+ for (int i = 0; i < that->elements()->length(); i++) {
+ os_ << " ";
+ that->elements()->at(i).tree()->Accept(this, data);
+ }
+ os_ << ")";
+ }
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
+ os_ << "(# " << that->min() << " ";
+ if (that->max() == RegExpTree::kInfinity) {
+ os_ << "- ";
+ } else {
+ os_ << that->max() << " ";
+ }
+ os_ << (that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
+ os_ << "(^ ";
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
+ os_ << "(";
+ os_ << (that->type() == RegExpLookaround::LOOKAHEAD ? "->" : "<-");
+ os_ << (that->is_positive() ? " + " : " - ");
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
+ void* data) {
+ os_ << "(<- " << that->index() << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
+ os_ << '%';
+ return NULL;
+}
+
+
+std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
+ RegExpUnparser unparser(os, zone);
+ Accept(&unparser, NULL);
+ return os;
+}
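Assembling the Visit* methods above, Print yields output along these lines for two sample trees (hand-derived from the visitors, and assuming, as the parser does without the /m flag, that $ becomes END_OF_INPUT):

    // /foo|[a-c]+$/
    (| 'foo' (: (# 1 - g [a-c]) @$i))
    // /(?<=a)b/ -- a positive lookbehind
    (: (<- + 'a') 'b')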
+
+
+RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
+ : alternatives_(alternatives) {
+ DCHECK(alternatives->length() > 1);
+ RegExpTree* first_alternative = alternatives->at(0);
+ min_match_ = first_alternative->min_match();
+ max_match_ = first_alternative->max_match();
+ for (int i = 1; i < alternatives->length(); i++) {
+ RegExpTree* alternative = alternatives->at(i);
+ min_match_ = Min(min_match_, alternative->min_match());
+ max_match_ = Max(max_match_, alternative->max_match());
+ }
+}
+
+
+static int IncreaseBy(int previous, int increase) {
+ if (RegExpTree::kInfinity - previous < increase) {
+ return RegExpTree::kInfinity;
+ } else {
+ return previous + increase;
+ }
+}
+
+
+RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
+ : nodes_(nodes) {
+ DCHECK(nodes->length() > 1);
+ min_match_ = 0;
+ max_match_ = 0;
+ for (int i = 0; i < nodes->length(); i++) {
+ RegExpTree* node = nodes->at(i);
+ int node_min_match = node->min_match();
+ min_match_ = IncreaseBy(min_match_, node_min_match);
+ int node_max_match = node->max_match();
+ max_match_ = IncreaseBy(max_match_, node_max_match);
+ }
+}
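The two constructors fold their children's match-length bounds in opposite ways: a disjunction takes the minimum of the minima and the maximum of the maxima, while an alternative sums both through IncreaseBy, which saturates at kInfinity instead of overflowing. A worked example on plain integers (node values assumed for illustration):

    #include <cassert>
    #include <climits>

    const int kInfinity = INT_MAX;

    int IncreaseBy(int previous, int increase) {
      // Saturating addition, as in RegExpAlternative's constructor above.
      return (kInfinity - previous < increase) ? kInfinity
                                               : previous + increase;
    }

    int main() {
      // /ab?c/ as an alternative of 'a' (1,1), b? (0,1), 'c' (1,1):
      int min_match = 0, max_match = 0;
      const int mins[] = {1, 0, 1}, maxs[] = {1, 1, 1};
      for (int i = 0; i < 3; i++) {
        min_match = IncreaseBy(min_match, mins[i]);
        max_match = IncreaseBy(max_match, maxs[i]);
      }
      assert(min_match == 2 && max_match == 3);
      // An unbounded child (e.g. a back reference) saturates the sum:
      assert(IncreaseBy(5, kInfinity) == kInfinity);
      return 0;
    }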
+
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/regexp/regexp-ast.h b/chromium/v8/src/regexp/regexp-ast.h
new file mode 100644
index 00000000000..f87778596ad
--- /dev/null
+++ b/chromium/v8/src/regexp/regexp-ast.h
@@ -0,0 +1,496 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_AST_H_
+#define V8_REGEXP_REGEXP_AST_H_
+
+#include "src/utils.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT) \
+ VISIT(Disjunction) \
+ VISIT(Alternative) \
+ VISIT(Assertion) \
+ VISIT(CharacterClass) \
+ VISIT(Atom) \
+ VISIT(Quantifier) \
+ VISIT(Capture) \
+ VISIT(Lookaround) \
+ VISIT(BackReference) \
+ VISIT(Empty) \
+ VISIT(Text)
+
+
+#define FORWARD_DECLARE(Name) class RegExp##Name;
+FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
+class RegExpCompiler;
+class RegExpNode;
+class RegExpTree;
+
+
+class RegExpVisitor BASE_EMBEDDED {
+ public:
+ virtual ~RegExpVisitor() {}
+#define MAKE_CASE(Name) \
+ virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
+ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
+#undef MAKE_CASE
+};
+
+
+// A simple closed interval.
+class Interval {
+ public:
+ Interval() : from_(kNone), to_(kNone) {}
+ Interval(int from, int to) : from_(from), to_(to) {}
+ Interval Union(Interval that) {
+ if (that.from_ == kNone)
+ return *this;
+ else if (from_ == kNone)
+ return that;
+ else
+ return Interval(Min(from_, that.from_), Max(to_, that.to_));
+ }
+ bool Contains(int value) { return (from_ <= value) && (value <= to_); }
+ bool is_empty() { return from_ == kNone; }
+ int from() const { return from_; }
+ int to() const { return to_; }
+ static Interval Empty() { return Interval(); }
+ static const int kNone = -1;
+
+ private:
+ int from_;
+ int to_;
+};
+
+
+// Represents code units in the range from from_ to to_, both ends are
+// inclusive.
+class CharacterRange {
+ public:
+ CharacterRange() : from_(0), to_(0) {}
+ // For compatibility with the CHECK_OK macro
+ CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT
+ CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) {}
+ static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
+ Zone* zone);
+ static Vector<const int> GetWordBounds();
+ static inline CharacterRange Singleton(uc16 value) {
+ return CharacterRange(value, value);
+ }
+ static inline CharacterRange Range(uc16 from, uc16 to) {
+ DCHECK(from <= to);
+ return CharacterRange(from, to);
+ }
+ static inline CharacterRange Everything() {
+ return CharacterRange(0, 0xFFFF);
+ }
+ bool Contains(uc16 i) { return from_ <= i && i <= to_; }
+ uc16 from() const { return from_; }
+ void set_from(uc16 value) { from_ = value; }
+ uc16 to() const { return to_; }
+ void set_to(uc16 value) { to_ = value; }
+ bool is_valid() { return from_ <= to_; }
+ bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
+ bool IsSingleton() { return (from_ == to_); }
+ void AddCaseEquivalents(Isolate* isolate, Zone* zone,
+ ZoneList<CharacterRange>* ranges, bool is_one_byte);
+ static void Split(ZoneList<CharacterRange>* base, Vector<const int> overlay,
+ ZoneList<CharacterRange>** included,
+ ZoneList<CharacterRange>** excluded, Zone* zone);
+ // Whether a range list is in canonical form: Ranges ordered by from value,
+ // and ranges non-overlapping and non-adjacent.
+ static bool IsCanonical(ZoneList<CharacterRange>* ranges);
+ // Convert range list to canonical form. The characters covered by the ranges
+ // will still be the same, but no character is in more than one range, and
+ // adjacent ranges are merged. The resulting list may be shorter than the
+ // original, but cannot be longer.
+ static void Canonicalize(ZoneList<CharacterRange>* ranges);
+ // Negate the contents of a character range in canonical form.
+ static void Negate(ZoneList<CharacterRange>* src,
+ ZoneList<CharacterRange>* dst, Zone* zone);
+ static const int kStartMarker = (1 << 24);
+ static const int kPayloadMask = (1 << 24) - 1;
+
+ private:
+ uc16 from_;
+ uc16 to_;
+};
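IsCanonical/Canonicalize's contract in concrete terms: ranges are sorted by from(), and overlapping or adjacent ranges are fused, so the result is never longer than the input. The real implementation lives elsewhere in src/regexp; the following is only a minimal sketch of the documented contract, not V8's code:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Range { int from, to; };

    std::vector<Range> Canonicalize(std::vector<Range> ranges) {
      std::sort(ranges.begin(), ranges.end(),
                [](const Range& a, const Range& b) { return a.from < b.from; });
      std::vector<Range> out;
      for (const Range& r : ranges) {
        if (!out.empty() && r.from <= out.back().to + 1) {
          out.back().to = std::max(out.back().to, r.to);  // fuse ranges
        } else {
          out.push_back(r);
        }
      }
      return out;
    }

    int main() {
      // [d-k] and [a-f] overlap, so they fuse into the single range [a-k].
      std::vector<Range> c = Canonicalize({{'d', 'k'}, {'a', 'f'}});
      assert(c.size() == 1 && c[0].from == 'a' && c[0].to == 'k');
      return 0;
    }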
+
+
+class CharacterSet final BASE_EMBEDDED {
+ public:
+ explicit CharacterSet(uc16 standard_set_type)
+ : ranges_(NULL), standard_set_type_(standard_set_type) {}
+ explicit CharacterSet(ZoneList<CharacterRange>* ranges)
+ : ranges_(ranges), standard_set_type_(0) {}
+ ZoneList<CharacterRange>* ranges(Zone* zone);
+ uc16 standard_set_type() { return standard_set_type_; }
+ void set_standard_set_type(uc16 special_set_type) {
+ standard_set_type_ = special_set_type;
+ }
+ bool is_standard() { return standard_set_type_ != 0; }
+ void Canonicalize();
+
+ private:
+ ZoneList<CharacterRange>* ranges_;
+ // If non-zero, the value represents a standard set (e.g., all whitespace
+ // characters) without having to expand the ranges.
+ uc16 standard_set_type_;
+};
+
+
+class TextElement final BASE_EMBEDDED {
+ public:
+ enum TextType { ATOM, CHAR_CLASS };
+
+ static TextElement Atom(RegExpAtom* atom);
+ static TextElement CharClass(RegExpCharacterClass* char_class);
+
+ int cp_offset() const { return cp_offset_; }
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ int length() const;
+
+ TextType text_type() const { return text_type_; }
+
+ RegExpTree* tree() const { return tree_; }
+
+ RegExpAtom* atom() const {
+ DCHECK(text_type() == ATOM);
+ return reinterpret_cast<RegExpAtom*>(tree());
+ }
+
+ RegExpCharacterClass* char_class() const {
+ DCHECK(text_type() == CHAR_CLASS);
+ return reinterpret_cast<RegExpCharacterClass*>(tree());
+ }
+
+ private:
+ TextElement(TextType text_type, RegExpTree* tree)
+ : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
+
+ int cp_offset_;
+ TextType text_type_;
+ RegExpTree* tree_;
+};
+
+
+class RegExpTree : public ZoneObject {
+ public:
+ static const int kInfinity = kMaxInt;
+ virtual ~RegExpTree() {}
+ virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
+ virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) = 0;
+ virtual bool IsTextElement() { return false; }
+ virtual bool IsAnchoredAtStart() { return false; }
+ virtual bool IsAnchoredAtEnd() { return false; }
+ virtual int min_match() = 0;
+ virtual int max_match() = 0;
+ // Returns the interval of registers used for captures within this
+ // expression.
+ virtual Interval CaptureRegisters() { return Interval::Empty(); }
+ virtual void AppendToText(RegExpText* text, Zone* zone);
+ std::ostream& Print(std::ostream& os, Zone* zone); // NOLINT
+#define MAKE_ASTYPE(Name) \
+ virtual RegExp##Name* As##Name(); \
+ virtual bool Is##Name();
+ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
+#undef MAKE_ASTYPE
+};
+
+
+class RegExpDisjunction final : public RegExpTree {
+ public:
+ explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpDisjunction* AsDisjunction() override;
+ Interval CaptureRegisters() override;
+ bool IsDisjunction() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+ ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
+
+ private:
+ bool SortConsecutiveAtoms(RegExpCompiler* compiler);
+ void RationalizeConsecutiveAtoms(RegExpCompiler* compiler);
+ void FixSingleCharacterDisjunctions(RegExpCompiler* compiler);
+ ZoneList<RegExpTree*>* alternatives_;
+ int min_match_;
+ int max_match_;
+};
+
+
+class RegExpAlternative final : public RegExpTree {
+ public:
+ explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpAlternative* AsAlternative() override;
+ Interval CaptureRegisters() override;
+ bool IsAlternative() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+ ZoneList<RegExpTree*>* nodes() { return nodes_; }
+
+ private:
+ ZoneList<RegExpTree*>* nodes_;
+ int min_match_;
+ int max_match_;
+};
+
+
+class RegExpAssertion final : public RegExpTree {
+ public:
+ enum AssertionType {
+ START_OF_LINE,
+ START_OF_INPUT,
+ END_OF_LINE,
+ END_OF_INPUT,
+ BOUNDARY,
+ NON_BOUNDARY
+ };
+ explicit RegExpAssertion(AssertionType type) : assertion_type_(type) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpAssertion* AsAssertion() override;
+ bool IsAssertion() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ int min_match() override { return 0; }
+ int max_match() override { return 0; }
+ AssertionType assertion_type() { return assertion_type_; }
+
+ private:
+ AssertionType assertion_type_;
+};
+
+
+class RegExpCharacterClass final : public RegExpTree {
+ public:
+ RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
+ : set_(ranges), is_negated_(is_negated) {}
+ explicit RegExpCharacterClass(uc16 type) : set_(type), is_negated_(false) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpCharacterClass* AsCharacterClass() override;
+ bool IsCharacterClass() override;
+ bool IsTextElement() override { return true; }
+ int min_match() override { return 1; }
+ int max_match() override { return 1; }
+ void AppendToText(RegExpText* text, Zone* zone) override;
+ CharacterSet character_set() { return set_; }
+  // TODO(lrn): Remove need for complex version of is_standard that
+  // recognizes a mangled standard set and just do { return set_.is_standard(); }
+ bool is_standard(Zone* zone);
+ // Returns a value representing the standard character set if is_standard()
+ // returns true.
+ // Currently used values are:
+ // s : unicode whitespace
+ // S : unicode non-whitespace
+ // w : ASCII word character (digit, letter, underscore)
+  // W : non-word character (complement of w)
+  // d : ASCII digit
+  // D : non-digit (complement of d)
+ // . : non-unicode non-newline
+ // * : All characters
+ uc16 standard_type() { return set_.standard_set_type(); }
+ ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
+ bool is_negated() { return is_negated_; }
+
+ private:
+ CharacterSet set_;
+ bool is_negated_;
+};
+
+
+class RegExpAtom final : public RegExpTree {
+ public:
+ explicit RegExpAtom(Vector<const uc16> data) : data_(data) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpAtom* AsAtom() override;
+ bool IsAtom() override;
+ bool IsTextElement() override { return true; }
+ int min_match() override { return data_.length(); }
+ int max_match() override { return data_.length(); }
+ void AppendToText(RegExpText* text, Zone* zone) override;
+ Vector<const uc16> data() { return data_; }
+ int length() { return data_.length(); }
+
+ private:
+ Vector<const uc16> data_;
+};
+
+
+class RegExpText final : public RegExpTree {
+ public:
+ explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpText* AsText() override;
+ bool IsText() override;
+ bool IsTextElement() override { return true; }
+ int min_match() override { return length_; }
+ int max_match() override { return length_; }
+ void AppendToText(RegExpText* text, Zone* zone) override;
+ void AddElement(TextElement elm, Zone* zone) {
+ elements_.Add(elm, zone);
+ length_ += elm.length();
+ }
+ ZoneList<TextElement>* elements() { return &elements_; }
+
+ private:
+ ZoneList<TextElement> elements_;
+ int length_;
+};
+
+
+class RegExpQuantifier final : public RegExpTree {
+ public:
+ enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
+ RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
+ : body_(body),
+ min_(min),
+ max_(max),
+ min_match_(min * body->min_match()),
+ quantifier_type_(type) {
+ if (max > 0 && body->max_match() > kInfinity / max) {
+ max_match_ = kInfinity;
+ } else {
+ max_match_ = max * body->max_match();
+ }
+ }
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ static RegExpNode* ToNode(int min, int max, bool is_greedy, RegExpTree* body,
+ RegExpCompiler* compiler, RegExpNode* on_success,
+ bool not_at_start = false);
+ RegExpQuantifier* AsQuantifier() override;
+ Interval CaptureRegisters() override;
+ bool IsQuantifier() override;
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+ int min() { return min_; }
+ int max() { return max_; }
+ bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
+ bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
+ bool is_greedy() { return quantifier_type_ == GREEDY; }
+ RegExpTree* body() { return body_; }
+
+ private:
+ RegExpTree* body_;
+ int min_;
+ int max_;
+ int min_match_;
+ int max_match_;
+ QuantifierType quantifier_type_;
+};
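
The constructor above clamps max_match_ to kInfinity whenever max * body->max_match() would overflow. A standalone sketch of that saturating step, with kInfinity as an illustrative stand-in for RegExpTree::kInfinity:

    #include <limits>

    constexpr int kInfinity = std::numeric_limits<int>::max() / 2;  // illustrative

    int SaturatingMaxMatch(int max, int body_max_match) {
      // If max * body_max_match would exceed kInfinity, clamp instead of
      // letting the int multiplication overflow.
      if (max > 0 && body_max_match > kInfinity / max) return kInfinity;
      return max * body_max_match;
    }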
+
+
+class RegExpCapture final : public RegExpTree {
+ public:
+ explicit RegExpCapture(int index) : body_(NULL), index_(index) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ static RegExpNode* ToNode(RegExpTree* body, int index,
+ RegExpCompiler* compiler, RegExpNode* on_success);
+ RegExpCapture* AsCapture() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ Interval CaptureRegisters() override;
+ bool IsCapture() override;
+ int min_match() override { return body_->min_match(); }
+ int max_match() override { return body_->max_match(); }
+ RegExpTree* body() { return body_; }
+ void set_body(RegExpTree* body) { body_ = body; }
+ int index() { return index_; }
+ static int StartRegister(int index) { return index * 2; }
+ static int EndRegister(int index) { return index * 2 + 1; }
+
+ private:
+ RegExpTree* body_;
+ int index_;
+};
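
StartRegister/EndRegister encode the two-registers-per-capture layout. A standalone sketch of the mapping and what it implies:

    // Capture group i (1-based in the pattern, 0 for the whole match)
    // occupies registers 2*i and 2*i + 1.
    int StartRegister(int index) { return index * 2; }
    int EndRegister(int index) { return index * 2 + 1; }

    // E.g. for /(a)(b)/ matching "ab": registers 0..1 span the whole match,
    // registers 2..3 capture "a", and registers 4..5 capture "b".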
+
+
+class RegExpLookaround final : public RegExpTree {
+ public:
+ enum Type { LOOKAHEAD, LOOKBEHIND };
+
+ RegExpLookaround(RegExpTree* body, bool is_positive, int capture_count,
+ int capture_from, Type type)
+ : body_(body),
+ is_positive_(is_positive),
+ capture_count_(capture_count),
+ capture_from_(capture_from),
+ type_(type) {}
+
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpLookaround* AsLookaround() override;
+ Interval CaptureRegisters() override;
+ bool IsLookaround() override;
+ bool IsAnchoredAtStart() override;
+ int min_match() override { return 0; }
+ int max_match() override { return 0; }
+ RegExpTree* body() { return body_; }
+ bool is_positive() { return is_positive_; }
+ int capture_count() { return capture_count_; }
+ int capture_from() { return capture_from_; }
+ Type type() { return type_; }
+
+ private:
+ RegExpTree* body_;
+ bool is_positive_;
+ int capture_count_;
+ int capture_from_;
+ Type type_;
+};
+
+
+class RegExpBackReference final : public RegExpTree {
+ public:
+ explicit RegExpBackReference(RegExpCapture* capture) : capture_(capture) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpBackReference* AsBackReference() override;
+ bool IsBackReference() override;
+ int min_match() override { return 0; }
+ // The back reference may be recursive, e.g. /(\2)(\1)/. To avoid infinite
+ // recursion, we give up. Ignorance is bliss.
+ int max_match() override { return kInfinity; }
+ int index() { return capture_->index(); }
+ RegExpCapture* capture() { return capture_; }
+
+ private:
+ RegExpCapture* capture_;
+};
+
+
+class RegExpEmpty final : public RegExpTree {
+ public:
+ RegExpEmpty() {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpEmpty* AsEmpty() override;
+ bool IsEmpty() override;
+ int min_match() override { return 0; }
+ int max_match() override { return 0; }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_AST_H_
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/chromium/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index b86d28dfb97..4d0b1bc0a7c 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
namespace v8 {
@@ -56,6 +56,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index ca567c9bdab..751ee441c82 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -4,7 +4,7 @@
#include "src/regexp/regexp-macro-assembler-irregexp.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
@@ -273,8 +273,9 @@ void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
}
-void RegExpMacroAssemblerIrregexp::CheckNotAtStart(Label* on_not_at_start) {
- Emit(BC_CHECK_NOT_AT_START, 0);
+void RegExpMacroAssemblerIrregexp::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ Emit(BC_CHECK_NOT_AT_START, cp_offset);
EmitOrLink(on_not_at_start);
}
@@ -370,20 +371,23 @@ void RegExpMacroAssemblerIrregexp::CheckBitInTable(
void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
+ bool read_backward,
Label* on_not_equal) {
DCHECK(start_reg >= 0);
DCHECK(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF, start_reg);
+ Emit(read_backward ? BC_CHECK_NOT_BACK_REF_BACKWARD : BC_CHECK_NOT_BACK_REF,
+ start_reg);
EmitOrLink(on_not_equal);
}
void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_not_equal) {
+ int start_reg, bool read_backward, Label* on_not_equal) {
DCHECK(start_reg >= 0);
DCHECK(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg);
+ Emit(read_backward ? BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD
+ : BC_CHECK_NOT_BACK_REF_NO_CASE,
+ start_reg);
EmitOrLink(on_not_equal);
}
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.h b/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.h
index 556d78d23d2..f1ace63a74c 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -66,7 +66,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
@@ -82,8 +82,10 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
@@ -125,6 +127,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 2abe55588e3..5301ead69bd 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -4,7 +4,7 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
namespace v8 {
namespace internal {
@@ -13,9 +13,9 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
Isolate* isolate, RegExpMacroAssembler* assembler)
: RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) {
unsigned int type = assembler->Implementation();
- DCHECK(type < 6);
- const char* impl_names[] = {"IA32", "ARM", "ARM64",
- "MIPS", "X64", "X87", "Bytecode"};
+ DCHECK(type < 8);
+ const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS",
+ "PPC", "X64", "X87", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
@@ -241,9 +241,11 @@ void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
}
-void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
- PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
- assembler_->CheckNotAtStart(on_not_at_start);
+void RegExpMacroAssemblerTracer::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ PrintF(" CheckNotAtStart(cp_offset=%d, label[%08x]);\n", cp_offset,
+ LabelToInt(on_not_at_start));
+ assembler_->CheckNotAtStart(cp_offset, on_not_at_start);
}
@@ -349,19 +351,29 @@ void RegExpMacroAssemblerTracer::CheckBitInTable(
void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
+ bool read_backward,
Label* on_no_match) {
- PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
- LabelToInt(on_no_match));
- assembler_->CheckNotBackReference(start_reg, on_no_match);
+ PrintF(" CheckNotBackReference(register=%d, %s, label[%08x]);\n", start_reg,
+ read_backward ? "backward" : "forward", LabelToInt(on_no_match));
+ assembler_->CheckNotBackReference(start_reg, read_backward, on_no_match);
}
void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
- start_reg, LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
+ int start_reg, bool read_backward, Label* on_no_match) {
+ PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s, label[%08x]);\n",
+ start_reg, read_backward ? "backward" : "forward",
+ LabelToInt(on_no_match));
+ assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward,
+ on_no_match);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ PrintF(" CheckPosition(cp_offset=%d, label[%08x]);\n", cp_offset,
+ LabelToInt(on_outside_input));
+ assembler_->CheckPosition(cp_offset, on_outside_input);
}
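
The tracer wraps a real assembler, printing each call before delegating to it. A minimal standalone sketch of the same decorator shape (the Assembler interface here is hypothetical, not V8's):

    #include <cstdio>

    struct Assembler {
      virtual ~Assembler() = default;
      virtual void CheckPosition(int cp_offset) = 0;
    };

    struct TracingAssembler : Assembler {
      explicit TracingAssembler(Assembler* inner) : inner_(inner) {}
      void CheckPosition(int cp_offset) override {
        std::printf(" CheckPosition(cp_offset=%d);\n", cp_offset);
        inner_->CheckPosition(cp_offset);  // delegate to the real assembler
      }
      Assembler* inner_;
    };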
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h
index d4092ceaad4..77377aac314 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -30,9 +30,11 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -49,6 +51,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
@@ -81,6 +84,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
RegExpMacroAssembler* assembler_;
};
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_TRACER_H_
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.cc b/chromium/v8/src/regexp/regexp-macro-assembler.cc
index 9916d5f32fd..caf8b51fe54 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.cc
@@ -5,7 +5,6 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/assembler.h"
-#include "src/ast.h"
#include "src/isolate-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
@@ -189,16 +188,9 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
Address stack_base = stack_scope.stack()->stack_base();
int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
- input,
- start_offset,
- input_start,
- input_end,
- output,
- output_size,
- stack_base,
- direct_call,
- isolate);
+ int result = CALL_GENERATED_REGEXP_CODE(
+ isolate, code->entry(), input, start_offset, input_start, input_end,
+ output, output_size, stack_base, direct_call, isolate);
DCHECK(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.h b/chromium/v8/src/regexp/regexp-macro-assembler.h
index c3d94a6acfc..20599334cd3 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.h
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.h
@@ -5,7 +5,8 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
-#include "src/ast.h"
+#include "src/assembler.h"
+#include "src/regexp/regexp-ast.h"
namespace v8 {
namespace internal {
@@ -71,9 +72,11 @@ class RegExpMacroAssembler {
virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
- virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start) = 0;
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match) = 0;
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match) = 0;
// Check the current character for a match with a literal character. If we
// fail to match then goto the on_failure label. End of input always
@@ -102,17 +105,12 @@ class RegExpMacroAssembler {
// Checks whether the given offset from the current position is before
// the end of the string. May overwrite the current character.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input) {
- LoadCurrentCharacter(cp_offset, on_outside_input, true);
- }
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input) = 0;
// Check whether a standard/default character class matches the current
// character. Returns false if the type of special character class does
// not have custom support.
// May clobber the current loaded character.
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- return false;
- }
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match) = 0;
virtual void Fail() = 0;
virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
@@ -245,6 +243,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/chromium/v8/src/regexp/regexp-parser.cc b/chromium/v8/src/regexp/regexp-parser.cc
new file mode 100644
index 00000000000..fa8900342cf
--- /dev/null
+++ b/chromium/v8/src/regexp/regexp-parser.cc
@@ -0,0 +1,1180 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-parser.h"
+
+#include "src/char-predicates-inl.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/regexp/jsregexp.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
+ bool multiline, bool unicode, Isolate* isolate,
+ Zone* zone)
+ : isolate_(isolate),
+ zone_(zone),
+ error_(error),
+ captures_(NULL),
+ in_(in),
+ current_(kEndMarker),
+ next_pos_(0),
+ captures_started_(0),
+ capture_count_(0),
+ has_more_(true),
+ multiline_(multiline),
+ unicode_(unicode),
+ simple_(false),
+ contains_anchor_(false),
+ is_scanned_for_captures_(false),
+ failed_(false) {
+ Advance();
+}
+
+
+uc32 RegExpParser::Next() {
+ if (has_next()) {
+ return in()->Get(next_pos_);
+ } else {
+ return kEndMarker;
+ }
+}
+
+
+void RegExpParser::Advance() {
+ if (next_pos_ < in()->length()) {
+ StackLimitCheck check(isolate());
+ if (check.HasOverflowed()) {
+ ReportError(CStrVector(Isolate::kStackOverflowMessage));
+ } else if (zone()->excess_allocation()) {
+ ReportError(CStrVector("Regular expression too large"));
+ } else {
+ current_ = in()->Get(next_pos_);
+ next_pos_++;
+ // If the unicode flag is set, read the whole surrogate pair if possible.
+ if (unicode_ && next_pos_ < in()->length() &&
+ unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(current_))) {
+ uc16 trail = in()->Get(next_pos_);
+ if (unibrow::Utf16::IsTrailSurrogate(trail)) {
+ current_ = unibrow::Utf16::CombineSurrogatePair(
+ static_cast<uc16>(current_), trail);
+ next_pos_++;
+ }
+ }
+ }
+ } else {
+ current_ = kEndMarker;
+ // Advance so that position() points one past the last character. This
+ // is important so that Reset() to this position works correctly.
+ next_pos_ = in()->length() + 1;
+ has_more_ = false;
+ }
+}
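
Under the unicode flag, Advance() combines a lead/trail surrogate pair into a single code point. A standalone sketch of the UTF-16 arithmetic involved (helper names are illustrative):

    #include <cstdint>

    bool IsLeadSurrogate(uint16_t c) { return c >= 0xD800 && c <= 0xDBFF; }
    bool IsTrailSurrogate(uint16_t c) { return c >= 0xDC00 && c <= 0xDFFF; }

    uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
      // Each surrogate contributes 10 bits above the U+10000 base.
      return 0x10000u + ((lead - 0xD800u) << 10) + (trail - 0xDC00u);
    }
    // Example: lead 0xD83D and trail 0xDE00 combine to U+1F600.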
+
+
+void RegExpParser::Reset(int pos) {
+ next_pos_ = pos;
+ has_more_ = (pos < in()->length());
+ Advance();
+}
+
+
+void RegExpParser::Advance(int dist) {
+ next_pos_ += dist - 1;
+ Advance();
+}
+
+
+bool RegExpParser::simple() { return simple_; }
+
+
+bool RegExpParser::IsSyntaxCharacter(uc32 c) {
+ return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
+ c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
+ c == '{' || c == '}' || c == '|';
+}
+
+
+RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
+ failed_ = true;
+ *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
+ // Zip to the end to make sure that no more input is read.
+ current_ = kEndMarker;
+ next_pos_ = in()->length();
+ return NULL;
+}
+
+
+#define CHECK_FAILED /**/); \
+ if (failed_) return NULL; \
+ ((void)0
+
+
+// Pattern ::
+// Disjunction
+RegExpTree* RegExpParser::ParsePattern() {
+ RegExpTree* result = ParseDisjunction(CHECK_FAILED);
+ DCHECK(!has_more());
+ // If the result of parsing is a literal string atom, and it has the
+ // same length as the input, then the atom is identical to the input.
+ if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
+ simple_ = true;
+ }
+ return result;
+}
+
+
+// Disjunction ::
+// Alternative
+// Alternative | Disjunction
+// Alternative ::
+// [empty]
+// Term Alternative
+// Term ::
+// Assertion
+// Atom
+// Atom Quantifier
+RegExpTree* RegExpParser::ParseDisjunction() {
+ // Used to store current state while parsing subexpressions.
+ RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
+ zone());
+ RegExpParserState* state = &initial_state;
+ // Cache the builder in a local variable for quick access.
+ RegExpBuilder* builder = initial_state.builder();
+ while (true) {
+ switch (current()) {
+ case kEndMarker:
+ if (state->IsSubexpression()) {
+ // Inside a parenthesized group when hitting end of input.
+ ReportError(CStrVector("Unterminated group") CHECK_FAILED);
+ }
+ DCHECK_EQ(INITIAL, state->group_type());
+ // Parsing completed successfully.
+ return builder->ToRegExp();
+ case ')': {
+ if (!state->IsSubexpression()) {
+ ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
+ }
+ DCHECK_NE(INITIAL, state->group_type());
+
+ Advance();
+ // End disjunction parsing and convert builder content to new single
+ // regexp atom.
+ RegExpTree* body = builder->ToRegExp();
+
+ int end_capture_index = captures_started();
+
+ int capture_index = state->capture_index();
+ SubexpressionType group_type = state->group_type();
+
+ // Build result of subexpression.
+ if (group_type == CAPTURE) {
+ RegExpCapture* capture = GetCapture(capture_index);
+ capture->set_body(body);
+ body = capture;
+ } else if (group_type != GROUPING) {
+ DCHECK(group_type == POSITIVE_LOOKAROUND ||
+ group_type == NEGATIVE_LOOKAROUND);
+ bool is_positive = (group_type == POSITIVE_LOOKAROUND);
+ body = new (zone()) RegExpLookaround(
+ body, is_positive, end_capture_index - capture_index,
+ capture_index, state->lookaround_type());
+ }
+
+ // Restore previous state.
+ state = state->previous_state();
+ builder = state->builder();
+
+ builder->AddAtom(body);
+ // For compatibility with JSC and ES3, we allow quantifiers after
+ // lookaheads, and break in all cases.
+ break;
+ }
+ case '|': {
+ Advance();
+ builder->NewAlternative();
+ continue;
+ }
+ case '*':
+ case '+':
+ case '?':
+ return ReportError(CStrVector("Nothing to repeat"));
+ case '^': {
+ Advance();
+ if (multiline_) {
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
+ } else {
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+ set_contains_anchor();
+ }
+ continue;
+ }
+ case '$': {
+ Advance();
+ RegExpAssertion::AssertionType assertion_type =
+ multiline_ ? RegExpAssertion::END_OF_LINE
+ : RegExpAssertion::END_OF_INPUT;
+ builder->AddAssertion(new (zone()) RegExpAssertion(assertion_type));
+ continue;
+ }
+ case '.': {
+ Advance();
+ // Everything except \x0a, \x0d, \u2028 and \u2029.
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape('.', ranges, zone());
+ RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
+ builder->AddAtom(atom);
+ break;
+ }
+ case '(': {
+ SubexpressionType subexpr_type = CAPTURE;
+ RegExpLookaround::Type lookaround_type = state->lookaround_type();
+ Advance();
+ if (current() == '?') {
+ switch (Next()) {
+ case ':':
+ subexpr_type = GROUPING;
+ break;
+ case '=':
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ case '!':
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ case '<':
+ if (FLAG_harmony_regexp_lookbehind) {
+ Advance();
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ if (Next() == '=') {
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ } else if (Next() == '!') {
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ }
+ }
+ // Fall through.
+ default:
+ ReportError(CStrVector("Invalid group") CHECK_FAILED);
+ break;
+ }
+ Advance(2);
+ } else {
+ if (captures_started_ >= kMaxCaptures) {
+ ReportError(CStrVector("Too many captures") CHECK_FAILED);
+ }
+ captures_started_++;
+ }
+ // Store current state and begin new disjunction parsing.
+ state = new (zone()) RegExpParserState(
+ state, subexpr_type, lookaround_type, captures_started_, zone());
+ builder = state->builder();
+ continue;
+ }
+ case '[': {
+ RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
+ builder->AddAtom(atom);
+ break;
+ }
+ // Atom ::
+ // \ AtomEscape
+ case '\\':
+ switch (Next()) {
+ case kEndMarker:
+ return ReportError(CStrVector("\\ at end of pattern"));
+ case 'b':
+ Advance(2);
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
+ continue;
+ case 'B':
+ Advance(2);
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+ continue;
+ // AtomEscape ::
+ // CharacterClassEscape
+ //
+ // CharacterClassEscape :: one of
+ // d D s S w W
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W': {
+ uc32 c = Next();
+ Advance(2);
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape(c, ranges, zone());
+ RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
+ builder->AddAtom(atom);
+ break;
+ }
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ int index = 0;
+ if (ParseBackReferenceIndex(&index)) {
+ if (state->IsInsideCaptureGroup(index)) {
+ // The back reference is inside the capture group it refers to.
+ // Nothing can possibly have been captured yet, so we use empty
+ // instead. This ensures that, when checking a back reference,
+ // the capture registers of the referenced capture are either
+ // both set or both cleared.
+ builder->AddEmpty();
+ } else {
+ RegExpCapture* capture = GetCapture(index);
+ RegExpTree* atom = new (zone()) RegExpBackReference(capture);
+ builder->AddAtom(atom);
+ }
+ break;
+ }
+ uc32 first_digit = Next();
+ if (first_digit == '8' || first_digit == '9') {
+ // If the 'u' flag is present, only syntax characters can be
+ // escaped, no other identity escapes are allowed. If the 'u'
+ // flag is not present, all identity escapes are allowed.
+ if (!unicode_) {
+ builder->AddCharacter(first_digit);
+ Advance(2);
+ } else {
+ return ReportError(CStrVector("Invalid escape"));
+ }
+ break;
+ }
+ }
+ // FALLTHROUGH
+ case '0': {
+ Advance();
+ uc32 octal = ParseOctalLiteral();
+ builder->AddCharacter(octal);
+ break;
+ }
+ // ControlEscape :: one of
+ // f n r t v
+ case 'f':
+ Advance(2);
+ builder->AddCharacter('\f');
+ break;
+ case 'n':
+ Advance(2);
+ builder->AddCharacter('\n');
+ break;
+ case 'r':
+ Advance(2);
+ builder->AddCharacter('\r');
+ break;
+ case 't':
+ Advance(2);
+ builder->AddCharacter('\t');
+ break;
+ case 'v':
+ Advance(2);
+ builder->AddCharacter('\v');
+ break;
+ case 'c': {
+ Advance();
+ uc32 controlLetter = Next();
+ // Special case if it is an ASCII letter.
+ // Convert lower case letters to uppercase.
+ uc32 letter = controlLetter & ~('a' ^ 'A');
+ if (letter < 'A' || 'Z' < letter) {
+ // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
+ // This is outside the specification. We match JSC in
+ // reading the backslash as a literal character instead
+ // of as starting an escape.
+ builder->AddCharacter('\\');
+ } else {
+ Advance(2);
+ builder->AddCharacter(controlLetter & 0x1f);
+ }
+ break;
+ }
+ case 'x': {
+ Advance(2);
+ uc32 value;
+ if (ParseHexEscape(2, &value)) {
+ builder->AddCharacter(value);
+ } else if (!unicode_) {
+ builder->AddCharacter('x');
+ } else {
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ return ReportError(CStrVector("Invalid escape"));
+ }
+ break;
+ }
+ case 'u': {
+ Advance(2);
+ uc32 value;
+ if (ParseUnicodeEscape(&value)) {
+ builder->AddUnicodeCharacter(value);
+ } else if (!unicode_) {
+ builder->AddCharacter('u');
+ } else {
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ return ReportError(CStrVector("Invalid unicode escape"));
+ }
+ break;
+ }
+ default:
+ Advance();
+ // If the 'u' flag is present, only syntax characters can be
+ // escaped, no other identity escapes are allowed. If the 'u'
+ // flag is not present, all identity escapes are allowed.
+ if (!unicode_ || IsSyntaxCharacter(current())) {
+ builder->AddCharacter(current());
+ Advance();
+ } else {
+ return ReportError(CStrVector("Invalid escape"));
+ }
+ break;
+ }
+ break;
+ case '{': {
+ int dummy;
+ if (ParseIntervalQuantifier(&dummy, &dummy)) {
+ ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+ }
+ // fallthrough
+ }
+ default:
+ builder->AddUnicodeCharacter(current());
+ Advance();
+ break;
+ } // end switch(current())
+
+ int min;
+ int max;
+ switch (current()) {
+ // QuantifierPrefix ::
+ // *
+ // +
+ // ?
+ // {
+ case '*':
+ min = 0;
+ max = RegExpTree::kInfinity;
+ Advance();
+ break;
+ case '+':
+ min = 1;
+ max = RegExpTree::kInfinity;
+ Advance();
+ break;
+ case '?':
+ min = 0;
+ max = 1;
+ Advance();
+ break;
+ case '{':
+ if (ParseIntervalQuantifier(&min, &max)) {
+ if (max < min) {
+ ReportError(CStrVector("numbers out of order in {} quantifier.")
+ CHECK_FAILED);
+ }
+ break;
+ } else {
+ continue;
+ }
+ default:
+ continue;
+ }
+ RegExpQuantifier::QuantifierType quantifier_type = RegExpQuantifier::GREEDY;
+ if (current() == '?') {
+ quantifier_type = RegExpQuantifier::NON_GREEDY;
+ Advance();
+ } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
+ // FLAG_regexp_possessive_quantifier is a debug-only flag.
+ quantifier_type = RegExpQuantifier::POSSESSIVE;
+ Advance();
+ }
+ builder->AddQuantifierToAtom(min, max, quantifier_type);
+ }
+}
+
+
+#ifdef DEBUG
+// Currently only used in a DCHECK.
+static bool IsSpecialClassEscape(uc32 c) {
+ switch (c) {
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+
+// In order to know whether an escape is a back reference or not, we have to
+// scan the entire regexp and find the number of capturing parentheses.
+// However, we don't want to scan the regexp twice unless it is necessary.
+// This mini-parser is called when needed. It can tell the difference between
+// capturing and noncapturing parentheses and can skip character classes and
+// backslash-escaped characters.
+void RegExpParser::ScanForCaptures() {
+ // Start with the captures started before the current position.
+ int capture_count = captures_started();
+ // Add count of captures after this position.
+ int n;
+ while ((n = current()) != kEndMarker) {
+ Advance();
+ switch (n) {
+ case '\\':
+ Advance();
+ break;
+ case '[': {
+ int c;
+ while ((c = current()) != kEndMarker) {
+ Advance();
+ if (c == '\\') {
+ Advance();
+ } else {
+ if (c == ']') break;
+ }
+ }
+ break;
+ }
+ case '(':
+ if (current() != '?') capture_count++;
+ break;
+ }
+ }
+ capture_count_ = capture_count;
+ is_scanned_for_captures_ = true;
+}
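
ScanForCaptures() counts every '(' that is not followed by '?', skipping backslash escapes and character classes. A standalone sketch of the same scan over a plain string:

    #include <string>

    int CountCaptures(const std::string& pattern) {
      int captures = 0;
      for (size_t i = 0; i < pattern.size(); i++) {
        switch (pattern[i]) {
          case '\\':
            i++;  // skip the escaped character
            break;
          case '[':
            // Skip the class body; ']' inside it may be escaped.
            while (++i < pattern.size() && pattern[i] != ']') {
              if (pattern[i] == '\\') i++;
            }
            break;
          case '(':
            if (i + 1 >= pattern.size() || pattern[i + 1] != '?') captures++;
            break;
        }
      }
      return captures;
    }
    // CountCaptures("(a)(?:b)\\((c)") == 2: the non-capturing group and
    // the escaped '(' are not counted.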
+
+
+bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
+ DCHECK_EQ('\\', current());
+ DCHECK('1' <= Next() && Next() <= '9');
+ // Try to parse a decimal literal that is no greater than the total number
+ // of left capturing parentheses in the input.
+ int start = position();
+ int value = Next() - '0';
+ Advance(2);
+ while (true) {
+ uc32 c = current();
+ if (IsDecimalDigit(c)) {
+ value = 10 * value + (c - '0');
+ if (value > kMaxCaptures) {
+ Reset(start);
+ return false;
+ }
+ Advance();
+ } else {
+ break;
+ }
+ }
+ if (value > captures_started()) {
+ if (!is_scanned_for_captures_) {
+ int saved_position = position();
+ ScanForCaptures();
+ Reset(saved_position);
+ }
+ if (value > capture_count_) {
+ Reset(start);
+ return false;
+ }
+ }
+ *index_out = value;
+ return true;
+}
+
+
+RegExpCapture* RegExpParser::GetCapture(int index) {
+ // Capture group indices are one-based; the index into this list is
+ // zero-based.
+ int known_captures =
+ is_scanned_for_captures_ ? capture_count_ : captures_started_;
+ DCHECK(index <= known_captures);
+ if (captures_ == NULL) {
+ captures_ = new (zone()) ZoneList<RegExpCapture*>(known_captures, zone());
+ }
+ while (captures_->length() < known_captures) {
+ captures_->Add(new (zone()) RegExpCapture(captures_->length() + 1), zone());
+ }
+ return captures_->at(index - 1);
+}
+
+
+bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
+ for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
+ if (s->group_type() != CAPTURE) continue;
+ // Return true if we found the matching capture index.
+ if (index == s->capture_index()) return true;
+ // Abort if index is larger than what has been parsed up till this state.
+ if (index > s->capture_index()) return false;
+ }
+ return false;
+}
+
+
+// QuantifierPrefix ::
+// { DecimalDigits }
+// { DecimalDigits , }
+// { DecimalDigits , DecimalDigits }
+//
+// Returns true if parsing succeeds, and sets the min_out and max_out
+// values. Values are truncated to RegExpTree::kInfinity if they overflow.
+bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
+ DCHECK_EQ(current(), '{');
+ int start = position();
+ Advance();
+ int min = 0;
+ if (!IsDecimalDigit(current())) {
+ Reset(start);
+ return false;
+ }
+ while (IsDecimalDigit(current())) {
+ int next = current() - '0';
+ if (min > (RegExpTree::kInfinity - next) / 10) {
+ // Overflow. Skip past the remaining decimal digits and clamp to
+ // RegExpTree::kInfinity.
+ do {
+ Advance();
+ } while (IsDecimalDigit(current()));
+ min = RegExpTree::kInfinity;
+ break;
+ }
+ min = 10 * min + next;
+ Advance();
+ }
+ int max = 0;
+ if (current() == '}') {
+ max = min;
+ Advance();
+ } else if (current() == ',') {
+ Advance();
+ if (current() == '}') {
+ max = RegExpTree::kInfinity;
+ Advance();
+ } else {
+ while (IsDecimalDigit(current())) {
+ int next = current() - '0';
+ if (max > (RegExpTree::kInfinity - next) / 10) {
+ do {
+ Advance();
+ } while (IsDecimalDigit(current()));
+ max = RegExpTree::kInfinity;
+ break;
+ }
+ max = 10 * max + next;
+ Advance();
+ }
+ if (current() != '}') {
+ Reset(start);
+ return false;
+ }
+ Advance();
+ }
+ } else {
+ Reset(start);
+ return false;
+ }
+ *min_out = min;
+ *max_out = max;
+ return true;
+}
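
Both bounds of a {m,n} quantifier accumulate decimal digits and saturate at kInfinity rather than overflow. A standalone sketch of that accumulation (kInfinity stands in for RegExpTree::kInfinity):

    #include <cctype>
    #include <climits>
    #include <string>

    const int kInfinity = INT_MAX;  // illustrative stand-in

    int ParseBoundClamped(const std::string& s, size_t* pos) {
      int value = 0;
      while (*pos < s.size() &&
             std::isdigit(static_cast<unsigned char>(s[*pos]))) {
        int digit = s[(*pos)++] - '0';
        // Would 10 * value + digit exceed kInfinity? Then saturate.
        value = (value > (kInfinity - digit) / 10) ? kInfinity
                                                   : 10 * value + digit;
      }
      return value;
    }
    // Positioned after '{' in "{2,7}", the two calls yield min = 2 and
    // max = 7; an absurdly long bound like "99999999999999999999"
    // saturates to kInfinity.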
+
+
+uc32 RegExpParser::ParseOctalLiteral() {
+ DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
+ // For compatibility with some other browsers (not all), we parse
+ // up to three octal digits with a value below 256.
+ uc32 value = current() - '0';
+ Advance();
+ if ('0' <= current() && current() <= '7') {
+ value = value * 8 + current() - '0';
+ Advance();
+ if (value < 32 && '0' <= current() && current() <= '7') {
+ value = value * 8 + current() - '0';
+ Advance();
+ }
+ }
+ return value;
+}
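
ParseOctalLiteral() consumes at most three octal digits, taking a third digit only while the running value is still below 32, which keeps the result under 256. A standalone sketch:

    #include <string>

    int ParseOctal(const std::string& s, size_t* pos) {
      int value = s[(*pos)++] - '0';
      if (*pos < s.size() && s[*pos] >= '0' && s[*pos] <= '7') {
        value = value * 8 + (s[(*pos)++] - '0');
        if (value < 32 && *pos < s.size() && s[*pos] >= '0' && s[*pos] <= '7') {
          value = value * 8 + (s[(*pos)++] - '0');
        }
      }
      return value;
    }
    // "377" parses to 255 (the maximum); for "400" the parse stops at 32
    // after "40" and leaves the final '0' unconsumed.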
+
+
+bool RegExpParser::ParseHexEscape(int length, uc32* value) {
+ int start = position();
+ uc32 val = 0;
+ for (int i = 0; i < length; ++i) {
+ uc32 c = current();
+ int d = HexValue(c);
+ if (d < 0) {
+ Reset(start);
+ return false;
+ }
+ val = val * 16 + d;
+ Advance();
+ }
+ *value = val;
+ return true;
+}
+
+
+bool RegExpParser::ParseUnicodeEscape(uc32* value) {
+ // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
+ // allowed). In the latter case, the number of hex digits between { } is
+ // arbitrary. \ and u have already been read.
+ if (current() == '{' && unicode_) {
+ int start = position();
+ Advance();
+ if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
+ if (current() == '}') {
+ Advance();
+ return true;
+ }
+ }
+ Reset(start);
+ return false;
+ }
+ // \u but no {, or \u{...} escapes not allowed.
+ return ParseHexEscape(4, value);
+}
+
+
+bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
+ uc32 x = 0;
+ int d = HexValue(current());
+ if (d < 0) {
+ return false;
+ }
+ while (d >= 0) {
+ x = x * 16 + d;
+ if (x > max_value) {
+ return false;
+ }
+ Advance();
+ d = HexValue(current());
+ }
+ *value = x;
+ return true;
+}
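
ParseUnlimitedLengthHexNumber() accepts arbitrarily many hex digits but fails as soon as the accumulated value passes the ceiling (0x10ffff for \u{...} escapes). A standalone sketch:

    #include <cstdint>
    #include <string>

    int HexDigit(char c) {
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      return -1;
    }

    bool ParseBoundedHex(const std::string& s, size_t* pos,
                         uint32_t max_value, uint32_t* value) {
      uint32_t x = 0;
      int d = (*pos < s.size()) ? HexDigit(s[*pos]) : -1;
      if (d < 0) return false;  // at least one digit is required
      while (d >= 0) {
        x = x * 16 + d;
        if (x > max_value) return false;  // e.g. \u{110000} is rejected
        ++*pos;
        d = (*pos < s.size()) ? HexDigit(s[*pos]) : -1;
      }
      *value = x;
      return true;
    }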
+
+
+uc32 RegExpParser::ParseClassCharacterEscape() {
+ DCHECK(current() == '\\');
+ DCHECK(has_next() && !IsSpecialClassEscape(Next()));
+ Advance();
+ switch (current()) {
+ case 'b':
+ Advance();
+ return '\b';
+ // ControlEscape :: one of
+ // f n r t v
+ case 'f':
+ Advance();
+ return '\f';
+ case 'n':
+ Advance();
+ return '\n';
+ case 'r':
+ Advance();
+ return '\r';
+ case 't':
+ Advance();
+ return '\t';
+ case 'v':
+ Advance();
+ return '\v';
+ case 'c': {
+ uc32 controlLetter = Next();
+ uc32 letter = controlLetter & ~('A' ^ 'a');
+ // For compatibility with JSC, inside a character class
+ // we also accept digits and underscore as control characters.
+ if ((controlLetter >= '0' && controlLetter <= '9') ||
+ controlLetter == '_' || (letter >= 'A' && letter <= 'Z')) {
+ Advance(2);
+ // Control letters mapped to ASCII control characters in the range
+ // 0x00-0x1f.
+ return controlLetter & 0x1f;
+ }
+ // We match JSC in reading the backslash as a literal
+ // character instead of as starting an escape.
+ return '\\';
+ }
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ // For compatibility, we interpret a decimal escape that isn't
+ // a back reference (and therefore either \0 or not valid according
+ // to the specification) as a 1..3 digit octal character code.
+ return ParseOctalLiteral();
+ case 'x': {
+ Advance();
+ uc32 value;
+ if (ParseHexEscape(2, &value)) {
+ return value;
+ }
+ if (!unicode_) {
+ // If \x is not followed by a two-digit hexadecimal, treat it
+ // as an identity escape.
+ return 'x';
+ }
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
+ }
+ case 'u': {
+ Advance();
+ uc32 value;
+ if (ParseUnicodeEscape(&value)) {
+ return value;
+ }
+ if (!unicode_) {
+ return 'u';
+ }
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ ReportError(CStrVector("Invalid unicode escape"));
+ return 0;
+ }
+ default: {
+ uc32 result = current();
+ // If the 'u' flag is present, only syntax characters can be escaped, no
+ // other identity escapes are allowed. If the 'u' flag is not present, all
+ // identity escapes are allowed.
+ if (!unicode_ || IsSyntaxCharacter(result)) {
+ Advance();
+ return result;
+ }
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
+ }
+ }
+ return 0;
+}
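
The \cX handling maps a control letter of either case to an ASCII control character by masking with 0x1f. A standalone sketch:

    int ControlEscape(char control_letter) { return control_letter & 0x1f; }
    // ControlEscape('m') == ControlEscape('M') == 13, i.e. '\r'. Inside a
    // character class, digits and '_' are accepted too: '_' & 0x1f == 31.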
+
+
+CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
+ DCHECK_EQ(0, *char_class);
+ uc32 first = current();
+ if (first == '\\') {
+ switch (Next()) {
+ case 'w':
+ case 'W':
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S': {
+ *char_class = Next();
+ Advance(2);
+ return CharacterRange::Singleton(0); // Return dummy value.
+ }
+ case kEndMarker:
+ return ReportError(CStrVector("\\ at end of pattern"));
+ default:
+ uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
+ return CharacterRange::Singleton(c);
+ }
+ } else {
+ Advance();
+ return CharacterRange::Singleton(first);
+ }
+}
+
+
+static const uc16 kNoCharClass = 0;
+
+// Adds a range or a pre-defined character class to the character ranges.
+// If char_class is not kNoCharClass, it's interpreted as a class
+// escape (i.e., 's' means whitespace, from '\s').
+static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
+ uc16 char_class, CharacterRange range,
+ Zone* zone) {
+ if (char_class != kNoCharClass) {
+ CharacterRange::AddClassEscape(char_class, ranges, zone);
+ } else {
+ ranges->Add(range, zone);
+ }
+}
+
+
+RegExpTree* RegExpParser::ParseCharacterClass() {
+ static const char* kUnterminated = "Unterminated character class";
+ static const char* kRangeOutOfOrder = "Range out of order in character class";
+
+ DCHECK_EQ(current(), '[');
+ Advance();
+ bool is_negated = false;
+ if (current() == '^') {
+ is_negated = true;
+ Advance();
+ }
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ while (has_more() && current() != ']') {
+ uc16 char_class = kNoCharClass;
+ CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
+ if (current() == '-') {
+ Advance();
+ if (current() == kEndMarker) {
+ // If we reach the end we break out of the loop and let the
+ // following code report an error.
+ break;
+ } else if (current() == ']') {
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
+ break;
+ }
+ uc16 char_class_2 = kNoCharClass;
+ CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
+ if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
+ // Either end is an escaped character class. Treat the '-' verbatim.
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
+ AddRangeOrEscape(ranges, char_class_2, next, zone());
+ continue;
+ }
+ if (first.from() > next.to()) {
+ return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
+ }
+ ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
+ } else {
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ }
+ }
+ if (!has_more()) {
+ return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
+ }
+ Advance();
+ if (ranges->length() == 0) {
+ ranges->Add(CharacterRange::Everything(), zone());
+ is_negated = !is_negated;
+ }
+ return new (zone()) RegExpCharacterClass(ranges, is_negated);
+}
+
+
+#undef CHECK_FAILED
+
+
+bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
+ FlatStringReader* input, bool multiline,
+ bool unicode, RegExpCompileData* result) {
+ DCHECK(result != NULL);
+ RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
+ RegExpTree* tree = parser.ParsePattern();
+ if (parser.failed()) {
+ DCHECK(tree == NULL);
+ DCHECK(!result->error.is_null());
+ } else {
+ DCHECK(tree != NULL);
+ DCHECK(result->error.is_null());
+ if (FLAG_trace_regexp_parser) {
+ OFStream os(stdout);
+ tree->Print(os, zone);
+ os << "\n";
+ }
+ result->tree = tree;
+ int capture_count = parser.captures_started();
+ result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
+ result->contains_anchor = parser.contains_anchor();
+ result->capture_count = capture_count;
+ }
+ return !parser.failed();
+}
+
+
+RegExpBuilder::RegExpBuilder(Zone* zone)
+ : zone_(zone),
+ pending_empty_(false),
+ characters_(NULL),
+ terms_(),
+ alternatives_()
+#ifdef DEBUG
+ ,
+ last_added_(ADD_NONE)
+#endif
+{
+}
+
+
+void RegExpBuilder::FlushCharacters() {
+ pending_empty_ = false;
+ if (characters_ != NULL) {
+ RegExpTree* atom = new (zone()) RegExpAtom(characters_->ToConstVector());
+ characters_ = NULL;
+ text_.Add(atom, zone());
+ LAST(ADD_ATOM);
+ }
+}
+
+
+void RegExpBuilder::FlushText() {
+ FlushCharacters();
+ int num_text = text_.length();
+ if (num_text == 0) {
+ return;
+ } else if (num_text == 1) {
+ terms_.Add(text_.last(), zone());
+ } else {
+ RegExpText* text = new (zone()) RegExpText(zone());
+ for (int i = 0; i < num_text; i++) text_.Get(i)->AppendToText(text, zone());
+ terms_.Add(text, zone());
+ }
+ text_.Clear();
+}
+
+
+void RegExpBuilder::AddCharacter(uc16 c) {
+ pending_empty_ = false;
+ if (characters_ == NULL) {
+ characters_ = new (zone()) ZoneList<uc16>(4, zone());
+ }
+ characters_->Add(c, zone());
+ LAST(ADD_CHAR);
+}
+
+
+void RegExpBuilder::AddUnicodeCharacter(uc32 c) {
+ if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ ZoneList<uc16> surrogate_pair(2, zone());
+ surrogate_pair.Add(unibrow::Utf16::LeadSurrogate(c), zone());
+ surrogate_pair.Add(unibrow::Utf16::TrailSurrogate(c), zone());
+ RegExpAtom* atom = new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
+ AddAtom(atom);
+ } else {
+ AddCharacter(static_cast<uc16>(c));
+ }
+}
+
+
+void RegExpBuilder::AddEmpty() { pending_empty_ = true; }
+
+
+void RegExpBuilder::AddAtom(RegExpTree* term) {
+ if (term->IsEmpty()) {
+ AddEmpty();
+ return;
+ }
+ if (term->IsTextElement()) {
+ FlushCharacters();
+ text_.Add(term, zone());
+ } else {
+ FlushText();
+ terms_.Add(term, zone());
+ }
+ LAST(ADD_ATOM);
+}
+
+
+void RegExpBuilder::AddAssertion(RegExpTree* assert) {
+ FlushText();
+ terms_.Add(assert, zone());
+ LAST(ADD_ASSERT);
+}
+
+
+void RegExpBuilder::NewAlternative() { FlushTerms(); }
+
+
+void RegExpBuilder::FlushTerms() {
+ FlushText();
+ int num_terms = terms_.length();
+ RegExpTree* alternative;
+ if (num_terms == 0) {
+ alternative = new (zone()) RegExpEmpty();
+ } else if (num_terms == 1) {
+ alternative = terms_.last();
+ } else {
+ alternative = new (zone()) RegExpAlternative(terms_.GetList(zone()));
+ }
+ alternatives_.Add(alternative, zone());
+ terms_.Clear();
+ LAST(ADD_NONE);
+}
+
+
+RegExpTree* RegExpBuilder::ToRegExp() {
+ FlushTerms();
+ int num_alternatives = alternatives_.length();
+ if (num_alternatives == 0) return new (zone()) RegExpEmpty();
+ if (num_alternatives == 1) return alternatives_.last();
+ return new (zone()) RegExpDisjunction(alternatives_.GetList(zone()));
+}
+
+
+void RegExpBuilder::AddQuantifierToAtom(
+ int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
+ if (pending_empty_) {
+ pending_empty_ = false;
+ return;
+ }
+ RegExpTree* atom;
+ if (characters_ != NULL) {
+ DCHECK(last_added_ == ADD_CHAR);
+ // Last atom was character.
+ Vector<const uc16> char_vector = characters_->ToConstVector();
+ int num_chars = char_vector.length();
+ if (num_chars > 1) {
+ Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
+ text_.Add(new (zone()) RegExpAtom(prefix), zone());
+ char_vector = char_vector.SubVector(num_chars - 1, num_chars);
+ }
+ characters_ = NULL;
+ atom = new (zone()) RegExpAtom(char_vector);
+ FlushText();
+ } else if (text_.length() > 0) {
+ DCHECK(last_added_ == ADD_ATOM);
+ atom = text_.RemoveLast();
+ FlushText();
+ } else if (terms_.length() > 0) {
+ DCHECK(last_added_ == ADD_ATOM);
+ atom = terms_.RemoveLast();
+ if (atom->max_match() == 0) {
+ // Guaranteed to only match an empty string.
+ LAST(ADD_TERM);
+ if (min == 0) {
+ return;
+ }
+ terms_.Add(atom, zone());
+ return;
+ }
+ } else {
+ // Only call immediately after adding an atom or character!
+ UNREACHABLE();
+ return;
+ }
+ terms_.Add(new (zone()) RegExpQuantifier(min, max, quantifier_type, atom),
+ zone());
+ LAST(ADD_TERM);
+}
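
AddQuantifierToAtom() splits the pending characters so a quantifier binds only to the last one: /abc+/ is the plain atom "ab" followed by a quantified "c". A standalone sketch of the split:

    #include <string>
    #include <utility>

    // Returns {unquantified prefix, single character the quantifier binds to}.
    std::pair<std::string, std::string> SplitForQuantifier(
        const std::string& chars) {
      if (chars.size() <= 1) return {"", chars};
      return {chars.substr(0, chars.size() - 1),
              chars.substr(chars.size() - 1)};
    }
    // SplitForQuantifier("abc") == {"ab", "c"}: the quantifier attaches to
    // "c" while "ab" stays a plain atom.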
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/regexp/regexp-parser.h b/chromium/v8/src/regexp/regexp-parser.h
new file mode 100644
index 00000000000..af9b765fba1
--- /dev/null
+++ b/chromium/v8/src/regexp/regexp-parser.h
@@ -0,0 +1,277 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_PARSER_H_
+#define V8_REGEXP_REGEXP_PARSER_H_
+
+#include "src/objects.h"
+#include "src/regexp/regexp-ast.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+struct RegExpCompileData;
+
+
+// A BufferedZoneList is an automatically growing list, just like (and backed
+// by) a ZoneList, that is optimized for the case of adding and removing
+// a single element. The last element added is stored outside the backing list,
+// and if no more than one element is ever added, the ZoneList isn't even
+// allocated.
+// Elements must not be NULL pointers.
+template <typename T, int initial_size>
+class BufferedZoneList {
+ public:
+ BufferedZoneList() : list_(NULL), last_(NULL) {}
+
+ // Adds an element at the end of the list. The element is buffered and can
+ // be read using last() or removed using RemoveLast() until the next call
+ // to Add(), RemoveLast(), or GetList().
+ void Add(T* value, Zone* zone) {
+ if (last_ != NULL) {
+ if (list_ == NULL) {
+ list_ = new (zone) ZoneList<T*>(initial_size, zone);
+ }
+ list_->Add(last_, zone);
+ }
+ last_ = value;
+ }
+
+ T* last() {
+ DCHECK(last_ != NULL);
+ return last_;
+ }
+
+ T* RemoveLast() {
+ DCHECK(last_ != NULL);
+ T* result = last_;
+ if ((list_ != NULL) && (list_->length() > 0))
+ last_ = list_->RemoveLast();
+ else
+ last_ = NULL;
+ return result;
+ }
+
+ T* Get(int i) {
+ DCHECK((0 <= i) && (i < length()));
+ if (list_ == NULL) {
+ DCHECK_EQ(0, i);
+ return last_;
+ } else {
+ if (i == list_->length()) {
+ DCHECK(last_ != NULL);
+ return last_;
+ } else {
+ return list_->at(i);
+ }
+ }
+ }
+
+ void Clear() {
+ list_ = NULL;
+ last_ = NULL;
+ }
+
+ int length() {
+ int length = (list_ == NULL) ? 0 : list_->length();
+ return length + ((last_ == NULL) ? 0 : 1);
+ }
+
+ ZoneList<T*>* GetList(Zone* zone) {
+ if (list_ == NULL) {
+ list_ = new (zone) ZoneList<T*>(initial_size, zone);
+ }
+ if (last_ != NULL) {
+ list_->Add(last_, zone);
+ last_ = NULL;
+ }
+ return list_;
+ }
+
+ private:
+ ZoneList<T*>* list_;
+ T* last_;
+};
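
BufferedZoneList keeps the most recent element outside the backing list, so a single add/remove pair never touches the list at all. A standalone sketch of the same idea over std::vector (illustrative, not the V8 type):

    #include <cassert>
    #include <vector>

    template <typename T>
    class BufferedList {
     public:
      void Add(T value) {
        if (has_last_) backing_.push_back(last_);  // spill the buffered slot
        last_ = value;
        has_last_ = true;
      }
      T RemoveLast() {
        assert(has_last_);
        T result = last_;
        if (!backing_.empty()) {
          last_ = backing_.back();
          backing_.pop_back();
        } else {
          has_last_ = false;
        }
        return result;
      }
      int length() const {
        return static_cast<int>(backing_.size()) + (has_last_ ? 1 : 0);
      }

     private:
      std::vector<T> backing_;
      T last_{};
      bool has_last_ = false;
    };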
+
+
+// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
+class RegExpBuilder : public ZoneObject {
+ public:
+ explicit RegExpBuilder(Zone* zone);
+ void AddCharacter(uc16 character);
+ void AddUnicodeCharacter(uc32 character);
+ // "Adds" an empty expression. Does nothing except consume a
+ // following quantifier.
+ void AddEmpty();
+ void AddAtom(RegExpTree* tree);
+ void AddAssertion(RegExpTree* tree);
+ void NewAlternative(); // '|'
+ void AddQuantifierToAtom(int min, int max,
+ RegExpQuantifier::QuantifierType type);
+ RegExpTree* ToRegExp();
+
+ private:
+ void FlushCharacters();
+ void FlushText();
+ void FlushTerms();
+ Zone* zone() const { return zone_; }
+
+ Zone* zone_;
+ bool pending_empty_;
+ ZoneList<uc16>* characters_;
+ BufferedZoneList<RegExpTree, 2> terms_;
+ BufferedZoneList<RegExpTree, 2> text_;
+ BufferedZoneList<RegExpTree, 2> alternatives_;
+#ifdef DEBUG
+ enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
+#define LAST(x) last_added_ = x;
+#else
+#define LAST(x)
+#endif
+};
+
+
+class RegExpParser BASE_EMBEDDED {
+ public:
+ RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
+ bool unicode, Isolate* isolate, Zone* zone);
+
+ static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
+ bool multiline, bool unicode,
+ RegExpCompileData* result);
+
+ RegExpTree* ParsePattern();
+ RegExpTree* ParseDisjunction();
+ RegExpTree* ParseGroup();
+ RegExpTree* ParseCharacterClass();
+
+ // Parses a {...,...} quantifier and stores the range in the given
+ // out parameters.
+ bool ParseIntervalQuantifier(int* min_out, int* max_out);
+
+ // Parses and returns a single escaped character. The character
+ // must not be 'b' or 'B' since they are usually handled specially.
+ uc32 ParseClassCharacterEscape();
+
+ // Checks whether the next characters form a hexadecimal number of the
+ // given length, and sets the value if they do.
+ bool ParseHexEscape(int length, uc32* value);
+ bool ParseUnicodeEscape(uc32* value);
+ bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
+
+ uc32 ParseOctalLiteral();
+
+ // Tries to parse the input as a back reference. If successful it
+ // stores the result in the output parameter and returns true. If
+ // it fails, it pushes back the characters read so the same characters
+ // can be reparsed.
+ bool ParseBackReferenceIndex(int* index_out);
+
+ CharacterRange ParseClassAtom(uc16* char_class);
+ RegExpTree* ReportError(Vector<const char> message);
+ void Advance();
+ void Advance(int dist);
+ void Reset(int pos);
+
+ // Reports whether the pattern might be used as a literal search string.
+ // Only use if the result of the parse is a single atom node.
+ bool simple();
+ bool contains_anchor() { return contains_anchor_; }
+ void set_contains_anchor() { contains_anchor_ = true; }
+ int captures_started() { return captures_started_; }
+ int position() { return next_pos_ - 1; }
+ bool failed() { return failed_; }
+
+ static bool IsSyntaxCharacter(uc32 c);
+
+ static const int kMaxCaptures = 1 << 16;
+ static const uc32 kEndMarker = (1 << 21);
+
+ private:
+ enum SubexpressionType {
+ INITIAL,
+ CAPTURE, // All positive values represent captures.
+ POSITIVE_LOOKAROUND,
+ NEGATIVE_LOOKAROUND,
+ GROUPING
+ };
+
+ class RegExpParserState : public ZoneObject {
+ public:
+ RegExpParserState(RegExpParserState* previous_state,
+ SubexpressionType group_type,
+ RegExpLookaround::Type lookaround_type,
+ int disjunction_capture_index, Zone* zone)
+ : previous_state_(previous_state),
+ builder_(new (zone) RegExpBuilder(zone)),
+ group_type_(group_type),
+ lookaround_type_(lookaround_type),
+ disjunction_capture_index_(disjunction_capture_index) {}
+ // Parser state of containing expression, if any.
+ RegExpParserState* previous_state() { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != NULL; }
+ // RegExpBuilder building this regexp's AST.
+ RegExpBuilder* builder() { return builder_; }
+ // Type of regexp being parsed (parenthesized group or entire regexp).
+ SubexpressionType group_type() { return group_type_; }
+ // Lookahead or Lookbehind.
+ RegExpLookaround::Type lookaround_type() { return lookaround_type_; }
+ // Index in captures array of first capture in this sub-expression, if any.
+ // Also the capture index of this sub-expression itself, if group_type
+ // is CAPTURE.
+ int capture_index() { return disjunction_capture_index_; }
+
+ // Check whether the parser is inside a capture group with the given index.
+ bool IsInsideCaptureGroup(int index);
+
+ private:
+ // Linked list implementation of stack of states.
+ RegExpParserState* previous_state_;
+ // Builder for the stored disjunction.
+ RegExpBuilder* builder_;
+ // Stored disjunction type (capture, look-ahead or grouping), if any.
+ SubexpressionType group_type_;
+ // Stored read direction.
+ RegExpLookaround::Type lookaround_type_;
+ // Stored disjunction's capture index (if any).
+ int disjunction_capture_index_;
+ };
+
+ // Returns the RegExpCapture object for the given 1-based index,
+ // allocating it if necessary.
+ RegExpCapture* GetCapture(int index);
+
+ Isolate* isolate() { return isolate_; }
+ Zone* zone() const { return zone_; }
+
+ uc32 current() { return current_; }
+ bool has_more() { return has_more_; }
+ bool has_next() { return next_pos_ < in()->length(); }
+ uc32 Next();
+ FlatStringReader* in() { return in_; }
+ void ScanForCaptures();
+
+ Isolate* isolate_;
+ Zone* zone_;
+ Handle<String>* error_;
+ ZoneList<RegExpCapture*>* captures_;
+ FlatStringReader* in_;
+ uc32 current_;
+ int next_pos_;
+ int captures_started_;
+ // The capture count is only valid after we have scanned for captures.
+ int capture_count_;
+ bool has_more_;
+ bool multiline_;
+ bool unicode_;
+ bool simple_;
+ bool contains_anchor_;
+ bool is_scanned_for_captures_;
+ bool failed_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_PARSER_H_
diff --git a/chromium/v8/src/regexp/regexp-stack.h b/chromium/v8/src/regexp/regexp-stack.h
index 9a6394e198b..aea46cf6735 100644
--- a/chromium/v8/src/regexp/regexp-stack.h
+++ b/chromium/v8/src/regexp/regexp-stack.h
@@ -124,6 +124,7 @@ class RegExpStack {
DISALLOW_COPY_AND_ASSIGN(RegExpStack);
};
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_REGEXP_STACK_H_
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 969edc1b3be..286f159cc8e 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -64,7 +64,8 @@ namespace internal {
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - success counter (only useful for global regexp to count matches)
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
* - At start of string (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
@@ -94,7 +95,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(isolate, NULL, kRegExpCodeSize),
+ masm_(isolate, NULL, kRegExpCodeSize, CodeObjectRequired::kYes),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),
@@ -171,25 +172,16 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ leap(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpp(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rdi, -char_size()));
+ __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ leap(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpp(rax, Operand(rbp, kInputStart));
+void RegExpMacroAssemblerX64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ leap(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
+ __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -211,8 +203,7 @@ void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
@@ -222,23 +213,25 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// rdx = Start offset of capture.
// rbx = Length of capture
- // If length is negative, this code will fail (it's a symptom of a partial or
- // illegal capture where start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp, and we must not generate code that can cause
- // this condition).
-
- // If length is zero, either the capture is empty or it is nonparticipating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// -----------------------
// rdx - Start of capture
// rbx - length of capture
// Check that there are sufficient characters left in the input.
- __ movl(rax, rdi);
- __ addl(rax, rbx);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ movl(rax, Operand(rbp, kStringStartMinusOne));
+ __ addl(rax, rbx);
+ __ cmpl(rdi, rax);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ movl(rax, rdi);
+ __ addl(rax, rbx);
+ BranchOrBacktrack(greater, on_no_match);
+ }
if (mode_ == LATIN1) {
Label loop_increment;
@@ -248,6 +241,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ leap(r9, Operand(rsi, rdx, times_1, 0));
__ leap(r11, Operand(rsi, rdi, times_1, 0));
+ if (read_backward) {
+ __ subp(r11, rbx); // Offset by length when matching backwards.
+ }
__ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
@@ -290,6 +286,11 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Compute new value of character position after the matched part.
__ movp(rdi, r11);
__ subq(rdi, rsi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ addq(rdi, register_location(start_reg));
+ __ subq(rdi, register_location(start_reg + 1));
+ }
} else {
DCHECK(mode_ == UC16);
// Save important/volatile registers before calling C function.
@@ -313,6 +314,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ leap(rdx, Operand(rsi, rdi, times_1, 0));
+ if (read_backward) {
+ __ subq(rdx, rbx);
+ }
// Set byte_length.
__ movp(r8, rbx);
// Isolate.
@@ -324,6 +328,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ movp(rsi, rax);
+ if (read_backward) {
+ __ subq(rsi, rbx);
+ }
// Set byte_length.
__ movp(rdx, rbx);
// Isolate.
@@ -349,17 +356,21 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
+ // On success, advance position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
- __ addq(rdi, rbx);
+ if (read_backward) {
+ __ subq(rdi, rbx);
+ } else {
+ __ addq(rdi, rbx);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerX64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
// Find length of back-referenced capture.
@@ -367,25 +378,31 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
__ subp(rax, rdx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp).
- __ Check(greater_equal, kInvalidCaptureReferenced);
-
- // Succeed on empty capture (including non-participating capture)
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// -----------------------
// rdx - Start of capture
// rax - length of capture
-
// Check that there are sufficient characters left in the input.
- __ movl(rbx, rdi);
- __ addl(rbx, rax);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ movl(rbx, Operand(rbp, kStringStartMinusOne));
+ __ addl(rbx, rax);
+ __ cmpl(rdi, rbx);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ movl(rbx, rdi);
+ __ addl(rbx, rax);
+ BranchOrBacktrack(greater, on_no_match);
+ }
// Compute pointers to match string and capture string
__ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ if (read_backward) {
+ __ subq(rbx, rax); // Offset by length when matching backwards.
+ }
__ addp(rdx, rsi); // Start of capture.
__ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
@@ -416,6 +433,11 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Set current character position to position after match.
__ movp(rdi, rbx);
__ subq(rdi, rsi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ addq(rdi, register_location(start_reg));
+ __ subq(rdi, register_location(start_reg + 1));
+ }
__ bind(&fallthrough);
}
@@ -682,7 +704,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#endif
__ Push(Immediate(0)); // Number of successful matches in a global regexp.
- __ Push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -732,7 +754,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movp(Operand(rbp, kInputStartMinusOne), rax);
+ __ movp(Operand(rbp, kStringStartMinusOne), rax);
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
@@ -835,7 +857,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movp(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -1018,10 +1040,13 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1124,7 +1149,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ movp(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ movp(register_location(reg), rax);
}
@@ -1205,8 +1230,14 @@ Operand RegExpMacroAssemblerX64::register_location(int register_index) {
void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmpl(rdi, Immediate(-cp_offset * char_size()));
- BranchOrBacktrack(greater_equal, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmpl(rdi, Immediate(-cp_offset * char_size()));
+ BranchOrBacktrack(greater_equal, on_outside_input);
+ } else {
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
+ __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
+ BranchOrBacktrack(less_equal, on_outside_input);
+ }
}
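
Both back-reference checks above reduce the "enough input left?" question to one comparison against the "string start - 1" slot, because the current position in rdi is kept as a negative byte offset from the string end. A plain-integer sketch of that arithmetic (assumed offsets, not the emitted code; character size 1 as in LATIN1 mode):

    #include <cassert>

    // current_pos models rdi: a negative byte offset from the string end.
    // string_start_minus_one models the kStringStartMinusOne frame slot.
    bool CanReadBackward(int current_pos, int capture_length,
                         int string_start_minus_one) {
      // Mirrors: rax = kStringStartMinusOne + rbx; backtrack if rdi <= rax.
      return current_pos > string_start_minus_one + capture_length;
    }

    bool CanReadForward(int current_pos, int capture_length) {
      // Mirrors: rax = rdi + rbx; backtrack if rax > 0 (past the string end).
      return current_pos + capture_length <= 0;
    }

    int main() {
      // A 10-character input: end is offset 0, start is -10, start - 1 is -11.
      assert(CanReadForward(-10, 10));       // whole string fits
      assert(!CanReadForward(-3, 5));        // would run past the end
      assert(CanReadBackward(-2, 4, -11));   // stays inside the input
      assert(!CanReadBackward(-8, 4, -11));  // would run before the start
      return 0;
    }
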
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index d690dc19749..257804739f0 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -34,9 +34,11 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -171,10 +173,10 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -276,6 +278,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index c6968dc1973..01d0b249b6f 100644
--- a/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -53,7 +53,8 @@ namespace internal {
* - backup of caller ebx
* - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
* - register 0 ebp[-4] (only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -80,7 +81,8 @@ RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -156,25 +158,16 @@ void RegExpMacroAssemblerX87::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX87::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+ __ lea(eax, Operand(edi, -char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerX87::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+void RegExpMacroAssemblerX87::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -196,26 +189,28 @@ void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
__ sub(ebx, edx); // Length of capture.
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or at a point earlier than
- // the start of the capture.
- BranchOrBacktrack(less, on_no_match);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ add(eax, ebx);
+ __ cmp(edi, eax);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -228,6 +223,9 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
__ add(edx, esi); // Start of capture
__ add(edi, esi); // Start of text to match against capture.
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ add(ebx, edi); // End of text to match against capture.
Label loop;
@@ -278,6 +276,11 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
__ add(esp, Immediate(kPointerSize));
// Compute new value of character position after the matched part.
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
} else {
DCHECK(mode_ == UC16);
// Save registers before calling C function.
@@ -304,6 +307,9 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
// Found by adding negative string-end offset of current position (edi)
// to end of string.
__ add(edi, esi);
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
@@ -325,16 +331,20 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ or_(eax, eax);
BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, ebx);
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(edi, ebx);
+ } else {
+ __ add(edi, ebx);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerX87::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerX87::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
Label fail;
@@ -343,22 +353,33 @@ void RegExpMacroAssemblerX87::CheckNotBackReference(
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
__ sub(eax, edx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(ebx, Operand(ebp, kStringStartMinusOne));
+ __ add(ebx, eax);
+ __ cmp(edi, ebx);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(ebx, edi);
+ __ add(ebx, eax);
+ BranchOrBacktrack(greater, on_no_match);
+ }
// Save register to make it available below.
__ push(backtrack_stackpointer());
// Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
__ add(edx, esi); // Start of capture.
+ __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
+ if (read_backward) {
+ __ sub(ebx, eax); // Offset by length when matching backwards.
+ }
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
@@ -389,6 +410,11 @@ void RegExpMacroAssemblerX87::CheckNotBackReference(
// Move current character position to position after match.
__ mov(edi, ecx);
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
@@ -634,7 +660,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -684,7 +710,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
+ __ mov(Operand(ebp, kStringStartMinusOne), eax);
#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
@@ -767,7 +793,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
}
if (global()) {
- // Restart matching if the regular expression is flagged as global.
+ // Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ inc(Operand(ebp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
@@ -784,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
Immediate(num_saved_registers_ * kPointerSize));
// Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -944,10 +970,13 @@ void RegExpMacroAssemblerX87::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1031,7 +1060,7 @@ void RegExpMacroAssemblerX87::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ mov(register_location(reg), eax);
}
@@ -1100,8 +1129,14 @@ Operand RegExpMacroAssemblerX87::register_location(int register_index) {
void RegExpMacroAssemblerX87::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmp(edi, -cp_offset * char_size());
+ BranchOrBacktrack(greater_equal, on_outside_input);
+ } else {
+ __ lea(eax, Operand(edi, cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ BranchOrBacktrack(less_equal, on_outside_input);
+ }
}
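
The two-sided CheckPosition above is what makes negative cp_offset loads (look-behind) safe: non-negative offsets are still bounded by the string end, while negative offsets are bounded by the "string start - 1" slot. The same condition written as ordinary C++ (a sketch of the arithmetic using the x87 register names from the code above, not the generated code):

    #include <cassert>

    // current_pos models edi (negative byte offset from the string end);
    // string_start_minus_one models the kStringStartMinusOne frame slot.
    bool PositionInRange(int cp_offset, int current_pos,
                         int string_start_minus_one) {
      if (cp_offset >= 0) {
        // Mirrors: cmp edi, -cp_offset; backtrack if greater_equal.
        return current_pos + cp_offset < 0;
      }
      // Mirrors: lea eax, [edi + cp_offset]; cmp eax, slot;
      // backtrack if less_equal.
      return current_pos + cp_offset > string_start_minus_one;
    }

    int main() {
      // A 10-character input: end is offset 0, start is -10, start - 1 is -11.
      assert(PositionInRange(-1, -9, -11));    // one character of look-behind
      assert(!PositionInRange(-1, -10, -11));  // would read before the start
      assert(!PositionInRange(0, 0, -11));     // already at the end of input
      return 0;
    }
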
diff --git a/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.h b/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.h
index f636ca08ce5..c95541224fc 100644
--- a/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.h
+++ b/chromium/v8/src/regexp/x87/regexp-macro-assembler-x87.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -116,9 +118,9 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
@@ -196,6 +198,7 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
};
#endif // V8_INTERPRETED_REGEXP
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
diff --git a/chromium/v8/src/register-configuration.cc b/chromium/v8/src/register-configuration.cc
new file mode 100644
index 00000000000..6b1655a81bb
--- /dev/null
+++ b/chromium/v8/src/register-configuration.cc
@@ -0,0 +1,162 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/register-configuration.h"
+#include "src/globals.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+#define REGISTER_COUNT(R) 1 +
+static const int kMaxAllocatableGeneralRegisterCount =
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
+static const int kMaxAllocatableDoubleRegisterCount =
+ ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT)0;
+
+static const int kAllocatableGeneralCodes[] = {
+#define REGISTER_CODE(R) Register::kCode_##R,
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+
+static const int kAllocatableDoubleCodes[] = {
+#define REGISTER_CODE(R) DoubleRegister::kCode_##R,
+ ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+
+static const char* const kGeneralRegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+ GENERAL_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
+static const char* const kDoubleRegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+ DOUBLE_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
+STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
+ Register::kNumRegisters);
+STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
+ DoubleRegister::kMaxNumRegisters);
+
+class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
+ public:
+ explicit ArchDefaultRegisterConfiguration(CompilerSelector compiler)
+ : RegisterConfiguration(Register::kNumRegisters,
+ DoubleRegister::kMaxNumRegisters,
+#if V8_TARGET_ARCH_IA32
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_X87
+ kMaxAllocatableGeneralRegisterCount,
+ compiler == TURBOFAN
+ ? 1
+ : kMaxAllocatableDoubleRegisterCount,
+ compiler == TURBOFAN
+ ? 1
+ : kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_X64
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_ARM
+ FLAG_enable_embedded_constant_pool
+ ? (kMaxAllocatableGeneralRegisterCount - 1)
+ : kMaxAllocatableGeneralRegisterCount,
+ CpuFeatures::IsSupported(VFP32DREGS)
+ ? kMaxAllocatableDoubleRegisterCount
+ : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
+ REGISTER_COUNT)0),
+ ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
+ REGISTER_COUNT)0,
+#elif V8_TARGET_ARCH_ARM64
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_MIPS
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_MIPS64
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#elif V8_TARGET_ARCH_PPC
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+#else
+#error Unsupported target architecture.
+#endif
+ kAllocatableGeneralCodes, kAllocatableDoubleCodes,
+ kGeneralRegisterNames, kDoubleRegisterNames) {
+ }
+};
+
+
+template <RegisterConfiguration::CompilerSelector compiler>
+struct RegisterConfigurationInitializer {
+ static void Construct(ArchDefaultRegisterConfiguration* config) {
+ new (config) ArchDefaultRegisterConfiguration(compiler);
+ }
+};
+
+static base::LazyInstance<
+ ArchDefaultRegisterConfiguration,
+ RegisterConfigurationInitializer<RegisterConfiguration::CRANKSHAFT>>::type
+ kDefaultRegisterConfigurationForCrankshaft = LAZY_INSTANCE_INITIALIZER;
+
+
+static base::LazyInstance<
+ ArchDefaultRegisterConfiguration,
+ RegisterConfigurationInitializer<RegisterConfiguration::TURBOFAN>>::type
+ kDefaultRegisterConfigurationForTurboFan = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+
+const RegisterConfiguration* RegisterConfiguration::ArchDefault(
+ CompilerSelector compiler) {
+ return compiler == TURBOFAN
+ ? &kDefaultRegisterConfigurationForTurboFan.Get()
+ : &kDefaultRegisterConfigurationForCrankshaft.Get();
+}
+
+
+RegisterConfiguration::RegisterConfiguration(
+ int num_general_registers, int num_double_registers,
+ int num_allocatable_general_registers, int num_allocatable_double_registers,
+ int num_allocatable_aliased_double_registers,
+ const int* allocatable_general_codes, const int* allocatable_double_codes,
+ const char* const* general_register_names,
+ const char* const* double_register_names)
+ : num_general_registers_(num_general_registers),
+ num_double_registers_(num_double_registers),
+ num_allocatable_general_registers_(num_allocatable_general_registers),
+ num_allocatable_double_registers_(num_allocatable_double_registers),
+ num_allocatable_aliased_double_registers_(
+ num_allocatable_aliased_double_registers),
+ allocatable_general_codes_mask_(0),
+ allocatable_double_codes_mask_(0),
+ allocatable_general_codes_(allocatable_general_codes),
+ allocatable_double_codes_(allocatable_double_codes),
+ general_register_names_(general_register_names),
+ double_register_names_(double_register_names) {
+ for (int i = 0; i < num_allocatable_general_registers_; ++i) {
+ allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
+ }
+ for (int i = 0; i < num_allocatable_double_registers_; ++i) {
+ allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
+ }
+}
+
+#undef REGISTER_COUNT
+
+} // namespace internal
+} // namespace v8
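
The REGISTER_COUNT macro above is a compile-time counting trick: applied through an X-macro register list, every entry expands to "1 +" and the trailing literal 0 terminates the sum. A standalone illustration with a made-up three-register list (DEMO_REGISTERS is hypothetical):

    #include <cstdio>

    // Hypothetical register list in the same X-macro style as the file above.
    #define DEMO_REGISTERS(V) V(rax) V(rbx) V(rcx)

    #define REGISTER_COUNT(R) 1 +
    static const int kDemoRegisterCount = DEMO_REGISTERS(REGISTER_COUNT)0;
    #undef REGISTER_COUNT

    #define REGISTER_NAME(R) #R,
    static const char* const kDemoNames[] = {DEMO_REGISTERS(REGISTER_NAME)};
    #undef REGISTER_NAME

    int main() {
      // The count expands to: 1 + 1 + 1 + 0 == 3.
      std::printf("%d registers, first is %s\n", kDemoRegisterCount,
                  kDemoNames[0]);
      return 0;
    }
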
diff --git a/chromium/v8/src/register-configuration.h b/chromium/v8/src/register-configuration.h
new file mode 100644
index 00000000000..8ad1d783049
--- /dev/null
+++ b/chromium/v8/src/register-configuration.h
@@ -0,0 +1,95 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
+#define V8_COMPILER_REGISTER_CONFIGURATION_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// An architecture-independent representation of the sets of registers
+// available for instruction creation.
+class RegisterConfiguration {
+ public:
+ // Define the optimized compiler selector for register configuration
+ // selection.
+ //
+ // TODO(X87): This distinction in RegisterConfigurations is temporary
+ // until x87 TF supports all of the registers that Crankshaft does.
+ enum CompilerSelector { CRANKSHAFT, TURBOFAN };
+
+ // Architecture independent maxes.
+ static const int kMaxGeneralRegisters = 32;
+ static const int kMaxDoubleRegisters = 32;
+
+ static const RegisterConfiguration* ArchDefault(CompilerSelector compiler);
+
+ RegisterConfiguration(int num_general_registers, int num_double_registers,
+ int num_allocatable_general_registers,
+ int num_allocatable_double_registers,
+ int num_allocatable_aliased_double_registers,
+ const int* allocatable_general_codes,
+ const int* allocatable_double_codes,
+ char const* const* general_names,
+ char const* const* double_names);
+
+ int num_general_registers() const { return num_general_registers_; }
+ int num_double_registers() const { return num_double_registers_; }
+ int num_allocatable_general_registers() const {
+ return num_allocatable_general_registers_;
+ }
+ int num_allocatable_double_registers() const {
+ return num_allocatable_double_registers_;
+ }
+ // TODO(turbofan): This is a temporary work-around required because our
+ // register allocator does not yet support the aliasing of single/double
+ // registers on ARM.
+ int num_allocatable_aliased_double_registers() const {
+ return num_allocatable_aliased_double_registers_;
+ }
+ int32_t allocatable_general_codes_mask() const {
+ return allocatable_general_codes_mask_;
+ }
+ int32_t allocatable_double_codes_mask() const {
+ return allocatable_double_codes_mask_;
+ }
+ int GetAllocatableGeneralCode(int index) const {
+ return allocatable_general_codes_[index];
+ }
+ int GetAllocatableDoubleCode(int index) const {
+ return allocatable_double_codes_[index];
+ }
+ const char* GetGeneralRegisterName(int code) const {
+ return general_register_names_[code];
+ }
+ const char* GetDoubleRegisterName(int code) const {
+ return double_register_names_[code];
+ }
+ const int* allocatable_general_codes() const {
+ return allocatable_general_codes_;
+ }
+ const int* allocatable_double_codes() const {
+ return allocatable_double_codes_;
+ }
+
+ private:
+ const int num_general_registers_;
+ const int num_double_registers_;
+ int num_allocatable_general_registers_;
+ int num_allocatable_double_registers_;
+ int num_allocatable_aliased_double_registers_;
+ int32_t allocatable_general_codes_mask_;
+ int32_t allocatable_double_codes_mask_;
+ const int* allocatable_general_codes_;
+ const int* allocatable_double_codes_;
+ char const* const* general_register_names_;
+ char const* const* double_register_names_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_REGISTER_CONFIGURATION_H_
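
The constructor in the .cc file derives allocatable_general_codes_mask_ and allocatable_double_codes_mask_ from the code arrays so that "is this register allocatable?" becomes a single bit test. The same derivation in isolation (the codes below are made up, not any architecture's real set):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Made-up allocatable register codes, as passed to the constructor.
      const int codes[] = {0, 2, 3, 7};
      int32_t mask = 0;
      for (int code : codes) mask |= (1 << code);  // same loop as the .cc
      assert(mask == 0x8D);            // bits 0, 2, 3 and 7 set
      assert((mask & (1 << 3)) != 0);  // code 3 is allocatable
      assert((mask & (1 << 5)) == 0);  // code 5 is not
      return 0;
    }
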
diff --git a/chromium/v8/src/rewriter.cc b/chromium/v8/src/rewriter.cc
deleted file mode 100644
index d88e1199f82..00000000000
--- a/chromium/v8/src/rewriter.cc
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/rewriter.h"
-
-#include "src/ast.h"
-#include "src/parser.h"
-#include "src/scopes.h"
-
-namespace v8 {
-namespace internal {
-
-class Processor: public AstVisitor {
- public:
- Processor(Isolate* isolate, Variable* result,
- AstValueFactory* ast_value_factory)
- : result_(result),
- result_assigned_(false),
- is_set_(false),
- in_try_(false),
- factory_(ast_value_factory) {
- InitializeAstVisitor(isolate, ast_value_factory->zone());
- }
-
- virtual ~Processor() { }
-
- void Process(ZoneList<Statement*>* statements);
- bool result_assigned() const { return result_assigned_; }
-
- AstNodeFactory* factory() { return &factory_; }
-
- private:
- Variable* result_;
-
- // We are not tracking result usage via the result_'s use
- // counts (we leave the accurate computation to the
- // usage analyzer). Instead we simple remember if
- // there was ever an assignment to result_.
- bool result_assigned_;
-
- // To avoid storing to .result all the time, we eliminate some of
- // the stores by keeping track of whether or not we're sure .result
- // will be overwritten anyway. This is a bit more tricky than what I
- // was hoping for
- bool is_set_;
- bool in_try_;
-
- AstNodeFactory factory_;
-
- Expression* SetResult(Expression* value) {
- result_assigned_ = true;
- VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
- return factory()->NewAssignment(
- Token::ASSIGN, result_proxy, value, RelocInfo::kNoPosition);
- }
-
- // Node visitors.
-#define DEF_VISIT(type) virtual void Visit##type(type* node) override;
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- void VisitIterationStatement(IterationStatement* stmt);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-};
-
-
-void Processor::Process(ZoneList<Statement*>* statements) {
- for (int i = statements->length() - 1; i >= 0; --i) {
- Visit(statements->at(i));
- }
-}
-
-
-void Processor::VisitBlock(Block* node) {
- // An initializer block is the rewritten form of a variable declaration
- // with initialization expressions. The initializer block contains the
- // list of assignments corresponding to the initialization expressions.
- // While unclear from the spec (ECMA-262, 3rd., 12.2), the value of
- // a variable declaration with initialization expression is 'undefined'
- // with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
- // returns 'undefined'. To obtain the same behavior with v8, we need
- // to prevent rewriting in that case.
- if (!node->ignore_completion_value()) Process(node->statements());
-}
-
-
-void Processor::VisitExpressionStatement(ExpressionStatement* node) {
- // Rewrite : <x>; -> .result = <x>;
- if (!is_set_ && !node->expression()->IsThrow()) {
- node->set_expression(SetResult(node->expression()));
- if (!in_try_) is_set_ = true;
- }
-}
-
-
-void Processor::VisitIfStatement(IfStatement* node) {
- // Rewrite both then and else parts (reversed).
- bool save = is_set_;
- Visit(node->else_statement());
- bool set_after_then = is_set_;
- is_set_ = save;
- Visit(node->then_statement());
- is_set_ = is_set_ && set_after_then;
-}
-
-
-void Processor::VisitIterationStatement(IterationStatement* node) {
- // Rewrite the body.
- bool set_after_loop = is_set_;
- Visit(node->body());
- is_set_ = is_set_ && set_after_loop;
-}
-
-
-void Processor::VisitDoWhileStatement(DoWhileStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitWhileStatement(WhileStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForStatement(ForStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForInStatement(ForInStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitForOfStatement(ForOfStatement* node) {
- VisitIterationStatement(node);
-}
-
-
-void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
- // Rewrite both try and catch blocks (reversed order).
- bool set_after_catch = is_set_;
- Visit(node->catch_block());
- is_set_ = is_set_ && set_after_catch;
- bool save = in_try_;
- in_try_ = true;
- Visit(node->try_block());
- in_try_ = save;
-}
-
-
-void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- // Rewrite both try and finally block (reversed order).
- Visit(node->finally_block());
- bool save = in_try_;
- in_try_ = true;
- Visit(node->try_block());
- in_try_ = save;
-}
-
-
-void Processor::VisitSwitchStatement(SwitchStatement* node) {
- // Rewrite statements in all case clauses in reversed order.
- ZoneList<CaseClause*>* clauses = node->cases();
- bool set_after_switch = is_set_;
- for (int i = clauses->length() - 1; i >= 0; --i) {
- CaseClause* clause = clauses->at(i);
- Process(clause->statements());
- }
- is_set_ = is_set_ && set_after_switch;
-}
-
-
-void Processor::VisitContinueStatement(ContinueStatement* node) {
- is_set_ = false;
-}
-
-
-void Processor::VisitBreakStatement(BreakStatement* node) {
- is_set_ = false;
-}
-
-
-void Processor::VisitWithStatement(WithStatement* node) {
- bool set_after_body = is_set_;
- Visit(node->statement());
- is_set_ = is_set_ && set_after_body;
-}
-
-
-void Processor::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* node) {
- Visit(node->statement());
-}
-
-
-// Do nothing:
-void Processor::VisitVariableDeclaration(VariableDeclaration* node) {}
-void Processor::VisitFunctionDeclaration(FunctionDeclaration* node) {}
-void Processor::VisitImportDeclaration(ImportDeclaration* node) {}
-void Processor::VisitExportDeclaration(ExportDeclaration* node) {}
-void Processor::VisitEmptyStatement(EmptyStatement* node) {}
-void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
-
-
-// Expressions are never visited yet.
-#define DEF_VISIT(type) \
- void Processor::Visit##type(type* expr) { UNREACHABLE(); }
-EXPRESSION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-
-// Assumes code has been parsed. Mutates the AST, so the AST should not
-// continue to be used in the case of failure.
-bool Rewriter::Rewrite(ParseInfo* info) {
- FunctionLiteral* function = info->literal();
- DCHECK(function != NULL);
- Scope* scope = function->scope();
- DCHECK(scope != NULL);
- if (!scope->is_script_scope() && !scope->is_eval_scope()) return true;
-
- ZoneList<Statement*>* body = function->body();
- if (!body->is_empty()) {
- Variable* result =
- scope->NewTemporary(info->ast_value_factory()->dot_result_string());
- // The name string must be internalized at this point.
- DCHECK(!result->name().is_null());
- Processor processor(info->isolate(), result, info->ast_value_factory());
- processor.Process(body);
- if (processor.HasStackOverflow()) return false;
-
- if (processor.result_assigned()) {
- DCHECK(function->end_position() != RelocInfo::kNoPosition);
- // Set the position of the assignment statement one character past the
- // source code, such that it definitely is not in the source code range
- // of an immediate inner scope. For example in
- // eval('with ({x:1}) x = 1');
- // the end position of the function generated for executing the eval code
- // coincides with the end of the with scope which is the position of '1'.
- int pos = function->end_position();
- VariableProxy* result_proxy =
- processor.factory()->NewVariableProxy(result, pos);
- Statement* result_statement =
- processor.factory()->NewReturnStatement(result_proxy, pos);
- body->Add(result_statement, info->zone());
- }
- }
-
- return true;
-}
-
-
-} // namespace internal
-} // namespace v8
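
This deletion is presumably part of the src/ast/ and src/parsing/ reshuffle visible elsewhere in this diff rather than a removal of the rewriter itself. The subtle part of the deleted Processor is its backwards walk: is_set_ records whether some later statement is already guaranteed to store to .result, so earlier expression statements can skip the store. A toy model of that pass (Stmt and Process are illustrative names; the in_try_ and throw special cases are ignored):

    #include <vector>

    struct Stmt {
      bool is_expression;  // does it produce a completion value?
      bool needs_result_store = false;
    };

    // Walk statements in reverse, as Processor::Process does: an expression
    // statement needs ".result = <x>;" only if nothing after it is already
    // guaranteed to overwrite .result.
    void Process(std::vector<Stmt>& statements) {
      bool is_set = false;
      for (int i = static_cast<int>(statements.size()) - 1; i >= 0; --i) {
        if (statements[i].is_expression && !is_set) {
          statements[i].needs_result_store = true;
          is_set = true;
        }
      }
    }

    int main() {
      std::vector<Stmt> body = {{true}, {true}};  // models { f(); g(); }
      Process(body);
      // Only the last expression statement must store the completion value.
      return (!body[0].needs_result_store && body[1].needs_result_store) ? 0
                                                                         : 1;
    }
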
diff --git a/chromium/v8/src/runtime-profiler.cc b/chromium/v8/src/runtime-profiler.cc
index 6b942d44a6c..2d4ee9c1a88 100644
--- a/chromium/v8/src/runtime-profiler.cc
+++ b/chromium/v8/src/runtime-profiler.cc
@@ -5,6 +5,7 @@
#include "src/runtime-profiler.h"
#include "src/assembler.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -13,7 +14,6 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
-#include "src/scopeinfo.h"
namespace v8 {
namespace internal {
@@ -72,8 +72,10 @@ static void GetICCounts(SharedFunctionInfo* shared,
// Harvest vector-ics as well
TypeFeedbackVector* vector = shared->feedback_vector();
- *ic_with_type_info_count += vector->ic_with_type_info_count();
- *ic_generic_count += vector->ic_generic_count();
+ int with = 0, gen = 0;
+ vector->ComputeCounts(&with, &gen);
+ *ic_with_type_info_count += with;
+ *ic_generic_count += gen;
if (*ic_total_count > 0) {
*type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
@@ -108,7 +110,7 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
int loop_nesting_levels) {
SharedFunctionInfo* shared = function->shared();
- if (!FLAG_use_osr || function->IsBuiltin()) {
+ if (!FLAG_use_osr || function->shared()->IsBuiltin()) {
return;
}
diff --git a/chromium/v8/src/runtime-profiler.h b/chromium/v8/src/runtime-profiler.h
index eff443d926a..0d57929d060 100644
--- a/chromium/v8/src/runtime-profiler.h
+++ b/chromium/v8/src/runtime-profiler.h
@@ -39,6 +39,7 @@ class RuntimeProfiler {
bool any_ic_changed_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_RUNTIME_PROFILER_H_
diff --git a/chromium/v8/src/runtime.js b/chromium/v8/src/runtime.js
deleted file mode 100644
index 0e82d862bd7..00000000000
--- a/chromium/v8/src/runtime.js
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This files contains runtime support implemented in JavaScript.
-
-// CAUTION: Some of the functions specified in this file are called
-// directly from compiled code. These are the functions with names in
-// ALL CAPS. The compiled code passes the first argument in 'this'.
-
-
-// The following declarations are shared with other native JS files.
-// They are all declared at this one spot to avoid redeclaration errors.
-var $NaN;
-var $nonNumberToNumber;
-var $sameValue;
-var $sameValueZero;
-var $toNumber;
-var $toPositiveInteger;
-
-var harmony_tolength = false;
-
-(function(global, utils) {
-
-%CheckIsBootstrapping();
-
-var GlobalArray = global.Array;
-var GlobalBoolean = global.Boolean;
-var GlobalString = global.String;
-var isConcatSpreadableSymbol =
- utils.ImportNow("is_concat_spreadable_symbol");
-
-// ----------------------------------------------------------------------------
-
-/* -----------------------------
- - - - H e l p e r s - - -
- -----------------------------
-*/
-
-function APPLY_PREPARE(args) {
- var length;
-
- // First check that the receiver is callable.
- if (!IS_CALLABLE(this)) {
- throw %make_type_error(kApplyNonFunction, TO_STRING(this), typeof this);
- }
-
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength) {
- return length;
- }
- }
-
- length = (args == null) ? 0 : TO_UINT32(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
-
- // Make sure the arguments list has the right type.
- if (args != null && !IS_SPEC_OBJECT(args)) {
- throw %make_type_error(kWrongArgs, "Function.prototype.apply");
- }
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function REFLECT_APPLY_PREPARE(args) {
- var length;
-
- // First check that the receiver is callable.
- if (!IS_CALLABLE(this)) {
- throw %make_type_error(kApplyNonFunction, TO_STRING(this), typeof this);
- }
-
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength) {
- return length;
- }
- }
-
- if (!IS_SPEC_OBJECT(args)) {
- throw %make_type_error(kWrongArgs, "Reflect.apply");
- }
-
- length = TO_LENGTH(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function REFLECT_CONSTRUCT_PREPARE(
- args, newTarget) {
- var length;
- var ctorOk = IS_CALLABLE(this) && %IsConstructor(this);
- var newTargetOk = IS_CALLABLE(newTarget) && %IsConstructor(newTarget);
-
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
- ctorOk && newTargetOk) {
- return length;
- }
- }
-
- if (!ctorOk) {
- if (!IS_CALLABLE(this)) {
- throw %make_type_error(kCalledNonCallable, TO_STRING(this));
- } else {
- throw %make_type_error(kNotConstructor, TO_STRING(this));
- }
- }
-
- if (!newTargetOk) {
- if (!IS_CALLABLE(newTarget)) {
- throw %make_type_error(kCalledNonCallable, TO_STRING(newTarget));
- } else {
- throw %make_type_error(kNotConstructor, TO_STRING(newTarget));
- }
- }
-
- if (!IS_SPEC_OBJECT(args)) {
- throw %make_type_error(kWrongArgs, "Reflect.construct");
- }
-
- length = TO_LENGTH(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function CONCAT_ITERABLE_TO_ARRAY(iterable) {
- return %concat_iterable_to_array(this, iterable);
-};
-
-
-/* -------------------------------------
- - - - C o n v e r s i o n s - - -
- -------------------------------------
-*/
-
-// ECMA-262, section 9.2, page 30
-function ToBoolean(x) {
- if (IS_BOOLEAN(x)) return x;
- if (IS_STRING(x)) return x.length != 0;
- if (x == null) return false;
- if (IS_NUMBER(x)) return !((x == 0) || NUMBER_IS_NAN(x));
- return true;
-}
-
-
-// ECMA-262, section 9.3, page 31.
-function ToNumber(x) {
- if (IS_NUMBER(x)) return x;
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return NAN;
- // Types that can't be converted to number are caught in DefaultNumber.
- return (IS_NULL(x)) ? 0 : ToNumber(DefaultNumber(x));
-}
-
-function NonNumberToNumber(x) {
- if (IS_STRING(x)) {
- return %_HasCachedArrayIndex(x) ? %_GetCachedArrayIndex(x)
- : %StringToNumber(x);
- }
- if (IS_BOOLEAN(x)) return x ? 1 : 0;
- if (IS_UNDEFINED(x)) return NAN;
- // Types that can't be converted to number are caught in DefaultNumber.
- return (IS_NULL(x)) ? 0 : ToNumber(DefaultNumber(x));
-}
-
-
-// ECMA-262, section 9.8, page 35.
-function ToString(x) {
- if (IS_STRING(x)) return x;
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- // Types that can't be converted to string are caught in DefaultString.
- return (IS_NULL(x)) ? 'null' : ToString(DefaultString(x));
-}
-
-
-// ES5, section 9.12
-function SameValue(x, y) {
- if (typeof x != typeof y) return false;
- if (IS_NUMBER(x)) {
- if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
- // x is +0 and y is -0 or vice versa.
- if (x === 0 && y === 0 && %_IsMinusZero(x) != %_IsMinusZero(y)) {
- return false;
- }
- }
- if (IS_SIMD_VALUE(x)) return %SimdSameValue(x, y);
- return x === y;
-}
-
-
-// ES6, section 7.2.4
-function SameValueZero(x, y) {
- if (typeof x != typeof y) return false;
- if (IS_NUMBER(x)) {
- if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
- }
- if (IS_SIMD_VALUE(x)) return %SimdSameValueZero(x, y);
- return x === y;
-}
-
-
-function ConcatIterableToArray(target, iterable) {
- var index = target.length;
- for (var element of iterable) {
- %AddElement(target, index++, element);
- }
- return target;
-}
-
-
-/* ---------------------------------
- - - - U t i l i t i e s - - -
- ---------------------------------
-*/
-
-// Returns if the given x is a primitive value - not an object or a
-// function.
-function IsPrimitive(x) {
- // Even though the type of null is "object", null is still
- // considered a primitive value. IS_SPEC_OBJECT handles this correctly
- // (i.e., it will return false if x is null).
- return !IS_SPEC_OBJECT(x);
-}
-
-
-// ES6, draft 10-14-14, section 22.1.3.1.1
-function IsConcatSpreadable(O) {
- if (!IS_SPEC_OBJECT(O)) return false;
- var spreadable = O[isConcatSpreadableSymbol];
- if (IS_UNDEFINED(spreadable)) return IS_ARRAY(O);
- return ToBoolean(spreadable);
-}
-
-
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultNumber(x) {
- var valueOf = x.valueOf;
- if (IS_CALLABLE(valueOf)) {
- var v = %_Call(valueOf, x);
- if (IS_SYMBOL(v)) throw MakeTypeError(kSymbolToNumber);
- if (IS_SIMD_VALUE(x)) throw MakeTypeError(kSimdToNumber);
- if (IsPrimitive(v)) return v;
- }
- var toString = x.toString;
- if (IS_CALLABLE(toString)) {
- var s = %_Call(toString, x);
- if (IsPrimitive(s)) return s;
- }
- throw MakeTypeError(kCannotConvertToPrimitive);
-}
-
-// ECMA-262, section 8.6.2.6, page 28.
-function DefaultString(x) {
- if (!IS_SYMBOL_WRAPPER(x)) {
- if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToString);
- var toString = x.toString;
- if (IS_CALLABLE(toString)) {
- var s = %_Call(toString, x);
- if (IsPrimitive(s)) return s;
- }
-
- var valueOf = x.valueOf;
- if (IS_CALLABLE(valueOf)) {
- var v = %_Call(valueOf, x);
- if (IsPrimitive(v)) return v;
- }
- }
- throw MakeTypeError(kCannotConvertToPrimitive);
-}
-
-function ToPositiveInteger(x, rangeErrorIndex) {
- var i = TO_INTEGER_MAP_MINUS_ZERO(x);
- if (i < 0) throw MakeRangeError(rangeErrorIndex);
- return i;
-}
-
-//----------------------------------------------------------------------------
-
-// NOTE: Setting the prototype for Array must take place as early as
-// possible due to code generation for array literals. When
-// generating code for a array literal a boilerplate array is created
-// that is cloned when running the code. It is essential that the
-// boilerplate gets the right prototype.
-%FunctionSetPrototype(GlobalArray, new GlobalArray(0));
-
-// ----------------------------------------------------------------------------
-// Exports
-
-$NaN = %GetRootNaN();
-$nonNumberToNumber = NonNumberToNumber;
-$sameValue = SameValue;
-$sameValueZero = SameValueZero;
-$toNumber = ToNumber;
-$toPositiveInteger = ToPositiveInteger;
-
-%InstallToContext([
- "apply_prepare_builtin", APPLY_PREPARE,
- "concat_iterable_to_array_builtin", CONCAT_ITERABLE_TO_ARRAY,
- "reflect_apply_prepare_builtin", REFLECT_APPLY_PREPARE,
- "reflect_construct_prepare_builtin", REFLECT_CONSTRUCT_PREPARE,
-]);
-
-%InstallToContext([
- "concat_iterable_to_array", ConcatIterableToArray,
- "non_number_to_number", NonNumberToNumber,
- "to_number_fun", ToNumber,
-]);
-
-utils.Export(function(to) {
- to.ToBoolean = ToBoolean;
- to.ToNumber = ToNumber;
- to.ToString = ToString;
-});
-
-})
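
Among the deleted helpers, SameValue and SameValueZero differ only in their treatment of signed zero. For the numeric cases the deleted JavaScript translates directly to doubles (a sketch of the spec semantics only; the SIMD branches are omitted):

    #include <cassert>
    #include <cmath>

    // ES5 9.12 SameValue for numbers: NaN equals NaN; +0 and -0 differ.
    bool SameValue(double x, double y) {
      if (std::isnan(x) && std::isnan(y)) return true;
      if (x == 0.0 && y == 0.0) return std::signbit(x) == std::signbit(y);
      return x == y;
    }

    // ES6 7.2.4 SameValueZero: identical, except +0 and -0 are equal.
    bool SameValueZero(double x, double y) {
      if (std::isnan(x) && std::isnan(y)) return true;
      return x == y;
    }

    int main() {
      assert(SameValue(NAN, NAN) && SameValueZero(NAN, NAN));
      assert(!SameValue(0.0, -0.0));
      assert(SameValueZero(0.0, -0.0));
      return 0;
    }
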
diff --git a/chromium/v8/src/runtime/runtime-array.cc b/chromium/v8/src/runtime/runtime-array.cc
index 6fc1ad4ea12..28e92cbd2ba 100644
--- a/chromium/v8/src/runtime/runtime-array.cc
+++ b/chromium/v8/src/runtime/runtime-array.cc
@@ -9,6 +9,7 @@
#include "src/elements.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
+#include "src/key-accumulator.h"
#include "src/messages.h"
#include "src/prototype.h"
@@ -205,7 +206,8 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
}
- KeyAccumulator accumulator(isolate);
+ KeyAccumulator accumulator(isolate, ALL_PROPERTIES);
+ // No need to separate prototype levels since we only get numbers/element keys.
for (PrototypeIterator iter(isolate, array,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
@@ -216,16 +218,14 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
// collecting keys in that case.
return *isolate->factory()->NewNumberFromUint(length);
}
+ accumulator.NextPrototype();
Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- Handle<FixedArray> current_keys =
- isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE));
- current->GetOwnElementKeys(*current_keys, NONE);
- accumulator.AddKeys(current_keys, FixedArray::ALL_KEYS);
+ JSObject::CollectOwnElementKeys(current, &accumulator, ALL_PROPERTIES);
}
// Erase any keys >= length.
// TODO(adamk): Remove this step when the contract of %GetArrayKeys
// is changed to let this happen on the JS side.
- Handle<FixedArray> keys = accumulator.GetKeys();
+ Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
for (int i = 0; i < keys->length(); i++) {
if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
}
@@ -233,15 +233,24 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
}
-static Object* ArrayConstructorCommon(Isolate* isolate,
- Handle<JSFunction> constructor,
- Handle<JSFunction> original_constructor,
- Handle<AllocationSite> site,
- Arguments* caller_args) {
+namespace {
+
+Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site,
+ Arguments* caller_args) {
Factory* factory = isolate->factory();
+ // If called through new, new.target can be:
+ // - a subclass of constructor,
+ // - a proxy wrapper around constructor, or
+ // - the constructor itself.
+ // If called through Reflect.construct, it's guaranteed to be a constructor by
+ // REFLECT_CONSTRUCT_PREPARE.
+ DCHECK(new_target->IsConstructor());
+
bool holey = false;
- bool can_use_type_feedback = true;
+ bool can_use_type_feedback = !site.is_null();
bool can_inline_array_constructor = true;
if (caller_args->length() == 1) {
Handle<Object> argument_one = caller_args->at<Object>(0);
@@ -253,7 +262,7 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
can_use_type_feedback = false;
} else if (value != 0) {
holey = true;
- if (value >= JSObject::kInitialMaxFastElementArray) {
+ if (value >= JSArray::kInitialMaxFastElementArray) {
can_inline_array_constructor = false;
}
}
@@ -263,43 +272,36 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
}
}
- Handle<JSArray> array;
- if (!site.is_null() && can_use_type_feedback) {
- ElementsKind to_kind = site->GetElementsKind();
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- // Update the allocation site info to reflect the advice alteration.
- site->SetElementsKind(to_kind);
- }
-
- // We should allocate with an initial map that reflects the allocation site
- // advice. Therefore we use AllocateJSObjectFromMap instead of passing
- // the constructor.
- Handle<Map> initial_map(constructor->initial_map(), isolate);
- if (to_kind != initial_map->elements_kind()) {
- initial_map = Map::AsElementsKind(initial_map, to_kind);
- }
-
- // If we don't care to track arrays of to_kind ElementsKind, then
- // don't emit a memento for them.
- Handle<AllocationSite> allocation_site;
- if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
- allocation_site = site;
- }
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, constructor, new_target));
+
+ ElementsKind to_kind = can_use_type_feedback ? site->GetElementsKind()
+ : initial_map->elements_kind();
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ if (!site.is_null()) site->SetElementsKind(to_kind);
+ }
- array = Handle<JSArray>::cast(
- factory->NewJSObjectFromMap(initial_map, NOT_TENURED, allocation_site));
- } else {
- array = Handle<JSArray>::cast(factory->NewJSObject(constructor));
+ // We should allocate with an initial map that reflects the allocation site
+ // advice. Therefore we use AllocateJSObjectFromMap instead of passing
+ // the constructor.
+ if (to_kind != initial_map->elements_kind()) {
+ initial_map = Map::AsElementsKind(initial_map, to_kind);
+ }
- // We might need to transition to holey
- ElementsKind kind = constructor->initial_map()->elements_kind();
- if (holey && !IsFastHoleyElementsKind(kind)) {
- kind = GetHoleyElementsKind(kind);
- JSObject::TransitionElementsKind(array, kind);
- }
+ // If we don't care to track arrays of to_kind ElementsKind, then
+ // don't emit a memento for them.
+ Handle<AllocationSite> allocation_site;
+ if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
+ allocation_site = site;
}
+ Handle<JSArray> array = Handle<JSArray>::cast(
+ factory->NewJSObjectFromMap(initial_map, NOT_TENURED, allocation_site));
+
factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS);
ElementsKind old_kind = array->GetElementsKind();
@@ -314,21 +316,28 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
site->SetDoNotInlineCall();
}
- // Set up the prototype using original function.
- // TODO(dslomov): instead of setting the __proto__,
- // use and cache the correct map.
- if (*original_constructor != *constructor) {
- if (original_constructor->has_instance_prototype()) {
- Handle<Object> prototype =
- handle(original_constructor->instance_prototype(), isolate);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(array, prototype, false));
- }
- }
-
return *array;
}
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_NewArray) {
+ HandleScope scope(isolate);
+ DCHECK_LE(3, args.length());
+ int const argc = args.length() - 3;
+ // TODO(bmeurer): Remove this Arguments nonsense.
+ Arguments argv(argc, args.arguments() - 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
+ // TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
+ Handle<AllocationSite> site = type_info->IsAllocationSite()
+ ? Handle<AllocationSite>::cast(type_info)
+ : Handle<AllocationSite>::null();
+ return ArrayConstructorCommon(isolate, constructor, new_target, site, &argv);
+}
+
RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
HandleScope scope(isolate);
@@ -364,25 +373,6 @@ RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
}
-RUNTIME_FUNCTION(Runtime_ArrayConstructorWithSubclassing) {
- HandleScope scope(isolate);
- int args_length = args.length();
- CHECK(args_length >= 2);
-
- // These variables and checks work around -Werror=strict-overflow.
- int pre_last_arg_index = args_length - 2;
- int last_arg_index = args_length - 1;
- CHECK(pre_last_arg_index >= 0);
- CHECK(last_arg_index >= 0);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, pre_last_arg_index);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, last_arg_index);
- Arguments caller_args(args_length - 2, args.arguments());
- return ArrayConstructorCommon(isolate, constructor, original_constructor,
- Handle<AllocationSite>::null(), &caller_args);
-}
-
-
RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
@@ -497,5 +487,18 @@ RUNTIME_FUNCTION(Runtime_FastOneByteArrayJoin) {
// to a slow path.
return isolate->heap()->undefined_value();
}
+
+
+RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
+ Handle<Object> constructor;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, constructor,
+ Object::ArraySpeciesConstructor(isolate, original_array));
+ return *constructor;
+}
+
} // namespace internal
} // namespace v8
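
A rough standalone sketch of the length-argument policy in ArrayConstructorCommon above (toy names; the cutoff constant is an assumption for illustration, not the actual V8 definition):

#include <cstdint>
#include <iostream>

constexpr uint32_t kInitialMaxFastElementArray = 100000;  // assumed cutoff

struct Advice {
  bool holey = false;                        // new Array(n), n > 0 starts holey
  bool can_use_type_feedback = true;         // only usable for Smi lengths
  bool can_inline_array_constructor = true;  // disabled for huge lengths
};

Advice AdviseForLength(bool is_smi, uint32_t value) {
  Advice advice;
  if (!is_smi) {
    advice.can_use_type_feedback = false;
  } else if (value != 0) {
    advice.holey = true;
    if (value >= kInitialMaxFastElementArray)
      advice.can_inline_array_constructor = false;
  }
  return advice;
}

int main() {
  Advice a = AdviseForLength(true, 1u << 20);
  std::cout << a.holey << a.can_use_type_feedback
            << a.can_inline_array_constructor << "\n";  // prints 110
}
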
diff --git a/chromium/v8/src/runtime/runtime-atomics.cc b/chromium/v8/src/runtime/runtime-atomics.cc
index 84eab2ce113..94d98d4ffaf 100644
--- a/chromium/v8/src/runtime/runtime-atomics.cc
+++ b/chromium/v8/src/runtime/runtime-atomics.cc
@@ -153,126 +153,80 @@ template <typename T>
T FromObject(Handle<Object> number);
template <>
-inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
+inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
return NumberToUint32(*number);
}
template <>
-inline int32_t FromObject<int32_t>(Handle<Object> number) {
+inline int8_t FromObject<int8_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
-template <typename T, typename F>
-inline T ToAtomic(F from) {
- return static_cast<T>(from);
-}
-
-template <typename T, typename F>
-inline T FromAtomic(F from) {
- return static_cast<T>(from);
-}
-
-template <typename T>
-inline Object* ToObject(Isolate* isolate, T t);
-
template <>
-inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
- return Smi::FromInt(t);
+inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
+ return NumberToUint32(*number);
}
template <>
-inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
- return Smi::FromInt(t);
+inline int16_t FromObject<int16_t>(Handle<Object> number) {
+ return NumberToInt32(*number);
}
template <>
-inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
- return Smi::FromInt(t);
+inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
+ return NumberToUint32(*number);
}
template <>
-inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
- return Smi::FromInt(t);
+inline int32_t FromObject<int32_t>(Handle<Object> number) {
+ return NumberToInt32(*number);
}
-template <>
-inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
- return *isolate->factory()->NewNumber(t);
-}
-template <>
-inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
- return *isolate->factory()->NewNumber(t);
-}
+inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
-template <typename T>
-struct FromObjectTraits {};
+inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
-template <>
-struct FromObjectTraits<int8_t> {
- typedef int32_t convert_type;
- typedef int8_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
-template <>
-struct FromObjectTraits<uint8_t> {
- typedef uint32_t convert_type;
- typedef uint8_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, uint16_t t) {
+ return Smi::FromInt(t);
+}
-template <>
-struct FromObjectTraits<int16_t> {
- typedef int32_t convert_type;
- typedef int16_t atomic_type;
-};
-template <>
-struct FromObjectTraits<uint16_t> {
- typedef uint32_t convert_type;
- typedef uint16_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, int32_t t) {
+ return *isolate->factory()->NewNumber(t);
+}
-template <>
-struct FromObjectTraits<int32_t> {
- typedef int32_t convert_type;
- typedef int32_t atomic_type;
-};
-template <>
-struct FromObjectTraits<uint32_t> {
- typedef uint32_t convert_type;
- typedef uint32_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, uint32_t t) {
+ return *isolate->factory()->NewNumber(t);
+}
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
Handle<Object> oldobj, Handle<Object> newobj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
- atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
- atomic_type result = CompareExchangeSeqCst(
- static_cast<atomic_type*>(buffer) + index, oldval, newval);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T oldval = FromObject<T>(oldobj);
+ T newval = FromObject<T>(newobj);
+ T result =
+ CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T result = LoadSeqCst(static_cast<T*>(buffer) + index);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ T value = FromObject<T>(obj);
+ StoreSeqCst(static_cast<T*>(buffer) + index, value);
return *obj;
}
@@ -280,72 +234,54 @@ inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
@@ -363,21 +299,19 @@ inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
Handle<Object> oldobj,
Handle<Object> newobj) {
typedef int32_t convert_type;
- typedef uint8_t atomic_type;
- atomic_type oldval = ClampToUint8(FromObject<convert_type>(oldobj));
- atomic_type newval = ClampToUint8(FromObject<convert_type>(newobj));
- atomic_type result = CompareExchangeSeqCst(
- static_cast<atomic_type*>(buffer) + index, oldval, newval);
- return ToObject<uint8_t>(isolate, FromAtomic<uint8_t>(result));
+ uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
+ uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
+ uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index,
+ oldval, newval);
+ return ToObject(isolate, result);
}
inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
typedef int32_t convert_type;
- typedef uint8_t atomic_type;
- atomic_type value = ClampToUint8(FromObject<convert_type>(obj));
- StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
+ StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
return *obj;
}
@@ -386,16 +320,15 @@ inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
size_t index, Handle<Object> obj) { \
typedef int32_t convert_type; \
- typedef uint8_t atomic_type; \
- atomic_type* p = static_cast<atomic_type*>(buffer) + index; \
+ uint8_t* p = static_cast<uint8_t*>(buffer) + index; \
convert_type operand = FromObject<convert_type>(obj); \
- atomic_type expected; \
- atomic_type result; \
+ uint8_t expected; \
+ uint8_t result; \
do { \
expected = *p; \
result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
} while (CompareExchangeSeqCst(p, expected, result) != expected); \
- return ToObject<uint8_t>(isolate, expected); \
+ return ToObject(isolate, expected); \
}
DO_UINT8_CLAMPED_OP(Add, +)
@@ -410,14 +343,13 @@ DO_UINT8_CLAMPED_OP(Xor, ^)
inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
size_t index, Handle<Object> obj) {
typedef int32_t convert_type;
- typedef uint8_t atomic_type;
- atomic_type* p = static_cast<atomic_type*>(buffer) + index;
- atomic_type result = ClampToUint8(FromObject<convert_type>(obj));
- atomic_type expected;
+ uint8_t* p = static_cast<uint8_t*>(buffer) + index;
+ uint8_t result = ClampToUint8(FromObject<convert_type>(obj));
+ uint8_t expected;
do {
expected = *p;
} while (CompareExchangeSeqCst(p, expected, result) != expected);
- return ToObject<uint8_t>(isolate, expected);
+ return ToObject(isolate, expected);
}
@@ -444,18 +376,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);
+ return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
+ return DoCompareExchangeUint8Clamped(isolate, source, index, oldobj,
newobj);
default:
@@ -475,18 +408,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoLoad<ctype>(isolate, buffer, index);
+ return DoLoad<ctype>(isolate, source, index);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoLoad<uint8_t>(isolate, buffer, index);
+ return DoLoad<uint8_t>(isolate, source, index);
default:
break;
@@ -506,18 +440,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsStore) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoStore<ctype>(isolate, buffer, index, value);
+ return DoStore<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoStoreUint8Clamped(isolate, buffer, index, value);
+ return DoStoreUint8Clamped(isolate, source, index, value);
default:
break;
@@ -537,18 +472,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoAdd<ctype>(isolate, buffer, index, value);
+ return DoAdd<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoAddUint8Clamped(isolate, buffer, index, value);
+ return DoAddUint8Clamped(isolate, source, index, value);
default:
break;
@@ -568,18 +504,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoSub<ctype>(isolate, buffer, index, value);
+ return DoSub<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoSubUint8Clamped(isolate, buffer, index, value);
+ return DoSubUint8Clamped(isolate, source, index, value);
default:
break;
@@ -599,18 +536,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoAnd<ctype>(isolate, buffer, index, value);
+ return DoAnd<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoAndUint8Clamped(isolate, buffer, index, value);
+ return DoAndUint8Clamped(isolate, source, index, value);
default:
break;
@@ -630,18 +568,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoOr<ctype>(isolate, buffer, index, value);
+ return DoOr<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoOrUint8Clamped(isolate, buffer, index, value);
+ return DoOrUint8Clamped(isolate, source, index, value);
default:
break;
@@ -661,18 +600,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoXor<ctype>(isolate, buffer, index, value);
+ return DoXor<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoXorUint8Clamped(isolate, buffer, index, value);
+ return DoXorUint8Clamped(isolate, source, index, value);
default:
break;
@@ -692,18 +632,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoExchange<ctype>(isolate, buffer, index, value);
+ return DoExchange<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoExchangeUint8Clamped(isolate, buffer, index, value);
+ return DoExchangeUint8Clamped(isolate, source, index, value);
default:
break;
@@ -721,5 +662,5 @@ RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
uint32_t usize = NumberToUint32(*size);
return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
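
The recurring edit across these Atomics entry points is a single correctness fix: the element index must be applied relative to backing_store + byte_offset, not to the raw buffer start, or a typed-array view created with a nonzero byteOffset reads and writes the wrong cells. A minimal non-atomic illustration over plain memory (toy function, not V8 API; the seq-cst access is elided):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

int32_t LoadViewElement(void* backing_store, std::size_t byte_offset,
                        std::size_t index) {
  // Offset first, then index: this mirrors the `source` pointer computed in
  // each runtime function above.
  uint8_t* source = static_cast<uint8_t*>(backing_store) + byte_offset;
  int32_t result;
  std::memcpy(&result, source + index * sizeof(int32_t), sizeof(result));
  return result;
}

int main() {
  int32_t buffer[4] = {10, 20, 30, 40};
  // An Int32 view with byteOffset 8 starts at element 2 of the buffer.
  assert(LoadViewElement(buffer, 8, 0) == 30);
  assert(LoadViewElement(buffer, 8, 1) == 40);
  return 0;
}
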
diff --git a/chromium/v8/src/runtime/runtime-classes.cc b/chromium/v8/src/runtime/runtime-classes.cc
index 51e682f3255..ccd15e8b5da 100644
--- a/chromium/v8/src/runtime/runtime-classes.cc
+++ b/chromium/v8/src/runtime/runtime-classes.cc
@@ -36,9 +36,11 @@ RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+ Handle<Object> name(constructor->shared()->name(), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNonCallable));
+ isolate, NewTypeError(MessageTemplate::kConstructorNonCallable, name));
}
@@ -106,7 +108,7 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
isolate->factory()->prototype_string(),
SLOPPY),
Object);
- if (!prototype_parent->IsNull() && !prototype_parent->IsSpecObject()) {
+ if (!prototype_parent->IsNull() && !prototype_parent->IsJSReceiver()) {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kPrototypeParentNotAnObject,
prototype_parent),
@@ -123,6 +125,7 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
Handle<Map> map =
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ map->set_is_prototype_map(true);
if (constructor->map()->is_strong()) {
map->set_is_strong();
if (super_class->IsNull()) {
@@ -141,7 +144,11 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
constructor->shared()->set_name(*name_string);
if (!super_class->IsTheHole()) {
- Handle<Code> stub(isolate->builtins()->JSConstructStubForDerived());
+ // Derived classes, just like builtins, don't create implicit receivers in
+ // [[construct]]. Instead they just set up new.target and call into the
+ // constructor. Hence we can reuse the builtins construct stub for derived
+ // classes.
+ Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStub());
constructor->shared()->set_construct_stub(*stub);
}
@@ -162,9 +169,8 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
Object);
if (!constructor_parent.is_null()) {
- RETURN_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(constructor, constructor_parent, false),
- Object);
+ MAYBE_RETURN_NULL(JSObject::SetPrototype(constructor, constructor_parent,
+ false, Object::THROW_ON_ERROR));
}
JSObject::AddProperty(prototype, isolate->factory()->constructor_string(),
@@ -224,52 +230,28 @@ RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, prototype, 1);
- JSObject::MigrateSlowToFast(prototype, 0, "RuntimeToFastProperties");
JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
if (constructor->map()->is_strong()) {
DCHECK(prototype->map()->is_strong());
- RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::Freeze(prototype));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::Freeze(constructor));
- return *result;
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(prototype, FROZEN,
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(constructor, FROZEN,
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
}
return *constructor;
}
-RUNTIME_FUNCTION(Runtime_ClassGetSourceCode) {
- HandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
-
- Handle<Symbol> start_position_symbol(
- isolate->heap()->class_start_position_symbol());
- Handle<Object> start_position =
- JSReceiver::GetDataProperty(fun, start_position_symbol);
- if (!start_position->IsSmi()) return isolate->heap()->undefined_value();
-
- Handle<Symbol> end_position_symbol(
- isolate->heap()->class_end_position_symbol());
- Handle<Object> end_position =
- JSReceiver::GetDataProperty(fun, end_position_symbol);
- CHECK(end_position->IsSmi());
-
- Handle<String> source(
- String::cast(Script::cast(fun->shared()->script())->source()));
- return *isolate->factory()->NewSubString(
- source, Handle<Smi>::cast(start_position)->value(),
- Handle<Smi>::cast(end_position)->value());
-}
-
-
static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
Handle<Object> receiver,
Handle<JSObject> home_object,
Handle<Name> name,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
@@ -293,7 +275,8 @@ static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
Handle<JSObject> home_object,
uint32_t index,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
@@ -369,7 +352,8 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Name> name,
Handle<Object> value, LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -379,12 +363,10 @@ static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::SetSuperProperty(&it, value, language_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED));
- return *result;
+ MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
@@ -393,7 +375,8 @@ static Object* StoreElementToSuper(Isolate* isolate,
Handle<Object> receiver, uint32_t index,
Handle<Object> value,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ if (home_object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -403,12 +386,10 @@ static Object* StoreElementToSuper(Isolate* isolate,
if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::SetSuperProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED));
- return *result;
+ MAYBE_RETURN(Object::SetSuperProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
@@ -483,36 +464,11 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
}
-RUNTIME_FUNCTION(Runtime_HandleStepInForDerivedConstructors) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) debug->HandleStepIn(function, true);
- return *isolate->factory()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DefaultConstructorCallSuper) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, super_constructor, 1);
- JavaScriptFrameIterator it(isolate);
-
- // Determine the actual arguments passed to the function.
- int argument_count = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::New(isolate, super_constructor, original_constructor,
- argument_count, arguments.get()));
-
- return *result;
+RUNTIME_FUNCTION(Runtime_GetSuperConstructor) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, active_function, 0);
+ return active_function->map()->prototype();
}
} // namespace internal
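
The replacement Runtime_GetSuperConstructor can be a one-liner because ES2015 links the constructor functions themselves: `class B extends A` makes A the [[Prototype]] of B, so the super constructor is read straight off the active function's map. A toy model of that relationship (illustrative types only):

#include <cassert>

struct Ctor {
  const Ctor* proto;  // the constructor's own [[Prototype]]
};

const Ctor* GetSuperConstructor(const Ctor& active_function) {
  return active_function.proto;  // what map()->prototype() returns above
}

int main() {
  Ctor a{nullptr};  // class A {}
  Ctor b{&a};       // class B extends A {}
  assert(GetSuperConstructor(b) == &a);
}
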
diff --git a/chromium/v8/src/runtime/runtime-compiler.cc b/chromium/v8/src/runtime/runtime-compiler.cc
index 8790da05e39..15a3a14156f 100644
--- a/chromium/v8/src/runtime/runtime-compiler.cc
+++ b/chromium/v8/src/runtime/runtime-compiler.cc
@@ -44,17 +44,13 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
}
-RUNTIME_FUNCTION(Runtime_CompileOptimized) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+namespace {
+Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
+ Compiler::ConcurrencyMode mode) {
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
- Compiler::ConcurrencyMode mode =
- concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Handle<Code> code;
Handle<Code> unoptimized(function->shared()->code());
if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
@@ -80,6 +76,24 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
return function->code();
}
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return CompileOptimized(isolate, function, Compiler::CONCURRENT);
+}
+
+
+RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return CompileOptimized(isolate, function, Compiler::NOT_CONCURRENT);
+}
+
RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
HandleScope scope(isolate);
@@ -137,6 +151,11 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
RUNTIME_ASSERT(frame->function()->IsJSFunction());
DCHECK(frame->function() == *function);
+ // Ensure the context register is updated for materialized objects.
+ JavaScriptFrameIterator top_it(isolate);
+ JavaScriptFrame* top_frame = top_it.frame();
+ isolate->set_context(Context::cast(top_frame->context()));
+
if (type == Deoptimizer::LAZY) {
return isolate->heap()->undefined_value();
}
@@ -352,40 +371,6 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
}
-RUNTIME_FUNCTION(Runtime_CompileString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
-
- // Extract native context.
- Handle<Context> context(isolate->native_context());
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, context)) {
- Handle<Object> error_message =
- context->ErrorMessageForCodeGenerationFromStrings();
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewEvalError(MessageTemplate::kCodeGenFromStrings, error_message));
- }
-
- // Compile source string in the native context.
- ParseRestriction restriction = function_literal_only
- ? ONLY_SINGLE_FUNCTION_LITERAL
- : NO_PARSE_RESTRICTION;
- Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
- Handle<JSFunction> fun;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, fun,
- Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
- restriction, RelocInfo::kNoPosition));
- return *fun;
-}
-
-
static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
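
The CompileOptimized change is a small interface refactoring: one runtime entry taking a boolean becomes two flag-free entries over a shared static helper, so each call site selects the mode by which entry it targets instead of materializing an argument. The shape of the pattern, with illustrative names:

namespace {

enum class Mode { kConcurrent, kNotConcurrent };

// Shared body; the real helper additionally checks the stack limit and can
// fall back to the unoptimized code, which is elided here.
int CompileOptimizedImpl(Mode mode) {
  return mode == Mode::kConcurrent ? 1 : 0;  // stand-in for the real work
}

}  // namespace

int CompileOptimized_Concurrent() {
  return CompileOptimizedImpl(Mode::kConcurrent);
}

int CompileOptimized_NotConcurrent() {
  return CompileOptimizedImpl(Mode::kNotConcurrent);
}
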
diff --git a/chromium/v8/src/runtime/runtime-date.cc b/chromium/v8/src/runtime/runtime-date.cc
index 614b4a9ede9..96292ad1c5f 100644
--- a/chromium/v8/src/runtime/runtime-date.cc
+++ b/chromium/v8/src/runtime/runtime-date.cc
@@ -7,7 +7,6 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
#include "src/date.h"
-#include "src/dateparser-inl.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -15,52 +14,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_DateMakeDay) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_SMI_ARG_CHECKED(year, 0);
- CONVERT_SMI_ARG_CHECKED(month, 1);
-
- int days = isolate->date_cache()->DaysFromYearMonth(year, month);
- RUNTIME_ASSERT(Smi::IsValid(days));
- return Smi::FromInt(days);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateSetValue) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0);
- CONVERT_DOUBLE_ARG_CHECKED(time, 1);
- CONVERT_SMI_ARG_CHECKED(is_utc, 2);
-
- DateCache* date_cache = isolate->date_cache();
-
- Handle<Object> value;
- bool is_value_nan = false;
- if (std::isnan(time)) {
- value = isolate->factory()->nan_value();
- is_value_nan = true;
- } else if (!is_utc && (time < -DateCache::kMaxTimeBeforeUTCInMs ||
- time > DateCache::kMaxTimeBeforeUTCInMs)) {
- value = isolate->factory()->nan_value();
- is_value_nan = true;
- } else {
- time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
- if (time < -DateCache::kMaxTimeInMs || time > DateCache::kMaxTimeInMs) {
- value = isolate->factory()->nan_value();
- is_value_nan = true;
- } else {
- value = isolate->factory()->NewNumber(DoubleToInteger(time));
- }
- }
- date->SetValue(*value, is_value_nan);
- return *value;
-}
-
-
RUNTIME_FUNCTION(Runtime_IsDate) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -71,7 +24,7 @@ RUNTIME_FUNCTION(Runtime_IsDate) {
RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kNotDateObject));
}
@@ -79,120 +32,8 @@ RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- if (FLAG_log_timer_events || FLAG_prof_cpp) LOG(isolate, CurrentTimeEvent());
-
- // According to ECMA-262, section 15.9.1, page 117, the precision of
- // the number in a Date object representing a particular instant in
- // time is milliseconds. Therefore, we floor the result of getting
- // the OS time.
- double millis;
- if (FLAG_verify_predictable) {
- millis = 1388534400000.0; // Jan 1 2014 00:00:00 GMT+0000
- millis += Floor(isolate->heap()->synthetic_time());
- } else {
- millis = Floor(base::OS::TimeCurrentMillis());
- }
- return *isolate->factory()->NewNumber(millis);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateParseString) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
-
- RUNTIME_ASSERT(output->HasFastElements());
- JSObject::EnsureCanContainHeapObjectElements(output);
- RUNTIME_ASSERT(output->HasFastObjectElements());
- Handle<FixedArray> output_array(FixedArray::cast(output->elements()));
- RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
-
- Handle<String> str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str,
- Object::ToString(isolate, input));
-
- str = String::Flatten(str);
- DisallowHeapAllocation no_gc;
-
- bool result;
- String::FlatContent str_content = str->GetFlatContent();
- if (str_content.IsOneByte()) {
- result = DateParser::Parse(str_content.ToOneByteVector(), *output_array,
- isolate->unicode_cache());
- } else {
- DCHECK(str_content.IsTwoByte());
- result = DateParser::Parse(str_content.ToUC16Vector(), *output_array,
- isolate->unicode_cache());
- }
-
- if (result) {
- return *output;
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateLocalTimezone) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
- x <= DateCache::kMaxTimeBeforeUTCInMs);
- const char* zone =
- isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x));
- Handle<String> result =
- isolate->factory()->NewStringFromUtf8(CStrVector(zone)).ToHandleChecked();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateToUTC) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
- x <= DateCache::kMaxTimeBeforeUTCInMs);
- int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x));
-
- return *isolate->factory()->NewNumber(static_cast<double>(time));
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
- HandleScope hs(isolate);
- DCHECK(args.length() == 0);
- if (isolate->serializer_enabled()) return isolate->heap()->undefined_value();
- if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
- Handle<FixedArray> date_cache_version =
- isolate->factory()->NewFixedArray(1, TENURED);
- date_cache_version->set(0, Smi::FromInt(0));
- isolate->eternal_handles()->CreateSingleton(
- isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
- }
- Handle<FixedArray> date_cache_version =
- Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
- EternalHandles::DATE_CACHE_VERSION));
- // Return result as a JS array.
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->array_function());
- JSArray::SetContent(Handle<JSArray>::cast(result), date_cache_version);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateField) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSDate, date, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- DCHECK_LE(0, index);
- if (index == 0) return date->value();
- return JSDate::GetField(date, Smi::FromInt(index));
+ DCHECK_EQ(0, args.length());
+ return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
}
} // namespace internal
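
The deleted date runtime functions are replaced by JSDate support in C++, as the surviving Runtime_DateCurrentTime now shows; the core of the removed Runtime_DateSetValue was the spec's TimeClip operation plus the local-to-UTC conversion. For reference, TimeClip itself is small (a sketch following ES5.1 15.9.1.14; the 8.64e15 ms bound is the spec's 100,000,000-day range around the epoch):

#include <cmath>
#include <limits>

// TimeClip per ES5.1 15.9.1.14: non-finite or out-of-range times become NaN,
// in-range times are truncated to whole milliseconds.
double TimeClip(double time_ms) {
  constexpr double kMaxTimeInMs = 8.64e15;  // +/- 100,000,000 days in ms
  if (std::isnan(time_ms) || std::fabs(time_ms) > kMaxTimeInMs) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  return std::trunc(time_ms);
}
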
diff --git a/chromium/v8/src/runtime/runtime-debug.cc b/chromium/v8/src/runtime/runtime-debug.cc
index 9f49e4d5d2a..d94c75fa0e1 100644
--- a/chromium/v8/src/runtime/runtime-debug.cc
+++ b/chromium/v8/src/runtime/runtime-debug.cc
@@ -150,38 +150,29 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<Object> object) {
Factory* factory = isolate->factory();
- if (object->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- if (function->shared()->bound()) {
- RUNTIME_ASSERT_HANDLIFIED(function->function_bindings()->IsFixedArray(),
- JSArray);
-
- Handle<FixedArray> bindings(function->function_bindings());
-
- Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
- Handle<String> target =
- factory->NewStringFromAsciiChecked("[[TargetFunction]]");
- result->set(0, *target);
- result->set(1, bindings->get(JSFunction::kBoundFunctionIndex));
-
- Handle<String> bound_this =
- factory->NewStringFromAsciiChecked("[[BoundThis]]");
- result->set(2, *bound_this);
- result->set(3, bindings->get(JSFunction::kBoundThisIndex));
-
- Handle<FixedArray> arguments = factory->NewFixedArray(
- bindings->length() - JSFunction::kBoundArgumentsStartIndex);
- bindings->CopyTo(
- JSFunction::kBoundArgumentsStartIndex, *arguments, 0,
- bindings->length() - JSFunction::kBoundArgumentsStartIndex);
- Handle<String> bound_args =
- factory->NewStringFromAsciiChecked("[[BoundArgs]]");
- result->set(4, *bound_args);
- Handle<JSArray> arguments_array =
- factory->NewJSArrayWithElements(arguments);
- result->set(5, *arguments_array);
- return factory->NewJSArrayWithElements(result);
- }
+ if (object->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object);
+
+ Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
+ Handle<String> target =
+ factory->NewStringFromAsciiChecked("[[TargetFunction]]");
+ result->set(0, *target);
+ result->set(1, function->bound_target_function());
+
+ Handle<String> bound_this =
+ factory->NewStringFromAsciiChecked("[[BoundThis]]");
+ result->set(2, *bound_this);
+ result->set(3, function->bound_this());
+
+ Handle<String> bound_args =
+ factory->NewStringFromAsciiChecked("[[BoundArgs]]");
+ result->set(4, *bound_args);
+ Handle<FixedArray> bound_arguments =
+ factory->CopyFixedArray(handle(function->bound_arguments(), isolate));
+ Handle<JSArray> arguments_array =
+ factory->NewJSArrayWithElements(bound_arguments);
+ result->set(5, *arguments_array);
+ return factory->NewJSArrayWithElements(result);
} else if (object->IsJSMapIterator()) {
Handle<JSMapIterator> iterator = Handle<JSMapIterator>::cast(object);
return GetIteratorInternalProperties(isolate, iterator);
@@ -456,7 +447,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
it.frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
// Omit functions from native and extension scripts.
- if (frames[i].function()->IsSubjectToDebugging()) n++;
+ if (frames[i].function()->shared()->IsSubjectToDebugging()) n++;
}
}
return Smi::FromInt(n);
@@ -534,7 +525,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
- RUNTIME_ASSERT(function->IsSubjectToDebugging());
+ RUNTIME_ASSERT(function->shared()->IsSubjectToDebugging());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
DCHECK(*scope_info != ScopeInfo::Empty(isolate));
@@ -709,22 +700,19 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
// Add the receiver (same as in function frame).
- // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
- // THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- DCHECK(!function->IsBuiltin());
+ DCHECK(!function->shared()->IsBuiltin());
if (!receiver->IsJSObject() && is_sloppy(shared->language_mode())) {
- // If the receiver is not a JSObject and the function is not a
- // builtin or strict-mode we have hit an optimization where a
- // value object is not converted into a wrapped JS object. To
- // hide this optimization from the debugger, we wrap the receiver
- // by creating correct wrapper object based on the calling frame's
- // native context.
- it.Advance();
+ // If the receiver is not a JSObject and the function is not a builtin or
+ // strict-mode we have hit an optimization where a value object is not
+ // converted into a wrapped JS objects. To hide this optimization from the
+ // debugger, we wrap the receiver by creating correct wrapper object based
+ // on the function's native context.
+ // See ECMA-262 6.0, 9.2.1.2, 6 b iii.
if (receiver->IsUndefined()) {
receiver = handle(function->global_proxy());
} else {
- Context* context = Context::cast(it.frame()->context());
+ Context* context = function->context();
Handle<Context> native_context(Context::cast(context->native_context()));
if (!Object::ToObject(isolate, receiver, native_context)
.ToHandle(&receiver)) {
@@ -850,10 +838,10 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
- bool ignore_nested_scopes = false;
+ ScopeIterator::Option option = ScopeIterator::DEFAULT;
if (args.length() == 4) {
CONVERT_BOOLEAN_ARG_CHECKED(flag, 3);
- ignore_nested_scopes = flag;
+ if (flag) option = ScopeIterator::IGNORE_NESTED_SCOPES;
}
// Get the frame where the debugging is performed.
@@ -863,7 +851,7 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
List<Handle<JSObject> > result(4);
- ScopeIterator it(isolate, &frame_inspector, ignore_nested_scopes);
+ ScopeIterator it(isolate, &frame_inspector, option);
for (; !it.Done(); it.Next()) {
Handle<JSObject> details;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
@@ -881,15 +869,18 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
// Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(isolate, fun); !it.Done(); it.Next()) {
- n++;
+ if (function->IsJSFunction()) {
+ for (ScopeIterator it(isolate, Handle<JSFunction>::cast(function));
+ !it.Done(); it.Next()) {
+ n++;
+ }
}
return Smi::FromInt(n);
@@ -1070,11 +1061,11 @@ RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
// Sets the disable break state
// args[0]: disable break state
-RUNTIME_FUNCTION(Runtime_SetDisableBreak) {
+RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
- isolate->debug()->set_disable_break(disable_break);
+ CONVERT_BOOLEAN_ARG_CHECKED(active, 0);
+ isolate->debug()->set_break_points_active(active);
return isolate->heap()->undefined_value();
}
@@ -1220,39 +1211,18 @@ RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
// of frames to step down.
RUNTIME_FUNCTION(Runtime_PrepareStep) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK(args.length() == 2);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
- if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
+ if (!args[1]->IsNumber()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
- CONVERT_NUMBER_CHECKED(int, wrapped_frame_id, Int32, args[3]);
-
- StackFrame::Id frame_id;
- if (wrapped_frame_id == 0) {
- frame_id = StackFrame::NO_ID;
- } else {
- frame_id = DebugFrameHelper::UnwrapFrameId(wrapped_frame_id);
- }
-
// Get the step action and check validity.
StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
if (step_action != StepIn && step_action != StepNext &&
- step_action != StepOut && step_action != StepInMin &&
- step_action != StepMin && step_action != StepFrame) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- if (frame_id != StackFrame::NO_ID && step_action != StepNext &&
- step_action != StepMin && step_action != StepOut) {
- return isolate->ThrowIllegalOperation();
- }
-
- // Get the number of steps.
- int step_count = NumberToInt32(args[2]);
- if (step_count < 1) {
+ step_action != StepOut && step_action != StepFrame) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
@@ -1260,8 +1230,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
isolate->debug()->ClearStepping();
// Prepare step.
- isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count, frame_id);
+ isolate->debug()->PrepareStep(static_cast<StepAction>(step_action));
return isolate->heap()->undefined_value();
}
@@ -1289,7 +1258,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 5);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 5);
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
@@ -1313,7 +1282,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 3);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 3);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1359,6 +1328,17 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
}
+static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate, Object* object,
+ Object* proto) {
+ PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
+ while (true) {
+ iter.AdvanceIgnoringProxies();
+ if (iter.IsAtEnd()) return false;
+ if (iter.IsAtEnd(proto)) return true;
+ }
+}
+
+
// Scan the heap for objects with direct references to an object
// args[0]: the object to find references to
// args[1]: constructor function for instances to exclude (Mirror)
@@ -1388,7 +1368,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
// Check filter if supplied. This is normally used to avoid
// references from mirror objects.
if (!filter->IsUndefined() &&
- obj->HasInPrototypeChain(isolate, *filter)) {
+ HasInPrototypeChainIgnoringProxies(isolate, obj, *filter)) {
continue;
}
if (obj->IsJSGlobalObject()) {
@@ -1457,7 +1437,12 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- return *Object::GetPrototypeSkipHiddenPrototypes(isolate, obj);
+ Handle<Object> prototype;
+ // TODO(1543): Come up with a solution for clients to handle potential errors
+ // thrown by an intermediate proxy.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
+ Object::GetPrototype(isolate, obj));
+ return *prototype;
}
@@ -1482,10 +1467,28 @@ RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return f->shared()->inferred_name();
+ CONVERT_ARG_CHECKED(Object, f, 0);
+ if (f->IsJSFunction()) {
+ return JSFunction::cast(f)->shared()->inferred_name();
+ }
+ return isolate->heap()->empty_string();
+}
+
+
+RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+
+ if (function->IsJSBoundFunction()) {
+ return Handle<JSBoundFunction>::cast(function)->name();
+ }
+ Handle<Object> name =
+ JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
+ return *name;
}
@@ -1620,34 +1623,11 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
}
-// Check whether debugger is about to step into the callback that is passed
-// to a built-in function such as Array.forEach.
-RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
- DCHECK(args.length() == 1);
- Debug* debug = isolate->debug();
- if (!debug->is_active() || !debug->IsStepping() ||
- debug->last_step_action() != StepIn) {
- return isolate->heap()->false_value();
- }
- CONVERT_ARG_CHECKED(Object, callback, 0);
- // We do not step into the callback if it's a builtin other than a bound,
- // or not even a function.
- return isolate->heap()->ToBoolean(
- callback->IsJSFunction() &&
- (JSFunction::cast(callback)->IsSubjectToDebugging() ||
- JSFunction::cast(callback)->shared()->bound()));
-}
-
-
// Set one shot breakpoints for the callback function that is passed to a
-// built-in function such as Array.forEach to enable stepping into the callback.
+// built-in function such as Array.forEach to enable stepping into the callback,
+// if we are indeed stepping and the callback is subject to debugging.
RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
DCHECK(args.length() == 1);
- RUNTIME_ASSERT(isolate->debug()->is_active());
-
- Debug* debug = isolate->debug();
- if (!debug->IsStepping()) return isolate->heap()->undefined_value();
-
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
@@ -1658,11 +1638,8 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
fun = Handle<JSFunction>(
Handle<JSGeneratorObject>::cast(object)->function(), isolate);
}
- // When leaving the function, step out has been activated, but not performed
- // if we do not leave the builtin. To be able to step into the function
- // again, we need to clear the step out at this point.
- debug->ClearStepOut();
- debug->FloodWithOneShotGeneric(fun);
+
+ isolate->debug()->PrepareStepIn(fun);
return isolate->heap()->undefined_value();
}
@@ -1673,6 +1650,8 @@ RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
isolate->PushPromise(promise, function);
+ // If we are in step-in mode, flood the handler.
+ isolate->debug()->EnableStepIn();
return isolate->heap()->undefined_value();
}
@@ -1709,17 +1688,6 @@ RUNTIME_FUNCTION(Runtime_DebugIsActive) {
}
-RUNTIME_FUNCTION(Runtime_DebugHandleStepIntoAccessor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) debug->HandleStepIn(function, false);
- return *isolate->factory()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
return NULL;
diff --git a/chromium/v8/src/runtime/runtime-function.cc b/chromium/v8/src/runtime/runtime-function.cc
index 18a0865f273..befd3370984 100644
--- a/chromium/v8/src/runtime/runtime-function.cc
+++ b/chromium/v8/src/runtime/runtime-function.cc
@@ -7,7 +7,6 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/compiler.h"
-#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -38,46 +37,14 @@ RUNTIME_FUNCTION(Runtime_FunctionSetName) {
}
-RUNTIME_FUNCTION(Runtime_FunctionNameShouldPrintAsAnonymous) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(
- f->shared()->name_should_print_as_anonymous());
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- f->shared()->set_name_should_print_as_anonymous(true);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionIsArrow) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->is_arrow());
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionIsConciseMethod) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->is_concise_method());
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_ASSERT(f->RemovePrototype());
+ f->shared()->set_construct_stub(
+ *isolate->builtins()->ConstructedNonConstructable());
return isolate->heap()->undefined_value();
}
@@ -85,23 +52,23 @@ RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
+ if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
+ Handle<Object> script(Handle<JSFunction>::cast(function)->shared()->script(),
+ isolate);
if (!script->IsScript()) return isolate->heap()->undefined_value();
-
return *Script::GetWrapper(Handle<Script>::cast(script));
}
RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
- Handle<SharedFunctionInfo> shared(f->shared());
- return *shared->GetSourceCode();
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
+ return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
}
@@ -135,7 +102,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_CHECKED(String, name, 1);
- fun->SetInstanceClassName(name);
+ fun->shared()->set_instance_class_name(name);
return isolate->heap()->undefined_value();
}
@@ -175,18 +142,6 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
}
-RUNTIME_FUNCTION(Runtime_FunctionHidesSource) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
-
- SharedFunctionInfo* shared = f->shared();
- bool hide_source = !shared->script()->IsScript() ||
- Script::cast(shared->script())->hide_source();
- return isolate->heap()->ToBoolean(hide_source);
-}
-
-
RUNTIME_FUNCTION(Runtime_SetCode) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -196,7 +151,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
Handle<SharedFunctionInfo> target_shared(target->shared());
Handle<SharedFunctionInfo> source_shared(source->shared());
- RUNTIME_ASSERT(!source_shared->bound());
if (!Compiler::Compile(source, KEEP_EXCEPTION)) {
return isolate->heap()->exception();
@@ -292,202 +246,25 @@ RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
}
-// Find the arguments of the JavaScript function invocation that called
-// into C++ code. Collect these in a newly allocated array of handles (possibly
-// prefixed by a number of empty handles).
-base::SmartArrayPointer<Handle<Object>> Runtime::GetCallerArguments(
- Isolate* isolate, int prefix_argc, int* total_argc) {
- // Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- List<JSFunction*> functions(2);
- frame->GetFunctions(&functions);
- if (functions.length() > 1) {
- int inlined_jsframe_index = functions.length() - 1;
- TranslatedState translated_values(frame);
- translated_values.Prepare(false, frame->fp());
-
- int argument_count = 0;
- TranslatedFrame* translated_frame =
- translated_values.GetArgumentsInfoFromJSFrameIndex(
- inlined_jsframe_index, &argument_count);
- TranslatedFrame::iterator iter = translated_frame->begin();
-
- // Skip the function.
- iter++;
-
- // Skip the receiver.
- iter++;
- argument_count--;
-
- *total_argc = prefix_argc + argument_count;
- base::SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
- bool should_deoptimize = false;
- for (int i = 0; i < argument_count; i++) {
- should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
- Handle<Object> value = iter->GetValue();
- param_data[prefix_argc + i] = value;
- iter++;
- }
-
- if (should_deoptimize) {
- translated_values.StoreMaterializedValuesAndDeopt();
- }
-
- return param_data;
- } else {
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
- int args_count = frame->ComputeParametersCount();
-
- *total_argc = prefix_argc + args_count;
- base::SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
- param_data[prefix_argc + i] = val;
- }
- return param_data;
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, bindee, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, this_object, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
-
- // TODO(lrn): Create bound function in C++ code from premade shared info.
- bound_function->shared()->set_bound(true);
- bound_function->shared()->set_optimized_code_map(Smi::FromInt(0));
- bound_function->shared()->set_inferred_name(isolate->heap()->empty_string());
- // Get all arguments of calling function (Function.prototype.bind).
- int argc = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argc);
- // Don't count the this-arg.
- if (argc > 0) {
- RUNTIME_ASSERT(arguments[0].is_identical_to(this_object));
- argc--;
- } else {
- RUNTIME_ASSERT(this_object->IsUndefined());
- }
- // Initialize array of bindings (function, this, and any existing arguments
- // if the function was already bound).
- Handle<FixedArray> new_bindings;
- int i;
- if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
- Handle<FixedArray> old_bindings(
- JSFunction::cast(*bindee)->function_bindings());
- RUNTIME_ASSERT(old_bindings->length() > JSFunction::kBoundFunctionIndex);
- new_bindings =
- isolate->factory()->NewFixedArray(old_bindings->length() + argc);
- bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex),
- isolate);
- i = 0;
- for (int n = old_bindings->length(); i < n; i++) {
- new_bindings->set(i, old_bindings->get(i));
- }
- } else {
- int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
- new_bindings = isolate->factory()->NewFixedArray(array_size);
- new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
- new_bindings->set(JSFunction::kBoundThisIndex, *this_object);
- i = 2;
- }
- // Copy arguments, skipping the first which is "this_arg".
- for (int j = 0; j < argc; j++, i++) {
- new_bindings->set(i, *arguments[j + 1]);
- }
- new_bindings->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
- bound_function->set_function_bindings(*new_bindings);
-
- // Update length. Have to remove the prototype first so that map migration
- // is happy about the number of fields.
- RUNTIME_ASSERT(bound_function->RemovePrototype());
-
- // The new function should have the same [[Prototype]] as the bindee.
- Handle<Map> bound_function_map =
- bindee->IsConstructor()
- ? isolate->bound_function_with_constructor_map()
- : isolate->bound_function_without_constructor_map();
- PrototypeIterator iter(isolate, bindee);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (bound_function_map->prototype() != *proto) {
- bound_function_map = Map::TransitionToPrototype(bound_function_map, proto,
- REGULAR_PROTOTYPE);
- }
- JSObject::MigrateToMap(bound_function, bound_function_map);
- DCHECK_EQ(bindee->IsConstructor(), bound_function->IsConstructor());
-
- Handle<String> length_string = isolate->factory()->length_string();
- // These attributes must be kept in sync with how the bootstrapper
- // configures the bound_function_map retrieved above.
- // We use ...IgnoreAttributes() here because of length's read-onliness.
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- bound_function, length_string, new_length, attr));
- return *bound_function;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) {
- HandleScope handles(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
- if (callable->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- if (function->shared()->bound()) {
- RUNTIME_ASSERT(function->function_bindings()->IsFixedArray());
- Handle<FixedArray> bindings(function->function_bindings());
- return *isolate->factory()->NewJSArrayWithElements(bindings);
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
+RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- // First argument is a function to use as a constructor.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- RUNTIME_ASSERT(function->shared()->bound());
-
- // The argument is a bound function. Extract its bound arguments
- // and callable.
- Handle<FixedArray> bound_args =
- Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
- int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
- Handle<Object> bound_function(
- JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
- isolate);
- DCHECK(!bound_function->IsJSFunction() ||
- !Handle<JSFunction>::cast(bound_function)->shared()->bound());
-
- int total_argc = 0;
- base::SmartArrayPointer<Handle<Object>> param_data =
- Runtime::GetCallerArguments(isolate, bound_argc, &total_argc);
- for (int i = 0; i < bound_argc; i++) {
- param_data[i] = Handle<Object>(
- bound_args->get(JSFunction::kBoundArgumentsStartIndex + i), isolate);
+ DCHECK_LE(2, args.length());
+ int const argc = args.length() - 2;
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ ScopedVector<Handle<Object>> argv(argc);
+ for (int i = 0; i < argc; ++i) {
+ argv[i] = args.at<Object>(2 + i);
}
-
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::New(isolate, bound_function, bound_function,
- total_argc, param_data.get()));
+ isolate, result,
+ Execution::Call(isolate, target, receiver, argc, argv.start()));
return *result;
}
-RUNTIME_FUNCTION(Runtime_Call) {
+RUNTIME_FUNCTION(Runtime_TailCall) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
int const argc = args.length() - 2;
@@ -542,63 +319,23 @@ RUNTIME_FUNCTION(Runtime_Apply) {
}
-RUNTIME_FUNCTION(Runtime_GetOriginalConstructor) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- return frame->IsConstructor() ? frame->GetOriginalConstructor()
- : isolate->heap()->undefined_value();
-}
-
-
-// TODO(bmeurer): Kill %_CallFunction ASAP as it is almost never used
-// correctly because of the weird semantics underneath.
-RUNTIME_FUNCTION(Runtime_CallFunction) {
+// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
+RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
HandleScope scope(isolate);
- DCHECK(args.length() >= 2);
- int argc = args.length() - 2;
- CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
- Object* receiver = args[0];
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- base::SmartArrayPointer<Handle<Object>> argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = base::SmartArrayPointer<Handle<Object>>(argv);
- }
-
- for (int i = 0; i < argc; ++i) {
- argv[i] = Handle<Object>(args[1 + i], isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ if (receiver->IsNull() || receiver->IsUndefined()) {
+ return isolate->global_proxy();
}
-
- Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver, isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, hfun, hreceiver, argc, argv));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsConstructCall) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- return isolate->heap()->ToBoolean(frame->IsConstructor());
+ return *Object::ToObject(isolate, receiver).ToHandleChecked();
}
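
// Editor's sketch (stand-in enum, not V8 API): the decision implemented by
// Runtime_ConvertReceiver above, per ES6 9.2.1.2 for sloppy-mode callees.
// null/undefined receivers become the global proxy; primitives are boxed
// via ToObject; objects pass through unchanged.
#include <cassert>

enum class Receiver { kUndefined, kNull, kPrimitive, kObject, kGlobalProxy };

Receiver ConvertReceiver(Receiver r) {
  if (r == Receiver::kUndefined || r == Receiver::kNull)
    return Receiver::kGlobalProxy;  // isolate->global_proxy() in the runtime
  if (r == Receiver::kPrimitive)
    return Receiver::kObject;       // Object::ToObject wraps primitives
  return r;                         // already an object: unchanged
}

int main() {
  assert(ConvertReceiver(Receiver::kNull) == Receiver::kGlobalProxy);
  assert(ConvertReceiver(Receiver::kPrimitive) == Receiver::kObject);
}
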
RUNTIME_FUNCTION(Runtime_IsFunction) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSFunction());
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return isolate->heap()->ToBoolean(object->IsFunction());
}
@@ -608,5 +345,17 @@ RUNTIME_FUNCTION(Runtime_ThrowStrongModeTooFewArguments) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kStrongArity));
}
+
+
+RUNTIME_FUNCTION(Runtime_FunctionToString) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ return function->IsJSBoundFunction()
+ ? *JSBoundFunction::ToString(
+ Handle<JSBoundFunction>::cast(function))
+ : *JSFunction::ToString(Handle<JSFunction>::cast(function));
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-futex.cc b/chromium/v8/src/runtime/runtime-futex.cc
index a96758d9f3b..f4ef679bf6a 100644
--- a/chromium/v8/src/runtime/runtime-futex.cc
+++ b/chromium/v8/src/runtime/runtime-futex.cc
@@ -30,7 +30,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexWait) {
RUNTIME_ASSERT(timeout == V8_INFINITY || !std::isnan(timeout));
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = index << 2;
+ size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::Wait(isolate, array_buffer, addr, value, timeout);
}
@@ -47,7 +47,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexWake) {
RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = index << 2;
+ size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::Wake(isolate, array_buffer, addr, count);
}
@@ -67,8 +67,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexWakeOrRequeue) {
RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr1 = index1 << 2;
- size_t addr2 = index2 << 2;
+ size_t addr1 = (index1 << 2) + NumberToSize(isolate, sta->byte_offset());
+ size_t addr2 = (index2 << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::WakeOrRequeue(isolate, array_buffer, addr1, count,
value, addr2);
@@ -85,9 +85,9 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexNumWaitersForTesting) {
RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = index << 2;
+ size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
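
// Editor's sketch (plain C++, not V8 source): the fix applied throughout
// runtime-futex.cc above. A waiter's address inside the SharedArrayBuffer
// must include the typed array's byte offset; `index << 2` alone is only
// correct for an Int32Array view that starts at offset 0.
#include <cassert>
#include <cstddef>

size_t FutexAddr(size_t index, size_t byte_offset) {
  return (index << 2) + byte_offset;  // 4 bytes per Int32 element
}

int main() {
  assert(FutexAddr(3, 0) == 12);   // old computation, offset-0 views only
  assert(FutexAddr(3, 64) == 76);  // offset-aware address after the fix
}
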
diff --git a/chromium/v8/src/runtime/runtime-generator.cc b/chromium/v8/src/runtime/runtime-generator.cc
index 208f7f6680e..926cd3ce2d6 100644
--- a/chromium/v8/src/runtime/runtime-generator.cc
+++ b/chromium/v8/src/runtime/runtime-generator.cc
@@ -209,14 +209,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
}
-RUNTIME_FUNCTION(Runtime_FunctionIsGenerator) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->is_generator());
-}
-
-
RUNTIME_FUNCTION(Runtime_GeneratorNext) {
UNREACHABLE(); // Optimization disabled in SetUpGenerators().
return NULL;
diff --git a/chromium/v8/src/runtime/runtime-i18n.cc b/chromium/v8/src/runtime/runtime-i18n.cc
index 8b0c98f1614..e1f0c8e959f 100644
--- a/chromium/v8/src/runtime/runtime-i18n.cc
+++ b/chromium/v8/src/runtime/runtime-i18n.cc
@@ -389,10 +389,11 @@ RUNTIME_FUNCTION(Runtime_InternalDateParse) {
UDate date = date_format->parse(u_date, status);
if (U_FAILURE(status)) return isolate->heap()->undefined_value();
- Handle<Object> result;
+ Handle<JSDate> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::NewDate(isolate, static_cast<double>(date)));
- DCHECK(result->IsJSDate());
+ isolate, result,
+ JSDate::New(isolate->date_function(), isolate->date_function(),
+ static_cast<double>(date)));
return *result;
}
@@ -472,6 +473,8 @@ RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kIntlV8Parse);
+
v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
icu::DecimalFormat* number_format =
diff --git a/chromium/v8/src/runtime/runtime-internal.cc b/chromium/v8/src/runtime/runtime-internal.cc
index 90d5532af37..ee664645d4b 100644
--- a/chromium/v8/src/runtime/runtime-internal.cc
+++ b/chromium/v8/src/runtime/runtime-internal.cc
@@ -5,14 +5,14 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/prettyprinter.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -92,7 +92,7 @@ RUNTIME_FUNCTION(Runtime_ReThrow) {
RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
+ DCHECK_LE(0, args.length());
return isolate->StackOverflow();
}
@@ -153,6 +153,14 @@ RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
}
+RUNTIME_FUNCTION(Runtime_ThrowIllegalInvocation) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIllegalInvocation));
+}
+
+
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -171,6 +179,16 @@ RUNTIME_FUNCTION(Runtime_ThrowStrongModeImplicitConversion) {
}
+RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<String> type = Object::TypeOf(isolate, object);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
+}
+
+
RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
DCHECK(args.length() == 3);
HandleScope scope(isolate);
@@ -284,18 +302,6 @@ RUNTIME_FUNCTION(Runtime_MessageGetScript) {
}
-RUNTIME_FUNCTION(Runtime_ErrorToStringRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, error, 0);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->error_tostring_helper()->Stringify(isolate, error));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_FormatMessageString) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
@@ -307,6 +313,7 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
MessageTemplate::FormatMessage(template_index, arg0, arg1, arg2));
+ isolate->native_context()->IncrementErrorsThrown();
return *result;
}
@@ -318,6 +325,7 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
Handle<String> result; \
CallSite call_site(isolate, call_site_obj); \
+ RUNTIME_ASSERT(call_site.IsValid()) \
return RETURN(call_site.NAME(), isolate); \
}
@@ -327,8 +335,8 @@ static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
}
-static inline Object* ReturnPositiveSmiOrNull(int value, Isolate* isolate) {
- if (value >= 0) return Smi::FromInt(value);
+static inline Object* ReturnPositiveNumberOrNull(int value, Isolate* isolate) {
+ if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
return isolate->heap()->null_value();
}
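
// Editor's note, as a hypothetical helper (not part of the diff): a Smi
// carries only a 31-bit signed payload on 32-bit targets, so a line or
// column number outside this range cannot be a Smi. NewNumberFromInt above
// falls back to a HeapNumber automatically, which is presumably why the
// raw Smi::FromInt path was dropped.
inline bool FitsInSmi31(int value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}
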
@@ -342,8 +350,8 @@ CALLSITE_GET(GetFileName, ReturnDereferencedHandle)
CALLSITE_GET(GetFunctionName, ReturnDereferencedHandle)
CALLSITE_GET(GetScriptNameOrSourceUrl, ReturnDereferencedHandle)
CALLSITE_GET(GetMethodName, ReturnDereferencedHandle)
-CALLSITE_GET(GetLineNumber, ReturnPositiveSmiOrNull)
-CALLSITE_GET(GetColumnNumber, ReturnPositiveSmiOrNull)
+CALLSITE_GET(GetLineNumber, ReturnPositiveNumberOrNull)
+CALLSITE_GET(GetColumnNumber, ReturnPositiveNumberOrNull)
CALLSITE_GET(IsNative, ReturnBoolean)
CALLSITE_GET(IsToplevel, ReturnBoolean)
CALLSITE_GET(IsEval, ReturnBoolean)
@@ -370,62 +378,46 @@ RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
}
-RUNTIME_FUNCTION(Runtime_Likely) {
- DCHECK(args.length() == 1);
- return args[0];
-}
-
-
-RUNTIME_FUNCTION(Runtime_Unlikely) {
- DCHECK(args.length() == 1);
- return args[0];
-}
-
-
-RUNTIME_FUNCTION(Runtime_HarmonyToString) {
- // TODO(caitp): Delete this runtime method when removing --harmony-tostring
- return isolate->heap()->ToBoolean(FLAG_harmony_tostring);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetTypeFeedbackVector) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- return function->shared()->feedback_vector();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetCallerJSFunction) {
- SealHandleScope shs(isolate);
- StackFrameIterator it(isolate);
- RUNTIME_ASSERT(it.frame()->type() == StackFrame::STUB);
- it.Advance();
- RUNTIME_ASSERT(it.frame()->type() == StackFrame::JAVA_SCRIPT);
- return JavaScriptFrame::cast(it.frame())->function();
-}
-
+namespace {
-RUNTIME_FUNCTION(Runtime_GetCodeStubExportsObject) {
- HandleScope shs(isolate);
- return isolate->heap()->code_stub_exports_object();
+bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
+ JavaScriptFrameIterator it(isolate);
+ if (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* fun = frame->function();
+ Object* script = fun->shared()->script();
+ if (script->IsScript() &&
+ !(Script::cast(script)->source()->IsUndefined())) {
+ Handle<Script> casted_script(Script::cast(script));
+ // Compute the location from the function and the relocation info of the
+ // baseline code. For optimized code this will use the deoptimization
+ // information to get canonical location information.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ it.frame()->Summarize(&frames);
+ FrameSummary& summary = frames.last();
+ int pos = summary.code()->SourcePosition(summary.pc());
+ *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
+ return true;
+ }
+ }
+ return false;
}
-namespace {
-
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
- if (isolate->ComputeLocation(&location)) {
+ if (ComputeLocation(isolate, &location)) {
Zone zone;
base::SmartPointer<ParseInfo> info(
location.function()->shared()->is_function()
? new ParseInfo(&zone, location.function())
: new ParseInfo(&zone, location.script()));
if (Parser::ParseStatic(info.get())) {
- CallPrinter printer(isolate, &zone);
+ CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
const char* string = printer.Print(info->literal(), location.start_pos());
- return isolate->factory()->NewStringFromAsciiChecked(string);
+ if (strlen(string) > 0) {
+ return isolate->factory()->NewStringFromAsciiChecked(string);
+ }
} else {
isolate->clear_pending_exception();
}
@@ -445,5 +437,37 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
}
+
+RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<String> callsite = RenderCallSite(isolate, object);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor, callsite));
+}
+
+
+// ES6 section 7.3.17 CreateListFromArrayLike (obj)
+RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<FixedArray> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::CreateListFromArrayLike(isolate, object, ElementTypes::kAll));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(counter, 0);
+ isolate->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(counter));
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-interpreter.cc b/chromium/v8/src/runtime/runtime-interpreter.cc
index e0a171267fa..d061a4916dc 100644
--- a/chromium/v8/src/runtime/runtime-interpreter.cc
+++ b/chromium/v8/src/runtime/runtime-interpreter.cc
@@ -96,7 +96,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterGreaterThanOrEqual) {
RUNTIME_FUNCTION(Runtime_InterpreterStrictEquals) {
- SealHandleScope scope(isolate);
+ SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
CONVERT_ARG_CHECKED(Object, y, 1);
@@ -105,7 +105,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterStrictEquals) {
RUNTIME_FUNCTION(Runtime_InterpreterStrictNotEquals) {
- SealHandleScope scope(isolate);
+ SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
CONVERT_ARG_CHECKED(Object, y, 1);
@@ -114,12 +114,89 @@ RUNTIME_FUNCTION(Runtime_InterpreterStrictNotEquals) {
RUNTIME_FUNCTION(Runtime_InterpreterToBoolean) {
- SealHandleScope scope(isolate);
+ SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
return isolate->heap()->ToBoolean(x->BooleanValue());
}
+RUNTIME_FUNCTION(Runtime_InterpreterLogicalNot) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ return isolate->heap()->ToBoolean(!x->BooleanValue());
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterTypeOf) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ return Object::cast(*Object::TypeOf(isolate, x));
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ CONVERT_SMI_ARG_CHECKED(pretenured_flag, 1);
+ Handle<Context> context(isolate->context(), isolate);
+ return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, context, static_cast<PretenureFlag>(pretenured_flag));
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ Object* property_names = Runtime_GetPropertyNamesFast(
+ 1, Handle<Object>::cast(receiver).location(), isolate);
+ if (isolate->has_pending_exception()) {
+ return property_names;
+ }
+
+ Handle<Object> cache_type(property_names, isolate);
+ Handle<FixedArray> cache_array;
+ int cache_length;
+
+ Handle<Map> receiver_map = handle(receiver->map(), isolate);
+ if (cache_type->IsMap()) {
+ Handle<Map> cache_type_map =
+ handle(Handle<Map>::cast(cache_type)->map(), isolate);
+ DCHECK(cache_type_map.is_identical_to(isolate->factory()->meta_map()));
+ int enum_length = cache_type_map->EnumLength();
+ DescriptorArray* descriptors = receiver_map->instance_descriptors();
+ if (enum_length > 0 && descriptors->HasEnumCache()) {
+ cache_array = handle(descriptors->GetEnumCache(), isolate);
+ cache_length = cache_array->length();
+ } else {
+ cache_array = isolate->factory()->empty_fixed_array();
+ cache_length = 0;
+ }
+ } else {
+ cache_array = Handle<FixedArray>::cast(cache_type);
+ cache_length = cache_array->length();
+
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ if (receiver_map->instance_type() == JS_PROXY_TYPE) {
+ // Zero indicates a proxy.
+ cache_type = Handle<Object>(Smi::FromInt(0), isolate);
+ } else {
+ // One entails a slow check.
+ cache_type = Handle<Object>(Smi::FromInt(1), isolate);
+ }
+ }
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(3);
+ result->set(0, *cache_type);
+ result->set(1, *cache_array);
+ result->set(2, Smi::FromInt(cache_length));
+ return *result;
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-json.cc b/chromium/v8/src/runtime/runtime-json.cc
index 07232d59b8b..45f81830527 100644
--- a/chromium/v8/src/runtime/runtime-json.cc
+++ b/chromium/v8/src/runtime/runtime-json.cc
@@ -7,9 +7,9 @@
#include "src/arguments.h"
#include "src/char-predicates-inl.h"
#include "src/isolate-inl.h"
-#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/objects-inl.h"
+#include "src/parsing/json-parser.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/runtime/runtime-literals.cc b/chromium/v8/src/runtime/runtime-literals.cc
index 903e2feb53b..b0e41dcdaaa 100644
--- a/chromium/v8/src/runtime/runtime-literals.cc
+++ b/chromium/v8/src/runtime/runtime-literals.cc
@@ -6,9 +6,9 @@
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -159,11 +159,9 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
DisallowHeapAllocation no_gc;
DCHECK(IsFastElementsKind(constant_elements_kind));
Context* native_context = isolate->context()->native_context();
- Object* maps_array = is_strong
- ? native_context->js_array_strong_maps()
- : native_context->js_array_maps();
- DCHECK(!maps_array->IsUndefined());
- Object* map = FixedArray::cast(maps_array)->get(constant_elements_kind);
+ Strength strength = is_strong ? Strength::STRONG : Strength::WEAK;
+ Object* map = native_context->get(
+ Context::ArrayMapIndex(constant_elements_kind, strength));
object->set_map(Map::cast(map));
}
@@ -236,13 +234,33 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
}
+RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> boilerplate(closure->literals()->literal(index), isolate);
+ if (boilerplate->IsUndefined()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
+ closure->literals()->set_literal(index, *boilerplate);
+ }
+ return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
+}
+
+
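
// Editor's sketch of the caching pattern above (hypothetical stand-in
// types): the boilerplate is created once per literal slot and reused on
// later evaluations; callers receive a copy, as JSRegExp::Copy does, so
// mutations never leak into the cache.
#include <memory>
#include <vector>

struct RegExpBoilerplate {
  // pattern and flags would live here
};

std::shared_ptr<RegExpBoilerplate> GetOrCreate(
    std::vector<std::shared_ptr<RegExpBoilerplate>>& literals, int index) {
  if (!literals[index]) {
    literals[index] = std::make_shared<RegExpBoilerplate>();  // first use
  }
  return literals[index];  // cached boilerplate on every later evaluation
}
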
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<LiteralsArray> literals(closure->literals(), isolate);
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
@@ -351,13 +369,14 @@ static MaybeHandle<JSObject> CreateArrayLiteralImpl(
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<JSObject> result;
+ Handle<LiteralsArray> literals(closure->literals(), isolate);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, CreateArrayLiteralImpl(isolate, literals, literals_index,
elements, flags));
@@ -367,12 +386,13 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
Handle<JSObject> result;
+ Handle<LiteralsArray> literals(closure->literals(), isolate);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
diff --git a/chromium/v8/src/runtime/runtime-liveedit.cc b/chromium/v8/src/runtime/runtime-liveedit.cc
index 947ef2c29b0..189ec08d33d 100644
--- a/chromium/v8/src/runtime/runtime-liveedit.cc
+++ b/chromium/v8/src/runtime/runtime-liveedit.cc
@@ -200,22 +200,34 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
- RUNTIME_ASSERT(shared_array->length()->IsSmi());
- RUNTIME_ASSERT(shared_array->HasFastElements())
- int array_length = Smi::cast(shared_array->length())->value();
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, old_shared_array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, new_shared_array, 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 2);
+ USE(new_shared_array);
+ RUNTIME_ASSERT(old_shared_array->length()->IsSmi());
+ RUNTIME_ASSERT(new_shared_array->length() == old_shared_array->length());
+ RUNTIME_ASSERT(old_shared_array->HasFastElements())
+ RUNTIME_ASSERT(new_shared_array->HasFastElements())
+ int array_length = Smi::cast(old_shared_array->length())->value();
for (int i = 0; i < array_length; i++) {
- Handle<Object> element;
+ Handle<Object> old_element;
+ Handle<Object> new_element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, old_element, Object::GetElement(isolate, old_shared_array, i));
+ RUNTIME_ASSERT(
+ old_element->IsJSValue() &&
+ Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element, Object::GetElement(isolate, shared_array, i));
+ isolate, new_element, Object::GetElement(isolate, new_shared_array, i));
RUNTIME_ASSERT(
- element->IsJSValue() &&
- Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo());
+ new_element->IsUndefined() ||
+ (new_element->IsJSValue() &&
+ Handle<JSValue>::cast(new_element)->value()->IsSharedFunctionInfo()));
}
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
+ return *LiveEdit::CheckAndDropActivations(old_shared_array, new_shared_array,
+ do_drop);
}
@@ -229,7 +241,14 @@ RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
- return *LiveEdit::CompareStrings(s1, s2);
+ Handle<JSArray> result = LiveEdit::CompareStrings(s1, s2);
+ uint32_t array_length;
+ CHECK(result->length()->ToArrayLength(&array_length));
+ if (array_length > 0) {
+ isolate->debug()->feature_tracker()->Track(DebugFeatureTracker::kLiveEdit);
+ }
+
+ return *result;
}
diff --git a/chromium/v8/src/runtime/runtime-maths.cc b/chromium/v8/src/runtime/runtime-maths.cc
index 504261679e4..427d2b868a6 100644
--- a/chromium/v8/src/runtime/runtime-maths.cc
+++ b/chromium/v8/src/runtime/runtime-maths.cc
@@ -6,6 +6,8 @@
#include "src/arguments.h"
#include "src/assembler.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/third_party/fdlibm/fdlibm.h"
@@ -67,8 +69,8 @@ RUNTIME_FUNCTION(Runtime_RemPiO2) {
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_ARG_CHECKED(JSTypedArray, result, 1);
RUNTIME_ASSERT(result->byte_length() == Smi::FromInt(2 * sizeof(double)));
- void* backing_store = JSArrayBuffer::cast(result->buffer())->backing_store();
- double* y = static_cast<double*>(backing_store);
+ FixedFloat64Array* array = FixedFloat64Array::cast(result->elements());
+ double* y = static_cast<double*>(array->DataPtr());
return Smi::FromInt(fdlibm::rempio2(x, y));
}
@@ -105,8 +107,8 @@ RUNTIME_FUNCTION(Runtime_MathExpRT) {
isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_exp();
- return *isolate->factory()->NewNumber(fast_exp(x));
+ lazily_initialize_fast_exp(isolate);
+ return *isolate->factory()->NewNumber(fast_exp(x, isolate));
}
@@ -148,7 +150,7 @@ RUNTIME_FUNCTION(Runtime_MathPow) {
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result = power_helper(x, y);
+ double result = power_helper(isolate, x, y);
if (std::isnan(result)) return isolate->heap()->nan_value();
return *isolate->factory()->NewNumber(result);
}
@@ -222,7 +224,8 @@ RUNTIME_FUNCTION(Runtime_MathSqrt) {
isolate->counters()->math_sqrt()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(fast_sqrt(x));
+ lazily_initialize_fast_sqrt(isolate);
+ return *isolate->factory()->NewNumber(fast_sqrt(x, isolate));
}
@@ -244,5 +247,52 @@ RUNTIME_FUNCTION(Runtime_IsMinusZero) {
HeapNumber* number = HeapNumber::cast(obj);
return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
}
+
+
+RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ // Random numbers in the snapshot are not really that random.
+ DCHECK(!isolate->bootstrapper()->IsActive());
+ static const int kState0Offset = 0;
+ static const int kState1Offset = 1;
+ static const int kRandomBatchSize = 64;
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_typed_array, 0);
+ Handle<JSTypedArray> typed_array;
+ // Allocate typed array if it does not yet exist.
+ if (maybe_typed_array->IsJSTypedArray()) {
+ typed_array = Handle<JSTypedArray>::cast(maybe_typed_array);
+ } else {
+ static const int kByteLength = kRandomBatchSize * kDoubleSize;
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ JSArrayBuffer::SetupAllocatingData(buffer, isolate, kByteLength, true,
+ SharedFlag::kNotShared);
+ typed_array = isolate->factory()->NewJSTypedArray(
+ kExternalFloat64Array, buffer, 0, kRandomBatchSize);
+ }
+
+ DisallowHeapAllocation no_gc;
+ double* array =
+ reinterpret_cast<double*>(typed_array->GetBuffer()->backing_store());
+ // Fetch existing state.
+ uint64_t state0 = double_to_uint64(array[kState0Offset]);
+ uint64_t state1 = double_to_uint64(array[kState1Offset]);
+ // Initialize state if not yet initialized.
+ while (state0 == 0 || state1 == 0) {
+ isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
+ isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
+ }
+ // Create random numbers.
+ for (int i = kState1Offset + 1; i < kRandomBatchSize; i++) {
+ // Generate random numbers using xorshift128+.
+ base::RandomNumberGenerator::XorShift128(&state0, &state1);
+ array[i] = base::RandomNumberGenerator::ToDouble(state0, state1);
+ }
+ // Persist current state.
+ array[kState0Offset] = uint64_to_double(state0);
+ array[kState1Offset] = uint64_to_double(state1);
+ return *typed_array;
+}
} // namespace internal
} // namespace v8
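
// Editor's standalone sketch of the xorshift128+ step and the bits-to-double
// mapping used by Runtime_GenerateRandomNumbers above. The shift constants
// (23, 17, 26) and the mantissa/exponent trick follow Vigna's published
// xorshift128+ and are assumed to match base::RandomNumberGenerator.
#include <cstdint>
#include <cstring>

void XorShift128(uint64_t* state0, uint64_t* state1) {
  uint64_t s1 = *state0;
  uint64_t s0 = *state1;
  *state0 = s0;
  s1 ^= s1 << 23;
  s1 ^= s1 >> 17;
  s1 ^= s0;
  s1 ^= s0 >> 26;
  *state1 = s1;
}

double ToDouble(uint64_t state0, uint64_t state1) {
  // Force the exponent to [1.0, 2.0), take 52 random mantissa bits, then
  // shift the result down into [0.0, 1.0).
  uint64_t random =
      ((state0 + state1) & 0x000FFFFFFFFFFFFFULL) | 0x3FF0000000000000ULL;
  double result;
  std::memcpy(&result, &random, sizeof result);  // safe type-punning
  return result - 1.0;
}
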
diff --git a/chromium/v8/src/runtime/runtime-numbers.cc b/chromium/v8/src/runtime/runtime-numbers.cc
index 177b3ff5841..46fbff34637 100644
--- a/chromium/v8/src/runtime/runtime-numbers.cc
+++ b/chromium/v8/src/runtime/runtime-numbers.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -113,11 +114,13 @@ RUNTIME_FUNCTION(Runtime_StringToNumber) {
}
+// ES6 18.2.5 parseInt(string, radix) slow path
RUNTIME_FUNCTION(Runtime_StringParseInt) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
+ // Step 8.a. is already handled in the JS function.
RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
subject = String::Flatten(subject);
@@ -127,7 +130,6 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
DisallowHeapAllocation no_gc;
String::FlatContent flat = subject->GetFlatContent();
- // ECMA-262 section 15.1.2.3, empty string is NaN
if (flat.IsOneByte()) {
value =
StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(), radix);
@@ -140,6 +142,7 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
}
+// ES6 18.2.4 parseFloat(string)
RUNTIME_FUNCTION(Runtime_StringParseFloat) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -171,12 +174,13 @@ RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
}
+// TODO(bmeurer): Kill this runtime entry. Uses in date.js are wrong anyway.
RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- double double_value = DoubleToInteger(number);
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, input, Object::ToNumber(input));
+ double double_value = DoubleToInteger(input->Number());
// Map both -0 and +0 to +0.
if (double_value == 0) double_value = 0;
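
// Editor's sketch: why `if (double_value == 0) double_value = 0;` above
// folds -0 into +0. IEEE 754 defines -0 == +0 as true, so the branch is
// taken for both zeros and the assignment overwrites the sign bit.
#include <cassert>
#include <cmath>

int main() {
  double v = -0.0;
  assert(std::signbit(v));   // really is negative zero
  if (v == 0) v = 0;         // comparison succeeds for -0 as well
  assert(!std::signbit(v));  // now positive zero
}
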
@@ -314,5 +318,20 @@ RUNTIME_FUNCTION(Runtime_GetRootNaN) {
return isolate->heap()->nan_value();
}
+
+RUNTIME_FUNCTION(Runtime_GetHoleNaNUpper) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ return *isolate->factory()->NewNumberFromUint(kHoleNanUpper32);
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetHoleNaNLower) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ return *isolate->factory()->NewNumberFromUint(kHoleNanLower32);
+}
+
+
} // namespace internal
} // namespace v8
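
// Editor's sketch (plain C++): the upper/lower 32-bit halves of a double
// that Runtime_GetHoleNaNUpper/Runtime_GetHoleNaNLower expose. The concrete
// hole-NaN bit pattern is V8-internal and not reproduced here.
#include <cstdint>
#include <cstring>

void SplitDouble(double value, uint32_t* upper, uint32_t* lower) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  *upper = static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa top
  *lower = static_cast<uint32_t>(bits);        // low 32 mantissa bits
}
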
diff --git a/chromium/v8/src/runtime/runtime-object.cc b/chromium/v8/src/runtime/runtime-object.cc
index 4782a314305..75ddb7bc22b 100644
--- a/chromium/v8/src/runtime/runtime-object.cc
+++ b/chromium/v8/src/runtime/runtime-object.cc
@@ -9,6 +9,7 @@
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/property-descriptor.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -26,26 +27,12 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Object);
}
- // Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return Object::GetElement(isolate, object, index, language_mode);
- }
+ bool success = false;
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, object, key, &success);
+ if (!success) return MaybeHandle<Object>();
- // Convert the key to a name - possibly by calling back into JavaScript.
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
-
- // Check if the name is trivially convertible to an index and get
- // the element if so.
- // TODO(verwaest): Make sure GetProperty(LookupIterator*) can handle this, and
- // remove the special casing here.
- if (name->AsArrayIndex(&index)) {
- return Object::GetElement(isolate, object, index);
- } else {
- return Object::GetProperty(object, name, language_mode);
- }
+ return Object::GetProperty(&it, language_mode);
}
@@ -70,7 +57,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
DisallowHeapAllocation no_allocation;
Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
Handle<Name> key = Handle<Name>::cast(key_obj);
- if (receiver->IsGlobalObject()) {
+ if (receiver->IsJSGlobalObject()) {
// Attempt dictionary lookup.
GlobalDictionary* dictionary = receiver->global_dictionary();
int entry = dictionary->FindEntry(key);
@@ -131,21 +118,16 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
}
-MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> key,
- LanguageMode language_mode) {
- // Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return JSReceiver::DeleteElement(receiver, index, language_mode);
- }
+Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> key,
+ LanguageMode language_mode) {
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, key, &success, LookupIterator::HIDDEN);
+ if (!success) return Nothing<bool>();
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
-
- return JSReceiver::DeletePropertyOrElement(receiver, name, language_mode);
+ return JSReceiver::DeleteProperty(&it, language_mode);
}
@@ -162,18 +144,14 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
// Check if the given key is an array index.
- uint32_t index = 0;
- if (key->ToArrayIndex(&index)) {
- return Object::SetElement(isolate, object, index, value, language_mode);
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
- Object);
+ bool success = false;
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, object, key, &success);
+ if (!success) return MaybeHandle<Object>();
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
- return Object::SetProperty(&it, value, language_mode,
- Object::MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN_NULL(Object::SetProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED));
+ return value;
}
@@ -181,68 +159,34 @@ RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- // We don't expect access checks to be needed on JSProxy objects.
- DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
- PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
- do {
- if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() &&
- !isolate->MayAccess(PrototypeIterator::GetCurrent<JSObject>(iter))) {
- return isolate->heap()->null_value();
- }
- iter.AdvanceIgnoringProxies();
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return *PrototypeIterator::GetCurrent(iter);
- }
- } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
- return *PrototypeIterator::GetCurrent(iter);
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
+ Object::GetPrototype(isolate, obj));
+ return *prototype;
}
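
// Editor's sketch (stand-in types, not V8 code): the shape of the new path.
// A proxy's [[GetPrototypeOf]] trap runs user code, so reading a prototype
// can fail; ASSIGN_RETURN_FAILURE_ON_EXCEPTION above propagates that case,
// modeled here with a bool success flag.
struct Obj {
  const Obj* proto;
  bool trap_throws;  // hypothetical: a proxy trap that raises an exception
};

bool GetPrototype(const Obj* o, const Obj** out) {
  if (o->trap_throws) return false;  // "exception" propagates to the caller
  *out = o->proto;
  return true;
}
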
RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- DCHECK(!obj->IsAccessCheckNeeded());
- DCHECK(!obj->map()->is_observed());
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetPrototype(obj, prototype, false));
- return *result;
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(obj, prototype, false, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ return *obj;
}
RUNTIME_FUNCTION(Runtime_SetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (obj->IsAccessCheckNeeded() && !isolate->MayAccess(obj)) {
- isolate->ReportFailedAccessCheck(obj);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->undefined_value();
- }
- if (obj->map()->is_observed()) {
- Handle<Object> old_value =
- Object::GetPrototypeSkipHiddenPrototypes(isolate, obj);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetPrototype(obj, prototype, true));
-
- Handle<Object> new_value =
- Object::GetPrototypeSkipHiddenPrototypes(isolate, obj);
- if (!new_value->SameValue(*old_value)) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::EnqueueChangeRecord(
- obj, "setPrototype", isolate->factory()->proto_string(),
- old_value));
- }
- return *result;
- }
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::SetPrototype(obj, prototype, true));
- return *result;
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(obj, prototype, true, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ return *obj;
}
@@ -265,14 +209,13 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
- PropertyAttributes attrs;
// Get attributes.
LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name,
LookupIterator::HIDDEN);
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return MaybeHandle<Object>();
- attrs = maybe.FromJust();
+ PropertyAttributes attrs = maybe.FromJust();
if (attrs == ABSENT) return factory->undefined_value();
DCHECK(!isolate->has_pending_exception());
@@ -310,7 +253,8 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
// [false, value, Writeable, Enumerable, Configurable]
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
+// TODO(jkummerow): Deprecated. Remove all callers and delete.
+RUNTIME_FUNCTION(Runtime_GetOwnProperty_Legacy) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
@@ -322,22 +266,28 @@ RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
}
-RUNTIME_FUNCTION(Runtime_PreventExtensions) {
+// ES6 19.1.2.6
+RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::PreventExtensions(obj));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsExtensible) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsExtensible());
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, raw_name, 1);
+ // 1. Let obj be ? ToObject(O).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+ Execution::ToObject(isolate, object));
+ // 2. Let key be ? ToPropertyKey(P).
+ Handle<Name> key;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToName(isolate, raw_name));
+
+ // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, Handle<JSReceiver>::cast(object), key, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ // 4. Return FromPropertyDescriptor(desc).
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
}
@@ -356,36 +306,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
}
-RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- // %ObjectFreeze is a fast path and these cases are handled elsewhere.
- RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed() && !object->IsJSProxy());
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Freeze(object));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectSeal) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- // %ObjectSeal is a fast path and these cases are handled elsewhere.
- RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed() && !object->IsJSProxy());
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Seal(object));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -399,7 +319,8 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
// Lookup the named property on the global object.
Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
- Handle<GlobalObject> global_object(script_context->global_object(), isolate);
+ Handle<JSGlobalObject> global_object(script_context->global_object(),
+ isolate);
LookupIterator it(global_object, name, LookupIterator::HIDDEN);
// Switch to fast mode only if there is a data property and it's not on
@@ -433,7 +354,8 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
// Lookup the named property on the global object.
Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
- Handle<GlobalObject> global_object(script_context->global_object(), isolate);
+ Handle<JSGlobalObject> global_object(script_context->global_object(),
+ isolate);
LookupIterator it(global_object, name, LookupIterator::HIDDEN);
// Switch to fast mode only if there is a data property and it's not on
@@ -449,12 +371,10 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
script_context->set(slot, isolate->heap()->empty_property_cell());
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::SetProperty(&it, value, language_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED));
- return *result;
+ MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
+ Object::CERTAINLY_NOT_STORE_FROM_KEYED),
+ isolate->heap()->exception());
+ return *value;
}
} // namespace
@@ -647,11 +567,10 @@ Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode));
- return *result;
+ Maybe<bool> result =
+ Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
} // namespace
@@ -686,14 +605,15 @@ static Object* HasOwnPropertyImplementation(Isolate* isolate,
// look like they are on this object.
PrototypeIterator iter(isolate, object);
if (!iter.IsAtEnd() &&
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))
+ PrototypeIterator::GetCurrent<HeapObject>(iter)
->map()
->is_hidden_prototype()) {
// TODO(verwaest): The recursion is not necessary for keys that are array
  // indices. Remove this.
+ // Casting to JSObject is fine because JSProxies are never used as
+ // hidden prototypes.
return HasOwnPropertyImplementation(
- isolate, Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
- key);
+ isolate, PrototypeIterator::GetCurrent<JSObject>(iter), key);
}
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
@@ -715,9 +635,13 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
+ // TODO(jkummerow): Make JSReceiver::HasOwnProperty fast enough to
+ // handle all cases directly (without this custom fast path).
Maybe<bool> maybe = Nothing<bool>();
if (key_is_array_index) {
- maybe = JSObject::HasOwnElement(js_obj, index);
+ LookupIterator it(js_obj->GetIsolate(), js_obj, index,
+ LookupIterator::HIDDEN);
+ maybe = JSReceiver::HasProperty(&it);
} else {
maybe = JSObject::HasRealNamedProperty(js_obj, key);
}
@@ -740,6 +664,11 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
if (index < static_cast<uint32_t>(string->length())) {
return isolate->heap()->true_value();
}
+ } else if (object->IsJSProxy()) {
+ Maybe<bool> result =
+ JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ return isolate->heap()->ToBoolean(result.FromJust());
}
return isolate->heap()->false_value();
}
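
Runtime_HasOwnProperty splits on whether the key parses as an array
index before choosing the element path or the named-property path. A
hedged sketch of that index test in plain C++; it is simplified (the
leading-zero rule for JavaScript array indices is skipped) and the
actual lookups are not shown:

#include <cstdint>
#include <string>

// Returns true and fills *index if `key` is all digits and below the
// JavaScript array-index limit of 2^32 - 2.
bool ParseArrayIndex(const std::string& key, uint32_t* index) {
  if (key.empty() || key.size() > 10) return false;  // 10 digits max
  uint64_t value = 0;
  for (char c : key) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + static_cast<uint64_t>(c - '0');
  }
  if (value > 0xFFFFFFFEull) return false;
  *index = static_cast<uint32_t>(value);
  return true;
}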
@@ -772,17 +701,17 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
}
-RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) {
+RUNTIME_FUNCTION(Runtime_PropertyIsEnumerable) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(object, key);
if (!maybe.IsJust()) return isolate->heap()->exception();
- if (maybe.FromJust() == ABSENT) maybe = Just(DONT_ENUM);
+ if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
@@ -803,8 +732,8 @@ RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
Handle<JSReceiver> object(raw_object);
Handle<FixedArray> content;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, content,
- JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS));
+ isolate, content, JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS,
+ ENUMERABLE_STRINGS));
  // Test again, since the cache may have been built by a preceding call.
if (object->IsSimpleEnum()) return object->map();
@@ -813,119 +742,19 @@ RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
}
-// Return the names of the own named properties.
-// args[0]: object
-// args[1]: PropertyAttributes as int
-RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
+RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_SMI_ARG_CHECKED(filter_value, 1);
- PropertyAttributes filter = static_cast<PropertyAttributes>(filter_value);
-
- // Find the number of own properties for each of the objects.
- int total_property_count = 0;
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- total_property_count += jsproto->NumberOfOwnProperties(filter);
- }
-
- // Allocate an array with storage for all the property names.
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(total_property_count);
-
- // Get the property names.
- int next_copy_index = 0;
- int hidden_strings = 0;
- Handle<Object> hidden_string = isolate->factory()->hidden_string();
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- Handle<JSObject> jsproto =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- int own = jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
- // Names from hidden prototypes may already have been added
- // for inherited function template instances. Count the duplicates
- // and stub them out; the final copy pass at the end ignores holes.
- for (int j = next_copy_index; j < next_copy_index + own; j++) {
- Object* name_from_hidden_proto = names->get(j);
- if (isolate->IsInternallyUsedPropertyName(name_from_hidden_proto)) {
- hidden_strings++;
- } else {
- for (int k = 0; k < next_copy_index; k++) {
- Object* name = names->get(k);
- if (name_from_hidden_proto == name) {
- names->set(j, *hidden_string);
- hidden_strings++;
- break;
- }
- }
- }
- }
- next_copy_index += own;
- }
-
- CHECK_EQ(total_property_count, next_copy_index);
+ PropertyFilter filter = static_cast<PropertyFilter>(filter_value);
- if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
- for (int i = 0; i < total_property_count; i++) {
- Handle<Name> name(Name::cast(names->get(i)));
- if (name.is_identical_to(hidden_string)) continue;
- LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- if (!JSObject::AllCanRead(&it)) {
- names->set(i, *hidden_string);
- hidden_strings++;
- }
- }
- }
-
- // Filter out name of hidden properties object and
- // hidden prototype duplicates.
- if (hidden_strings > 0) {
- if (hidden_strings == total_property_count) {
- names = isolate->factory()->empty_fixed_array();
- } else {
- int i;
- for (i = 0; i < total_property_count; i++) {
- Object* name = names->get(i);
- if (name == *hidden_string) break;
- }
- int dest_pos = i;
- for (; i < total_property_count; i++) {
- Object* name = names->get(i);
- if (name == *hidden_string) continue;
- names->set(dest_pos++, name);
- }
-
- isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *names, hidden_strings);
- }
- }
-
- return *isolate->factory()->NewJSArrayWithElements(names);
-}
-
-
-// Return the names of the own indexed properties.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetOwnElementNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, filter,
+ CONVERT_TO_STRING));
- int n = obj->NumberOfOwnElements(NONE);
- Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- obj->GetOwnElementKeys(*names, NONE);
- return *isolate->factory()->NewJSArrayWithElements(names);
+ return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -947,84 +776,11 @@ RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
}
-// Return property names from named interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetNamedInterceptorPropertyNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- if (obj->HasNamedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForNamedInterceptor(obj, obj).ToHandle(&result)) {
- return *result;
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Return element names from indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetIndexedInterceptorElementNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- if (obj->HasIndexedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForIndexedInterceptor(obj, obj).ToHandle(&result)) {
- return *result;
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_OwnKeys) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
- Handle<JSObject> object(raw_object);
-
- Handle<FixedArray> contents;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, contents, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY));
-
- // Some fast paths through GetKeysInFixedArrayFor reuse a cached
- // property array and since the result is mutable we have to create
- // a fresh clone on each invocation.
- int length = contents->length();
- Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
- int offset = 0;
- // Use an outer loop to avoid creating too many handles in the current
- // handle scope.
- while (offset < length) {
- HandleScope scope(isolate);
- offset += 100;
- int end = Min(offset, length);
- for (int i = offset - 100; i < end; i++) {
- Object* entry = contents->get(i);
- if (entry->IsString()) {
- copy->set(i, entry);
- } else {
- DCHECK(entry->IsNumber());
- Handle<Object> entry_handle(entry, isolate);
- Handle<Object> entry_str =
- isolate->factory()->NumberToString(entry_handle);
- copy->set(i, *entry_str);
- }
- }
- }
- return *isolate->factory()->NewJSArrayWithElements(copy);
-}
-
-
RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- if (object->IsJSObject() && !object->IsGlobalObject()) {
+ if (object->IsJSObject() && !object->IsJSGlobalObject()) {
JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
"RuntimeToFastProperties");
}
@@ -1039,87 +795,15 @@ RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
}
-static Object* Runtime_NewObjectHelper(Isolate* isolate,
- Handle<Object> constructor,
- Handle<Object> original_constructor,
- Handle<AllocationSite> site) {
- // If the constructor isn't a proper function we throw a type error.
- if (!constructor->IsJSFunction()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor, constructor));
- }
-
- Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
-
- CHECK(original_constructor->IsJSFunction());
- Handle<JSFunction> original_function =
- Handle<JSFunction>::cast(original_constructor);
-
-
- // Check that function is a constructor.
- if (!function->IsConstructor()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor, constructor));
- }
-
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) debug->HandleStepIn(function, true);
-
- if (function->has_initial_map()) {
- if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
- // The 'Function' function ignores the receiver object when
- // called using 'new' and creates a new JSFunction object that
- // is returned. The receiver object is only used for error
- // reporting if an error occurs when constructing the new
- // JSFunction. Factory::NewJSObject() should not be used to
- // allocate JSFunctions since it does not properly initialize
- // the shared part of the function. Since the receiver is
- // ignored anyway, we use the global object as the receiver
- // instead of a new JSFunction object. This way, errors are
- // reported the same way whether or not 'Function' is called
- // using 'new'.
- return isolate->global_proxy();
- }
- }
-
- // The function should be compiled for the optimization hints to be
- // available.
- Compiler::Compile(function, CLEAR_EXCEPTION);
-
- Handle<JSObject> result;
- if (site.is_null()) {
- result = isolate->factory()->NewJSObject(function);
- } else {
- result = isolate->factory()->NewJSObjectWithMemento(function, site);
- }
-
-  // Set up the prototype using the original function.
- // TODO(dslomov): instead of setting the __proto__,
- // use and cache the correct map.
- if (*original_function != *function) {
- if (original_function->has_instance_prototype()) {
- Handle<Object> prototype =
- handle(original_function->instance_prototype(), isolate);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetPrototype(result, prototype, false));
- }
- }
-
- isolate->counters()->constructed_objects()->Increment();
- isolate->counters()->constructed_objects_runtime()->Increment();
-
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_NewObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, original_constructor, 1);
- return Runtime_NewObjectHelper(isolate, constructor, original_constructor,
- Handle<AllocationSite>::null());
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ return *result;
}
@@ -1127,8 +811,8 @@ RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->CompleteInobjectSlackTracking();
+ CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
+ initial_map->CompleteInobjectSlackTracking();
return isolate->heap()->undefined_value();
}
@@ -1307,22 +991,6 @@ RUNTIME_FUNCTION(Runtime_JSValueGetValue) {
}
-RUNTIME_FUNCTION(Runtime_HeapObjectGetMap) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, obj, 0);
- return obj->map();
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapGetInstanceType) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Map, map, 0);
- return Smi::FromInt(map->instance_type());
-}
-
-
RUNTIME_FUNCTION(Runtime_ObjectEquals) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
@@ -1332,11 +1000,11 @@ RUNTIME_FUNCTION(Runtime_ObjectEquals) {
}
-RUNTIME_FUNCTION(Runtime_IsSpecObject) {
+RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsSpecObject());
+ return isolate->heap()->ToBoolean(obj->IsJSReceiver());
}
@@ -1581,15 +1249,12 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
if (!object->IsJSReceiver()) {
return isolate->heap()->false_value();
}
- // Check if {callable} is bound, if so, get [[BoundFunction]] from it and use
- // that instead of {callable}.
- if (callable->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- if (function->shared()->bound()) {
- Handle<FixedArray> bindings(function->function_bindings(), isolate);
- callable =
- handle(bindings->get(JSFunction::kBoundFunctionIndex), isolate);
- }
+  // Check if {callable} is bound; if so, get [[BoundTargetFunction]] from it
+ // and use that instead of {callable}.
+ while (callable->IsJSBoundFunction()) {
+ callable =
+ handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+ isolate);
}
DCHECK(callable->IsCallable());
// Get the "prototype" of {callable}; raise an error if it's not a receiver.
@@ -1603,18 +1268,20 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
}
// Return whether or not {prototype} is in the prototype chain of {object}.
- return isolate->heap()->ToBoolean(
- object->HasInPrototypeChain(isolate, *prototype));
+ Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
- SealHandleScope scope(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
- CONVERT_ARG_CHECKED(Object, prototype, 1);
- return isolate->heap()->ToBoolean(
- object->HasInPrototypeChain(isolate, prototype));
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
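
Runtime_InstanceOf above first unwraps any chain of bound functions to
the underlying callable, then Object::HasInPrototypeChain walks the
receiver's prototype chain looking for the callable's "prototype". A
self-contained sketch of both steps, with stand-in types rather than
V8 classes:

// Illustrative stand-ins: `bound_target` is non-null for bound
// functions; `prototype` is the object constructed instances link to.
struct Callable {
  const Callable* bound_target = nullptr;
  const void* prototype = nullptr;
};
struct Obj {
  const Obj* proto = nullptr;
};

bool InstanceOfSketch(const Obj* object, const Callable* callable) {
  while (callable->bound_target) callable = callable->bound_target;
  for (const Obj* p = object->proto; p != nullptr; p = p->proto) {
    if (p == callable->prototype) return true;
  }
  return false;
}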
@@ -1636,5 +1303,25 @@ RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
}
+RUNTIME_FUNCTION(Runtime_ObjectDefineProperty) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, attributes, 2);
+ return JSReceiver::DefineProperty(isolate, o, name, attributes);
+}
+
+
+RUNTIME_FUNCTION(Runtime_ObjectDefineProperties) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, properties, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, o, JSReceiver::DefineProperties(isolate, o, properties));
+ return *o;
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-observe.cc b/chromium/v8/src/runtime/runtime-observe.cc
index df0b2a330ca..0407b8a9df1 100644
--- a/chromium/v8/src/runtime/runtime-observe.cc
+++ b/chromium/v8/src/runtime/runtime-observe.cc
@@ -56,7 +56,7 @@ RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, argument, 1);
v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
// We should send a message on uncaught exception thrown during
@@ -65,16 +65,8 @@ RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
catcher.SetVerbose(true);
Handle<Object> argv[] = {argument};
- // Allow stepping into the observer callback.
- Debug* debug = isolate->debug();
- if (debug->is_active() && debug->IsStepping() &&
- debug->last_step_action() == StepIn) {
- // Previous StepIn may have activated a StepOut if it was at the frame exit.
- // In this case to be able to step into the callback again, we need to clear
- // the step out first.
- debug->ClearStepOut();
- debug->FloodWithOneShot(callback);
- }
+ // If we are in step-in mode, flood the handler.
+ isolate->debug()->EnableStepIn();
USE(Execution::Call(isolate, callback, isolate->factory()->undefined_value(),
arraysize(argv), argv));
@@ -104,11 +96,18 @@ static bool ContextsHaveSameOrigin(Handle<Context> context1,
RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2);
- Handle<Context> observer_context(observer->context()->native_context());
+ while (observer->IsJSBoundFunction()) {
+ observer = handle(
+ Handle<JSBoundFunction>::cast(observer)->bound_target_function());
+ }
+ if (!observer->IsJSFunction()) return isolate->heap()->false_value();
+
+ Handle<Context> observer_context(
+ Handle<JSFunction>::cast(observer)->context()->native_context());
Handle<Context> object_context(object->GetCreationContext());
Handle<Context> record_context(record->GetCreationContext());
diff --git a/chromium/v8/src/runtime/runtime-proxy.cc b/chromium/v8/src/runtime/runtime-proxy.cc
index 4699647b801..3a521c6b7c4 100644
--- a/chromium/v8/src/runtime/runtime-proxy.cc
+++ b/chromium/v8/src/runtime/runtime-proxy.cc
@@ -5,33 +5,139 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/elements.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_CreateJSProxy) {
+
+// ES6 9.5.13 [[Call]] (thisArgument, argumentsList)
+RUNTIME_FUNCTION(Runtime_JSProxyCall) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
- return *isolate->factory()->NewJSProxy(handler, prototype);
+ DCHECK_LE(2, args.length());
+ // thisArgument == receiver
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, args.length() - 1);
+ Handle<String> trap_name = isolate->factory()->apply_string();
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ }
+ // 3. Assert: Type(handler) is Object.
+ DCHECK(handler->IsJSReceiver());
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 5. Let trap be ? GetMethod(handler, "apply").
+ Handle<Object> trap;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
+ // 6. If trap is undefined, then
+ int const arguments_length = args.length() - 2;
+ if (trap->IsUndefined()) {
+ // 6.a. Return Call(target, thisArgument, argumentsList).
+ ScopedVector<Handle<Object>> argv(arguments_length);
+ for (int i = 0; i < arguments_length; ++i) {
+ argv[i] = args.at<Object>(i + 1);
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, target, receiver,
+ arguments_length, argv.start()));
+ return *result;
+ }
+ // 7. Let argArray be CreateArrayFromList(argumentsList).
+ Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, arguments_length, arguments_length);
+ ElementsAccessor* accessor = arg_array->GetElementsAccessor();
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements = arg_array->elements();
+ for (int i = 0; i < arguments_length; i++) {
+ accessor->Set(elements, i, args[i + 1]);
+ }
+ }
+ // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
+ Handle<Object> trap_result;
+ Handle<Object> trap_args[] = {target, receiver, arg_array};
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
+ return *trap_result;
}
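
The new Runtime_JSProxyCall follows ES6 9.5.13: fetch the handler's
"apply" trap and, if it is undefined, call the target directly;
otherwise call the trap with (target, thisArgument, argument list). A
rough sketch of that dispatch shape using std::function, as an analogy
only (the revocation check and the CreateArrayFromList step are
omitted, and none of this is how V8 represents callables):

#include <functional>
#include <string>
#include <vector>

using Value = std::string;
using Target = std::function<Value(const Value&, const std::vector<Value>&)>;
using ApplyTrap =
    std::function<Value(const Target&, const Value&, const std::vector<Value>&)>;

struct ProxySketch {
  Target target;
  ApplyTrap apply_trap;  // empty == trap is undefined

  Value Call(const Value& receiver, const std::vector<Value>& args) const {
    if (!apply_trap) return target(receiver, args);  // step 6.a
    return apply_trap(target, receiver, args);       // step 8
  }
};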
-RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
+// 9.5.14 [[Construct]] (argumentsList, newTarget)
+RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, call_trap, 1);
- RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, construct_trap, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 3);
- if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
- return *isolate->factory()->NewJSFunctionProxy(handler, call_trap,
- construct_trap, prototype);
+ DCHECK_LE(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, args.length() - 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_target, args.length() - 1);
+ Handle<String> trap_name = isolate->factory()->construct_string();
+
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ }
+ // 3. Assert: Type(handler) is Object.
+ DCHECK(handler->IsJSReceiver());
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 5. Let trap be ? GetMethod(handler, "construct").
+ Handle<Object> trap;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
+ // 6. If trap is undefined, then
+ int const arguments_length = args.length() - 3;
+ if (trap->IsUndefined()) {
+ // 6.a. Assert: target has a [[Construct]] internal method.
+ DCHECK(target->IsConstructor());
+ // 6.b. Return Construct(target, argumentsList, newTarget).
+ ScopedVector<Handle<Object>> argv(arguments_length);
+ for (int i = 0; i < arguments_length; ++i) {
+ argv[i] = args.at<Object>(i + 1);
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Execution::New(isolate, target, new_target,
+ arguments_length, argv.start()));
+ return *result;
+ }
+ // 7. Let argArray be CreateArrayFromList(argumentsList).
+ Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, arguments_length, arguments_length);
+ ElementsAccessor* accessor = arg_array->GetElementsAccessor();
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements = arg_array->elements();
+ for (int i = 0; i < arguments_length; i++) {
+ accessor->Set(elements, i, args[i + 1]);
+ }
+ }
+  // 8. Let newObj be ? Call(trap, handler, «target, argArray, newTarget»).
+ Handle<Object> new_object;
+ Handle<Object> trap_args[] = {target, arg_array, new_target};
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_object,
+ Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
+ // 9. If Type(newObj) is not Object, throw a TypeError exception.
+ if (!new_object->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyConstructNonObject, new_object));
+ }
+ // 10. Return newObj.
+ return *new_object;
}
@@ -43,15 +149,7 @@ RUNTIME_FUNCTION(Runtime_IsJSProxy) {
}
-RUNTIME_FUNCTION(Runtime_IsJSFunctionProxy) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetHandler) {
+RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
@@ -59,28 +157,21 @@ RUNTIME_FUNCTION(Runtime_GetHandler) {
}
-RUNTIME_FUNCTION(Runtime_GetCallTrap) {
+RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
- return proxy->call_trap();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetConstructTrap) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
- return proxy->construct_trap();
+ CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+ return proxy->target();
}
-RUNTIME_FUNCTION(Runtime_Fix) {
+RUNTIME_FUNCTION(Runtime_JSProxyRevoke) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
- JSProxy::Fix(proxy);
+ JSProxy::Revoke(proxy);
return isolate->heap()->undefined_value();
}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-regexp.cc b/chromium/v8/src/runtime/runtime-regexp.cc
index 48154ea2751..138b4dc71c4 100644
--- a/chromium/v8/src/runtime/runtime-regexp.cc
+++ b/chromium/v8/src/runtime/runtime-regexp.cc
@@ -658,7 +658,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
subject = String::Flatten(subject);
@@ -693,8 +693,10 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
RUNTIME_ASSERT(pattern_length > 0);
if (limit == 0xffffffffu) {
+ FixedArray* last_match_cache_unused;
Handle<Object> cached_answer(
RegExpResultsCache::Lookup(isolate->heap(), *subject, *pattern,
+ &last_match_cache_unused,
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
isolate);
if (*cached_answer != Smi::FromInt(0)) {
@@ -757,6 +759,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
if (limit == 0xffffffffu) {
if (result->HasFastObjectElements()) {
RegExpResultsCache::Enter(isolate, subject, pattern, elements,
+ isolate->factory()->empty_fixed_array(),
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
}
}
@@ -785,6 +788,22 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
}
+RUNTIME_FUNCTION(Runtime_RegExpFlags) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ return regexp->flags();
+}
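
Several hunks in this file switch from the old accessor style
(regexp->GetFlags().is_global()) to direct bit tests such as
regexp->GetFlags() & JSRegExp::kGlobal, i.e. the flags are now a plain
bit set. A minimal sketch of that representation; the constants are
illustrative, not V8's actual values:

#include <cstdint>

enum RegExpFlagBits : uint32_t {
  kNone = 0,
  kGlobal = 1 << 0,
  kIgnoreCase = 1 << 1,
  kMultiline = 1 << 2,
  kSticky = 1 << 3,
  kUnicode = 1 << 4,
};

inline bool IsGlobal(uint32_t flags) { return (flags & kGlobal) != 0; }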
+
+
+RUNTIME_FUNCTION(Runtime_RegExpSource) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ return regexp->source();
+}
+
+
RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 3);
@@ -806,197 +825,16 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
}
-static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
- bool* success) {
- uint32_t value = JSRegExp::NONE;
- int length = flags->length();
- // A longer flags string cannot be valid.
- if (length > 5) return JSRegExp::Flags(0);
- for (int i = 0; i < length; i++) {
- uint32_t flag = JSRegExp::NONE;
- switch (flags->Get(i)) {
- case 'g':
- flag = JSRegExp::GLOBAL;
- break;
- case 'i':
- flag = JSRegExp::IGNORE_CASE;
- break;
- case 'm':
- flag = JSRegExp::MULTILINE;
- break;
- case 'u':
- if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
- flag = JSRegExp::UNICODE_ESCAPES;
- break;
- case 'y':
- if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
- flag = JSRegExp::STICKY;
- break;
- default:
- return JSRegExp::Flags(0);
- }
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
- *success = true;
- return JSRegExp::Flags(value);
-}
-
-
-template <typename Char>
-inline int CountRequiredEscapes(Handle<String> source) {
- DisallowHeapAllocation no_gc;
- int escapes = 0;
- Vector<const Char> src = source->GetCharVector<Char>();
- for (int i = 0; i < src.length(); i++) {
- if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
- }
- return escapes;
-}
-
-
-template <typename Char, typename StringType>
-inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
- Handle<StringType> result) {
- DisallowHeapAllocation no_gc;
- Vector<const Char> src = source->GetCharVector<Char>();
- Vector<Char> dst(result->GetChars(), result->length());
- int s = 0;
- int d = 0;
- while (s < src.length()) {
- if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
- dst[d++] = src[s++];
- }
- DCHECK_EQ(result->length(), d);
- return result;
-}
-
-
-MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
- Handle<String> source) {
- String::Flatten(source);
- if (source->length() == 0) return isolate->factory()->query_colon_string();
- bool one_byte = source->IsOneByteRepresentationUnderneath();
- int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
- : CountRequiredEscapes<uc16>(source);
- if (escapes == 0) return source;
- int length = source->length() + escapes;
- if (one_byte) {
- Handle<SeqOneByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- isolate->factory()->NewRawOneByteString(length),
- String);
- return WriteEscapedRegExpSource<uint8_t>(source, result);
- } else {
- Handle<SeqTwoByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- isolate->factory()->NewRawTwoByteString(length),
- String);
- return WriteEscapedRegExpSource<uc16>(source, result);
- }
-}
-
-
RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 2);
- Factory* factory = isolate->factory();
- // If source is the empty string we set it to "(?:)" instead as
- // suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = factory->query_colon_string();
-
- bool success = false;
- JSRegExp::Flags flags = RegExpFlagsFromString(flags_string, &success);
- if (!success) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string));
- }
+ CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<String> escaped_source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, escaped_source,
- EscapeRegExpSource(isolate, source));
-
- Handle<Object> global = factory->ToBoolean(flags.is_global());
- Handle<Object> ignore_case = factory->ToBoolean(flags.is_ignore_case());
- Handle<Object> multiline = factory->ToBoolean(flags.is_multiline());
- Handle<Object> sticky = factory->ToBoolean(flags.is_sticky());
- Handle<Object> unicode = factory->ToBoolean(flags.is_unicode());
-
- Map* map = regexp->map();
- Object* constructor = map->GetConstructor();
- if (!FLAG_harmony_regexps && !FLAG_harmony_unicode_regexps &&
- constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->initial_map() == map) {
- // If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *escaped_source);
- // Both true and false are immovable immortal objects so no need for write
- // barrier.
- regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, *global,
- SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, *ignore_case,
- SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, *multiline,
- SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0), SKIP_WRITE_BARRIER);
- } else {
- // Map has changed, so use generic, but slower, method. We also end here if
- // the --harmony-regexp flag is set, because the initial map does not have
- // space for the 'sticky' flag, since it is from the snapshot, but must work
- // both with and without --harmony-regexp. When sticky comes out from under
- // the flag, we will be able to use the fast initial map.
- PropertyAttributes final =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Handle<Object> zero(Smi::FromInt(0), isolate);
- JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->source_string(),
- escaped_source, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->global_string(),
- global, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->ignore_case_string(), ignore_case, final).Check();
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->multiline_string(), multiline, final).Check();
- if (FLAG_harmony_regexps) {
- JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->sticky_string(),
- sticky, final).Check();
- }
- if (FLAG_harmony_unicode_regexps) {
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->unicode_string(), unicode, final).Check();
- }
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->last_index_string(), zero, writable).Check();
- }
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSRegExp::Initialize(regexp, source, flags));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpImpl::Compile(regexp, source, flags));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
-
- Handle<JSFunction> constructor = isolate->regexp_function();
- // Compute the regular expression literal.
- Handle<Object> regexp;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, regexp,
- RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags));
- literals->set_literal(index, *regexp);
return *regexp;
}
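
The deleted RegExpFlagsFromString above documents the parsing contract
that JSRegExp::Initialize now owns: one bit per flag character, with
unknown characters and duplicates rejected. A compact sketch of the
same contract (bit values match the illustrative constants in the
earlier sketch, not V8's):

#include <cstdint>
#include <optional>
#include <string>

std::optional<uint32_t> ParseFlags(const std::string& flags) {
  uint32_t value = 0;
  for (char c : flags) {
    uint32_t bit = 0;
    switch (c) {
      case 'g': bit = 1u << 0; break;
      case 'i': bit = 1u << 1; break;
      case 'm': bit = 1u << 2; break;
      case 'y': bit = 1u << 3; break;
      case 'u': bit = 1u << 4; break;
      default: return std::nullopt;        // unknown flag character
    }
    if (value & bit) return std::nullopt;  // duplicate flag
    value |= bit;
  }
  return value;
}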
@@ -1017,23 +855,23 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
static const int kMinLengthToCache = 0x1000;
if (subject_length > kMinLengthToCache) {
- Handle<Object> cached_answer(
- RegExpResultsCache::Lookup(isolate->heap(), *subject, regexp->data(),
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES),
- isolate);
- if (*cached_answer != Smi::FromInt(0)) {
+ FixedArray* last_match_cache;
+ Object* cached_answer = RegExpResultsCache::Lookup(
+ isolate->heap(), *subject, regexp->data(), &last_match_cache,
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ if (cached_answer->IsFixedArray()) {
+ int capture_registers = (capture_count + 1) * 2;
+ int32_t* last_match = NewArray<int32_t>(capture_registers);
+ for (int i = 0; i < capture_registers; i++) {
+ last_match[i] = Smi::cast(last_match_cache->get(i))->value();
+ }
Handle<FixedArray> cached_fixed_array =
- Handle<FixedArray>(FixedArray::cast(*cached_answer));
+ Handle<FixedArray>(FixedArray::cast(cached_answer));
// The cache FixedArray is a COW-array and can therefore be reused.
JSArray::SetContent(result_array, cached_fixed_array);
- // The actual length of the result array is stored in the last element of
- // the backing store (the backing FixedArray may have a larger capacity).
- Object* cached_fixed_array_last_element =
- cached_fixed_array->get(cached_fixed_array->length() - 1);
- Smi* js_array_length = Smi::cast(cached_fixed_array_last_element);
- result_array->set_length(js_array_length);
RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
- NULL);
+ last_match);
+ DeleteArray(last_match);
return *result_array;
}
}
@@ -1121,19 +959,24 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
- NULL);
+ global_cache.LastSuccessfulMatch());
if (subject_length > kMinLengthToCache) {
- // Store the length of the result array into the last element of the
- // backing FixedArray.
- builder.EnsureCapacity(1);
- Handle<FixedArray> fixed_array = builder.array();
- fixed_array->set(fixed_array->length() - 1,
- Smi::FromInt(builder.length()));
+ // Store the last successful match into the array for caching.
+ // TODO(yangguo): do not expose last match to JS and simplify caching.
+ int capture_registers = (capture_count + 1) * 2;
+ Handle<FixedArray> last_match_cache =
+ isolate->factory()->NewFixedArray(capture_registers);
+ int32_t* last_match = global_cache.LastSuccessfulMatch();
+ for (int i = 0; i < capture_registers; i++) {
+ last_match_cache->set(i, Smi::FromInt(last_match[i]));
+ }
+ Handle<FixedArray> result_fixed_array = builder.array();
+ result_fixed_array->Shrink(builder.length());
// Cache the result and turn the FixedArray into a COW array.
- RegExpResultsCache::Enter(isolate, subject,
- handle(regexp->data(), isolate), fixed_array,
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
+ RegExpResultsCache::Enter(
+ isolate, subject, handle(regexp->data(), isolate), result_fixed_array,
+ last_match_cache, RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
}
return *builder.ToJSArray(result_array);
} else {
@@ -1149,15 +992,15 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
RUNTIME_ASSERT(result_array->HasFastObjectElements());
subject = String::Flatten(subject);
- RUNTIME_ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
if (regexp->CaptureCount() == 0) {
return SearchRegExpMultiple<false>(isolate, subject, regexp,
diff --git a/chromium/v8/src/runtime/runtime-scopes.cc b/chromium/v8/src/runtime/runtime-scopes.cc
index c3928a7703a..13f04479ae4 100644
--- a/chromium/v8/src/runtime/runtime-scopes.cc
+++ b/chromium/v8/src/runtime/runtime-scopes.cc
@@ -6,11 +6,12 @@
#include "src/accessors.h"
#include "src/arguments.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
+#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -30,7 +31,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
// May throw a RedeclarationError.
-static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
+static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
Handle<String> name, Handle<Object> value,
PropertyAttributes attr, bool is_var,
bool is_const, bool is_function) {
@@ -87,7 +88,7 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<GlobalObject> global(isolate->global_object());
+ Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context());
CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
@@ -155,7 +156,7 @@ RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<JSGlobalObject> global(isolate->context()->global_object());
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, Object::SetProperty(global, name, value, language_mode));
@@ -172,7 +173,7 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<GlobalObject> global = isolate->global_object();
+ Handle<JSGlobalObject> global = isolate->global_object();
// Lookup the property as own on the global object.
LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
@@ -223,10 +224,23 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
int index;
PropertyAttributes attributes;
- ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
+
+ if ((attr & EVAL_DECLARED) != 0) {
+ // Check for a conflict with a lexically scoped variable
+ context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes,
+ &binding_flags);
+ if (attributes != ABSENT &&
+ (binding_flags == MUTABLE_CHECK_INITIALIZED ||
+ binding_flags == IMMUTABLE_CHECK_INITIALIZED ||
+ binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY)) {
+ return ThrowRedeclarationError(isolate, name);
+ }
+ attr = static_cast<PropertyAttributes>(attr & ~EVAL_DECLARED);
+ }
+
+ Handle<Object> holder = context->Lookup(name, DONT_FOLLOW_CHAINS, &index,
+ &attributes, &binding_flags);
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return isolate->heap()->exception();
@@ -281,7 +295,7 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
DCHECK(context->IsBlockContext());
object = isolate->factory()->NewJSObject(
isolate->context_extension_function());
- Handle<Object> extension =
+ Handle<HeapObject> extension =
isolate->factory()->NewSloppyBlockWithEvalContextExtension(
handle(context->scope_info()), object);
context->set_extension(*extension);
@@ -307,21 +321,14 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
-
- return DeclareLookupSlot(isolate, name, initial_value, NONE);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DeclareReadOnlyLookupSlot) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, property_attributes, 2);
- return DeclareLookupSlot(isolate, name, initial_value, READ_ONLY);
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(property_attributes->value());
+ return DeclareLookupSlot(isolate, name, initial_value, attributes);
}
@@ -406,11 +413,73 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
namespace {
+// Find the arguments of the JavaScript function invocation that called
+// into C++ code. Collect these in a newly allocated array of handles (possibly
+// prefixed by a number of empty handles).
+base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
+ int prefix_argc,
+ int* total_argc) {
+ // Find frame containing arguments passed to the caller.
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ List<JSFunction*> functions(2);
+ frame->GetFunctions(&functions);
+ if (functions.length() > 1) {
+ int inlined_jsframe_index = functions.length() - 1;
+ TranslatedState translated_values(frame);
+ translated_values.Prepare(false, frame->fp());
+
+ int argument_count = 0;
+ TranslatedFrame* translated_frame =
+ translated_values.GetArgumentsInfoFromJSFrameIndex(
+ inlined_jsframe_index, &argument_count);
+ TranslatedFrame::iterator iter = translated_frame->begin();
+
+ // Skip the function.
+ iter++;
+
+ // Skip the receiver.
+ iter++;
+ argument_count--;
+
+ *total_argc = prefix_argc + argument_count;
+ base::SmartArrayPointer<Handle<Object>> param_data(
+ NewArray<Handle<Object>>(*total_argc));
+ bool should_deoptimize = false;
+ for (int i = 0; i < argument_count; i++) {
+ should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
+ Handle<Object> value = iter->GetValue();
+ param_data[prefix_argc + i] = value;
+ iter++;
+ }
+
+ if (should_deoptimize) {
+ translated_values.StoreMaterializedValuesAndDeopt();
+ }
+
+ return param_data;
+ } else {
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+ int args_count = frame->ComputeParametersCount();
+
+ *total_argc = prefix_argc + args_count;
+ base::SmartArrayPointer<Handle<Object>> param_data(
+ NewArray<Handle<Object>>(*total_argc));
+ for (int i = 0; i < args_count; i++) {
+ Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
+ param_data[prefix_argc + i] = val;
+ }
+ return param_data;
+ }
+}
+
+
template <typename T>
Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
T parameters, int argument_count) {
CHECK(!IsSubclassConstructor(callee->shared()->kind()));
- DCHECK(callee->has_simple_parameters());
+ DCHECK(callee->shared()->has_simple_parameters());
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -515,6 +584,26 @@ Handle<JSObject> NewStrictArguments(Isolate* isolate, Handle<JSFunction> callee,
}
+template <typename T>
+Handle<JSObject> NewRestArguments(Isolate* isolate, Handle<JSFunction> callee,
+ T parameters, int argument_count,
+ int start_index) {
+ int num_elements = std::max(0, argument_count - start_index);
+ Handle<JSObject> result = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(result->elements());
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < num_elements; i++) {
+ elements->set(i, parameters[i + start_index], mode);
+ }
+ }
+ return result;
+}
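
NewRestArguments materializes a rest parameter as the tail of the
caller's argument list, clamped so that a call site passing fewer than
start_index arguments yields an empty array. The same slicing in
standard C++, as a hedged stand-in for the handle and elements-store
machinery:

#include <algorithm>
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> RestSlice(const std::vector<T>& args, std::size_t start_index) {
  // Equivalent to num_elements = std::max(0, argument_count - start_index).
  std::size_t begin = std::min(start_index, args.size());
  return std::vector<T>(args.begin() + begin, args.end());
}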
+
+
class HandleArguments BASE_EMBEDDED {
public:
explicit HandleArguments(Handle<Object>* array) : array_(array) {}
@@ -542,10 +631,10 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
// This generic runtime function can also be used when the caller has been
- // inlined, we use the slow but accurate {Runtime::GetCallerArguments}.
+  // inlined, so we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, 0, &argument_count);
HandleArguments argument_getter(arguments.get());
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
@@ -556,15 +645,31 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments_Generic) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
// This generic runtime function can also be used when the caller has been
- // inlined, we use the slow but accurate {Runtime::GetCallerArguments}.
+  // inlined, so we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, 0, &argument_count);
HandleArguments argument_getter(arguments.get());
return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
}
+RUNTIME_FUNCTION(Runtime_NewRestArguments_Generic) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+ CONVERT_SMI_ARG_CHECKED(start_index, 1);
+ // This generic runtime function can also be used when the caller has been
+  // inlined, so we use the slow but accurate {GetCallerArguments}.
+ int argument_count = 0;
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ GetCallerArguments(isolate, 0, &argument_count);
+ HandleArguments argument_getter(arguments.get());
+ return *NewRestArguments(isolate, callee, argument_getter, argument_count,
+ start_index);
+}
+
+
RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -599,6 +704,25 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
}
+RUNTIME_FUNCTION(Runtime_NewRestParam) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_SMI_ARG_CHECKED(num_params, 0);
+ Object** parameters = reinterpret_cast<Object**>(args[1]);
+ CONVERT_SMI_ARG_CHECKED(rest_index, 2);
+#ifdef DEBUG
+ // This runtime function does not materialize the correct arguments when the
+  // caller has been inlined, so make sure we are not hitting that case.
+ JavaScriptFrameIterator it(isolate);
+ DCHECK(!it.frame()->HasInlinedFrames());
+#endif // DEBUG
+ Handle<JSFunction> callee;
+ ParameterArguments argument_getter(parameters);
+ return *NewRestArguments(isolate, callee, argument_getter, num_params,
+ rest_index);
+}
+
+
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -621,7 +745,7 @@ RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
}
static Object* FindNameClash(Handle<ScopeInfo> scope_info,
- Handle<GlobalObject> global_object,
+ Handle<JSGlobalObject> global_object,
Handle<ScriptContextTable> script_context) {
Isolate* isolate = scope_info->GetIsolate();
for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
@@ -643,7 +767,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
return ThrowRedeclarationError(isolate, name);
}
- GlobalObject::InvalidatePropertyCell(global_object, name);
+ JSGlobalObject::InvalidatePropertyCell(global_object, name);
}
}
return isolate->heap()->undefined_value();
@@ -656,7 +780,7 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
- Handle<GlobalObject> global_object(function->context()->global_object());
+ Handle<JSGlobalObject> global_object(function->context()->global_object());
Handle<Context> native_context(global_object->native_context());
Handle<ScriptContextTable> script_context_table(
native_context->script_context_table());
@@ -668,9 +792,8 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
// Script contexts have a canonical empty function as their closure, not the
// anonymous closure containing the global code. See
// FullCodeGenerator::PushFunctionArgumentForContextAllocation.
- Handle<JSFunction> closure(global_object->IsJSBuiltinsObject()
- ? *function
- : native_context->closure());
+ Handle<JSFunction> closure(
+ function->shared()->IsBuiltin() ? *function : native_context->closure());
Handle<Context> result =
isolate->factory()->NewScriptContext(closure, scope_info);
@@ -700,31 +823,9 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(Runtime_PushWithContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- Handle<JSReceiver> extension_object;
- if (args[0]->IsJSReceiver()) {
- extension_object = args.at<JSReceiver>(0);
- } else {
- // Try to convert the object to a proper JavaScript object.
- MaybeHandle<JSReceiver> maybe_object =
- Object::ToObject(isolate, args.at<Object>(0));
- if (!maybe_object.ToHandle(&extension_object)) {
- Handle<Object> handle = args.at<Object>(0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kWithExpression, handle));
- }
- }
-
- Handle<JSFunction> function;
- if (args[1]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = handle(isolate->native_context()->closure());
- } else {
- function = args.at<JSFunction>(1);
- }
-
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
Handle<Context> current(isolate->context());
Handle<Context> context =
isolate->factory()->NewWithContext(function, current, extension_object);
@@ -735,18 +836,10 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
RUNTIME_FUNCTION(Runtime_PushCatchContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1);
- Handle<JSFunction> function;
- if (args[2]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = handle(isolate->native_context()->closure());
- } else {
- function = args.at<JSFunction>(2);
- }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
Handle<Context> current(isolate->context());
Handle<Context> context = isolate->factory()->NewCatchContext(
function, current, name, thrown_object);
@@ -757,17 +850,9 @@ RUNTIME_FUNCTION(Runtime_PushCatchContext) {
RUNTIME_FUNCTION(Runtime_PushBlockContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
- Handle<JSFunction> function;
- if (args[1]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = handle(isolate->native_context()->closure());
- } else {
- function = args.at<JSFunction>(1);
- }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
Handle<Context> current(isolate->context());
Handle<Context> context =
isolate->factory()->NewBlockContext(function, current, scope_info);
@@ -809,7 +894,7 @@ RUNTIME_FUNCTION(Runtime_PushModuleContext) {
Context* previous = isolate->context();
context->set_previous(previous);
context->set_closure(previous->closure());
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
isolate->set_context(*context);
// Find hosting scope and initialize internal variable holding module there.
@@ -859,7 +944,10 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
}
}
- JSObject::PreventExtensions(module).Assert();
+ if (JSObject::PreventExtensions(module, Object::THROW_ON_ERROR)
+ .IsNothing()) {
+ DCHECK(false);
+ }
}
DCHECK(!isolate->has_pending_exception());
@@ -897,15 +985,14 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// the global object, or the subject of a with. Try to delete it
// (respecting DONT_DELETE).
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSReceiver::DeleteProperty(object, name));
- return *result;
+ Maybe<bool> result = JSReceiver::DeleteProperty(object, name);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) {
- DCHECK(!holder->IsGlobalObject());
+ DCHECK(!holder->IsJSGlobalObject());
// If the holder isn't a context extension object, we just return it
// as the receiver. This allows arguments objects to be used as
@@ -983,7 +1070,7 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
- object->IsGlobalObject()
+ object->IsJSGlobalObject()
? Object::cast(isolate->heap()->undefined_value())
: object->IsJSProxy() ? static_cast<Object*>(*object)
: ComputeReceiverForNonGlobal(
@@ -1088,7 +1175,7 @@ RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
int argument_count = 0;
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, 0, &argument_count);
return Smi::FromInt(argument_count);
}
@@ -1101,7 +1188,7 @@ RUNTIME_FUNCTION(Runtime_Arguments) {
// Determine the actual arguments passed to the function.
int argument_count_signed = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count_signed);
+ GetCallerArguments(isolate, 0, &argument_count_signed);
const uint32_t argument_count = argument_count_signed;
// Try to convert the key to an index. If successful and within
diff --git a/chromium/v8/src/runtime/runtime-simd.cc b/chromium/v8/src/runtime/runtime-simd.cc
index 0a1034dfc25..59e4fa1edb7 100644
--- a/chromium/v8/src/runtime/runtime-simd.cc
+++ b/chromium/v8/src/runtime/runtime-simd.cc
@@ -212,10 +212,19 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
CONVERT_INT32_ARG_CHECKED(name, index); \
RUNTIME_ASSERT(name >= 0 && name < lanes);
+#define CONVERT_SIMD_ARG_HANDLE_THROW(Type, name, index) \
+ Handle<Type> name; \
+ if (args[index]->Is##Type()) { \
+ name = args.at<Type>(index); \
+ } else { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
+ }
+
#define SIMD_UNARY_OP(type, lane_type, lane_count, op, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = op(a->get_lane(i)); \
@@ -225,8 +234,8 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define SIMD_BINARY_OP(type, lane_type, lane_count, op, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = op(a->get_lane(i), b->get_lane(i)); \
@@ -236,8 +245,8 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, op, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
bool lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = a->get_lane(i) op b->get_lane(i); \
@@ -283,7 +292,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
RUNTIME_FUNCTION(Runtime_##type##ExtractLane) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, lane_count); \
return *isolate->factory()->extract(a->get_lane(lane)); \
}
@@ -293,7 +302,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(type, simd, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, simd, 0); \
CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, kLaneCount); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
@@ -307,7 +316,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define SIMD_CHECK_FUNCTION(type, lane_type, lane_count, extract, replace) \
RUNTIME_FUNCTION(Runtime_##type##Check) { \
HandleScope scope(isolate); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
return *a; \
}
@@ -316,7 +325,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 1 + kLaneCount); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 1, kLaneCount); \
@@ -331,8 +340,8 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2 + kLaneCount); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 2, kLaneCount * 2); \
@@ -437,7 +446,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
if (shift < lane_bits) { \
@@ -454,7 +463,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
if (shift < lane_bits) { \
@@ -472,7 +481,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
if (shift >= lane_bits) shift = lane_bits - 1; \
lane_type lanes[kLaneCount]; \
@@ -502,7 +511,7 @@ SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##AnyTrue) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
bool result = false; \
for (int i = 0; i < lane_count; i++) { \
if (a->get_lane(i)) { \
@@ -517,7 +526,7 @@ SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##AllTrue) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
bool result = true; \
for (int i = 0; i < lane_count; i++) { \
if (!a->get_lane(i)) { \
@@ -759,9 +768,9 @@ SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(bool_type, mask, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 2); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(bool_type, mask, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 2); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = mask->get_lane(i) ? a->get_lane(i) : b->get_lane(i); \
@@ -812,7 +821,7 @@ SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(from_type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
from_ctype a_value = a->get_lane(i); \
@@ -875,7 +884,7 @@ SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(from_type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
lane_type lanes[kLaneCount]; \
a->CopyBits(lanes); \
Handle<type> result = isolate->factory()->New##type(lanes); \
@@ -900,7 +909,7 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
CONVERT_INT32_ARG_CHECKED(index, 1) \
size_t bpe = tarray->element_size(); \
uint32_t bytes = count * sizeof(lane_type); \
@@ -918,9 +927,9 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
#define SIMD_STORE(type, lane_type, lane_count, count, a) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2); \
CONVERT_INT32_ARG_CHECKED(index, 1) \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 2); \
size_t bpe = tarray->element_size(); \
uint32_t bytes = count * sizeof(lane_type); \
size_t byte_length = NumberToSize(isolate, tarray->byte_length()); \
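Unlike CONVERT_ARG_HANDLE_CHECKED, the new CONVERT_SIMD_ARG_HANDLE_THROW raises a catchable TypeError rather than tripping RUNTIME_ASSERT. Expanded by hand for one instantiation, following the macro definition added at the top of this file, the argument check in SIMD_UNARY_OP becomes:

Handle<Float32x4> a;
if (args[0]->IsFloat32x4()) {
  a = args.at<Float32x4>(0);
} else {
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation));
}

Note also that SIMD_STORE moves the SIMD-value conversion ahead of the index conversion, so the throwing type check runs before any integer coercion of the index argument.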
diff --git a/chromium/v8/src/runtime/runtime-strings.cc b/chromium/v8/src/runtime/runtime-strings.cc
index 3ce5a58e2bf..bd4dd699b45 100644
--- a/chromium/v8/src/runtime/runtime-strings.cc
+++ b/chromium/v8/src/runtime/runtime-strings.cc
@@ -405,18 +405,6 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
}
-RUNTIME_FUNCTION(Runtime_CharFromCode) {
- HandleScope handlescope(isolate);
- DCHECK(args.length() == 1);
- if (args[0]->IsNumber()) {
- CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
- code &= 0xffff;
- return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
- }
- return isolate->heap()->empty_string();
-}
-
-
RUNTIME_FUNCTION(Runtime_StringCompare) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1185,8 +1173,14 @@ RUNTIME_FUNCTION(Runtime_FlattenString) {
RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_CharFromCode(args, isolate);
+ HandleScope handlescope(isolate);
+ DCHECK_EQ(1, args.length());
+ if (args[0]->IsNumber()) {
+ CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
+ code &= 0xffff;
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
+ }
+ return isolate->heap()->empty_string();
}
@@ -1198,7 +1192,7 @@ RUNTIME_FUNCTION(Runtime_StringCharAt) {
if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
if (code->IsNaN()) return isolate->heap()->empty_string();
- return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate);
+ return __RT_impl_Runtime_StringCharFromCode(Arguments(1, &code), isolate);
}
@@ -1251,12 +1245,5 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
}
-
-RUNTIME_FUNCTION(Runtime_StringGetLength) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- return Smi::FromInt(s->length());
-}
} // namespace internal
} // namespace v8
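The inlined StringCharFromCode body masks the incoming number with 0xffff, i.e. it keeps a single UTF-16 code unit and silently truncates code points above the BMP. A minimal standalone illustration of the masking (not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t code = 0x1F600;       // a code point outside the BMP
  uint16_t unit = code & 0xffff; // 0xF600: only the low 16 bits survive
  std::printf("%04x\n", unit);
  return 0;
}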
diff --git a/chromium/v8/src/runtime/runtime-symbol.cc b/chromium/v8/src/runtime/runtime-symbol.cc
index 778c2417095..234b45606d8 100644
--- a/chromium/v8/src/runtime/runtime-symbol.cc
+++ b/chromium/v8/src/runtime/runtime-symbol.cc
@@ -28,7 +28,9 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
- return *isolate->factory()->NewPrivateSymbol(name);
+ Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
+ if (name->IsString()) symbol->set_name(*name);
+ return *symbol;
}
diff --git a/chromium/v8/src/runtime/runtime-test.cc b/chromium/v8/src/runtime/runtime-test.cc
index fdfa42a6af2..3b92d7f6ee3 100644
--- a/chromium/v8/src/runtime/runtime-test.cc
+++ b/chromium/v8/src/runtime/runtime-test.cc
@@ -378,8 +378,7 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
DCHECK(args.length() == 0);
return Smi::FromInt(Natives::GetBuiltinsCount() +
- ExtraNatives::GetBuiltinsCount() +
- CodeStubNatives::GetBuiltinsCount());
+ ExtraNatives::GetBuiltinsCount());
}
diff --git a/chromium/v8/src/runtime/runtime-typedarray.cc b/chromium/v8/src/runtime/runtime-typedarray.cc
index 8a3fce0a923..a82b71ddf28 100644
--- a/chromium/v8/src/runtime/runtime-typedarray.cc
+++ b/chromium/v8/src/runtime/runtime-typedarray.cc
@@ -13,32 +13,6 @@
namespace v8 {
namespace internal {
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(is_shared, 2);
- if (!holder->byte_length()->IsUndefined()) {
- // ArrayBuffer is already initialized; probably a fuzz test.
- return *holder;
- }
- size_t allocated_length = 0;
- if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
- if (!JSArrayBuffer::SetupAllocatingData(
- holder, isolate, allocated_length, true,
- is_shared ? SharedFlag::kShared : SharedFlag::kNotShared)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
- return *holder;
-}
-
-
RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -49,14 +23,16 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
RUNTIME_ASSERT(!source.is_identical_to(target));
- size_t start = 0;
+ size_t start = 0, target_length = 0;
RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
- size_t target_length = NumberToSize(isolate, target->byte_length());
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *new_length, &target_length));
+ RUNTIME_ASSERT(NumberToSize(isolate, target->byte_length()) >= target_length);
if (target_length == 0) return isolate->heap()->undefined_value();
@@ -70,14 +46,6 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
}
-RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(object->IsJSArrayBufferView());
-}
-
-
RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -164,8 +132,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
// All checks are done, now we can modify objects.
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
+ DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
+ holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
@@ -238,8 +206,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
}
size_t byte_length = length * element_size;
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
+ DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
+ holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
@@ -433,6 +401,19 @@ RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
}
+RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ if (!args[0]->IsJSTypedArray()) {
+ return isolate->heap()->false_value();
+ }
+
+ Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
+ return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
+ obj->type() == kExternalInt32Array);
+}
+
+
RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
@@ -441,8 +422,8 @@ RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
- DCHECK(holder->GetInternalFieldCount() ==
- v8::ArrayBufferView::kInternalFieldCount);
+ DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
+ holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
holder->SetInternalField(i, Smi::FromInt(0));
}
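ArrayBufferSliceImpl now takes the target length as an explicit fourth argument and validates it with TryNumberToSize plus a bounds check against the target buffer, rather than deriving it from target->byte_length() alone. A simplified stand-in for the overflow-safe number-to-size conversion (the real TryNumberToSize also handles Smis and heap numbers, so this is only a sketch of the double path):

#include <cmath>
#include <cstddef>
#include <cstdint>

// Simplified model: refuse NaN, negatives, fractions, and values that do
// not fit in size_t, instead of truncating silently.
bool TryDoubleToSize(double value, size_t* out) {
  if (!(value >= 0.0) || std::floor(value) != value) return false;
  if (value >= static_cast<double>(SIZE_MAX)) return false;
  *out = static_cast<size_t>(value);
  return true;
}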
diff --git a/chromium/v8/src/runtime/runtime-utils.h b/chromium/v8/src/runtime/runtime-utils.h
index 4b072b1eb60..ded2c090c88 100644
--- a/chromium/v8/src/runtime/runtime-utils.h
+++ b/chromium/v8/src/runtime/runtime-utils.h
@@ -162,7 +162,7 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
}
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_RUNTIME_RUNTIME_UTILS_H_
diff --git a/chromium/v8/src/runtime/runtime.cc b/chromium/v8/src/runtime/runtime.cc
index 15451c5c6e3..90f4e4ce335 100644
--- a/chromium/v8/src/runtime/runtime.cc
+++ b/chromium/v8/src/runtime/runtime.cc
@@ -4,6 +4,7 @@
#include "src/runtime/runtime.h"
+#include "src/assembler.h"
#include "src/contexts.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
@@ -94,6 +95,31 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
}
+const Runtime::Function* Runtime::RuntimeFunctionTable(Isolate* isolate) {
+ if (isolate->external_reference_redirector()) {
+ // When running with the simulator we need to provide a table which has
+ // redirected runtime entry addresses.
+ if (!isolate->runtime_state()->redirected_intrinsic_functions()) {
+ size_t function_count = arraysize(kIntrinsicFunctions);
+ Function* redirected_functions = new Function[function_count];
+ memcpy(redirected_functions, kIntrinsicFunctions,
+ sizeof(kIntrinsicFunctions));
+ for (size_t i = 0; i < function_count; i++) {
+ ExternalReference redirected_entry(static_cast<Runtime::FunctionId>(i),
+ isolate);
+ redirected_functions[i].entry = redirected_entry.address();
+ }
+ isolate->runtime_state()->set_redirected_intrinsic_functions(
+ redirected_functions);
+ }
+
+ return isolate->runtime_state()->redirected_intrinsic_functions();
+ } else {
+ return kIntrinsicFunctions;
+ }
+}
+
+
std::ostream& operator<<(std::ostream& os, Runtime::FunctionId id) {
return os << Runtime::FunctionForId(id)->name;
}
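RuntimeFunctionTable builds the redirected table lazily and caches it on RuntimeState: copy the static intrinsic table once, patch each entry address through the simulator redirection, and hand out the cached copy on every later call. A standalone sketch of that copy-and-patch pattern (hypothetical names, not the V8 API):

#include <cstring>
#include <memory>

struct Function { const char* name; const void* entry; };

static const Function kTable[] = {{"Add", nullptr}, {"Sub", nullptr}};

// 'redirect' is a hypothetical stand-in for the simulator trampoline lookup.
const Function* RedirectedTable(const void* (*redirect)(size_t)) {
  static std::unique_ptr<Function[]> cache;
  if (!cache) {
    const size_t n = sizeof(kTable) / sizeof(kTable[0]);
    cache.reset(new Function[n]);
    std::memcpy(cache.get(), kTable, sizeof(kTable));  // copy the whole table
    for (size_t i = 0; i < n; ++i) {
      cache[i].entry = redirect(i);  // patch each entry address
    }
  }
  return cache.get();
}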
diff --git a/chromium/v8/src/runtime/runtime.h b/chromium/v8/src/runtime/runtime.h
index 6e55d747943..283087ae06a 100644
--- a/chromium/v8/src/runtime/runtime.h
+++ b/chromium/v8/src/runtime/runtime.h
@@ -30,27 +30,28 @@ namespace internal {
// Entries have the form F(name, number of arguments, number of values):
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 0, 1) \
- F(TransitionElementsKind, 2, 1) \
- F(PushIfAbsent, 2, 1) \
- F(RemoveArrayHoles, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(ArrayConstructor, -1, 1) \
- F(ArrayConstructorWithSubclassing, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
- F(NormalizeElements, 1, 1) \
- F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
- F(IsArray, 1, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
- F(FixedArrayGet, 2, 1) \
- F(FixedArraySet, 3, 1) \
- F(FastOneByteArrayJoin, 2, 1)
+#define FOR_EACH_INTRINSIC_ARRAY(F) \
+ F(FinishArrayPrototypeSetup, 1, 1) \
+ F(SpecialArrayFunctions, 0, 1) \
+ F(TransitionElementsKind, 2, 1) \
+ F(PushIfAbsent, 2, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(ArrayConstructor, -1, 1) \
+ F(NewArray, -1 /* >= 3 */, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1) \
+ F(FixedArrayGet, 2, 1) \
+ F(FixedArraySet, 3, 1) \
+ F(FastOneByteArrayJoin, 2, 1) \
+ F(ArraySpeciesConstructor, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
@@ -73,26 +74,24 @@ namespace internal {
F(AtomicsFutexNumWaitersForTesting, 2, 1)
-#define FOR_EACH_INTRINSIC_CLASSES(F) \
- F(ThrowNonMethodError, 0, 1) \
- F(ThrowUnsupportedSuperError, 0, 1) \
- F(ThrowConstructorNonCallableError, 0, 1) \
- F(ThrowArrayNotSubclassableError, 0, 1) \
- F(ThrowStaticPrototypeError, 0, 1) \
- F(ThrowIfStaticPrototype, 1, 1) \
- F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 5, 1) \
- F(FinalizeClassDefinition, 2, 1) \
- F(DefineClassMethod, 3, 1) \
- F(ClassGetSourceCode, 1, 1) \
- F(LoadFromSuper, 4, 1) \
- F(LoadKeyedFromSuper, 4, 1) \
- F(StoreToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(HandleStepInForDerivedConstructors, 1, 1) \
- F(DefaultConstructorCallSuper, 2, 1)
+#define FOR_EACH_INTRINSIC_CLASSES(F) \
+ F(ThrowNonMethodError, 0, 1) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(ThrowConstructorNonCallableError, 1, 1) \
+ F(ThrowArrayNotSubclassableError, 0, 1) \
+ F(ThrowStaticPrototypeError, 0, 1) \
+ F(ThrowIfStaticPrototype, 1, 1) \
+ F(HomeObjectSymbol, 0, 1) \
+ F(DefineClass, 5, 1) \
+ F(FinalizeClassDefinition, 2, 1) \
+ F(DefineClassMethod, 3, 1) \
+ F(LoadFromSuper, 4, 1) \
+ F(LoadKeyedFromSuper, 4, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1) \
+ F(StoreKeyedToSuper_Strict, 4, 1) \
+ F(StoreKeyedToSuper_Sloppy, 4, 1) \
+ F(GetSuperConstructor, 1, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
@@ -126,28 +125,21 @@ namespace internal {
F(ObservationWeakMapCreate, 0, 1)
-#define FOR_EACH_INTRINSIC_COMPILER(F) \
- F(CompileLazy, 1, 1) \
- F(CompileOptimized, 2, 1) \
- F(NotifyStubFailure, 0, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(TryInstallOptimizedCode, 1, 1) \
- F(CompileString, 2, 1) \
+#define FOR_EACH_INTRINSIC_COMPILER(F) \
+ F(CompileLazy, 1, 1) \
+ F(CompileOptimized_Concurrent, 1, 1) \
+ F(CompileOptimized_NotConcurrent, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
+ F(NotifyDeoptimized, 1, 1) \
+ F(CompileForOnStackReplacement, 1, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
F(ResolvePossiblyDirectEval, 5, 1)
#define FOR_EACH_INTRINSIC_DATE(F) \
- F(DateMakeDay, 2, 1) \
- F(DateSetValue, 3, 1) \
F(IsDate, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
F(DateCurrentTime, 0, 1) \
- F(DateParseString, 2, 1) \
- F(DateLocalTimezone, 1, 1) \
- F(DateToUTC, 1, 1) \
- F(DateCacheVersion, 0, 1) \
- F(DateField, 2 /* date object, field index */, 1)
+ F(ThrowNotDateError, 0, 1)
#define FOR_EACH_INTRINSIC_DEBUG(F) \
@@ -176,14 +168,14 @@ namespace internal {
F(DebugPrintScopes, 0, 1) \
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
- F(SetDisableBreak, 1, 1) \
+ F(SetBreakPointsActive, 1, 1) \
F(GetBreakLocations, 2, 1) \
F(SetFunctionBreakPoint, 3, 1) \
F(SetScriptBreakPoint, 4, 1) \
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 4, 1) \
+ F(PrepareStep, 2, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 6, 1) \
F(DebugEvaluateGlobal, 4, 1) \
@@ -193,13 +185,13 @@ namespace internal {
F(DebugGetPrototype, 1, 1) \
F(DebugSetScriptSource, 2, 1) \
F(FunctionGetInferredName, 1, 1) \
+ F(FunctionGetDebugName, 1, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
F(ExecuteInDebugContext, 1, 1) \
F(GetDebugContext, 0, 1) \
F(CollectGarbage, 1, 1) \
F(GetHeapUsage, 0, 1) \
F(GetScript, 1, 1) \
- F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
F(DebugPushPromise, 2, 1) \
F(DebugPopPromise, 0, 1) \
@@ -225,40 +217,36 @@ namespace internal {
F(InterpreterGreaterThan, 2, 1) \
F(InterpreterLessThanOrEqual, 2, 1) \
F(InterpreterGreaterThanOrEqual, 2, 1) \
- F(InterpreterToBoolean, 1, 1)
-
-
-#define FOR_EACH_INTRINSIC_FUNCTION(F) \
- F(FunctionGetName, 1, 1) \
- F(FunctionSetName, 2, 1) \
- F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
- F(FunctionIsArrow, 1, 1) \
- F(FunctionIsConciseMethod, 1, 1) \
- F(FunctionRemovePrototype, 1, 1) \
- F(FunctionGetScript, 1, 1) \
- F(FunctionGetSourceCode, 1, 1) \
- F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetPositionForOffset, 2, 1) \
- F(FunctionSetInstanceClassName, 2, 1) \
- F(FunctionSetLength, 2, 1) \
- F(FunctionSetPrototype, 2, 1) \
- F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionHidesSource, 1, 1) \
- F(SetCode, 2, 1) \
- F(SetNativeFlag, 1, 1) \
- F(ThrowStrongModeTooFewArguments, 0, 1) \
- F(IsConstructor, 1, 1) \
- F(SetForceInlineFlag, 1, 1) \
- F(FunctionBindArguments, 4, 1) \
- F(BoundFunctionGetBindings, 1, 1) \
- F(NewObjectFromBound, 1, 1) \
- F(Call, -1 /* >= 2 */, 1) \
- F(Apply, 5, 1) \
- F(GetOriginalConstructor, 0, 1) \
- F(CallFunction, -1 /* receiver + n args + function */, 1) \
- F(IsConstructCall, 0, 1) \
- F(IsFunction, 1, 1)
+ F(InterpreterToBoolean, 1, 1) \
+ F(InterpreterLogicalNot, 1, 1) \
+ F(InterpreterTypeOf, 1, 1) \
+ F(InterpreterNewClosure, 2, 1) \
+ F(InterpreterForInPrepare, 1, 1)
+
+
+#define FOR_EACH_INTRINSIC_FUNCTION(F) \
+ F(FunctionGetName, 1, 1) \
+ F(FunctionSetName, 2, 1) \
+ F(FunctionRemovePrototype, 1, 1) \
+ F(FunctionGetScript, 1, 1) \
+ F(FunctionGetSourceCode, 1, 1) \
+ F(FunctionGetScriptSourcePosition, 1, 1) \
+ F(FunctionGetPositionForOffset, 2, 1) \
+ F(FunctionSetInstanceClassName, 2, 1) \
+ F(FunctionSetLength, 2, 1) \
+ F(FunctionSetPrototype, 2, 1) \
+ F(FunctionIsAPIFunction, 1, 1) \
+ F(SetCode, 2, 1) \
+ F(SetNativeFlag, 1, 1) \
+ F(ThrowStrongModeTooFewArguments, 0, 1) \
+ F(IsConstructor, 1, 1) \
+ F(SetForceInlineFlag, 1, 1) \
+ F(Call, -1 /* >= 2 */, 1) \
+ F(TailCall, -1 /* >= 2 */, 1) \
+ F(Apply, 5, 1) \
+ F(ConvertReceiver, 1, 1) \
+ F(IsFunction, 1, 1) \
+ F(FunctionToString, 1, 1)
#define FOR_EACH_INTRINSIC_GENERATOR(F) \
@@ -271,7 +259,6 @@ namespace internal {
F(GeneratorGetReceiver, 1, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
- F(FunctionIsGenerator, 1, 1) \
F(GeneratorNext, 2, 1) \
F(GeneratorThrow, 2, 1)
@@ -316,9 +303,11 @@ namespace internal {
F(UnwindAndFindExceptionHandler, 0, 1) \
F(PromoteScheduledException, 0, 1) \
F(ThrowReferenceError, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
F(NewTypeError, 2, 1) \
F(NewSyntaxError, 2, 1) \
F(NewReferenceError, 2, 1) \
+ F(ThrowIllegalInvocation, 0, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowStrongModeImplicitConversion, 0, 1) \
@@ -331,7 +320,6 @@ namespace internal {
F(CollectStackTrace, 2, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
- F(ErrorToStringRT, 1, 1) \
F(FormatMessageString, 4, 1) \
F(CallSiteGetFileNameRT, 1, 1) \
F(CallSiteGetFunctionNameRT, 1, 1) \
@@ -345,13 +333,10 @@ namespace internal {
F(CallSiteIsConstructorRT, 1, 1) \
F(IS_VAR, 1, 1) \
F(IncrementStatsCounter, 1, 1) \
- F(Likely, 1, 1) \
- F(Unlikely, 1, 1) \
- F(HarmonyToString, 0, 1) \
- F(GetTypeFeedbackVector, 1, 1) \
- F(GetCallerJSFunction, 0, 1) \
- F(GetCodeStubExportsObject, 0, 1) \
- F(ThrowCalledNonCallable, 1, 1)
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(IncrementUseCounter, 1, 1)
#define FOR_EACH_INTRINSIC_JSON(F) \
@@ -361,6 +346,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_LITERALS(F) \
+ F(CreateRegExpLiteral, 4, 1) \
F(CreateObjectLiteral, 4, 1) \
F(CreateArrayLiteral, 4, 1) \
F(CreateArrayLiteralStubBailout, 3, 1) \
@@ -376,7 +362,7 @@ namespace internal {
F(LiveEditFunctionSetScript, 2, 1) \
F(LiveEditReplaceRefToNestedFunction, 3, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditCheckAndDropActivations, 2, 1) \
+ F(LiveEditCheckAndDropActivations, 3, 1) \
F(LiveEditCompareStrings, 2, 1) \
F(LiveEditRestartFrame, 2, 1)
@@ -399,7 +385,8 @@ namespace internal {
F(RoundNumber, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathFround, 1, 1) \
- F(IsMinusZero, 1, 1)
+ F(IsMinusZero, 1, 1) \
+ F(GenerateRandomNumbers, 1, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
@@ -419,7 +406,9 @@ namespace internal {
F(SmiLexicographicCompare, 2, 1) \
F(MaxSmi, 0, 1) \
F(IsSmi, 1, 1) \
- F(GetRootNaN, 0, 1)
+ F(GetRootNaN, 0, 1) \
+ F(GetHoleNaNUpper, 0, 1) \
+ F(GetHoleNaNLower, 0, 1)
#define FOR_EACH_INTRINSIC_OBJECT(F) \
@@ -427,11 +416,8 @@ namespace internal {
F(InternalSetPrototype, 2, 1) \
F(SetPrototype, 2, 1) \
F(GetOwnProperty, 2, 1) \
- F(PreventExtensions, 1, 1) \
- F(IsExtensible, 1, 1) \
+ F(GetOwnProperty_Legacy, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(ObjectFreeze, 1, 1) \
- F(ObjectSeal, 1, 1) \
F(GetProperty, 2, 1) \
F(GetPropertyStrong, 2, 1) \
F(KeyedGetProperty, 2, 1) \
@@ -447,14 +433,10 @@ namespace internal {
F(DeleteProperty_Strict, 2, 1) \
F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
- F(IsPropertyEnumerable, 2, 1) \
+ F(PropertyIsEnumerable, 2, 1) \
F(GetPropertyNamesFast, 1, 1) \
- F(GetOwnPropertyNames, 2, 1) \
- F(GetOwnElementNames, 1, 1) \
+ F(GetOwnPropertyKeys, 2, 1) \
F(GetInterceptorInfo, 1, 1) \
- F(GetNamedInterceptorPropertyNames, 1, 1) \
- F(GetIndexedInterceptorElementNames, 1, 1) \
- F(OwnKeys, 1, 1) \
F(ToFastProperties, 1, 1) \
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
@@ -471,10 +453,8 @@ namespace internal {
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
F(JSValueGetValue, 1, 1) \
- F(HeapObjectGetMap, 1, 1) \
- F(MapGetInstanceType, 1, 1) \
F(ObjectEquals, 2, 1) \
- F(IsSpecObject, 1, 1) \
+ F(IsJSReceiver, 1, 1) \
F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
@@ -495,7 +475,9 @@ namespace internal {
F(InstanceOf, 2, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
- F(IsAccessCheckNeeded, 1, 1)
+ F(IsAccessCheckNeeded, 1, 1) \
+ F(ObjectDefineProperties, 2, 1) \
+ F(ObjectDefineProperty, 3, 1)
#define FOR_EACH_INTRINSIC_OBSERVE(F) \
@@ -536,24 +518,22 @@ namespace internal {
F(BitwiseXor, 2, 1) \
F(BitwiseXor_Strong, 2, 1)
-#define FOR_EACH_INTRINSIC_PROXY(F) \
- F(CreateJSProxy, 2, 1) \
- F(CreateJSFunctionProxy, 4, 1) \
- F(IsJSProxy, 1, 1) \
- F(IsJSFunctionProxy, 1, 1) \
- F(GetHandler, 1, 1) \
- F(GetCallTrap, 1, 1) \
- F(GetConstructTrap, 1, 1) \
- F(Fix, 1, 1)
-
+#define FOR_EACH_INTRINSIC_PROXY(F) \
+ F(IsJSProxy, 1, 1) \
+ F(JSProxyCall, -1 /* >= 2 */, 1) \
+ F(JSProxyConstruct, -1 /* >= 3 */, 1) \
+ F(JSProxyGetTarget, 1, 1) \
+ F(JSProxyGetHandler, 1, 1) \
+ F(JSProxyRevoke, 1, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F) \
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringSplit, 3, 1) \
F(RegExpExec, 4, 1) \
+ F(RegExpFlags, 1, 1) \
+ F(RegExpSource, 1, 1) \
F(RegExpConstructResult, 3, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
- F(MaterializeRegExpLiteral, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpExecReThrow, 4, 1) \
F(IsRegExp, 1, 1)
@@ -564,13 +544,14 @@ namespace internal {
F(DeclareGlobals, 2, 1) \
F(InitializeVarGlobal, 3, 1) \
F(InitializeConstGlobal, 2, 1) \
- F(DeclareLookupSlot, 2, 1) \
- F(DeclareReadOnlyLookupSlot, 2, 1) \
+ F(DeclareLookupSlot, 3, 1) \
F(InitializeLegacyConstLookupSlot, 3, 1) \
F(NewSloppyArguments_Generic, 1, 1) \
F(NewStrictArguments_Generic, 1, 1) \
+ F(NewRestArguments_Generic, 2, 1) \
F(NewSloppyArguments, 3, 1) \
F(NewStrictArguments, 3, 1) \
+ F(NewRestParam, 3, 1) \
F(NewClosure, 1, 1) \
F(NewClosure_Tenured, 1, 1) \
F(NewScriptContext, 2, 1) \
@@ -894,37 +875,35 @@ namespace internal {
F(Bool8x16Shuffle, 18, 1)
-#define FOR_EACH_INTRINSIC_STRINGS(F) \
- F(StringReplaceOneCharWithString, 3, 1) \
- F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
- F(StringAdd, 2, 1) \
- F(InternalizeString, 1, 1) \
- F(StringMatch, 3, 1) \
- F(StringCharCodeAtRT, 2, 1) \
- F(CharFromCode, 1, 1) \
- F(StringCompare, 2, 1) \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
- F(StringToArray, 2, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
- F(StringTrim, 3, 1) \
- F(TruncateString, 2, 1) \
- F(NewString, 2, 1) \
- F(StringEquals, 2, 1) \
- F(FlattenString, 1, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(OneByteSeqStringGetChar, 2, 1) \
- F(OneByteSeqStringSetChar, 3, 1) \
- F(TwoByteSeqStringGetChar, 2, 1) \
- F(TwoByteSeqStringSetChar, 3, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(StringGetLength, 1, 1)
+#define FOR_EACH_INTRINSIC_STRINGS(F) \
+ F(StringReplaceOneCharWithString, 3, 1) \
+ F(StringIndexOf, 3, 1) \
+ F(StringLastIndexOf, 3, 1) \
+ F(StringLocaleCompare, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringAdd, 2, 1) \
+ F(InternalizeString, 1, 1) \
+ F(StringMatch, 3, 1) \
+ F(StringCharCodeAtRT, 2, 1) \
+ F(StringCompare, 2, 1) \
+ F(StringBuilderConcat, 3, 1) \
+ F(StringBuilderJoin, 3, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
+ F(StringToArray, 2, 1) \
+ F(StringToLowerCase, 1, 1) \
+ F(StringToUpperCase, 1, 1) \
+ F(StringTrim, 3, 1) \
+ F(TruncateString, 2, 1) \
+ F(NewString, 2, 1) \
+ F(StringEquals, 2, 1) \
+ F(FlattenString, 1, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringGetChar, 2, 1) \
+ F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringGetChar, 2, 1) \
+ F(TwoByteSeqStringSetChar, 3, 1) \
+ F(StringCharCodeAt, 2, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
@@ -986,10 +965,8 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferInitialize, 3, 1) \
F(ArrayBufferGetByteLength, 1, 1) \
- F(ArrayBufferSliceImpl, 3, 1) \
- F(ArrayBufferIsView, 1, 1) \
+ F(ArrayBufferSliceImpl, 4, 1) \
F(ArrayBufferNeuter, 1, 1) \
F(TypedArrayInitialize, 6, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
@@ -1003,6 +980,7 @@ namespace internal {
F(IsTypedArray, 1, 1) \
F(IsSharedTypedArray, 1, 1) \
F(IsSharedIntegerTypedArray, 1, 1) \
+ F(IsSharedInteger32TypedArray, 1, 1) \
F(DataViewInitialize, 4, 1) \
F(DataViewGetUint8, 3, 1) \
F(DataViewGetInt8, 3, 1) \
@@ -1035,29 +1013,29 @@ namespace internal {
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
#define FOR_EACH_INTRINSIC_IC(F) \
- F(LoadIC_Miss, 3, 1) \
- F(KeyedLoadIC_Miss, 3, 1) \
+ F(BinaryOpIC_Miss, 2, 1) \
+ F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
F(CallIC_Miss, 3, 1) \
- F(StoreIC_Miss, 3, 1) \
- F(StoreIC_Slow, 3, 1) \
- F(KeyedStoreIC_Miss, 3, 1) \
- F(KeyedStoreIC_Slow, 3, 1) \
- F(StoreCallbackProperty, 5, 1) \
- F(LoadPropertyWithInterceptorOnly, 3, 1) \
- F(LoadPropertyWithInterceptor, 3, 1) \
- F(LoadElementWithInterceptor, 2, 1) \
- F(StorePropertyWithInterceptor, 3, 1) \
F(CompareIC_Miss, 3, 1) \
- F(BinaryOpIC_Miss, 2, 1) \
F(CompareNilIC_Miss, 1, 1) \
- F(Unreachable, 0, 1) \
- F(ToBooleanIC_Miss, 1, 1) \
+ F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
+ F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
- F(KeyedStoreIC_MissFromStubFailure, 3, 1) \
- F(StoreIC_MissFromStubFailure, 3, 1) \
- F(ElementsTransitionAndStoreIC_Miss, 4, 1) \
- F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
- F(LoadIC_MissFromStubFailure, 0, 1)
+ F(KeyedStoreIC_Miss, 5, 1) \
+ F(KeyedStoreIC_MissFromStubFailure, 5, 1) \
+ F(KeyedStoreIC_Slow, 5, 1) \
+ F(LoadElementWithInterceptor, 2, 1) \
+ F(LoadIC_Miss, 4, 1) \
+ F(LoadIC_MissFromStubFailure, 4, 1) \
+ F(LoadPropertyWithInterceptor, 3, 1) \
+ F(LoadPropertyWithInterceptorOnly, 3, 1) \
+ F(StoreCallbackProperty, 5, 1) \
+ F(StoreIC_Miss, 5, 1) \
+ F(StoreIC_MissFromStubFailure, 5, 1) \
+ F(StoreIC_Slow, 5, 1) \
+ F(StorePropertyWithInterceptor, 3, 1) \
+ F(ToBooleanIC_Miss, 1, 1) \
+ F(Unreachable, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
@@ -1110,27 +1088,6 @@ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
-class RuntimeState {
- public:
- unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
- return &to_upper_mapping_;
- }
- unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
- return &to_lower_mapping_;
- }
-
- private:
- RuntimeState() {}
- unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
- unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
-
- friend class Isolate;
- friend class Runtime;
-
- DISALLOW_COPY_AND_ASSIGN(RuntimeState);
-};
-
-
class Runtime : public AllStatic {
public:
enum FunctionId {
@@ -1179,7 +1136,10 @@ class Runtime : public AllStatic {
// Get the intrinsic function with the given function entry address.
static const Function* FunctionForEntry(Address ref);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
+ // Get the runtime intrinsic function table.
+ static const Function* RuntimeFunctionTable(Isolate* isolate);
+
+ MUST_USE_RESULT static Maybe<bool> DeleteObjectProperty(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
LanguageMode language_mode);
@@ -1217,15 +1177,38 @@ class Runtime : public AllStatic {
static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
Handle<Object>);
+};
+
+
+class RuntimeState {
+ public:
+ unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
+ return &to_upper_mapping_;
+ }
+ unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
+ return &to_lower_mapping_;
+ }
+
+ Runtime::Function* redirected_intrinsic_functions() {
+ return redirected_intrinsic_functions_.get();
+ }
+
+ void set_redirected_intrinsic_functions(
+ Runtime::Function* redirected_intrinsic_functions) {
+ redirected_intrinsic_functions_.Reset(redirected_intrinsic_functions);
+ }
+
+ private:
+ RuntimeState() {}
+ unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
+ unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+
+ base::SmartArrayPointer<Runtime::Function> redirected_intrinsic_functions_;
- // Find the arguments of the JavaScript function invocation that called
- // into C++ code. Collect these in a newly allocated array of handles
- // (possibly prefixed by a number of empty handles).
- // TODO(mstarzinger): Temporary workaround until this is only used by the
- // %_Arguments and %_ArgumentsLength intrinsics. Make this function local to
- // runtime-scopes.cc then.
- static base::SmartArrayPointer<Handle<Object>> GetCallerArguments(
- Isolate* isolate, int prefix_argc, int* total_argc);
+ friend class Isolate;
+ friend class Runtime;
+
+ DISALLOW_COPY_AND_ASSIGN(RuntimeState);
};
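DeleteObjectProperty now reports through Maybe<bool> rather than MaybeHandle<Object>, matching the Runtime_DeleteLookupSlot rewrite earlier in this patch: IsNothing() signals a pending exception, FromJust() carries the boolean outcome, and MAYBE_RETURN bails out on the Nothing case. For reference, V8's MAYBE_RETURN is roughly:

#define MAYBE_RETURN(call, value)         \
  do {                                    \
    if ((call).IsNothing()) return value; \
  } while (false)

so the call site reads: run the operation, return the exception sentinel if it threw, otherwise convert the bool result into a heap boolean.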
diff --git a/chromium/v8/src/safepoint-table.h b/chromium/v8/src/safepoint-table.h
index a7719e036d4..fbb0152eb31 100644
--- a/chromium/v8/src/safepoint-table.h
+++ b/chromium/v8/src/safepoint-table.h
@@ -230,6 +230,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SAFEPOINT_TABLE_H_
diff --git a/chromium/v8/src/small-pointer-list.h b/chromium/v8/src/small-pointer-list.h
index 241689e5b23..9ece2490644 100644
--- a/chromium/v8/src/small-pointer-list.h
+++ b/chromium/v8/src/small-pointer-list.h
@@ -170,6 +170,7 @@ class SmallPointerList {
DISALLOW_COPY_AND_ASSIGN(SmallPointerList);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SMALL_POINTER_LIST_H_
diff --git a/chromium/v8/src/snapshot/mksnapshot.cc b/chromium/v8/src/snapshot/mksnapshot.cc
index 09cbf93e1ef..c69025adcaa 100644
--- a/chromium/v8/src/snapshot/mksnapshot.cc
+++ b/chromium/v8/src/snapshot/mksnapshot.cc
@@ -9,10 +9,8 @@
#include "include/libplatform/libplatform.h"
#include "src/assembler.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
#include "src/flags.h"
#include "src/list.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
diff --git a/chromium/v8/src/snapshot/natives-common.cc b/chromium/v8/src/snapshot/natives-common.cc
index d027ec9dc79..f30e7940098 100644
--- a/chromium/v8/src/snapshot/natives-common.cc
+++ b/chromium/v8/src/snapshot/natives-common.cc
@@ -35,12 +35,6 @@ FixedArray* NativesCollection<EXPERIMENTAL_EXTRAS>::GetSourceCache(Heap* heap) {
}
-template <>
-FixedArray* NativesCollection<CODE_STUB>::GetSourceCache(Heap* heap) {
- return heap->code_stub_natives_source_cache();
-}
-
-
template <NativeType type>
void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
for (int i = 0; i < GetBuiltinsCount(); i++) {
@@ -54,7 +48,6 @@ void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
// Explicit template instantiations.
template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<CODE_STUB>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXPERIMENTAL_EXTRAS>::UpdateSourceCache(
diff --git a/chromium/v8/src/snapshot/natives-external.cc b/chromium/v8/src/snapshot/natives-external.cc
index 7e5e6c7ba04..6505d15571e 100644
--- a/chromium/v8/src/snapshot/natives-external.cc
+++ b/chromium/v8/src/snapshot/natives-external.cc
@@ -93,23 +93,17 @@ class NativesStore {
return Vector<const char>::cast(name);
}
- bool ReadNameAndContentPair(SnapshotByteSource* bytes) {
+ void ReadNameAndContentPair(SnapshotByteSource* bytes) {
const byte* id;
- int id_length;
const byte* source;
- int source_length;
- bool success = bytes->GetBlob(&id, &id_length) &&
- bytes->GetBlob(&source, &source_length);
- if (success) {
- Vector<const char> id_vector(reinterpret_cast<const char*>(id),
- id_length);
- Vector<const char> source_vector(
- reinterpret_cast<const char*>(source), source_length);
- native_ids_.Add(id_vector);
- native_source_.Add(source_vector);
- native_names_.Add(NameFromId(id, id_length));
- }
- return success;
+ int id_length = bytes->GetBlob(&id);
+ int source_length = bytes->GetBlob(&source);
+ Vector<const char> id_vector(reinterpret_cast<const char*>(id), id_length);
+ Vector<const char> source_vector(reinterpret_cast<const char*>(source),
+ source_length);
+ native_ids_.Add(id_vector);
+ native_source_.Add(source_vector);
+ native_names_.Add(NameFromId(id, id_length));
}
List<Vector<const char> > native_ids_;
@@ -125,11 +119,11 @@ template<NativeType type>
class NativesHolder {
public:
static NativesStore* get() {
- DCHECK(holder_);
+ CHECK(holder_);
return holder_;
}
static void set(NativesStore* store) {
- DCHECK(store);
+ CHECK(store);
holder_ = store;
}
static bool empty() { return holder_ == NULL; }
@@ -157,7 +151,6 @@ void ReadNatives() {
if (natives_blob_ && NativesHolder<CORE>::empty()) {
SnapshotByteSource bytes(natives_blob_->data, natives_blob_->raw_size);
NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
- NativesHolder<CODE_STUB>::set(NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXPERIMENTAL>::set(
NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXTRAS>::set(NativesStore::MakeFromScriptsSource(&bytes));
@@ -188,7 +181,6 @@ void SetNativesFromFile(StartupData* natives_blob) {
*/
void DisposeNatives() {
NativesHolder<CORE>::Dispose();
- NativesHolder<CODE_STUB>::Dispose();
NativesHolder<EXPERIMENTAL>::Dispose();
NativesHolder<EXTRAS>::Dispose();
NativesHolder<EXPERIMENTAL_EXTRAS>::Dispose();
@@ -241,7 +233,6 @@ Vector<const char> NativesCollection<type>::GetScriptsSource() {
template Vector<const char> NativesCollection<T>::GetScriptName(int i); \
template Vector<const char> NativesCollection<T>::GetScriptsSource();
INSTANTIATE_TEMPLATES(CORE)
-INSTANTIATE_TEMPLATES(CODE_STUB)
INSTANTIATE_TEMPLATES(EXPERIMENTAL)
INSTANTIATE_TEMPLATES(EXTRAS)
INSTANTIATE_TEMPLATES(EXPERIMENTAL_EXTRAS)
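NativesHolder's DCHECKs were promoted to CHECKs, so a null or missing NativesStore now aborts in release builds as well, not only under DEBUG. A simplified model of the difference (V8's real macros live in src/base/logging.h and print far richer diagnostics):

#include <cstdlib>

#define MODEL_CHECK(condition)                                      \
  do {                                                              \
    if (!(condition)) std::abort(); /* fires in every build mode */ \
  } while (false)

#ifdef DEBUG
#define MODEL_DCHECK(condition) MODEL_CHECK(condition)
#else
#define MODEL_DCHECK(condition) ((void)0) /* compiled out in release */
#endif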
diff --git a/chromium/v8/src/snapshot/natives.h b/chromium/v8/src/snapshot/natives.h
index c923a0f353b..07f6b1aed35 100644
--- a/chromium/v8/src/snapshot/natives.h
+++ b/chromium/v8/src/snapshot/natives.h
@@ -15,7 +15,6 @@ namespace internal {
enum NativeType {
CORE,
- CODE_STUB,
EXPERIMENTAL,
EXTRAS,
EXPERIMENTAL_EXTRAS,
@@ -49,7 +48,6 @@ class NativesCollection {
};
typedef NativesCollection<CORE> Natives;
-typedef NativesCollection<CODE_STUB> CodeStubNatives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
typedef NativesCollection<EXTRAS> ExtraNatives;
typedef NativesCollection<EXPERIMENTAL_EXTRAS> ExperimentalExtraNatives;
@@ -62,6 +60,7 @@ void ReadNatives();
void DisposeNatives();
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SNAPSHOT_NATIVES_H_
diff --git a/chromium/v8/src/snapshot/serialize.cc b/chromium/v8/src/snapshot/serialize.cc
index fde170d0bfb..421cf0721cd 100644
--- a/chromium/v8/src/snapshot/serialize.cc
+++ b/chromium/v8/src/snapshot/serialize.cc
@@ -15,7 +15,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/objects.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/profiler/cpu-profiler.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/natives.h"
@@ -60,8 +60,6 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
"Heap::NewSpaceAllocationTopAddress()");
- Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
- "Debug::step_in_fp_addr()");
Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
"mod_two_doubles");
// Keyed lookup cache.
@@ -129,17 +127,20 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"double_constants.minus_one_half");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
- Add(ExternalReference::vector_store_virtual_register(isolate).address(),
- "Isolate::vector_store_virtual_register()");
+ Add(ExternalReference::virtual_handler_register(isolate).address(),
+ "Isolate::virtual_handler_register()");
+ Add(ExternalReference::virtual_slot_register(isolate).address(),
+ "Isolate::virtual_slot_register()");
+ Add(ExternalReference::runtime_function_table_address(isolate).address(),
+ "Runtime::runtime_function_table_address()");
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
"Debug::after_break_target_address()");
- Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
- .address(),
- "Debug::restarter_frame_function_pointer_address()");
Add(ExternalReference::debug_is_active_address(isolate).address(),
"Debug::is_active_address()");
+ Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
+ "Debug::step_in_enabled_address()");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
@@ -350,31 +351,6 @@ const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
}
-RootIndexMap::RootIndexMap(Isolate* isolate) {
- map_ = isolate->root_index_map();
- if (map_ != NULL) return;
- map_ = new HashMap(HashMap::PointersMatch);
- for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
- Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
- Object* root = isolate->heap()->root(root_index);
- // Omit root entries that can be written after initialization. They must
- // not be referenced through the root list in the snapshot.
- if (root->IsHeapObject() &&
- isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
- HeapObject* heap_object = HeapObject::cast(root);
- HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
- if (entry != NULL) {
- // Some are initialized to a previous value in the root list.
- DCHECK_LT(GetValue(entry), i);
- } else {
- SetValue(LookupEntry(map_, heap_object, true), i);
- }
- }
- }
- isolate->set_root_index_map(map_);
-}
-
-
class CodeAddressMap: public CodeEventLogger {
public:
explicit CodeAddressMap(Isolate* isolate)
@@ -382,18 +358,17 @@ class CodeAddressMap: public CodeEventLogger {
isolate->logger()->addCodeEventListener(this);
}
- virtual ~CodeAddressMap() {
+ ~CodeAddressMap() override {
isolate_->logger()->removeCodeEventListener(this);
}
- virtual void CodeMoveEvent(Address from, Address to) {
+ void CodeMoveEvent(Address from, Address to) override {
address_to_name_map_.Move(from, to);
}
- virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
- }
+ void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) override {}
- virtual void CodeDeleteEvent(Address from) {
+ void CodeDeleteEvent(Address from) override {
address_to_name_map_.Remove(from);
}
@@ -473,10 +448,8 @@ class CodeAddressMap: public CodeEventLogger {
DISALLOW_COPY_AND_ASSIGN(NameMap);
};
- virtual void LogRecordedBuffer(Code* code,
- SharedFunctionInfo*,
- const char* name,
- int length) {
+ void LogRecordedBuffer(Code* code, SharedFunctionInfo*, const char* name,
+ int length) override {
address_to_name_map_.Insert(code->address(), name, length);
}
@@ -564,8 +537,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
isolate_->heap()->set_native_contexts_list(
- isolate_->heap()->code_stub_context());
-
+ isolate_->heap()->undefined_value());
// The allocation site list is built during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
@@ -576,7 +548,6 @@ void Deserializer::Deserialize(Isolate* isolate) {
// Update data pointers to the external strings containing natives sources.
Natives::UpdateSourceCache(isolate_->heap());
ExtraNatives::UpdateSourceCache(isolate_->heap());
- CodeStubNatives::UpdateSourceCache(isolate_->heap());
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
@@ -585,8 +556,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
MaybeHandle<Object> Deserializer::DeserializePartial(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
Initialize(isolate);
if (!ReserveSpace()) {
V8::FatalProcessOutOfMemory("deserialize context");
@@ -603,18 +573,13 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
OldSpace* code_space = isolate_->heap()->code_space();
Address start_address = code_space->top();
Object* root;
- Object* outdated_contexts;
VisitPointer(&root);
DeserializeDeferredObjects();
- VisitPointer(&outdated_contexts);
// There's no code deserialized here. If this assert fires then that's
// changed and logging should be added to notify the profiler et al of the
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
- CHECK(outdated_contexts->IsFixedArray());
- *outdated_contexts_out =
- Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate);
return Handle<Object>(root, isolate);
}
@@ -707,7 +672,7 @@ class StringTableInsertionKey : public HashTableKey {
return String::cast(key)->Hash();
}
- MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) override {
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
return handle(string_, isolate);
}
@@ -1008,7 +973,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
if (how == kFromCode) { \
Address location_of_branch_data = reinterpret_cast<Address>(current); \
Assembler::deserialization_set_special_target_at( \
- location_of_branch_data, \
+ isolate, location_of_branch_data, \
Code::cast(HeapObject::FromAddress(current_object_address)), \
reinterpret_cast<Address>(new_object)); \
location_of_branch_data += Assembler::kSpecialTargetSize; \
@@ -1147,9 +1112,9 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
Address pc = code->entry() + pc_offset;
Address target = code->entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
- pc, target, data == kInternalReference
- ? RelocInfo::INTERNAL_REFERENCE
- : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ isolate, pc, target, data == kInternalReference
+ ? RelocInfo::INTERNAL_REFERENCE
+ : RelocInfo::INTERNAL_REFERENCE_ENCODED);
break;
}
@@ -1198,11 +1163,6 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
ExtraNatives::GetScriptSource(source_.Get()), current);
break;
- case kCodeStubNativesStringResource:
- current = CopyInNativesSource(
- CodeStubNatives::GetScriptSource(source_.Get()), current);
- break;
-
// Deserialize raw data of variable length.
case kVariableRawData: {
int size_in_bytes = source_.GetInt();
@@ -1397,21 +1357,21 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
code_has_been_output_(false) {}
void Serialize();
void SerializeDeferred();
- void VisitPointers(Object** start, Object** end);
- void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReference(Address* p);
- void VisitExternalReference(RelocInfo* rinfo);
- void VisitInternalReference(RelocInfo* rinfo);
- void VisitCodeTarget(RelocInfo* target);
- void VisitCodeEntry(Address entry_address);
- void VisitCell(RelocInfo* rinfo);
- void VisitRuntimeEntry(RelocInfo* reloc);
+ void VisitPointers(Object** start, Object** end) override;
+ void VisitEmbeddedPointer(RelocInfo* target) override;
+ void VisitExternalReference(Address* p) override;
+ void VisitExternalReference(RelocInfo* rinfo) override;
+ void VisitInternalReference(RelocInfo* rinfo) override;
+ void VisitCodeTarget(RelocInfo* target) override;
+ void VisitCodeEntry(Address entry_address) override;
+ void VisitCell(RelocInfo* rinfo) override;
+ void VisitRuntimeEntry(RelocInfo* reloc) override;
// Used for serializing the external strings that hold the natives source.
void VisitExternalOneByteString(
- v8::String::ExternalOneByteStringResource** resource);
+ v8::String::ExternalOneByteStringResource** resource) override;
// We can't serialize a heap with external two byte strings.
void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {
+ v8::String::ExternalStringResource** resource) override {
UNREACHABLE();
}
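The visitor declarations above swap bare virtual for override, asking the compiler to verify that each method really overrides a base-class virtual; a signature typo then becomes a compile error instead of a silently added new method. The general C++11 idiom, independent of V8:

struct Visitor {
  virtual ~Visitor() = default;
  virtual void VisitPointers(void** start, void** end) = 0;
};

struct Serializer : Visitor {
  // 'override' makes the compiler check this against the base declaration;
  // misspelling the name or a parameter type no longer compiles.
  void VisitPointers(void** start, void** end) override {}
};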
@@ -1504,44 +1464,14 @@ void PartialSerializer::Serialize(Object** o) {
context->set(Context::NEXT_CONTEXT_LINK,
isolate_->heap()->undefined_value());
DCHECK(!context->global_object()->IsUndefined());
- DCHECK(!context->builtins()->IsUndefined());
}
}
VisitPointer(o);
SerializeDeferredObjects();
- SerializeOutdatedContextsAsFixedArray();
Pad();
}
-void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
- int length = outdated_contexts_.length();
- if (length == 0) {
- FixedArray* empty = isolate_->heap()->empty_fixed_array();
- SerializeObject(empty, kPlain, kStartOfObject, 0);
- } else {
- // Serialize an imaginary fixed array containing outdated contexts.
- int size = FixedArray::SizeFor(length);
- Allocate(NEW_SPACE, size);
- sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray");
- sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words");
- Map* map = isolate_->heap()->fixed_array_map();
- SerializeObject(map, kPlain, kStartOfObject, 0);
- Smi* length_smi = Smi::FromInt(length);
- sink_->Put(kOnePointerRawData, "Smi");
- for (int i = 0; i < kPointerSize; i++) {
- sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
- }
- for (int i = 0; i < length; i++) {
- Context* context = outdated_contexts_[i];
- BackReference back_reference = back_reference_map_.Lookup(context);
- sink_->Put(kBackref + back_reference.space(), "BackRef");
- PutBackReference(context, back_reference);
- }
- }
-}
-
-
bool Serializer::ShouldBeSkipped(Object** current) {
Object** roots = isolate()->heap()->roots_array_start();
return current == &roots[Heap::kStoreBufferTopRootIndex]
@@ -1739,10 +1669,7 @@ StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
- // Make sure that all functions are derived from the code-stub context
- DCHECK(!obj->IsJSFunction() ||
- JSFunction::cast(obj)->GetCreationContext() ==
- isolate()->heap()->code_stub_context());
+ DCHECK(!obj->IsJSFunction());
int root_index = root_index_map_.Lookup(obj);
// We can only encode roots as such if they have already been serialized.
@@ -1869,7 +1796,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
// Clear literal boilerplates.
- if (obj->IsJSFunction() && !JSFunction::cast(obj)->shared()->bound()) {
+ if (obj->IsJSFunction()) {
FixedArray* literals = JSFunction::cast(obj)->literals();
for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
}
@@ -1877,13 +1804,6 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
serializer.Serialize();
-
- if (obj->IsContext() &&
- Context::cast(obj)->global_object() == global_object_) {
- // Context refers to the current global object. This reference will
- // become outdated after deserialization.
- outdated_contexts_.Add(Context::cast(obj));
- }
}
@@ -1999,7 +1919,7 @@ class UnlinkWeakCellScope {
if (object->IsWeakCell()) {
weak_cell_ = WeakCell::cast(object);
next_ = weak_cell_->next();
- weak_cell_->clear_next(object->GetHeap());
+ weak_cell_->clear_next(object->GetHeap()->the_hole_value());
}
}
@@ -2274,12 +2194,6 @@ void Serializer::ObjectSerializer::VisitExternalOneByteString(
kExtraNativesStringResource)) {
return;
}
- if (SerializeExternalNativeSourceString(
- CodeStubNatives::GetBuiltinsCount(), resource_pointer,
- CodeStubNatives::GetSourceCache(serializer_->isolate()->heap()),
- kCodeStubNativesStringResource)) {
- return;
- }
// One of the strings in the natives cache should match the resource. We
// don't expect any other kinds of external strings here.
UNREACHABLE();
@@ -2425,7 +2339,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
// Serialize code object.
SnapshotByteSink sink(info->code()->CodeSize() * 2);
- CodeSerializer cs(isolate, &sink, *source, info->code());
+ CodeSerializer cs(isolate, &sink, *source);
DisallowHeapAllocation no_gc;
Object** location = Handle<Object>::cast(info).location();
cs.VisitPointer(location);
@@ -2479,14 +2393,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
return;
case Code::FUNCTION:
DCHECK(code_object->has_reloc_info_for_serialization());
- // Only serialize the code for the toplevel function unless specified
- // by flag. Replace code of inner functions by the lazy compile builtin.
- // This is safe, as checked in Compiler::GetSharedFunctionInfo.
- if (code_object != main_code_ && !FLAG_serialize_inner) {
- SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
- } else {
- SerializeGeneric(code_object, how_to_code, where_to_point);
- }
+ SerializeGeneric(code_object, how_to_code, where_to_point);
return;
case Code::WASM_FUNCTION:
UNREACHABLE();
@@ -2497,7 +2404,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
- CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject());
+ CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
// There should be no hash table embedded. They would require rehashing.
CHECK(!obj->IsHashTable());
// We expect no instantiated function objects or contexts.
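
As orientation for the hunk above: CodeSerializer::Serialize backs V8's public code-cache API of this period. A minimal sketch of the produce/consume roundtrip that exercises it, assuming the contemporaneous v8.h names (kProduceCodeCache/kConsumeCodeCache) and eliding error handling:

#include <cstring>
#include "include/v8.h"

// Compile once with kProduceCodeCache, copy the blob out of |source|,
// then recompile with kConsumeCodeCache to hit the deserialization path.
v8::Local<v8::UnboundScript> CompileWithCache(
    v8::Isolate* isolate, v8::Local<v8::String> source_string) {
  v8::ScriptCompiler::CachedData* cache;
  {
    v8::ScriptCompiler::Source source(source_string);
    v8::ScriptCompiler::CompileUnbound(isolate, &source,
                                       v8::ScriptCompiler::kProduceCodeCache);
    const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
    uint8_t* copy = new uint8_t[data->length];  // blob dies with |source|
    memcpy(copy, data->data, data->length);
    cache = new v8::ScriptCompiler::CachedData(
        copy, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
  }
  v8::ScriptCompiler::Source source(source_string, cache);  // takes ownership
  return v8::ScriptCompiler::CompileUnbound(
      isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);
}
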
diff --git a/chromium/v8/src/snapshot/serialize.h b/chromium/v8/src/snapshot/serialize.h
index d5374a28e01..7f4676eafa0 100644
--- a/chromium/v8/src/snapshot/serialize.h
+++ b/chromium/v8/src/snapshot/serialize.h
@@ -5,7 +5,7 @@
#ifndef V8_SNAPSHOT_SERIALIZE_H_
#define V8_SNAPSHOT_SERIALIZE_H_
-#include "src/hashmap.h"
+#include "src/address-map.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -70,54 +70,6 @@ class ExternalReferenceEncoder {
};
-class AddressMapBase {
- protected:
- static void SetValue(HashMap::Entry* entry, uint32_t v) {
- entry->value = reinterpret_cast<void*>(v);
- }
-
- static uint32_t GetValue(HashMap::Entry* entry) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
- }
-
- inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
- bool insert) {
- if (insert) {
- map->LookupOrInsert(Key(obj), Hash(obj));
- }
- return map->Lookup(Key(obj), Hash(obj));
- }
-
- private:
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-};
-
-
-class RootIndexMap : public AddressMapBase {
- public:
- explicit RootIndexMap(Isolate* isolate);
-
- static const int kInvalidRootIndex = -1;
-
- int Lookup(HeapObject* obj) {
- HashMap::Entry* entry = LookupEntry(map_, obj, false);
- if (entry) return GetValue(entry);
- return kInvalidRootIndex;
- }
-
- private:
- HashMap* map_;
-
- DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
-};
-
-
class PartialCacheIndexMap : public AddressMapBase {
public:
PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
@@ -140,125 +92,6 @@ class PartialCacheIndexMap : public AddressMapBase {
};
-class BackReference {
- public:
- explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
-
- BackReference() : bitfield_(kInvalidValue) {}
-
- static BackReference SourceReference() { return BackReference(kSourceValue); }
-
- static BackReference GlobalProxyReference() {
- return BackReference(kGlobalProxyValue);
- }
-
- static BackReference LargeObjectReference(uint32_t index) {
- return BackReference(SpaceBits::encode(LO_SPACE) |
- ChunkOffsetBits::encode(index));
- }
-
- static BackReference DummyReference() { return BackReference(kDummyValue); }
-
- static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- DCHECK_NE(LO_SPACE, space);
- return BackReference(
- SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
- ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
- }
-
- bool is_valid() const { return bitfield_ != kInvalidValue; }
- bool is_source() const { return bitfield_ == kSourceValue; }
- bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
-
- AllocationSpace space() const {
- DCHECK(is_valid());
- return SpaceBits::decode(bitfield_);
- }
-
- uint32_t chunk_offset() const {
- DCHECK(is_valid());
- return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
- }
-
- uint32_t large_object_index() const {
- DCHECK(is_valid());
- DCHECK(chunk_index() == 0);
- return ChunkOffsetBits::decode(bitfield_);
- }
-
- uint32_t chunk_index() const {
- DCHECK(is_valid());
- return ChunkIndexBits::decode(bitfield_);
- }
-
- uint32_t reference() const {
- DCHECK(is_valid());
- return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
- }
-
- uint32_t bitfield() const { return bitfield_; }
-
- private:
- static const uint32_t kInvalidValue = 0xFFFFFFFF;
- static const uint32_t kSourceValue = 0xFFFFFFFE;
- static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
- static const uint32_t kDummyValue = 0xFFFFFFFC;
- static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
- static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
-
- public:
- static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
-
- private:
- class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
- class ChunkIndexBits
- : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
- class SpaceBits
- : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
- };
-
- uint32_t bitfield_;
-};
-
-
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class BackReferenceMap : public AddressMapBase {
- public:
- BackReferenceMap()
- : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
-
- ~BackReferenceMap() { delete map_; }
-
- BackReference Lookup(HeapObject* obj) {
- HashMap::Entry* entry = LookupEntry(map_, obj, false);
- return entry ? BackReference(GetValue(entry)) : BackReference();
- }
-
- void Add(HeapObject* obj, BackReference b) {
- DCHECK(b.is_valid());
- DCHECK_NULL(LookupEntry(map_, obj, false));
- HashMap::Entry* entry = LookupEntry(map_, obj, true);
- SetValue(entry, b.bitfield());
- }
-
- void AddSourceString(String* string) {
- Add(string, BackReference::SourceReference());
- }
-
- void AddGlobalProxy(HeapObject* global_proxy) {
- Add(global_proxy, BackReference::GlobalProxyReference());
- }
-
- private:
- DisallowHeapAllocation no_allocation_;
- HashMap* map_;
- DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
-};
-
-
class HotObjectsList {
public:
HotObjectsList() : index_(0) {
@@ -303,8 +136,6 @@ class SerializerDeserializer: public ObjectVisitor {
public:
static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
- static int nop() { return kNop; }
-
// No reservation for large object space necessary.
static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
static const int kNumberOfSpaces = LAST_SPACE + 1;
@@ -388,10 +219,7 @@ class SerializerDeserializer: public ObjectVisitor {
static const int kNativesStringResource = 0x5d;
- // Used for the source code for compiled stubs, which is in the executable,
- // but is referred to from external strings in the snapshot.
- static const int kCodeStubNativesStringResource = 0x5e;
// Used for the source code for V8 extras, which is in the executable,
// but is referred to from external strings in the snapshot.
- static const int kExtraNativesStringResource = 0x5f;
+ static const int kExtraNativesStringResource = 0x5e;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
@@ -534,15 +362,14 @@ class Deserializer: public SerializerDeserializer {
DecodeReservation(data->Reservations());
}
- virtual ~Deserializer();
+ ~Deserializer() override;
// Deserialize the snapshot into an empty heap.
void Deserialize(Isolate* isolate);
// Deserialize a single object and the objects reachable from it.
- MaybeHandle<Object> DeserializePartial(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out);
+ MaybeHandle<Object> DeserializePartial(Isolate* isolate,
+ Handle<JSGlobalProxy> global_proxy);
// Deserialize a shared function info. Fail gracefully.
MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
@@ -554,11 +381,9 @@ class Deserializer: public SerializerDeserializer {
}
private:
- virtual void VisitPointers(Object** start, Object** end);
+ void VisitPointers(Object** start, Object** end) override;
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
- UNREACHABLE();
- }
+ void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
void Initialize(Isolate* isolate);
@@ -645,7 +470,7 @@ class CodeAddressMap;
class Serializer : public SerializerDeserializer {
public:
Serializer(Isolate* isolate, SnapshotByteSink* sink);
- ~Serializer();
+ ~Serializer() override;
void VisitPointers(Object** start, Object** end) override;
void EncodeReservations(List<SerializedData::Reservation>* out) const;
@@ -787,26 +612,22 @@ class PartialSerializer : public Serializer {
SnapshotByteSink* sink)
: Serializer(isolate, sink),
startup_serializer_(startup_snapshot_serializer),
- outdated_contexts_(0),
global_object_(NULL) {
InitializeCodeAddressMap();
}
- ~PartialSerializer() { OutputStatistics("PartialSerializer"); }
+ ~PartialSerializer() override { OutputStatistics("PartialSerializer"); }
// Serialize the objects reachable from a single object pointer.
void Serialize(Object** o);
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
private:
int PartialSnapshotCacheIndex(HeapObject* o);
bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
- void SerializeOutdatedContextsAsFixedArray();
-
Serializer* startup_serializer_;
- List<Context*> outdated_contexts_;
Object* global_object_;
PartialCacheIndexMap partial_cache_index_map_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
@@ -816,7 +637,7 @@ class PartialSerializer : public Serializer {
class StartupSerializer : public Serializer {
public:
StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
- ~StartupSerializer() { OutputStatistics("StartupSerializer"); }
+ ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }
// The StartupSerializer has to serialize the root array, which is slightly
// different.
@@ -827,13 +648,9 @@ class StartupSerializer : public Serializer {
// 2) Partial snapshot cache.
// 3) Weak references (e.g. the string table).
virtual void SerializeStrongReferences();
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
void SerializeWeakReferencesAndDeferred();
- void Serialize() {
- SerializeStrongReferences();
- SerializeWeakReferencesAndDeferred();
- }
private:
intptr_t root_index_wave_front_;
@@ -863,16 +680,15 @@ class CodeSerializer : public Serializer {
const List<uint32_t>* stub_keys() const { return &stub_keys_; }
private:
- CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
- Code* main_code)
- : Serializer(isolate, sink), source_(source), main_code_(main_code) {
+ CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
+ : Serializer(isolate, sink), source_(source) {
back_reference_map_.AddSourceString(source);
}
- ~CodeSerializer() { OutputStatistics("CodeSerializer"); }
+ ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
- virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
WhereToPoint where_to_point);
@@ -886,7 +702,6 @@ class CodeSerializer : public Serializer {
DisallowHeapAllocation no_gc_;
String* source_;
- Code* main_code_;
List<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -989,6 +804,7 @@ class SerializedCodeData : public SerializedData {
static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
static const int kHeaderSize = kChecksum2Offset + kInt32Size;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SNAPSHOT_SERIALIZE_H_
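
The classes deleted from this header (AddressMapBase, RootIndexMap, BackReference, BackReferenceMap) are not gone; they appear to move into the newly included src/address-map.h. For readers of the removed BackReference code, a standalone sketch of the same packing idea, with illustrative field widths (not V8's exact kPageSizeBits-derived layout):

#include <cstdint>

// A back reference packs an allocation space tag, a chunk index, and a
// word-granular chunk offset into one uint32_t, mirroring the BitField
// layout above. Widths below are assumptions for illustration only.
constexpr int kOffsetBits = 17;
constexpr int kIndexBits = 12;
constexpr int kSpaceBits = 3;
static_assert(kOffsetBits + kIndexBits + kSpaceBits <= 32, "fits in 32 bits");

constexpr uint32_t Encode(uint32_t space, uint32_t chunk, uint32_t offset) {
  return (space << (kIndexBits + kOffsetBits)) | (chunk << kOffsetBits) | offset;
}
constexpr uint32_t Space(uint32_t ref) {
  return ref >> (kIndexBits + kOffsetBits);
}
constexpr uint32_t Chunk(uint32_t ref) {
  return (ref >> kOffsetBits) & ((1u << kIndexBits) - 1);
}
constexpr uint32_t Offset(uint32_t ref) {
  return ref & ((1u << kOffsetBits) - 1);
}

// Round trip: space 2, chunk 5, offset 640 words.
static_assert(Space(Encode(2, 5, 640)) == 2, "");
static_assert(Chunk(Encode(2, 5, 640)) == 5, "");
static_assert(Offset(Encode(2, 5, 640)) == 640, "");
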
diff --git a/chromium/v8/src/snapshot/snapshot-common.cc b/chromium/v8/src/snapshot/snapshot-common.cc
index 0b7e11d1ec4..97e7c6b5065 100644
--- a/chromium/v8/src/snapshot/snapshot-common.cc
+++ b/chromium/v8/src/snapshot/snapshot-common.cc
@@ -66,8 +66,7 @@ bool Snapshot::Initialize(Isolate* isolate) {
MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
if (!isolate->snapshot_available()) return Handle<Context>();
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -77,15 +76,11 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
- MaybeHandle<Object> maybe_context = deserializer.DeserializePartial(
- isolate, global_proxy, outdated_contexts_out);
+ MaybeHandle<Object> maybe_context =
+ deserializer.DeserializePartial(isolate, global_proxy);
Handle<Object> result;
if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>();
CHECK(result->IsContext());
- // If the snapshot does not contain a custom script, we need to update
- // the global object for exactly two contexts: the builtins context and the
- // script context that has the global "this" binding.
- CHECK(EmbedsScript(isolate) || (*outdated_contexts_out)->length() == 2);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = context_data.length();
diff --git a/chromium/v8/src/snapshot/snapshot-source-sink.cc b/chromium/v8/src/snapshot/snapshot-source-sink.cc
index 7048c355ec2..812de5e2a82 100644
--- a/chromium/v8/src/snapshot/snapshot-source-sink.cc
+++ b/chromium/v8/src/snapshot/snapshot-source-sink.cc
@@ -40,27 +40,12 @@ void SnapshotByteSink::PutRaw(const byte* data, int number_of_bytes,
}
-bool SnapshotByteSource::AtEOF() {
- if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
- for (int x = position_; x < length_; x++) {
- if (data_[x] != SerializerDeserializer::nop()) return false;
- }
- return true;
-}
-
-
-bool SnapshotByteSource::GetBlob(const byte** data, int* number_of_bytes) {
+int SnapshotByteSource::GetBlob(const byte** data) {
int size = GetInt();
- *number_of_bytes = size;
-
- if (position_ + size <= length_) {
- *data = &data_[position_];
- Advance(size);
- return true;
- } else {
- Advance(length_ - position_); // proceed until end.
- return false;
- }
+ CHECK(position_ + size <= length_);
+ *data = &data_[position_];
+ Advance(size);
+ return size;
}
} // namespace internal
} // namespace v8
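
The GetBlob rewrite above tightens the contract: a short read is now a fatal CHECK rather than a recoverable failure. A hypothetical call site migrates like this (Failure() stands in for whatever the caller did on a truncated snapshot):

// Before: failure reported through the return value and an out-parameter.
const byte* data;
int size;
if (!source.GetBlob(&data, &size)) return Failure();

// After: GetBlob CHECKs that the blob fits, so callers just take the length.
const byte* data;
int size = source.GetBlob(&data);
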
diff --git a/chromium/v8/src/snapshot/snapshot-source-sink.h b/chromium/v8/src/snapshot/snapshot-source-sink.h
index e0290c9415b..360ec76bb61 100644
--- a/chromium/v8/src/snapshot/snapshot-source-sink.h
+++ b/chromium/v8/src/snapshot/snapshot-source-sink.h
@@ -57,9 +57,8 @@ class SnapshotByteSource final {
return answer;
}
- bool GetBlob(const byte** data, int* number_of_bytes);
-
- bool AtEOF();
+ // Returns length.
+ int GetBlob(const byte** data);
int position() { return position_; }
@@ -101,7 +100,7 @@ class SnapshotByteSink {
List<byte> data_;
};
-} // namespace v8::internal
+} // namespace internal
} // namespace v8
#endif // V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
diff --git a/chromium/v8/src/snapshot/snapshot.h b/chromium/v8/src/snapshot/snapshot.h
index 1379644fd85..d99f118bff2 100644
--- a/chromium/v8/src/snapshot/snapshot.h
+++ b/chromium/v8/src/snapshot/snapshot.h
@@ -37,8 +37,7 @@ class Snapshot : public AllStatic {
static bool Initialize(Isolate* isolate);
// Create a new context using the internal partial snapshot.
static MaybeHandle<Context> NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out);
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy);
static bool HaveASnapshotToStartFrom(Isolate* isolate);
@@ -89,6 +88,7 @@ class Snapshot : public AllStatic {
void SetSnapshotFromFile(StartupData* snapshot_blob);
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SNAPSHOT_SNAPSHOT_H_
diff --git a/chromium/v8/src/splay-tree-inl.h b/chromium/v8/src/splay-tree-inl.h
index 6c7b4f404ca..1adfdac6db8 100644
--- a/chromium/v8/src/splay-tree-inl.h
+++ b/chromium/v8/src/splay-tree-inl.h
@@ -290,6 +290,7 @@ void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SPLAY_TREE_INL_H_
diff --git a/chromium/v8/src/splay-tree.h b/chromium/v8/src/splay-tree.h
index 30e5d6787f3..bee8429e39d 100644
--- a/chromium/v8/src/splay-tree.h
+++ b/chromium/v8/src/splay-tree.h
@@ -198,6 +198,7 @@ class SplayTree {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_SPLAY_TREE_H_
diff --git a/chromium/v8/src/startup-data-util.cc b/chromium/v8/src/startup-data-util.cc
index 92c4b5b3e98..4e0ad97a0cf 100644
--- a/chromium/v8/src/startup-data-util.cc
+++ b/chromium/v8/src/startup-data-util.cc
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/utils.h"
namespace v8 {
@@ -44,10 +45,13 @@ void Load(const char* blob_file, v8::StartupData* startup_data,
void (*setter_fn)(v8::StartupData*)) {
ClearStartupData(startup_data);
- if (!blob_file) return;
+ CHECK(blob_file);
FILE* file = fopen(blob_file, "rb");
- if (!file) return;
+ if (!file) {
+ PrintF(stderr, "Failed to open startup resource '%s'.\n", blob_file);
+ return;
+ }
fseek(file, 0, SEEK_END);
startup_data->raw_size = static_cast<int>(ftell(file));
@@ -58,7 +62,11 @@ void Load(const char* blob_file, v8::StartupData* startup_data,
1, startup_data->raw_size, file));
fclose(file);
- if (startup_data->raw_size == read_size) (*setter_fn)(startup_data);
+ if (startup_data->raw_size == read_size) {
+ (*setter_fn)(startup_data);
+ } else {
+ PrintF(stderr, "Corrupted startup resource '%s'.\n", blob_file);
+ }
}
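
Load() is invoked once per blob with the matching public setter; a sketch of the intended pattern, with placeholder file names (SetNativesDataBlob/SetSnapshotDataBlob are the v8::V8 API of this period):

// Feed externally built startup blobs to V8 before initialization.
v8::StartupData natives, snapshot;
Load("natives_blob.bin", &natives, v8::V8::SetNativesDataBlob);
Load("snapshot_blob.bin", &snapshot, v8::V8::SetSnapshotDataBlob);
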
diff --git a/chromium/v8/src/string-builder.cc b/chromium/v8/src/string-builder.cc
index e24def6b683..6c5144d574a 100644
--- a/chromium/v8/src/string-builder.cc
+++ b/chromium/v8/src/string-builder.cc
@@ -52,7 +52,7 @@ IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
part_length_(kInitialPartLength),
current_index_(0) {
// Create an accumulator handle starting with the empty string.
- accumulator_ = Handle<String>(isolate->heap()->empty_string(), isolate);
+ accumulator_ = Handle<String>::New(isolate->heap()->empty_string(), isolate);
current_part_ =
factory()->NewRawOneByteString(part_length_).ToHandleChecked();
}
diff --git a/chromium/v8/src/string-builder.h b/chromium/v8/src/string-builder.h
index 554277dab17..98bd82b97a0 100644
--- a/chromium/v8/src/string-builder.h
+++ b/chromium/v8/src/string-builder.h
@@ -346,10 +346,12 @@ class IncrementalStringBuilder {
DCHECK(string->length() >= required_length);
}
- ~NoExtendString() {
+ Handle<String> Finalize() {
Handle<SeqString> string = Handle<SeqString>::cast(string_);
int length = NoExtend<DestChar>::written();
- *string_.location() = *SeqString::Truncate(string, length);
+ Handle<String> result = SeqString::Truncate(string, length);
+ string_ = Handle<String>();
+ return result;
}
private:
@@ -429,7 +431,7 @@ void IncrementalStringBuilder::Append(SrcChar c) {
}
if (current_index_ == part_length_) Extend();
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRING_BUILDER_H_
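
With the change above, truncation is no longer a destructor side effect that writes through the handle location; callers now finish explicitly. Roughly (a sketch; the constructor shape is assumed from the surrounding class):

// Reserve worst-case space, write characters, then truncate explicitly.
NoExtendString<uint8_t> no_extend(string, worst_case_length);
// ... append characters via no_extend ...
Handle<String> result = no_extend.Finalize();  // truncated to what was written
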
diff --git a/chromium/v8/src/string-search.h b/chromium/v8/src/string-search.h
index c0cc2cad4b3..7db09934f54 100644
--- a/chromium/v8/src/string-search.h
+++ b/chromium/v8/src/string-search.h
@@ -563,6 +563,7 @@ int SearchString(Isolate* isolate,
return search.Search(subject, start_index);
}
-}} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRING_SEARCH_H_
diff --git a/chromium/v8/src/string-stream.h b/chromium/v8/src/string-stream.h
index 2f11b182d00..03ea0620ad2 100644
--- a/chromium/v8/src/string-stream.h
+++ b/chromium/v8/src/string-stream.h
@@ -178,6 +178,7 @@ class StringStream final {
DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRING_STREAM_H_
diff --git a/chromium/v8/src/strtod.h b/chromium/v8/src/strtod.h
index 737b5484c5e..75e60b029e3 100644
--- a/chromium/v8/src/strtod.h
+++ b/chromium/v8/src/strtod.h
@@ -14,6 +14,7 @@ namespace internal {
// contain a dot or a sign. It must not start with '0', and must not be empty.
double Strtod(Vector<const char> buffer, int exponent);
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_STRTOD_H_
diff --git a/chromium/v8/src/third_party/fdlibm/fdlibm.cc b/chromium/v8/src/third_party/fdlibm/fdlibm.cc
index 1d49de02484..0ef2301ae38 100644
--- a/chromium/v8/src/third_party/fdlibm/fdlibm.cc
+++ b/chromium/v8/src/third_party/fdlibm/fdlibm.cc
@@ -29,75 +29,6 @@ namespace fdlibm {
inline double scalbn(double x, int y) { return _scalb(x, y); }
#endif // _MSC_VER
-const double MathConstants::constants[] = {
- 6.36619772367581382433e-01, // invpio2 0
- 1.57079632673412561417e+00, // pio2_1 1
- 6.07710050650619224932e-11, // pio2_1t 2
- 6.07710050630396597660e-11, // pio2_2 3
- 2.02226624879595063154e-21, // pio2_2t 4
- 2.02226624871116645580e-21, // pio2_3 5
- 8.47842766036889956997e-32, // pio2_3t 6
- -1.66666666666666324348e-01, // S1 7 coefficients for sin
- 8.33333333332248946124e-03, // 8
- -1.98412698298579493134e-04, // 9
- 2.75573137070700676789e-06, // 10
- -2.50507602534068634195e-08, // 11
- 1.58969099521155010221e-10, // S6 12
- 4.16666666666666019037e-02, // C1 13 coefficients for cos
- -1.38888888888741095749e-03, // 14
- 2.48015872894767294178e-05, // 15
- -2.75573143513906633035e-07, // 16
- 2.08757232129817482790e-09, // 17
- -1.13596475577881948265e-11, // C6 18
- 3.33333333333334091986e-01, // T0 19 coefficients for tan
- 1.33333333333201242699e-01, // 20
- 5.39682539762260521377e-02, // 21
- 2.18694882948595424599e-02, // 22
- 8.86323982359930005737e-03, // 23
- 3.59207910759131235356e-03, // 24
- 1.45620945432529025516e-03, // 25
- 5.88041240820264096874e-04, // 26
- 2.46463134818469906812e-04, // 27
- 7.81794442939557092300e-05, // 28
- 7.14072491382608190305e-05, // 29
- -1.85586374855275456654e-05, // 30
- 2.59073051863633712884e-05, // T12 31
- 7.85398163397448278999e-01, // pio4 32
- 3.06161699786838301793e-17, // pio4lo 33
- 6.93147180369123816490e-01, // ln2_hi 34
- 1.90821492927058770002e-10, // ln2_lo 35
- 6.666666666666666666e-01, // 2/3 36
- 6.666666666666735130e-01, // LP1 37 coefficients for log1p
- 3.999999999940941908e-01, // 38
- 2.857142874366239149e-01, // 39
- 2.222219843214978396e-01, // 40
- 1.818357216161805012e-01, // 41
- 1.531383769920937332e-01, // 42
- 1.479819860511658591e-01, // LP7 43
- 7.09782712893383973096e+02, // 44 overflow threshold for expm1
- 1.44269504088896338700e+00, // 1/ln2 45
- -3.33333333333331316428e-02, // Q1 46 coefficients for expm1
- 1.58730158725481460165e-03, // 47
- -7.93650757867487942473e-05, // 48
- 4.00821782732936239552e-06, // 49
- -2.01099218183624371326e-07, // Q5 50
- 710.4758600739439, // 51 overflow threshold sinh, cosh
- 4.34294481903251816668e-01, // ivln10 52 coefficients for log10
- 3.01029995663611771306e-01, // log10_2hi 53
- 3.69423907715893078616e-13, // log10_2lo 54
- 5.99999999999994648725e-01, // L1 55 coefficients for log2
- 4.28571428578550184252e-01, // 56
- 3.33333329818377432918e-01, // 57
- 2.72728123808534006489e-01, // 58
- 2.30660745775561754067e-01, // 59
- 2.06975017800338417784e-01, // L6 60
- 9.61796693925975554329e-01, // cp 61 2/(3*ln(2))
- 9.61796700954437255859e-01, // cp_h 62
- -7.02846165095275826516e-09, // cp_l 63
- 5.84962487220764160156e-01, // dp_h 64
- 1.35003920212974897128e-08 // dp_l 65
-};
-
// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
static const int two_over_pi[] = {
diff --git a/chromium/v8/src/third_party/fdlibm/fdlibm.h b/chromium/v8/src/third_party/fdlibm/fdlibm.h
index c7bc09a1b89..e417c8ce595 100644
--- a/chromium/v8/src/third_party/fdlibm/fdlibm.h
+++ b/chromium/v8/src/third_party/fdlibm/fdlibm.h
@@ -21,11 +21,7 @@ namespace fdlibm {
int rempio2(double x, double* y);
-// Constants to be exposed to builtins via Float64Array.
-struct MathConstants {
- static const double constants[66];
-};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_FDLIBM_H_
diff --git a/chromium/v8/src/third_party/fdlibm/fdlibm.js b/chromium/v8/src/third_party/fdlibm/fdlibm.js
index a8935565b78..a5e789f38a5 100644
--- a/chromium/v8/src/third_party/fdlibm/fdlibm.js
+++ b/chromium/v8/src/third_party/fdlibm/fdlibm.js
@@ -16,16 +16,9 @@
// The following is a straightforward translation of fdlibm routines
// by Raymond Toy (rtoy@google.com).
-// Double constants that do not have empty lower 32 bits are found in fdlibm.cc
-// and exposed through kMath as typed array. We assume the compiler to convert
-// from decimal to binary accurately enough to produce the intended values.
-// kMath is initialized to a Float64Array during genesis and not writable.
// rempio2result is used as a container for return values of %RemPiO2. It is
// initialized to a two-element Float64Array during genesis.
-var kMath;
-var rempio2result;
-
(function(global, utils) {
"use strict";
@@ -35,27 +28,33 @@ var rempio2result;
// -------------------------------------------------------------------
// Imports
+var GlobalFloat64Array = global.Float64Array;
var GlobalMath = global.Math;
-
var MathAbs;
var MathExp;
+var NaN = %GetRootNaN();
+var rempio2result;
utils.Import(function(from) {
MathAbs = from.MathAbs;
MathExp = from.MathExp;
});
+utils.CreateDoubleResultArray = function(global) {
+ rempio2result = new GlobalFloat64Array(2);
+};
+
// -------------------------------------------------------------------
-define INVPIO2 = kMath[0];
-define PIO2_1 = kMath[1];
-define PIO2_1T = kMath[2];
-define PIO2_2 = kMath[3];
-define PIO2_2T = kMath[4];
-define PIO2_3 = kMath[5];
-define PIO2_3T = kMath[6];
-define PIO4 = kMath[32];
-define PIO4LO = kMath[33];
+define INVPIO2 = 6.36619772367581382433e-01;
+define PIO2_1 = 1.57079632673412561417;
+define PIO2_1T = 6.07710050650619224932e-11;
+define PIO2_2 = 6.07710050630396597660e-11;
+define PIO2_2T = 2.02226624879595063154e-21;
+define PIO2_3 = 2.02226624871116645580e-21;
+define PIO2_3T = 8.47842766036889956997e-32;
+define PIO4 = 7.85398163397448278999e-01;
+define PIO4LO = 3.06161699786838301793e-17;
// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For
// precision, r is returned as two values y0 and y1 such that r = y0 + y1
@@ -267,9 +266,19 @@ endmacro
// Set returnTan to 1 for tan; -1 for cot. Anything else is illegal
// and will cause incorrect results.
//
-macro KTAN(x)
-kMath[19+x]
-endmacro
+define T00 = 3.33333333333334091986e-01;
+define T01 = 1.33333333333201242699e-01;
+define T02 = 5.39682539762260521377e-02;
+define T03 = 2.18694882948595424599e-02;
+define T04 = 8.86323982359930005737e-03;
+define T05 = 3.59207910759131235356e-03;
+define T06 = 1.45620945432529025516e-03;
+define T07 = 5.88041240820264096874e-04;
+define T08 = 2.46463134818469906812e-04;
+define T09 = 7.81794442939557092300e-05;
+define T10 = 7.14072491382608190305e-05;
+define T11 = -1.85586374855275456654e-05;
+define T12 = 2.59073051863633712884e-05;
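
The T00..T12 constants feed the odd/even Horner split in KernelTan below; in plain C++ the shape is as follows (a sketch of the y == 0 case, with T[0..12] holding the coefficients above):

// With z = x*x and w = z*z, the 13-term tan polynomial is folded into two
// short Horner chains over w, halving the sequential dependency chain.
double TanPolynomial(double x, const double T[13]) {
  double z = x * x;
  double w = z * z;
  double r = T[1] + w * (T[3] + w * (T[5] + w * (T[7] + w * (T[9] + w * T[11]))));
  double v = z * (T[2] + w * (T[4] + w * (T[6] + w * (T[8] + w * (T[10] + w * T[12])))));
  double s = z * x;  // x^3
  // KernelTan additionally folds in the low word y; with y == 0:
  return x + z * (s * (r + v)) + T[0] * s;
}
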
function KernelTan(x, y, returnTan) {
var z;
@@ -312,13 +321,13 @@ function KernelTan(x, y, returnTan) {
// Break x^5 * (T1 + x^2*T2 + ...) into
// x^5 * (T1 + x^4*T3 + ... + x^20*T11) +
// x^5 * (x^2 * (T2 + x^4*T4 + ... + x^22*T12))
- var r = KTAN(1) + w * (KTAN(3) + w * (KTAN(5) +
- w * (KTAN(7) + w * (KTAN(9) + w * KTAN(11)))));
- var v = z * (KTAN(2) + w * (KTAN(4) + w * (KTAN(6) +
- w * (KTAN(8) + w * (KTAN(10) + w * KTAN(12))))));
+ var r = T01 + w * (T03 + w * (T05 +
+ w * (T07 + w * (T09 + w * T11))));
+ var v = z * (T02 + w * (T04 + w * (T06 +
+ w * (T08 + w * (T10 + w * T12)))));
var s = z * x;
r = y + z * (s * (r + v) + y);
- r = r + KTAN(0) * s;
+ r = r + T00 * s;
w = x + r;
if (ix >= 0x3fe59428) {
return (1 - ((hx >> 30) & 2)) *
@@ -451,12 +460,17 @@ function MathTan(x) {
//
// See HP-15C Advanced Functions Handbook, p.193.
//
-define LN2_HI = kMath[34];
-define LN2_LO = kMath[35];
-define TWO_THIRD = kMath[36];
-macro KLOG1P(x)
-(kMath[37+x])
-endmacro
+define LN2_HI = 6.93147180369123816490e-01;
+define LN2_LO = 1.90821492927058770002e-10;
+define TWO_THIRD = 6.666666666666666666e-01;
+define LP1 = 6.666666666666735130e-01;
+define LP2 = 3.999999999940941908e-01;
+define LP3 = 2.857142874366239149e-01;
+define LP4 = 2.222219843214978396e-01;
+define LP5 = 1.818357216161805012e-01;
+define LP6 = 1.531383769920937332e-01;
+define LP7 = 1.479819860511658591e-01;
+
// 2^54
define TWO54 = 18014398509481984;
@@ -476,7 +490,7 @@ function MathLog1p(x) {
if (x === -1) {
return -INFINITY; // log1p(-1) = -inf
} else {
- return NAN; // log1p(x<-1) = NaN
+ return NaN; // log1p(x<-1) = NaN
}
} else if (ax < 0x3c900000) {
// For |x| < 2^-54 we can return x.
@@ -492,7 +506,7 @@ function MathLog1p(x) {
}
}
- // Handle Infinity and NAN
+ // Handle Infinity and NaN
if (hx >= 0x7ff00000) return x;
if (k !== 0) {
@@ -538,9 +552,8 @@ function MathLog1p(x) {
var s = f / (2 + f);
var z = s * s;
- var R = z * (KLOG1P(0) + z * (KLOG1P(1) + z *
- (KLOG1P(2) + z * (KLOG1P(3) + z *
- (KLOG1P(4) + z * (KLOG1P(5) + z * KLOG1P(6)))))));
+ var R = z * (LP1 + z * (LP2 + z * (LP3 + z * (LP4 +
+ z * (LP5 + z * (LP6 + z * LP7))))));
if (k === 0) {
return f - (hfsq - s * (hfsq + R));
} else {
@@ -637,11 +650,13 @@ function MathLog1p(x) {
// For IEEE double
// if x > 7.09782712893383973096e+02 then expm1(x) overflow
//
-define KEXPM1_OVERFLOW = kMath[44];
-define INVLN2 = kMath[45];
-macro KEXPM1(x)
-(kMath[46+x])
-endmacro
+define KEXPM1_OVERFLOW = 7.09782712893383973096e+02;
+define INVLN2 = 1.44269504088896338700;
+define EXPM1_1 = -3.33333333333331316428e-02;
+define EXPM1_2 = 1.58730158725481460165e-03;
+define EXPM1_3 = -7.93650757867487942473e-05;
+define EXPM1_4 = 4.00821782732936239552e-06;
+define EXPM1_5 = -2.01099218183624371326e-07;
function MathExpm1(x) {
x = x * 1; // Convert to number.
@@ -701,8 +716,8 @@ function MathExpm1(x) {
// x is now in primary range
var hfx = 0.5 * x;
var hxs = x * hfx;
- var r1 = 1 + hxs * (KEXPM1(0) + hxs * (KEXPM1(1) + hxs *
- (KEXPM1(2) + hxs * (KEXPM1(3) + hxs * KEXPM1(4)))));
+ var r1 = 1 + hxs * (EXPM1_1 + hxs * (EXPM1_2 + hxs *
+ (EXPM1_3 + hxs * (EXPM1_4 + hxs * EXPM1_5))));
t = 3 - r1 * hfx;
var e = hxs * ((r1 - t) / (6 - x * t));
if (k === 0) { // c is 0
@@ -760,7 +775,7 @@ function MathExpm1(x) {
// sinh(x) is |x| if x is +Infinity, -Infinity, or NaN.
// only sinh(0)=0 is exact for finite x.
//
-define KSINH_OVERFLOW = kMath[51];
+define KSINH_OVERFLOW = 710.4758600739439;
define TWO_M28 = 3.725290298461914e-9; // 2^-28, empty lower half
define LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
@@ -812,7 +827,7 @@ function MathSinh(x) {
// cosh(x) is |x| if x is +INF, -INF, or NaN.
// only cosh(0)=1 is exact for finite x.
//
-define KCOSH_OVERFLOW = kMath[51];
+define KCOSH_OVERFLOW = 710.4758600739439;
function MathCosh(x) {
x = x * 1; // Convert to number.
@@ -843,6 +858,63 @@ function MathCosh(x) {
return INFINITY;
}
+// ES6 draft 09-27-13, section 20.2.2.33.
+// Math.tanh(x)
+// Method :
+// x -x
+// e - e
+// 0. tanh(x) is defined to be -----------
+// x -x
+// e + e
+// 1. reduce x to non-negative by tanh(-x) = -tanh(x).
+// 2. 0 <= x <= 2**-55 : tanh(x) := x*(one+x)
+// -t
+// 2**-55 < x <= 1 : tanh(x) := -----; t = expm1(-2x)
+// t + 2
+// 2
+// 1 <= x <= 22.0 : tanh(x) := 1- ----- ; t = expm1(2x)
+// t + 2
+// 22.0 < x <= INF : tanh(x) := 1.
+//
+// Special cases:
+// tanh(NaN) is NaN;
+// only tanh(0) = 0 is exact for finite argument.
+//
+
+define TWO_M55 = 2.77555756156289135105e-17; // 2^-55, empty lower half
+
+function MathTanh(x) {
+ x = x * 1; // Convert to number.
+ // x is Infinity or NaN
+ if (!NUMBER_IS_FINITE(x)) {
+ if (x > 0) return 1;
+ if (x < 0) return -1;
+ return x;
+ }
+
+ var ax = MathAbs(x);
+ var z;
+ // |x| < 22
+ if (ax < 22) {
+ if (ax < TWO_M55) {
+ // |x| < 2^-55, tanh(small) = small.
+ return x;
+ }
+ if (ax >= 1) {
+ // |x| >= 1
+ var t = MathExpm1(2 * ax);
+ z = 1 - 2 / (t + 2);
+ } else {
+ var t = MathExpm1(-2 * ax);
+ z = -t / (t + 2);
+ }
+ } else {
+ // |x| > 22, return +/- 1
+ z = 1;
+ }
+ return (x >= 0) ? z : -z;
+}
+
// ES6 draft 09-27-13, section 20.2.2.21.
// Return the base 10 logarithm of x
//
@@ -870,9 +942,9 @@ function MathCosh(x) {
// log10(10**N) = N for N=0,1,...,22.
//
-define IVLN10 = kMath[52];
-define LOG10_2HI = kMath[53];
-define LOG10_2LO = kMath[54];
+define IVLN10 = 4.34294481903251816668e-01;
+define LOG10_2HI = 3.01029995663611771306e-01;
+define LOG10_2LO = 3.69423907715893078616e-13;
function MathLog10(x) {
x = x * 1; // Convert to number.
@@ -885,7 +957,7 @@ function MathLog10(x) {
// log10(+/- 0) = -Infinity.
if (((hx & 0x7fffffff) | lx) === 0) return -INFINITY;
// log10 of negative number is NaN.
- if (hx < 0) return NAN;
+ if (hx < 0) return NaN;
// Subnormal number. Scale up x.
k -= 54;
x *= TWO54;
@@ -920,18 +992,21 @@ function MathLog10(x) {
// log2(x) = w1 + w2
// where w1 has 53-24 = 29 bits of trailing zeroes.
-define DP_H = kMath[64];
-define DP_L = kMath[65];
+define DP_H = 5.84962487220764160156e-01;
+define DP_L = 1.35003920212974897128e-08;
// Polynomial coefficients for (3/2)*(log2(x) - 2*s - 2/3*s^3)
-macro KLOG2(x)
-(kMath[55+x])
-endmacro
+define LOG2_1 = 5.99999999999994648725e-01;
+define LOG2_2 = 4.28571428578550184252e-01;
+define LOG2_3 = 3.33333329818377432918e-01;
+define LOG2_4 = 2.72728123808534006489e-01;
+define LOG2_5 = 2.30660745775561754067e-01;
+define LOG2_6 = 2.06975017800338417784e-01;
// cp = 2/(3*ln(2)). Note that cp_h + cp_l is cp, but with more accuracy.
-define CP = kMath[61];
-define CP_H = kMath[62];
-define CP_L = kMath[63];
+define CP = 9.61796693925975554329e-01;
+define CP_H = 9.61796700954437255859e-01;
+define CP_L = -7.02846165095275826516e-09;
// 2^53
define TWO53 = 9007199254740992;
@@ -947,7 +1022,7 @@ function MathLog2(x) {
if ((ix | lx) == 0) return -INFINITY;
// log(x) = NaN, if x < 0
- if (hx < 0) return NAN;
+ if (hx < 0) return NaN;
// log2(Infinity) = Infinity, log2(NaN) = NaN
if (ix >= 0x7ff00000) return x;
@@ -996,8 +1071,8 @@ function MathLog2(x) {
// Compute log2(ax)
var s2 = ss * ss;
- var r = s2 * s2 * (KLOG2(0) + s2 * (KLOG2(1) + s2 * (KLOG2(2) + s2 * (
- KLOG2(3) + s2 * (KLOG2(4) + s2 * KLOG2(5))))));
+ var r = s2 * s2 * (LOG2_1 + s2 * (LOG2_2 + s2 * (LOG2_3 + s2 * (
+ LOG2_4 + s2 * (LOG2_5 + s2 * LOG2_6)))));
r += s_l * (s_h + ss);
s2 = s_h * s_h;
t_h = %_ConstructDouble(%_DoubleHi(3.0 + s2 + r), 0);
@@ -1007,10 +1082,10 @@ function MathLog2(x) {
v = s_l * t_h + t_l * ss;
// 2 / (3 * log(2)) * (ss + ...)
- p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
- p_l = v - (p_h - u);
- z_h = CP_H * p_h;
- z_l = CP_L * p_h + p_l * CP + dp_l;
+ var p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
+ var p_l = v - (p_h - u);
+ var z_h = CP_H * p_h;
+ var z_l = CP_L * p_h + p_l * CP + dp_l;
// log2(ax) = (ss + ...) * 2 / (3 * log(2)) = n + dp_h + z_h + z_l
var t = n;
@@ -1029,6 +1104,7 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"tan", MathTan,
"sinh", MathSinh,
"cosh", MathCosh,
+ "tanh", MathTanh,
"log10", MathLog10,
"log2", MathLog2,
"log1p", MathLog1p,
diff --git a/chromium/v8/src/third_party/vtune/v8vtune.gyp b/chromium/v8/src/third_party/vtune/v8vtune.gyp
index 92df29a82b9..6adf3656892 100644
--- a/chromium/v8/src/third_party/vtune/v8vtune.gyp
+++ b/chromium/v8/src/third_party/vtune/v8vtune.gyp
@@ -37,10 +37,6 @@
'dependencies': [
'../../../tools/gyp/v8.gyp:v8',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'sources': [
'ittnotify_config.h',
'ittnotify_types.h',
diff --git a/chromium/v8/src/tracing/trace-event.cc b/chromium/v8/src/tracing/trace-event.cc
new file mode 100644
index 00000000000..04f1f2e2ea0
--- /dev/null
+++ b/chromium/v8/src/tracing/trace-event.cc
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/trace-event.h"
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+v8::Platform* TraceEventHelper::GetCurrentPlatform() {
+ return v8::internal::V8::GetCurrentPlatform();
+}
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
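
TraceEventHelper is the only hook: every macro in the header below resolves through the embedder's v8::Platform. A minimal sketch of a platform that keeps tracing disabled (method signatures assumed from this series' include/v8-platform.h; a concrete subclass must also implement the task-posting methods, omitted here):

class NullTracingPlatform : public v8::Platform {
 public:
  const uint8_t* GetCategoryGroupEnabled(const char* name) override {
    static uint8_t no = 0;  // never enabled, so macros early-out cheaply
    return &no;
  }
  const char* GetCategoryGroupName(const uint8_t* category_enabled) override {
    return "disabled";
  }
  uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled,
                         const char* name, uint64_t id, uint64_t bind_id,
                         int32_t num_args, const char** arg_names,
                         const uint8_t* arg_types, const uint64_t* arg_values,
                         unsigned int flags) override {
    return 0;  // event handle; unused since nothing is recorded
  }
  void UpdateTraceEventDuration(const uint8_t* category_enabled,
                                const char* name, uint64_t handle) override {}
  // ... plus CallOnBackgroundThread, CallOnForegroundThread,
  //     MonotonicallyIncreasingTime, etc.
};
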
diff --git a/chromium/v8/src/tracing/trace-event.h b/chromium/v8/src/tracing/trace-event.h
new file mode 100644
index 00000000000..d2d423c3be4
--- /dev/null
+++ b/chromium/v8/src/tracing/trace-event.h
@@ -0,0 +1,535 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_TRACING_TRACE_EVENT_H_
+#define SRC_TRACING_TRACE_EVENT_H_
+
+#include <stddef.h>
+
+#include "base/trace_event/common/trace_event_common.h"
+#include "include/v8-platform.h"
+#include "src/base/atomicops.h"
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_event_common.h instead of here.
+
+
+// The pointer returned from GetCategoryGroupEnabled() points to a
+// value with zero or more of the following bits. Used in this class only.
+// The TRACE_EVENT macros should only use the value as a bool.
+// These values must be in sync with macro values in trace_log.h in
+// chromium.
+enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ // Category group enabled to export events to ETW.
+ kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
+};
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) v8::internal::tracing::TraceStringWithCopy(str)
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) v8::internal::tracing::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id)
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \
+ category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ v8::internal::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (kEnabledForRecording_CategoryGroupEnabledFlags | \
+ kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// The following macro has no implementation, but it needs to exist since
+// it gets called from scoped trace events. It cannot call UNIMPLEMENTED()
+// since an empty implementation is a valid one.
+#define INTERNAL_TRACE_MEMORY(category, name)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->GetCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->getNumTracesRecorded
+
+// Add a trace event to the platform tracing system.
+// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id,
+// uint64_t bind_id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->UpdateTraceEventDuration
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var))
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
+ v8::base::NoBarrier_Store(&(var), (value))
+
+// The thread buckets for the sampling profiler.
+extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+// TODO(fmeawad): This implementation is at odds with supporting a different
+// tracing configuration for each isolate,
+// https://code.google.com/p/v8/issues/detail?id=4563
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE( \
+ atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
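
Expanded without the UID machinery, each call site's category lookup amounts to a double-checked cache; the "v8.gc" category name below is just an illustrative example:

// One static atomic word per call site caches the enabled-flag pointer,
// so the platform is consulted only on the first hit.
static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
const uint8_t* enabled =
    reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic));
if (!enabled) {
  enabled = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("v8.gc");
  TRACE_EVENT_API_ATOMIC_STORE(
      atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(enabled));
}
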
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ v8::internal::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, flags, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ uint64_t h = v8::internal::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flow_flags; \
+ v8::internal::tracing::TraceID trace_event_bind_id(bind_id, \
+ &trace_event_flags); \
+ uint64_t h = v8::internal::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kNoId, trace_event_bind_id.data(), \
+ trace_event_flags, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ v8::internal::tracing::TraceID trace_event_trace_id(id, \
+ &trace_event_flags); \
+ v8::internal::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.data(), v8::internal::tracing::kNoId, \
+ trace_event_flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Adds a trace event with a given timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
+ timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id and timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ phase, category_group, name, id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id, thread_id, and timestamp. Not
+// Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const uint64_t kNoId = 0;
+
+class TraceEventHelper {
+ public:
+ static v8::Platform* GetCurrentPlatform();
+};
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+ class DontMangle {
+ public:
+ explicit DontMangle(const void* id)
+ : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {}
+ explicit DontMangle(uint64_t id) : data_(id) {}
+ explicit DontMangle(unsigned int id) : data_(id) {}
+ explicit DontMangle(uint16_t id) : data_(id) {}
+ explicit DontMangle(unsigned char id) : data_(id) {}
+ explicit DontMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(int id) : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+ };
+
+ class ForceMangle {
+ public:
+ explicit ForceMangle(uint64_t id) : data_(id) {}
+ explicit ForceMangle(unsigned int id) : data_(id) {}
+ explicit ForceMangle(uint16_t id) : data_(id) {}
+ explicit ForceMangle(unsigned char id) : data_(id) {}
+ explicit ForceMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(int id) : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+ };
+
+ TraceID(const void* id, unsigned int* flags)
+ : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {}
+ TraceID(uint64_t id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(unsigned int id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(uint16_t id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(unsigned char id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(int64_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ TraceID(int id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ TraceID(int16_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ TraceID(signed char id, unsigned int* flags)
+ : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ int64_t as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char*() const { return str_; }
+
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \
+ value_type_id) \
+ static V8_INLINE void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
+ static V8_INLINE void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
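+
+// For example (a sketch): SetTraceValue(1.5, &type, &value) stores
+// TRACE_VALUE_TYPE_DOUBLE in |type| and the bit pattern of 1.5 in |value|,
+// to be decoded later by the tracing backend using |type|.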
+
+// These AddTraceEvent template functions are defined here instead of in the
+// macro, because the arg_values could be temporary objects, such as
+// std::string. In order to store pointers to the internal c_str and pass
+// them through to the tracing API, the arg_values must live throughout
+// these procedures.
+
+static V8_INLINE uint64_t AddTraceEvent(char phase,
+ const uint8_t* category_group_enabled,
+ const char* name, uint64_t id,
+ uint64_t bind_id, unsigned int flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ id, bind_id, kZeroNumArgs, NULL, NULL,
+ NULL, flags);
+}
+
+template <class ARG1_TYPE>
+static V8_INLINE uint64_t AddTraceEvent(char phase,
+ const uint8_t* category_group_enabled,
+ const char* name, uint64_t id,
+ uint64_t bind_id, unsigned int flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ id, bind_id, num_args, &arg1_name,
+ arg_types, arg_values, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static V8_INLINE uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ uint64_t id, uint64_t bind_id, unsigned int flags, const char* arg1_name,
+ const ARG1_TYPE& arg1_val, const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = {arg1_name, arg2_name};
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ id, bind_id, num_args, arg_names,
+ arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(NULL) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled, const char* name,
+ uint64_t event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+  // This Data struct is a workaround to avoid initializing all the members
+  // of Data during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, the compiler would warn about potentially
+  // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ uint64_t event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
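+
+// Sketch of the intended use (normally generated by the TRACE_EVENTx
+// macros, shown here only for illustration):
+//   ScopedTracer tracer;  // cheap: no members of Data are initialized
+//   if (*category_group_enabled)
+//     tracer.Initialize(category_group_enabled, name, event_handle);
+//   // ~ScopedTracer() then updates the event duration, but only if
+//   // Initialize() ran.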
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class ScopedTraceBinaryEfficient {
+ public:
+ ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+ ~ScopedTraceBinaryEfficient();
+
+ private:
+ const uint8_t* category_group_enabled_;
+ const char* name_;
+ uint64_t event_handle_;
+};
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the sampling state it recorded on entry.
+template <size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+ explicit TraceEventSamplingStateScope(const char* category_and_name) {
+ previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+ TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+ }
+
+ ~TraceEventSamplingStateScope() {
+ TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+ }
+
+ static V8_INLINE const char* Current() {
+ return reinterpret_cast<const char*>(
+ TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
+ }
+
+ static V8_INLINE void Set(const char* category_and_name) {
+ TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+ const_cast<char*>(category_and_name)));
+ }
+
+ private:
+ const char* previous_state_;
+};
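+
+// Sketch of use (category_and_name is a hypothetical placeholder):
+//   {
+//     TraceEventSamplingStateScope<0> scope(category_and_name);
+//     // Current() now returns category_and_name for bucket 0.
+//   }  // the previous state is restored here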
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
+
+#endif // SRC_TRACING_TRACE_EVENT_H_
diff --git a/chromium/v8/src/transitions-inl.h b/chromium/v8/src/transitions-inl.h
index f31eff96ba1..96d9495bf40 100644
--- a/chromium/v8/src/transitions-inl.h
+++ b/chromium/v8/src/transitions-inl.h
@@ -17,6 +17,14 @@ TransitionArray* TransitionArray::cast(Object* object) {
}
+Object* TransitionArray::next_link() { return get(kNextLinkIndex); }
+
+
+void TransitionArray::set_next_link(Object* next, WriteBarrierMode mode) {
+ return set(kNextLinkIndex, next, mode);
+}
+
+
bool TransitionArray::HasPrototypeTransitions() {
return get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
}
@@ -29,10 +37,9 @@ FixedArray* TransitionArray::GetPrototypeTransitions() {
}
-void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
- WriteBarrierMode mode) {
+void TransitionArray::SetPrototypeTransitions(FixedArray* transitions) {
DCHECK(transitions->IsFixedArray());
- set(kPrototypeTransitionsIndex, transitions, mode);
+ set(kPrototypeTransitionsIndex, transitions);
}
@@ -104,6 +111,8 @@ bool TransitionArray::IsSpecialTransition(Name* name) {
return name == heap->nonextensible_symbol() ||
name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
name == heap->elements_transition_symbol() ||
+ name == heap->strict_function_transition_symbol() ||
+ name == heap->strong_function_transition_symbol() ||
name == heap->observed_symbol();
}
#endif
@@ -158,13 +167,9 @@ PropertyDetails TransitionArray::GetTargetDetails(Name* name, Map* target) {
}
-void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number,
- Name* key,
- Map* target) {
- FixedArray::NoIncrementalWriteBarrierSet(
- this, ToKeyIndex(transition_number), key);
- FixedArray::NoIncrementalWriteBarrierSet(
- this, ToTargetIndex(transition_number), target);
+void TransitionArray::Set(int transition_number, Name* key, Map* target) {
+ set(ToKeyIndex(transition_number), key);
+ set(ToTargetIndex(transition_number), target);
}
@@ -173,6 +178,7 @@ void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TRANSITIONS_INL_H_
diff --git a/chromium/v8/src/transitions.cc b/chromium/v8/src/transitions.cc
index 64b8133528e..fc24b288677 100644
--- a/chromium/v8/src/transitions.cc
+++ b/chromium/v8/src/transitions.cc
@@ -51,8 +51,7 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
// Re-read existing data; the allocation might have caused it to be cleared.
if (IsSimpleTransition(map->raw_transitions())) {
old_target = GetSimpleTransition(map->raw_transitions());
- result->NoIncrementalWriteBarrierSet(
- 0, GetSimpleTransitionKey(old_target), old_target);
+ result->Set(0, GetSimpleTransitionKey(old_target), old_target);
} else {
result->SetNumberOfTransitions(0);
}
@@ -145,11 +144,11 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
DCHECK_NE(kNotFound, insertion_index);
for (int i = 0; i < insertion_index; ++i) {
- result->NoIncrementalWriteBarrierCopyFrom(array, i, i);
+ result->Set(i, array->GetKey(i), array->GetTarget(i));
}
- result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target);
+ result->Set(insertion_index, *name, *target);
for (int i = insertion_index; i < number_of_transitions; ++i) {
- result->NoIncrementalWriteBarrierCopyFrom(array, i, i + 1);
+ result->Set(i + 1, array->GetKey(i), array->GetTarget(i));
}
SLOW_DCHECK(result->IsSortedNoDuplicates());
@@ -234,6 +233,61 @@ bool TransitionArray::CanHaveMoreTransitions(Handle<Map> map) {
// static
+bool TransitionArray::CompactPrototypeTransitionArray(FixedArray* array) {
+ const int header = kProtoTransitionHeaderSize;
+ int number_of_transitions = NumberOfPrototypeTransitions(array);
+ if (number_of_transitions == 0) {
+ // Empty array cannot be compacted.
+ return false;
+ }
+ int new_number_of_transitions = 0;
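+  // Sweep entries whose weak cells are still live towards the front,
+  // preserving their relative order.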
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* cell = array->get(header + i);
+ if (!WeakCell::cast(cell)->cleared()) {
+ if (new_number_of_transitions != i) {
+ array->set(header + new_number_of_transitions, cell);
+ }
+ new_number_of_transitions++;
+ }
+ }
+ // Fill slots that became free with undefined value.
+ for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
+ array->set_undefined(header + i);
+ }
+ if (number_of_transitions != new_number_of_transitions) {
+ SetNumberOfPrototypeTransitions(array, new_number_of_transitions);
+ }
+ return new_number_of_transitions < number_of_transitions;
+}
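+
+// For example (illustrative): a cache holding [T0, <cleared>, T2] is
+// compacted in place to [T0, T2, undefined], the recorded number of
+// transitions drops from 3 to 2, and true is returned.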
+
+
+// static
+Handle<FixedArray> TransitionArray::GrowPrototypeTransitionArray(
+ Handle<FixedArray> array, int new_capacity, Isolate* isolate) {
+  // Grow the array by a factor of 2, up to kMaxCachedPrototypeTransitions.
+ int capacity = array->length() - kProtoTransitionHeaderSize;
+ new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity);
+ DCHECK_GT(new_capacity, capacity);
+ int grow_by = new_capacity - capacity;
+ array = isolate->factory()->CopyFixedArrayAndGrow(array, grow_by, TENURED);
+ if (capacity < 0) {
+ // There was no prototype transitions array before, so the size
+ // couldn't be copied. Initialize it explicitly.
+ SetNumberOfPrototypeTransitions(*array, 0);
+ }
+ return array;
+}
+
+
+// static
+int TransitionArray::NumberOfPrototypeTransitionsForTest(Map* map) {
+ FixedArray* transitions = GetPrototypeTransitions(map);
+ CompactPrototypeTransitionArray(transitions);
+ return TransitionArray::NumberOfPrototypeTransitions(transitions);
+}
+
+
+// static
void TransitionArray::PutPrototypeTransition(Handle<Map> map,
Handle<Object> prototype,
Handle<Map> target_map) {
@@ -252,23 +306,16 @@ void TransitionArray::PutPrototypeTransition(Handle<Map> map,
int transitions = NumberOfPrototypeTransitions(*cache) + 1;
if (transitions > capacity) {
- // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
- int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
- if (new_capacity == capacity) return;
- int grow_by = new_capacity - capacity;
-
- Isolate* isolate = map->GetIsolate();
- cache = isolate->factory()->CopyFixedArrayAndGrow(cache, grow_by);
- if (capacity < 0) {
- // There was no prototype transitions array before, so the size
- // couldn't be copied. Initialize it explicitly.
- SetNumberOfPrototypeTransitions(*cache, 0);
+ // Grow the array if compacting it doesn't free space.
+ if (!CompactPrototypeTransitionArray(*cache)) {
+ if (capacity == kMaxCachedPrototypeTransitions) return;
+ cache = GrowPrototypeTransitionArray(cache, 2 * transitions,
+ map->GetIsolate());
+ SetPrototypeTransitions(map, cache);
}
-
- SetPrototypeTransitions(map, cache);
}
- // Reload number of transitions as GC might shrink them.
+ // Reload number of transitions as they might have been compacted.
int last = NumberOfPrototypeTransitions(*cache);
int entry = header + last;
@@ -344,27 +391,23 @@ int TransitionArray::Capacity(Object* raw_transitions) {
Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
int number_of_transitions,
int slack) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> array = isolate->factory()->NewTransitionArray(
LengthFor(number_of_transitions + slack));
+ array->set(kNextLinkIndex, isolate->heap()->undefined_value());
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
return Handle<TransitionArray>::cast(array);
}
-void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition) {
- NoIncrementalWriteBarrierSet(target_transition,
- origin->GetKey(origin_transition),
- origin->GetTarget(origin_transition));
-}
-
-
-static void ZapTransitionArray(TransitionArray* transitions) {
- MemsetPointer(transitions->data_start(),
+// static
+void TransitionArray::ZapTransitionArray(TransitionArray* transitions) {
+  // Do not zap the next link; it is used by the GC.
+ STATIC_ASSERT(kNextLinkIndex + 1 == kPrototypeTransitionsIndex);
+ MemsetPointer(transitions->data_start() + kPrototypeTransitionsIndex,
transitions->GetHeap()->the_hole_value(),
- transitions->length());
+ transitions->length() - kPrototypeTransitionsIndex);
+ transitions->SetNumberOfTransitions(0);
}
@@ -387,25 +430,9 @@ void TransitionArray::ReplaceTransitions(Handle<Map> map,
}
-static void ZapPrototypeTransitions(Object* raw_transitions) {
- DCHECK(TransitionArray::IsFullTransitionArray(raw_transitions));
- TransitionArray* transitions = TransitionArray::cast(raw_transitions);
- if (!transitions->HasPrototypeTransitions()) return;
- FixedArray* proto_transitions = transitions->GetPrototypeTransitions();
- MemsetPointer(proto_transitions->data_start(),
- proto_transitions->GetHeap()->the_hole_value(),
- proto_transitions->length());
-}
-
-
void TransitionArray::SetPrototypeTransitions(
Handle<Map> map, Handle<FixedArray> proto_transitions) {
EnsureHasFullTransitionArray(map);
- if (Heap::ShouldZapGarbage()) {
- Object* raw_transitions = map->raw_transitions();
- DCHECK(raw_transitions != *proto_transitions);
- ZapPrototypeTransitions(raw_transitions);
- }
TransitionArray* transitions = TransitionArray::cast(map->raw_transitions());
transitions->SetPrototypeTransitions(*proto_transitions);
}
@@ -427,7 +454,7 @@ void TransitionArray::EnsureHasFullTransitionArray(Handle<Map> map) {
} else if (nof == 1) {
Map* target = GetSimpleTransition(raw_transitions);
Name* key = GetSimpleTransitionKey(target);
- result->NoIncrementalWriteBarrierSet(0, key, target);
+ result->Set(0, key, target);
}
ReplaceTransitions(map, *result);
}
@@ -444,8 +471,10 @@ void TransitionArray::TraverseTransitionTreeInternal(Map* map,
for (int i = 0; i < NumberOfPrototypeTransitions(proto_trans); ++i) {
int index = TransitionArray::kProtoTransitionHeaderSize + i;
WeakCell* cell = WeakCell::cast(proto_trans->get(index));
- TraverseTransitionTreeInternal(Map::cast(cell->value()), callback,
- data);
+ if (!cell->cleared()) {
+ TraverseTransitionTreeInternal(Map::cast(cell->value()), callback,
+ data);
+ }
}
}
for (int i = 0; i < transitions->number_of_transitions(); ++i) {
diff --git a/chromium/v8/src/transitions.h b/chromium/v8/src/transitions.h
index b0aab9502e5..73aca7864ea 100644
--- a/chromium/v8/src/transitions.h
+++ b/chromium/v8/src/transitions.h
@@ -41,6 +41,14 @@ class TransitionArray: public FixedArray {
static Map* SearchTransition(Map* map, PropertyKind kind, Name* name,
PropertyAttributes attributes);
+ static MaybeHandle<Map> SearchTransition(Handle<Map> map, PropertyKind kind,
+ Handle<Name> name,
+ PropertyAttributes attributes) {
+ if (Map* transition = SearchTransition(*map, kind, *name, attributes)) {
+ return handle(transition);
+ }
+ return MaybeHandle<Map>();
+ }
static Map* SearchSpecial(Map* map, Symbol* name);
@@ -105,14 +113,13 @@ class TransitionArray: public FixedArray {
Object* raw = proto_transitions->get(kProtoTransitionNumberOfEntriesOffset);
return Smi::cast(raw)->value();
}
+ static int NumberOfPrototypeTransitionsForTest(Map* map);
static void SetNumberOfPrototypeTransitions(FixedArray* proto_transitions,
int value);
inline FixedArray* GetPrototypeTransitions();
- inline void SetPrototypeTransitions(
- FixedArray* prototype_transitions,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetPrototypeTransitions(FixedArray* prototype_transitions);
inline Object** GetPrototypeTransitionsSlot();
inline bool HasPrototypeTransitions();
@@ -156,8 +163,11 @@ class TransitionArray: public FixedArray {
static int Capacity(Object* raw_transitions);
- // Casting.
- static inline TransitionArray* cast(Object* obj);
+ inline static TransitionArray* cast(Object* object);
+
+ // This field should be used only by GC.
+ inline void set_next_link(Object* next, WriteBarrierMode mode);
+ inline Object* next_link();
static const int kTransitionSize = 2;
static const int kProtoTransitionHeaderSize = 1;
@@ -171,6 +181,14 @@ class TransitionArray: public FixedArray {
bool print_header = true); // NOLINT
#endif
+#ifdef OBJECT_PRINT
+ void TransitionArrayPrint(std::ostream& os); // NOLINT
+#endif
+
+#ifdef VERIFY_HEAP
+ void TransitionArrayVerify();
+#endif
+
#ifdef DEBUG
bool IsSortedNoDuplicates(int valid_entries = -1);
static bool IsSortedNoDuplicates(Map* map);
@@ -190,9 +208,10 @@ class TransitionArray: public FixedArray {
private:
// Layout for full transition arrays.
- static const int kPrototypeTransitionsIndex = 0;
- static const int kTransitionLengthIndex = 1;
- static const int kFirstIndex = 2;
+ static const int kNextLinkIndex = 0;
+ static const int kPrototypeTransitionsIndex = 1;
+ static const int kTransitionLengthIndex = 2;
+ static const int kFirstIndex = 3;
// Layout of map transition entries in full transition arrays.
static const int kTransitionKey = 0;
@@ -264,6 +283,11 @@ class TransitionArray: public FixedArray {
static void SetPrototypeTransitions(Handle<Map> map,
Handle<FixedArray> proto_transitions);
+ static bool CompactPrototypeTransitionArray(FixedArray* array);
+
+ static Handle<FixedArray> GrowPrototypeTransitionArray(
+ Handle<FixedArray> array, int new_capacity, Isolate* isolate);
+
// Compares two tuples <key, kind, attributes>, returns -1 if
// tuple1 is "less" than tuple2, 0 if tuple1 equal to tuple2 and 1 otherwise.
static inline int CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
@@ -283,25 +307,20 @@ class TransitionArray: public FixedArray {
PropertyKind kind2,
PropertyAttributes attributes2);
- inline void NoIncrementalWriteBarrierSet(int transition_number,
- Name* key,
- Map* target);
-
- // Copy a single transition from the origin array.
- inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition);
+ inline void Set(int transition_number, Name* key, Map* target);
#ifdef DEBUG
static void CheckNewTransitionsAreConsistent(Handle<Map> map,
TransitionArray* old_transitions,
Object* transitions);
#endif
+ static void ZapTransitionArray(TransitionArray* transitions);
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TRANSITIONS_H_
diff --git a/chromium/v8/src/type-cache.cc b/chromium/v8/src/type-cache.cc
new file mode 100644
index 00000000000..9ed86214876
--- /dev/null
+++ b/chromium/v8/src/type-cache.cc
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/type-cache.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+base::LazyInstance<TypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+
+// static
+TypeCache const& TypeCache::Get() { return kCache.Get(); }
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/type-cache.h b/chromium/v8/src/type-cache.h
new file mode 100644
index 00000000000..1b3a26033bb
--- /dev/null
+++ b/chromium/v8/src/type-cache.h
@@ -0,0 +1,154 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPE_CACHE_H_
+#define V8_TYPE_CACHE_H_
+
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+
+class TypeCache final {
+ private:
+ // This has to be first for the initialization magic to work.
+ Zone zone_;
+
+ public:
+ static TypeCache const& Get();
+
+ TypeCache() = default;
+
+ Type* const kInt8 =
+ CreateNative(CreateRange<int8_t>(), Type::UntaggedIntegral8());
+ Type* const kUint8 =
+ CreateNative(CreateRange<uint8_t>(), Type::UntaggedIntegral8());
+ Type* const kUint8Clamped = kUint8;
+ Type* const kInt16 =
+ CreateNative(CreateRange<int16_t>(), Type::UntaggedIntegral16());
+ Type* const kUint16 =
+ CreateNative(CreateRange<uint16_t>(), Type::UntaggedIntegral16());
+ Type* const kInt32 =
+ CreateNative(Type::Signed32(), Type::UntaggedIntegral32());
+ Type* const kUint32 =
+ CreateNative(Type::Unsigned32(), Type::UntaggedIntegral32());
+ Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
+ Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
+
+ Type* const kSmi = CreateNative(Type::SignedSmall(), Type::TaggedSigned());
+ Type* const kHeapNumber = CreateNative(Type::Number(), Type::TaggedPointer());
+
+ Type* const kSingletonZero = CreateRange(0.0, 0.0);
+ Type* const kSingletonOne = CreateRange(1.0, 1.0);
+ Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
+ Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
+ Type* const kZeroish =
+ Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
+ Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
+ Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
+ Type* const kIntegerOrMinusZero =
+ Type::Union(kInteger, Type::MinusZero(), zone());
+ Type* const kIntegerOrMinusZeroOrNaN =
+ Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
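+  // Note: kInteger spans [-inf, +inf] and so includes the infinities;
+  // -0 and NaN only enter via the ...OrMinusZero/...OrNaN unions above.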
+
+ Type* const kAdditiveSafeInteger =
+ CreateRange(-4503599627370496.0, 4503599627370496.0);
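+  // 4503599627370496 == 2^52, so the sum of any two integers in this range
+  // has magnitude at most 2^53 and is therefore still exactly representable
+  // as a double.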
+ Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
+ Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
+
+ Type* const kUntaggedUndefined =
+ Type::Intersect(Type::Undefined(), Type::Untagged(), zone());
+
+ // Asm.js related types.
+ Type* const kAsmSigned = kInt32;
+ Type* const kAsmUnsigned = kUint32;
+ Type* const kAsmInt = Type::Union(kAsmSigned, kAsmUnsigned, zone());
+ Type* const kAsmFixnum = Type::Intersect(kAsmSigned, kAsmUnsigned, zone());
+ Type* const kAsmFloat = kFloat32;
+ Type* const kAsmDouble = kFloat64;
+ Type* const kAsmFloatQ = Type::Union(kAsmFloat, kUntaggedUndefined, zone());
+ Type* const kAsmDoubleQ = Type::Union(kAsmDouble, kUntaggedUndefined, zone());
+ // Not part of the Asm.js type hierarchy, but represents a part of what
+ // intish encompasses.
+ Type* const kAsmIntQ = Type::Union(kAsmInt, kUntaggedUndefined, zone());
+ Type* const kAsmFloatDoubleQ = Type::Union(kAsmFloatQ, kAsmDoubleQ, zone());
+ // Asm.js size unions.
+ Type* const kAsmSize8 = Type::Union(kInt8, kUint8, zone());
+ Type* const kAsmSize16 = Type::Union(kInt16, kUint16, zone());
+ Type* const kAsmSize32 =
+ Type::Union(Type::Union(kInt32, kUint32, zone()), kAsmFloat, zone());
+ Type* const kAsmSize64 = kFloat64;
+ // Asm.js other types.
+ Type* const kAsmComparable = Type::Union(
+ kAsmSigned,
+ Type::Union(kAsmUnsigned, Type::Union(kAsmDouble, kAsmFloat, zone()),
+ zone()),
+ zone());
+ Type* const kAsmIntArrayElement =
+ Type::Union(Type::Union(kInt8, kUint8, zone()),
+ Type::Union(Type::Union(kInt16, kUint16, zone()),
+ Type::Union(kInt32, kUint32, zone()), zone()),
+ zone());
+
+  // The FixedArray::length property always contains a smi in the range
+ // [0, FixedArray::kMaxLength].
+ Type* const kFixedArrayLengthType = CreateNative(
+ CreateRange(0.0, FixedArray::kMaxLength), Type::TaggedSigned());
+
+  // The FixedDoubleArray::length property always contains a smi in the range
+ // [0, FixedDoubleArray::kMaxLength].
+ Type* const kFixedDoubleArrayLengthType = CreateNative(
+ CreateRange(0.0, FixedDoubleArray::kMaxLength), Type::TaggedSigned());
+
+ // The JSArray::length property always contains a tagged number in the range
+ // [0, kMaxUInt32].
+ Type* const kJSArrayLengthType =
+ CreateNative(Type::Unsigned32(), Type::Tagged());
+
+ // The String::length property always contains a smi in the range
+ // [0, String::kMaxLength].
+ Type* const kStringLengthType =
+ CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
+
+  // When initializing arrays, we'll unroll the loop if the number of
+ // elements is known to be of this type.
+ Type* const kElementLoopUnrollType = CreateRange(0.0, 16.0);
+
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ Type* const k##TypeName##Array = CreateArray(k##TypeName);
+ TYPED_ARRAYS(TYPED_ARRAY)
+#undef TYPED_ARRAY
+
+ private:
+ Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
+
+ Type* CreateArrayFunction(Type* array) {
+ Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
+ Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
+ Type* arg3 = arg2;
+ return Type::Function(array, arg1, arg2, arg3, zone());
+ }
+
+ Type* CreateNative(Type* semantic, Type* representation) {
+ return Type::Intersect(semantic, representation, zone());
+ }
+
+ template <typename T>
+ Type* CreateRange() {
+ return CreateRange(std::numeric_limits<T>::min(),
+ std::numeric_limits<T>::max());
+ }
+
+ Type* CreateRange(double min, double max) {
+ return Type::Range(min, max, zone());
+ }
+
+ Zone* zone() { return &zone_; }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TYPE_CACHE_H_
diff --git a/chromium/v8/src/type-feedback-vector-inl.h b/chromium/v8/src/type-feedback-vector-inl.h
index 4d1c345e680..97df1b9ae99 100644
--- a/chromium/v8/src/type-feedback-vector-inl.h
+++ b/chromium/v8/src/type-feedback-vector-inl.h
@@ -10,93 +10,88 @@
namespace v8 {
namespace internal {
-// static
-TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
- DCHECK(obj->IsTypeFeedbackVector());
- return reinterpret_cast<TypeFeedbackVector*>(obj);
-}
+template <typename Derived>
+FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
+ FeedbackVectorSlotKind kind) {
+ Derived* derived = static_cast<Derived*>(this);
-int TypeFeedbackVector::first_ic_slot_index() const {
- DCHECK(length() >= kReservedIndexCount);
- return Smi::cast(get(kFirstICSlotIndex))->value();
+ int slot = derived->slots();
+ int entries_per_slot = TypeFeedbackMetadata::GetSlotSize(kind);
+ derived->append(kind);
+ for (int i = 1; i < entries_per_slot; i++) {
+ derived->append(FeedbackVectorSlotKind::INVALID);
+ }
+ return FeedbackVectorSlot(slot);
}
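+// For example (a sketch): adding a LOAD_IC slot, whose slot size is 2,
+// appends [LOAD_IC, INVALID] to the spec and returns the slot index of
+// the LOAD_IC entry; a GENERAL slot appends a single element.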
-int TypeFeedbackVector::ic_with_type_info_count() {
- return length() > 0 ? Smi::cast(get(kWithTypesIndex))->value() : 0;
+// static
+TypeFeedbackMetadata* TypeFeedbackMetadata::cast(Object* obj) {
+ DCHECK(obj->IsTypeFeedbackVector());
+ return reinterpret_cast<TypeFeedbackMetadata*>(obj);
}
-void TypeFeedbackVector::change_ic_with_type_info_count(int delta) {
- if (delta == 0) return;
- int value = ic_with_type_info_count() + delta;
- // Could go negative because of the debugger.
- if (value >= 0) {
- set(kWithTypesIndex, Smi::FromInt(value));
- }
+int TypeFeedbackMetadata::slot_count() const {
+ if (length() == 0) return 0;
+ DCHECK(length() > kReservedIndexCount);
+ return Smi::cast(get(kSlotsCountIndex))->value();
}
-int TypeFeedbackVector::ic_generic_count() {
- return length() > 0 ? Smi::cast(get(kGenericCountIndex))->value() : 0;
+// static
+TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
+ DCHECK(obj->IsTypeFeedbackVector());
+ return reinterpret_cast<TypeFeedbackVector*>(obj);
}
-void TypeFeedbackVector::change_ic_generic_count(int delta) {
- if (delta == 0) return;
- int value = ic_generic_count() + delta;
- if (value >= 0) {
- set(kGenericCountIndex, Smi::FromInt(value));
- }
+int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
+ DCHECK_NE(FeedbackVectorSlotKind::INVALID, kind);
+ DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
+ return kind == FeedbackVectorSlotKind::GENERAL ? 1 : 2;
}
-int TypeFeedbackVector::Slots() const {
- if (length() == 0) return 0;
- return Max(
- 0, first_ic_slot_index() - ic_metadata_length() - kReservedIndexCount);
+bool TypeFeedbackVector::is_empty() const {
+ if (length() == 0) return true;
+ DCHECK(length() > kReservedIndexCount);
+ return false;
}
-int TypeFeedbackVector::ICSlots() const {
+int TypeFeedbackVector::slot_count() const {
if (length() == 0) return 0;
- return (length() - first_ic_slot_index()) / elements_per_ic_slot();
+ DCHECK(length() > kReservedIndexCount);
+ return length() - kReservedIndexCount;
}
-int TypeFeedbackVector::ic_metadata_length() const {
- return VectorICComputer::word_count(ICSlots());
+TypeFeedbackMetadata* TypeFeedbackVector::metadata() const {
+ return is_empty() ? TypeFeedbackMetadata::cast(GetHeap()->empty_fixed_array())
+ : TypeFeedbackMetadata::cast(get(kMetadataIndex));
}
-// Conversion from a slot or ic slot to an integer index to the underlying
-// array.
-int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
- DCHECK(slot.ToInt() < first_ic_slot_index());
- return kReservedIndexCount + ic_metadata_length() + slot.ToInt();
+FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
+ FeedbackVectorSlot slot) const {
+ DCHECK(!is_empty());
+ return metadata()->GetKind(slot);
}
-int TypeFeedbackVector::GetIndex(FeedbackVectorICSlot slot) const {
- int first_ic_slot = first_ic_slot_index();
- DCHECK(slot.ToInt() < ICSlots());
- return first_ic_slot + slot.ToInt() * elements_per_ic_slot();
+int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
+ DCHECK(slot.ToInt() < slot_count());
+ return kReservedIndexCount + slot.ToInt();
}
// Conversion from an integer index to either a slot or an ic slot. The caller
// should know what kind she expects.
FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) const {
- DCHECK(index >= kReservedIndexCount && index < first_ic_slot_index());
- return FeedbackVectorSlot(index - ic_metadata_length() - kReservedIndexCount);
-}
-
-
-FeedbackVectorICSlot TypeFeedbackVector::ToICSlot(int index) const {
- DCHECK(index >= first_ic_slot_index() && index < length());
- int ic_slot = (index - first_ic_slot_index()) / elements_per_ic_slot();
- return FeedbackVectorICSlot(ic_slot);
+ DCHECK(index >= kReservedIndexCount && index < length());
+ return FeedbackVectorSlot(index - kReservedIndexCount);
}
@@ -111,14 +106,31 @@ void TypeFeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
}
-Object* TypeFeedbackVector::Get(FeedbackVectorICSlot slot) const {
- return get(GetIndex(slot));
-}
-
+void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic) {
+ Object* uninitialized_sentinel =
+ TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
+ Object* megamorphic_sentinel =
+ *TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
+ int with = 0;
+ int gen = 0;
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+
+ Object* obj = Get(slot);
+ if (obj != uninitialized_sentinel &&
+ kind != FeedbackVectorSlotKind::GENERAL) {
+ if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+ with++;
+ } else if (obj == megamorphic_sentinel) {
+ gen++;
+ }
+ }
+ }
-void TypeFeedbackVector::Set(FeedbackVectorICSlot slot, Object* value,
- WriteBarrierMode mode) {
- set(GetIndex(slot), value, mode);
+ *with_type_info = with;
+ *generic = gen;
}
@@ -137,8 +149,8 @@ Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
}
-Object* TypeFeedbackVector::RawUninitializedSentinel(Heap* heap) {
- return heap->uninitialized_symbol();
+Object* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+ return isolate->heap()->uninitialized_symbol();
}
@@ -146,7 +158,10 @@ Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
Object* FeedbackNexus::GetFeedbackExtra() const {
- DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+#ifdef DEBUG
+ FeedbackVectorSlotKind kind = vector()->GetKind(slot());
+ DCHECK_LT(1, TypeFeedbackMetadata::GetSlotSize(kind));
+#endif
int extra_index = vector()->GetIndex(slot()) + 1;
return vector()->get(extra_index);
}
@@ -159,14 +174,17 @@ void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
WriteBarrierMode mode) {
- DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+#ifdef DEBUG
+ FeedbackVectorSlotKind kind = vector()->GetKind(slot());
+ DCHECK_LT(1, TypeFeedbackMetadata::GetSlotSize(kind));
+#endif
int index = vector()->GetIndex(slot()) + 1;
vector()->set(index, feedback_extra, mode);
}
Isolate* FeedbackNexus::GetIsolate() const { return vector()->GetIsolate(); }
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPE_FEEDBACK_VECTOR_INL_H_
diff --git a/chromium/v8/src/type-feedback-vector.cc b/chromium/v8/src/type-feedback-vector.cc
index 9da3c655fbe..698f2a6d173 100644
--- a/chromium/v8/src/type-feedback-vector.cc
+++ b/chromium/v8/src/type-feedback-vector.cc
@@ -13,21 +13,28 @@
namespace v8 {
namespace internal {
+
+static bool IsPropertyNameFeedback(Object* feedback) {
+ return feedback->IsString() ||
+ (feedback->IsSymbol() && !Symbol::cast(feedback)->is_private());
+}
+
+
std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind) {
- return os << TypeFeedbackVector::Kind2String(kind);
+ return os << TypeFeedbackMetadata::Kind2String(kind);
}
-FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
- FeedbackVectorICSlot slot) const {
+FeedbackVectorSlotKind TypeFeedbackMetadata::GetKind(
+ FeedbackVectorSlot slot) const {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
return VectorICComputer::decode(data, slot.ToInt());
}
-void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot,
- FeedbackVectorSlotKind kind) {
+void TypeFeedbackMetadata::SetKind(FeedbackVectorSlot slot,
+ FeedbackVectorSlotKind kind) {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
@@ -35,100 +42,120 @@ void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot,
}
-template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
Isolate* isolate, const StaticFeedbackVectorSpec* spec);
-template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
Isolate* isolate, const FeedbackVectorSpec* spec);
// static
template <typename Spec>
-Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
- const Spec* spec) {
+Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
+ const Spec* spec) {
const int slot_count = spec->slots();
- const int ic_slot_count = spec->ic_slots();
- const int index_count = VectorICComputer::word_count(ic_slot_count);
- const int length = slot_count + (ic_slot_count * elements_per_ic_slot()) +
- index_count + kReservedIndexCount;
+ const int slot_kinds_length = VectorICComputer::word_count(slot_count);
+ const int length = slot_kinds_length + kReservedIndexCount;
if (length == kReservedIndexCount) {
- return Handle<TypeFeedbackVector>::cast(
+ return Handle<TypeFeedbackMetadata>::cast(
isolate->factory()->empty_fixed_array());
}
+#ifdef DEBUG
+ for (int i = 0; i < slot_count;) {
+ FeedbackVectorSlotKind kind = spec->GetKind(i);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+ for (int j = 1; j < entry_size; j++) {
+ FeedbackVectorSlotKind kind = spec->GetKind(i + j);
+ DCHECK_EQ(FeedbackVectorSlotKind::INVALID, kind);
+ }
+ i += entry_size;
+ }
+#endif
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length, TENURED);
- if (ic_slot_count > 0) {
- array->set(kFirstICSlotIndex,
- Smi::FromInt(slot_count + index_count + kReservedIndexCount));
- } else {
- array->set(kFirstICSlotIndex, Smi::FromInt(length));
- }
- array->set(kWithTypesIndex, Smi::FromInt(0));
- array->set(kGenericCountIndex, Smi::FromInt(0));
- // Fill the indexes with zeros.
- for (int i = 0; i < index_count; i++) {
+ array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
+ // Fill the bit-vector part with zeros.
+ for (int i = 0; i < slot_kinds_length; i++) {
array->set(kReservedIndexCount + i, Smi::FromInt(0));
}
- // Ensure we can skip the write barrier
- Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
- DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
- for (int i = kReservedIndexCount + index_count; i < length; i++) {
- array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ Handle<TypeFeedbackMetadata> metadata =
+ Handle<TypeFeedbackMetadata>::cast(array);
+ for (int i = 0; i < slot_count; i++) {
+ metadata->SetKind(FeedbackVectorSlot(i), spec->GetKind(i));
+ }
+ return metadata;
+}
+
+
+bool TypeFeedbackMetadata::SpecDiffersFrom(
+ const FeedbackVectorSpec* other_spec) const {
+ if (other_spec->slots() != slot_count()) {
+ return true;
}
- Handle<TypeFeedbackVector> vector = Handle<TypeFeedbackVector>::cast(array);
- for (int i = 0; i < ic_slot_count; i++) {
- vector->SetKind(FeedbackVectorICSlot(i), spec->GetKind(i));
+ int slots = slot_count();
+ for (int i = 0; i < slots; i++) {
+ if (GetKind(FeedbackVectorSlot(i)) != other_spec->GetKind(i)) {
+ return true;
+ }
}
- return vector;
+ return false;
}
-template int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec*,
- FeedbackVectorICSlot);
-template int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec*,
- FeedbackVectorSlot);
+const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
+ switch (kind) {
+ case FeedbackVectorSlotKind::INVALID:
+ return "INVALID";
+ case FeedbackVectorSlotKind::CALL_IC:
+ return "CALL_IC";
+ case FeedbackVectorSlotKind::LOAD_IC:
+ return "LOAD_IC";
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+ return "KEYED_LOAD_IC";
+ case FeedbackVectorSlotKind::STORE_IC:
+ return "STORE_IC";
+ case FeedbackVectorSlotKind::KEYED_STORE_IC:
+ return "KEYED_STORE_IC";
+ case FeedbackVectorSlotKind::GENERAL:
+ return "STUB";
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ break;
+ }
+ UNREACHABLE();
+ return "?";
+}
// static
-template <typename Spec>
-int TypeFeedbackVector::GetIndexFromSpec(const Spec* spec,
- FeedbackVectorSlot slot) {
- const int ic_slot_count = spec->ic_slots();
- const int index_count = VectorICComputer::word_count(ic_slot_count);
- return kReservedIndexCount + index_count + slot.ToInt();
-}
+Handle<TypeFeedbackVector> TypeFeedbackVector::New(
+ Isolate* isolate, Handle<TypeFeedbackMetadata> metadata) {
+ Factory* factory = isolate->factory();
+ const int slot_count = metadata->slot_count();
+ const int length = slot_count + kReservedIndexCount;
+ if (length == kReservedIndexCount) {
+ return Handle<TypeFeedbackVector>::cast(factory->empty_fixed_array());
+ }
-// static
-template <typename Spec>
-int TypeFeedbackVector::GetIndexFromSpec(const Spec* spec,
- FeedbackVectorICSlot slot) {
- const int slot_count = spec->slots();
- const int ic_slot_count = spec->ic_slots();
- const int index_count = VectorICComputer::word_count(ic_slot_count);
- return kReservedIndexCount + index_count + slot_count +
- slot.ToInt() * elements_per_ic_slot();
-}
+ Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
+ array->set(kMetadataIndex, *metadata);
+ // Ensure we can skip the write barrier
+ Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
+ DCHECK_EQ(*factory->uninitialized_symbol(), *uninitialized_sentinel);
+ for (int i = kReservedIndexCount; i < length; i++) {
+ array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ }
-// static
-int TypeFeedbackVector::PushAppliedArgumentsIndex() {
- const int index_count = VectorICComputer::word_count(1);
- return kReservedIndexCount + index_count;
+ return Handle<TypeFeedbackVector>::cast(array);
}
// static
-Handle<TypeFeedbackVector> TypeFeedbackVector::CreatePushAppliedArgumentsVector(
- Isolate* isolate) {
- FeedbackVectorSlotKind kinds[] = {FeedbackVectorSlotKind::KEYED_LOAD_IC};
- StaticFeedbackVectorSpec spec(0, 1, kinds);
- Handle<TypeFeedbackVector> feedback_vector =
- isolate->factory()->NewTypeFeedbackVector(&spec);
- DCHECK(PushAppliedArgumentsIndex() ==
- feedback_vector->GetIndex(FeedbackVectorICSlot(0)));
- return feedback_vector;
+int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec* spec,
+ FeedbackVectorSlot slot) {
+ return kReservedIndexCount + slot.ToInt();
}
@@ -142,100 +169,69 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
}
-bool TypeFeedbackVector::SpecDiffersFrom(
- const FeedbackVectorSpec* other_spec) const {
- if (other_spec->slots() != Slots() || other_spec->ic_slots() != ICSlots()) {
- return true;
- }
-
- int ic_slots = ICSlots();
- for (int i = 0; i < ic_slots; i++) {
- if (GetKind(FeedbackVectorICSlot(i)) != other_spec->GetKind(i)) {
- return true;
- }
- }
- return false;
-}
-
-
// This logic is copied from
// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
-static bool ClearLogic(Heap* heap) {
- return FLAG_cleanup_code_caches_at_gc &&
- heap->isolate()->serializer_enabled();
+static bool ClearLogic(Isolate* isolate) {
+ return FLAG_cleanup_code_caches_at_gc && isolate->serializer_enabled();
}
void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
bool force_clear) {
- int slots = Slots();
- Heap* heap = GetIsolate()->heap();
+ Isolate* isolate = GetIsolate();
- if (!force_clear && !ClearLogic(heap)) return;
+ if (!force_clear && !ClearLogic(isolate)) return;
Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(heap);
- for (int i = 0; i < slots; i++) {
- FeedbackVectorSlot slot(i);
- Object* obj = Get(slot);
- if (obj->IsHeapObject()) {
- InstanceType instance_type =
- HeapObject::cast(obj)->map()->instance_type();
- // AllocationSites are exempt from clearing. They don't store Maps
- // or Code pointers which can cause memory leaks if not cleared
- // regularly.
- if (instance_type != ALLOCATION_SITE_TYPE) {
- Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
- }
- }
- }
-}
+ TypeFeedbackVector::RawUninitializedSentinel(isolate);
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
-void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
- bool force_clear) {
- Heap* heap = GetIsolate()->heap();
-
- if (!force_clear && !ClearLogic(heap)) return;
-
- int slots = ICSlots();
- Code* host = shared->code();
- Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(heap);
- for (int i = 0; i < slots; i++) {
- FeedbackVectorICSlot slot(i);
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- FeedbackVectorSlotKind kind = GetKind(slot);
switch (kind) {
case FeedbackVectorSlotKind::CALL_IC: {
CallICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::LOAD_IC: {
LoadICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
KeyedLoadICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::STORE_IC: {
- DCHECK(FLAG_vector_stores);
StoreICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::KEYED_STORE_IC: {
- DCHECK(FLAG_vector_stores);
KeyedStoreICNexus nexus(this, slot);
- nexus.Clear(host);
+ nexus.Clear(shared->code());
+ break;
+ }
+ case FeedbackVectorSlotKind::GENERAL: {
+ if (obj->IsHeapObject()) {
+ InstanceType instance_type =
+ HeapObject::cast(obj)->map()->instance_type();
+ // AllocationSites are exempt from clearing. They don't store Maps
+ // or Code pointers which can cause memory leaks if not cleared
+ // regularly.
+ if (instance_type != ALLOCATION_SITE_TYPE) {
+ Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ }
+ }
break;
}
- case FeedbackVectorSlotKind::UNUSED:
+ case FeedbackVectorSlotKind::INVALID:
case FeedbackVectorSlotKind::KINDS_NUMBER:
UNREACHABLE();
break;
@@ -247,7 +243,6 @@ void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
// static
void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
- DCHECK(FLAG_vector_stores);
SharedFunctionInfo::Iterator iterator(isolate);
SharedFunctionInfo* shared;
while ((shared = iterator.Next())) {
@@ -258,22 +253,21 @@ void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
- Heap* heap = GetIsolate()->heap();
+ Isolate* isolate = GetIsolate();
- int slots = ICSlots();
Code* host = shared->code();
Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(heap);
- for (int i = 0; i < slots; i++) {
- FeedbackVectorICSlot slot(i);
+ TypeFeedbackVector::RawUninitializedSentinel(isolate);
+
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+ if (kind != FeedbackVectorSlotKind::KEYED_STORE_IC) continue;
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- FeedbackVectorSlotKind kind = GetKind(slot);
- if (kind == FeedbackVectorSlotKind::KEYED_STORE_IC) {
- DCHECK(FLAG_vector_stores);
- KeyedStoreICNexus nexus(this, slot);
- nexus.Clear(host);
- }
+ KeyedStoreICNexus nexus(this, slot);
+ nexus.Clear(host);
}
}
}
@@ -281,29 +275,7 @@ void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
// static
Handle<TypeFeedbackVector> TypeFeedbackVector::DummyVector(Isolate* isolate) {
- return Handle<TypeFeedbackVector>::cast(isolate->factory()->dummy_vector());
-}
-
-
-const char* TypeFeedbackVector::Kind2String(FeedbackVectorSlotKind kind) {
- switch (kind) {
- case FeedbackVectorSlotKind::UNUSED:
- return "UNUSED";
- case FeedbackVectorSlotKind::CALL_IC:
- return "CALL_IC";
- case FeedbackVectorSlotKind::LOAD_IC:
- return "LOAD_IC";
- case FeedbackVectorSlotKind::KEYED_LOAD_IC:
- return "KEYED_LOAD_IC";
- case FeedbackVectorSlotKind::STORE_IC:
- return "STORE_IC";
- case FeedbackVectorSlotKind::KEYED_STORE_IC:
- return "KEYED_STORE_IC";
- case FeedbackVectorSlotKind::KINDS_NUMBER:
- break;
- }
- UNREACHABLE();
- return "?";
+ return isolate->factory()->dummy_vector();
}
@@ -520,6 +492,19 @@ void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
}
+void CallICNexus::ConfigureMegamorphic() {
+ FeedbackNexus::ConfigureMegamorphic();
+}
+
+
+void CallICNexus::ConfigureMegamorphic(int call_count) {
+ SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(call_count * kCallCountIncrement),
+ SKIP_WRITE_BARRIER);
+}
+
+
void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
Handle<Code> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
@@ -536,8 +521,8 @@ void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
SetFeedback(*cell);
SetFeedbackExtra(*handler);
} else {
- SetFeedback(*name);
Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
array->set(0, *cell);
array->set(1, *handler);
}
@@ -560,8 +545,8 @@ void KeyedStoreICNexus::ConfigureMonomorphic(Handle<Name> name,
SetFeedback(*cell);
SetFeedbackExtra(*handler);
} else {
- SetFeedback(*name);
Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
array->set(0, *cell);
array->set(1, *handler);
}
@@ -590,8 +575,8 @@ void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
} else {
- SetFeedback(*name);
array = EnsureExtraArrayOfSize(receiver_count * 2);
+ SetFeedback(*name);
}
InstallHandlers(array, maps, handlers);
@@ -620,8 +605,8 @@ void KeyedStoreICNexus::ConfigurePolymorphic(Handle<Name> name,
SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
} else {
- SetFeedback(*name);
array = EnsureExtraArrayOfSize(receiver_count * 2);
+ SetFeedback(*name);
}
InstallHandlers(array, maps, handlers);
@@ -657,9 +642,10 @@ void KeyedStoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray() || feedback->IsString()) {
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ if (feedback->IsFixedArray() || is_named_feedback) {
int found = 0;
- if (feedback->IsString()) {
+ if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
@@ -694,8 +680,9 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray() || feedback->IsString()) {
- if (feedback->IsString()) {
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ if (feedback->IsFixedArray() || is_named_feedback) {
+ if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
@@ -732,8 +719,9 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
Object* feedback = GetFeedback();
int count = 0;
- if (feedback->IsFixedArray() || feedback->IsString()) {
- if (feedback->IsString()) {
+ bool is_named_feedback = IsPropertyNameFeedback(feedback);
+ if (feedback->IsFixedArray() || is_named_feedback) {
+ if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
@@ -777,7 +765,7 @@ void KeyedLoadICNexus::Clear(Code* host) {
Name* KeyedLoadICNexus::FindFirstName() const {
Object* feedback = GetFeedback();
- if (feedback->IsString()) {
+ if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback);
}
return NULL;
@@ -786,7 +774,7 @@ Name* KeyedLoadICNexus::FindFirstName() const {
Name* KeyedStoreICNexus::FindFirstName() const {
Object* feedback = GetFeedback();
- if (feedback->IsString()) {
+ if (IsPropertyNameFeedback(feedback)) {
return Name::cast(feedback);
}
return NULL;
diff --git a/chromium/v8/src/type-feedback-vector.h b/chromium/v8/src/type-feedback-vector.h
index 5c28fca55fe..d83b77fa3e6 100644
--- a/chromium/v8/src/type-feedback-vector.h
+++ b/chromium/v8/src/type-feedback-vector.h
@@ -7,10 +7,8 @@
#include <vector>
-#include "src/checks.h"
+#include "src/base/logging.h"
#include "src/elements-kind.h"
-#include "src/heap/heap.h"
-#include "src/isolate.h"
#include "src/objects.h"
#include "src/zone-containers.h"
@@ -19,13 +17,20 @@ namespace internal {
enum class FeedbackVectorSlotKind {
- UNUSED,
+  // This kind means that the slot points into the middle of another slot
+  // that occupies more than one feedback vector element.
+ // There must be no such slots in the system.
+ INVALID,
+
CALL_IC,
LOAD_IC,
KEYED_LOAD_IC,
STORE_IC,
KEYED_STORE_IC,
+ // This is a general purpose slot that occupies one feedback vector element.
+ GENERAL,
+
KINDS_NUMBER // Last value indicating number of kinds.
};
@@ -33,168 +38,177 @@ enum class FeedbackVectorSlotKind {
std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
-class StaticFeedbackVectorSpec {
+template <typename Derived>
+class FeedbackVectorSpecBase {
public:
- StaticFeedbackVectorSpec() : slots_(0), ic_slots_(0), ic_kinds_(NULL) {}
- StaticFeedbackVectorSpec(int slots, int ic_slots,
- FeedbackVectorSlotKind* ic_slot_kinds)
- : slots_(slots), ic_slots_(ic_slots), ic_kinds_(ic_slot_kinds) {}
+ inline FeedbackVectorSlot AddSlot(FeedbackVectorSlotKind kind);
- int slots() const { return slots_; }
+ FeedbackVectorSlot AddCallICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ }
- int ic_slots() const { return ic_slots_; }
+ FeedbackVectorSlot AddLoadICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+ }
- FeedbackVectorSlotKind GetKind(int ic_slot) const {
- DCHECK(ic_slots_ > 0 && ic_slot < ic_slots_);
- return ic_kinds_[ic_slot];
+ FeedbackVectorSlot AddKeyedLoadICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
}
- private:
- int slots_;
- int ic_slots_;
- FeedbackVectorSlotKind* ic_kinds_;
+ FeedbackVectorSlot AddStoreICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::STORE_IC);
+ }
+
+ FeedbackVectorSlot AddKeyedStoreICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::KEYED_STORE_IC);
+ }
+
+ FeedbackVectorSlot AddGeneralSlot() {
+ return AddSlot(FeedbackVectorSlotKind::GENERAL);
+ }
};
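+
+// Usage sketch (zone is a hypothetical Zone*):
+//   FeedbackVectorSpec spec(zone);
+//   FeedbackVectorSlot call_slot = spec.AddCallICSlot();   // occupies 2
+//   FeedbackVectorSlot stub_slot = spec.AddGeneralSlot();  // occupies 1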
-class FeedbackVectorSpec {
+class StaticFeedbackVectorSpec
+ : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
public:
- explicit FeedbackVectorSpec(Zone* zone)
- : slots_(0), ic_slots_(0), ic_slot_kinds_(zone) {}
+ StaticFeedbackVectorSpec() : slots_(0) {}
int slots() const { return slots_; }
- void increase_slots(int count) {
- DCHECK_LT(0, count);
- slots_ += count;
- }
- int ic_slots() const { return ic_slots_; }
- void increase_ic_slots(int count) {
- DCHECK_LT(0, count);
- ic_slots_ += count;
- ic_slot_kinds_.resize(ic_slots_);
+ FeedbackVectorSlotKind GetKind(int slot) const {
+ DCHECK(slot >= 0 && slot < slots_);
+ return kinds_[slot];
}
- FeedbackVectorICSlot AddSlot(FeedbackVectorSlotKind kind) {
- int slot = ic_slots_;
- increase_ic_slots(1);
- ic_slot_kinds_[slot] = static_cast<unsigned char>(kind);
- return FeedbackVectorICSlot(slot);
- }
+ private:
+ friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
- FeedbackVectorICSlot AddSlots(FeedbackVectorSlotKind kind, int count) {
- int slot = ic_slots_;
- increase_ic_slots(count);
- for (int i = 0; i < count; i++) {
- ic_slot_kinds_[slot + i] = static_cast<unsigned char>(kind);
- }
- return FeedbackVectorICSlot(slot);
+ void append(FeedbackVectorSlotKind kind) {
+ DCHECK(slots_ < kMaxLength);
+ kinds_[slots_++] = kind;
}
- FeedbackVectorICSlot AddCallICSlot() {
- return AddSlot(FeedbackVectorSlotKind::CALL_IC);
- }
+ static const int kMaxLength = 12;
- FeedbackVectorICSlot AddLoadICSlot() {
- return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
- }
+ int slots_;
+ FeedbackVectorSlotKind kinds_[kMaxLength];
+};
- FeedbackVectorICSlot AddLoadICSlots(int count) {
- return AddSlots(FeedbackVectorSlotKind::LOAD_IC, count);
- }
- FeedbackVectorICSlot AddKeyedLoadICSlot() {
- return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
+class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
+ public:
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+ slot_kinds_.reserve(16);
}
- FeedbackVectorICSlot AddStoreICSlot() {
- return AddSlot(FeedbackVectorSlotKind::STORE_IC);
- }
+ int slots() const { return static_cast<int>(slot_kinds_.size()); }
- FeedbackVectorSlot AddStubSlot() {
- int slot = slots_;
- increase_slots(1);
- return FeedbackVectorSlot(slot);
+ FeedbackVectorSlotKind GetKind(int slot) const {
+ return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
}
- FeedbackVectorSlot AddStubSlots(int count) {
- int slot = slots_;
- increase_slots(count);
- return FeedbackVectorSlot(slot);
- }
+ private:
+ friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
- FeedbackVectorSlotKind GetKind(int ic_slot) const {
- return static_cast<FeedbackVectorSlotKind>(ic_slot_kinds_.at(ic_slot));
+ void append(FeedbackVectorSlotKind kind) {
+ slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
+ ZoneVector<unsigned char> slot_kinds_;
+};
+
+
+// The shape of the TypeFeedbackMetadata is an array with:
+// 0: slot_count
+// 1..N: slot kinds packed into a bit vector
+//
+class TypeFeedbackMetadata : public FixedArray {
+ public:
+ // Casting.
+ static inline TypeFeedbackMetadata* cast(Object* obj);
+
+ static const int kSlotsCountIndex = 0;
+ static const int kReservedIndexCount = 1;
+
+ // Returns number of feedback vector elements used by given slot kind.
+ static inline int GetSlotSize(FeedbackVectorSlotKind kind);
+
+ bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+
+ // Returns number of slots in the vector.
+ inline int slot_count() const;
+
+ // Returns slot kind for given slot.
+ FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+
+ template <typename Spec>
+ static Handle<TypeFeedbackMetadata> New(Isolate* isolate, const Spec* spec);
+
+#ifdef OBJECT_PRINT
+ // For gdb debugging.
+ void Print();
+#endif // OBJECT_PRINT
+
+ DECLARE_PRINTER(TypeFeedbackMetadata)
+
+ static const char* Kind2String(FeedbackVectorSlotKind kind);
+
private:
- int slots_;
- int ic_slots_;
- ZoneVector<unsigned char> ic_slot_kinds_;
+ static const int kFeedbackVectorSlotKindBits = 3;
+ STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
+ (1 << kFeedbackVectorSlotKindBits));
+
+ void SetKind(FeedbackVectorSlot slot, FeedbackVectorSlotKind kind);
+
+ typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
+ kSmiValueSize, uint32_t> VectorICComputer;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackMetadata);
};
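
TypeFeedbackMetadata stores each slot kind in kFeedbackVectorSlotKindBits = 3 bits, packed several kinds per word by the VectorICComputer typedef. A hedged sketch of that packing arithmetic on plain 32-bit words (the real BitSetComputer also deals with Smi encoding, so treat this as the idea rather than the implementation):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Pack 3-bit slot kinds into 32-bit words, kItemsPerWord kinds per word.
    constexpr int kKindBits = 3;
    constexpr int kItemsPerWord = 32 / kKindBits;  // 10 kinds per uint32_t

    int word_index(int slot) { return slot / kItemsPerWord; }
    int shift(int slot) { return (slot % kItemsPerWord) * kKindBits; }

    void set_kind(std::vector<uint32_t>& words, int slot, uint32_t kind) {
      assert(kind < (1u << kKindBits));  // KINDS_NUMBER must fit in 3 bits
      uint32_t& w = words[word_index(slot)];
      w = (w & ~(0x7u << shift(slot))) | (kind << shift(slot));
    }

    uint32_t get_kind(const std::vector<uint32_t>& words, int slot) {
      return (words[word_index(slot)] >> shift(slot)) & 0x7u;
    }

    int main() {
      std::vector<uint32_t> words(2, 0);  // room for 20 slots
      set_kind(words, 0, 2);   // e.g. a LOAD_IC-like kind
      set_kind(words, 11, 5);  // lands in the second word
      assert(get_kind(words, 0) == 2 && get_kind(words, 11) == 5);
    }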
// The shape of the TypeFeedbackVector is an array with:
-// 0: first_ic_slot_index (== length() if no ic slots are present)
-// 1: ics_with_types
-// 2: ics_with_generic_info
-// 3: type information for ic slots, if any
-// ...
-// N: first feedback slot (N >= 3)
+// 0: feedback metadata
+// 1: feedback slot #0
// ...
+// 1 + slot_count - 1: feedback slot #(slot_count-1)
//
class TypeFeedbackVector : public FixedArray {
public:
// Casting.
static inline TypeFeedbackVector* cast(Object* obj);
- static const int kReservedIndexCount = 3;
- static const int kFirstICSlotIndex = 0;
- static const int kWithTypesIndex = 1;
- static const int kGenericCountIndex = 2;
+ static const int kMetadataIndex = 0;
+ static const int kReservedIndexCount = 1;
- static int elements_per_ic_slot() { return 2; }
+ inline void ComputeCounts(int* with_type_info, int* generic);
- inline int first_ic_slot_index() const;
- inline int ic_with_type_info_count();
- inline void change_ic_with_type_info_count(int delta);
- inline int ic_generic_count();
- inline void change_ic_generic_count(int delta);
- inline int ic_metadata_length() const;
+ inline bool is_empty() const;
- bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+ // Returns number of slots in the vector.
+ inline int slot_count() const;
- inline int Slots() const;
- inline int ICSlots() const;
+ inline TypeFeedbackMetadata* metadata() const;
- // Conversion from a slot or ic slot to an integer index to the underlying
- // array.
+ // Conversion from a slot to an integer index to the underlying array.
inline int GetIndex(FeedbackVectorSlot slot) const;
- inline int GetIndex(FeedbackVectorICSlot slot) const;
-
- template <typename Spec>
- static int GetIndexFromSpec(const Spec* spec, FeedbackVectorSlot slot);
- template <typename Spec>
- static int GetIndexFromSpec(const Spec* spec, FeedbackVectorICSlot slot);
+ static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
+ FeedbackVectorSlot slot);
- // Conversion from an integer index to either a slot or an ic slot. The caller
- // should know what kind she expects.
+ // Conversion from an integer index to the underlying array to a slot.
inline FeedbackVectorSlot ToSlot(int index) const;
- inline FeedbackVectorICSlot ToICSlot(int index) const;
inline Object* Get(FeedbackVectorSlot slot) const;
inline void Set(FeedbackVectorSlot slot, Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline Object* Get(FeedbackVectorICSlot slot) const;
- inline void Set(FeedbackVectorICSlot slot, Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- // IC slots need metadata to recognize the type of IC.
- FeedbackVectorSlotKind GetKind(FeedbackVectorICSlot slot) const;
+ // Returns slot kind for given slot.
+ inline FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
- template <typename Spec>
- static Handle<TypeFeedbackVector> Allocate(Isolate* isolate,
- const Spec* spec);
+ static Handle<TypeFeedbackVector> New(Isolate* isolate,
+ Handle<TypeFeedbackMetadata> metadata);
static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
Handle<TypeFeedbackVector> vector);
@@ -206,19 +220,13 @@ class TypeFeedbackVector : public FixedArray {
DECLARE_PRINTER(TypeFeedbackVector)
- // Clears the vector slots and the vector ic slots.
+ // Clears the vector slots.
void ClearSlots(SharedFunctionInfo* shared) { ClearSlotsImpl(shared, true); }
+
void ClearSlotsAtGCTime(SharedFunctionInfo* shared) {
ClearSlotsImpl(shared, false);
}
- void ClearICSlots(SharedFunctionInfo* shared) {
- ClearICSlotsImpl(shared, true);
- }
- void ClearICSlotsAtGCTime(SharedFunctionInfo* shared) {
- ClearICSlotsImpl(shared, false);
- }
-
static void ClearAllKeyedStoreICs(Isolate* isolate);
void ClearKeyedStoreICs(SharedFunctionInfo* shared);
@@ -233,37 +241,21 @@ class TypeFeedbackVector : public FixedArray {
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
- static inline Object* RawUninitializedSentinel(Heap* heap);
+ static inline Object* RawUninitializedSentinel(Isolate* isolate);
static const int kDummyLoadICSlot = 0;
- static const int kDummyKeyedLoadICSlot = 1;
- static const int kDummyStoreICSlot = 2;
- static const int kDummyKeyedStoreICSlot = 3;
+ static const int kDummyKeyedLoadICSlot = 2;
+ static const int kDummyStoreICSlot = 4;
+ static const int kDummyKeyedStoreICSlot = 6;
static Handle<TypeFeedbackVector> DummyVector(Isolate* isolate);
- static FeedbackVectorICSlot DummySlot(int dummyIndex) {
+ static FeedbackVectorSlot DummySlot(int dummyIndex) {
DCHECK(dummyIndex >= 0 && dummyIndex <= kDummyKeyedStoreICSlot);
- return FeedbackVectorICSlot(dummyIndex);
+ return FeedbackVectorSlot(dummyIndex);
}
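
The dummy slot constants change from consecutive integers (0, 1, 2, 3) to a stride of two (0, 2, 4, 6). The assumption encoded below, consistent with the GetSlotSize() declaration above, is that every IC slot kind now occupies two vector elements (feedback plus feedback-extra), so the dummy slots must be spaced apart accordingly:

    #include <cassert>

    // Assumed per this patch: IC slot kinds occupy two elements each.
    constexpr int kElementsPerICSlot = 2;

    int main() {
      int dummy[4];
      for (int i = 0; i < 4; i++) dummy[i] = i * kElementsPerICSlot;
      // Matches the new kDummy*ICSlot constants: 0, 2, 4, 6.
      assert(dummy[1] == 2 && dummy[2] == 4 && dummy[3] == 6);
    }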
- static int PushAppliedArgumentsIndex();
- static Handle<TypeFeedbackVector> CreatePushAppliedArgumentsVector(
- Isolate* isolate);
-
- static const char* Kind2String(FeedbackVectorSlotKind kind);
-
private:
- static const int kFeedbackVectorSlotKindBits = 3;
- STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
- (1 << kFeedbackVectorSlotKindBits));
-
- void SetKind(FeedbackVectorICSlot slot, FeedbackVectorSlotKind kind);
-
- typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
- kSmiValueSize, uint32_t> VectorICComputer;
-
void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
- void ClearICSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
};
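
With the metadata split out, only one element stays reserved at the front of the vector (kReservedIndexCount = 1), so slot-to-array-index conversion reduces to a fixed offset. A sketch of the assumed GetIndex()/ToSlot() relationship; the real inline methods are not shown in this hunk and presumably also DCHECK bounds:

    #include <cassert>

    constexpr int kReservedIndexCount = 1;  // copied from the hunk above

    int GetIndex(int slot) { return kReservedIndexCount + slot; }
    int ToSlot(int index) { return index - kReservedIndexCount; }

    int main() {
      assert(GetIndex(0) == 1);          // slot #0 lives right after the metadata
      assert(ToSlot(GetIndex(7)) == 7);  // the conversion round-trips
    }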
@@ -284,13 +276,60 @@ STATIC_ASSERT(Name::kEmptyHashField == 0x3);
STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
+class TypeFeedbackMetadataIterator {
+ public:
+ explicit TypeFeedbackMetadataIterator(Handle<TypeFeedbackMetadata> metadata)
+ : metadata_handle_(metadata),
+ slot_(FeedbackVectorSlot(0)),
+ slot_kind_(FeedbackVectorSlotKind::INVALID) {}
+
+ explicit TypeFeedbackMetadataIterator(TypeFeedbackMetadata* metadata)
+ : metadata_(metadata),
+ slot_(FeedbackVectorSlot(0)),
+ slot_kind_(FeedbackVectorSlotKind::INVALID) {}
+
+ bool HasNext() const { return slot_.ToInt() < metadata()->slot_count(); }
+
+ FeedbackVectorSlot Next() {
+ DCHECK(HasNext());
+ FeedbackVectorSlot slot = slot_;
+ slot_kind_ = metadata()->GetKind(slot);
+ slot_ = FeedbackVectorSlot(slot_.ToInt() + entry_size());
+ return slot;
+ }
+
+ // Returns slot kind of the last slot returned by Next().
+ FeedbackVectorSlotKind kind() const {
+ DCHECK_NE(FeedbackVectorSlotKind::INVALID, slot_kind_);
+ DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, slot_kind_);
+ return slot_kind_;
+ }
+
+ // Returns entry size of the last slot returned by Next().
+ int entry_size() const { return TypeFeedbackMetadata::GetSlotSize(kind()); }
+
+ private:
+ TypeFeedbackMetadata* metadata() const {
+ return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
+ }
+
+ // The reason for having a handle and a raw pointer to the metadata is
+ // to have a single iterator implementation for both "handlified" and raw
+ // pointer use cases.
+ Handle<TypeFeedbackMetadata> metadata_handle_;
+ TypeFeedbackMetadata* metadata_;
+ FeedbackVectorSlot slot_;
+ FeedbackVectorSlotKind slot_kind_;
+};
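
Callers are expected to drive this as `while (iter.HasNext()) { slot = iter.Next(); ... }`, with slot ids advancing by entry_size() rather than by one. A self-contained mock of that contract; the kind values and sizes are invented, with kinds >= 1 standing in for IC kinds (two elements, per GetSlotSize) and kind 0 for GENERAL (one element):

    #include <cassert>
    #include <vector>

    struct MockIterator {
      std::vector<int> kinds_by_slot;  // kind stored at every slot id it covers
      int slot = 0;
      int last_kind = -1;

      bool HasNext() const {
        return slot < static_cast<int>(kinds_by_slot.size());
      }
      int Next() {
        int current = slot;
        last_kind = kinds_by_slot[current];  // remember kind of this slot...
        slot += entry_size();                // ...then advance by its size
        return current;
      }
      int entry_size() const { return last_kind >= 1 ? 2 : 1; }
    };

    int main() {
      // Layout: an IC-like slot at id 0 (covers ids 0-1), GENERAL at id 2.
      MockIterator it{{1, 1, 0}};
      assert(it.Next() == 0 && it.entry_size() == 2);
      assert(it.Next() == 2 && it.entry_size() == 1);
      assert(!it.HasNext());
    }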
+
+
// A FeedbackNexus is the combination of a TypeFeedbackVector and a slot.
// Derived classes customize the update and retrieval of feedback.
class FeedbackNexus {
public:
- FeedbackNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ FeedbackNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: vector_handle_(vector), vector_(NULL), slot_(slot) {}
- FeedbackNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ FeedbackNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: vector_(vector), slot_(slot) {}
virtual ~FeedbackNexus() {}
@@ -301,7 +340,7 @@ class FeedbackNexus {
TypeFeedbackVector* vector() const {
return vector_handle_.is_null() ? vector_ : *vector_handle_;
}
- FeedbackVectorICSlot slot() const { return slot_; }
+ FeedbackVectorSlot slot() const { return slot_; }
InlineCacheState ic_state() const { return StateFromFeedback(); }
Map* FindFirstMap() const {
@@ -327,9 +366,9 @@ class FeedbackNexus {
inline Object* GetFeedback() const;
inline Object* GetFeedbackExtra() const;
- protected:
inline Isolate* GetIsolate() const;
+ protected:
inline void SetFeedback(Object* feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void SetFeedbackExtra(Object* feedback_extra,
@@ -347,21 +386,21 @@ class FeedbackNexus {
// be done, like allocation.
Handle<TypeFeedbackVector> vector_handle_;
TypeFeedbackVector* vector_;
- FeedbackVectorICSlot slot_;
+ FeedbackVectorSlot slot_;
};
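
FeedbackNexus keeps both vector_handle_ and vector_, the same trick the iterator's comment spells out: one accessor serves GC-safe handlified callers and raw-pointer fast paths alike. A simplified sketch with a mocked Handle (not the real v8::internal::Handle):

    #include <cassert>

    struct Vector { int payload = 42; };

    template <typename T>
    struct Handle {  // stand-in for v8::internal::Handle
      T* location = nullptr;
      bool is_null() const { return location == nullptr; }
      T* operator*() const { return location; }
    };

    struct Nexus {
      explicit Nexus(Handle<Vector> h) : handle_(h), raw_(nullptr) {}
      explicit Nexus(Vector* raw) : raw_(raw) {}
      // Single accessor covers both construction paths.
      Vector* vector() const { return handle_.is_null() ? raw_ : *handle_; }
      Handle<Vector> handle_;
      Vector* raw_ = nullptr;
    };

    int main() {
      Vector v;
      Nexus from_raw(&v);
      Nexus from_handle(Handle<Vector>{&v});
      assert(from_raw.vector() == &v && from_handle.vector() == &v);
    }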
-class CallICNexus : public FeedbackNexus {
+class CallICNexus final : public FeedbackNexus {
public:
// Monomorphic call ICs store call counts. Platform code needs to increment
// the count appropriately (i.e., by 2).
static const int kCallCountIncrement = 2;
- CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
}
- CallICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ CallICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
}
@@ -370,17 +409,19 @@ class CallICNexus : public FeedbackNexus {
void ConfigureMonomorphicArray();
void ConfigureMonomorphic(Handle<JSFunction> function);
+ void ConfigureMegamorphic() final;
+ void ConfigureMegamorphic(int call_count);
- InlineCacheState StateFromFeedback() const override;
+ InlineCacheState StateFromFeedback() const final;
- int ExtractMaps(MapHandleList* maps) const override {
+ int ExtractMaps(MapHandleList* maps) const final {
// CallICs don't record map feedback.
return 0;
}
- MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const override {
+ MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(CodeHandleList* code_list, int length = -1) const override {
+ bool FindHandlers(CodeHandleList* code_list, int length = -1) const final {
return length == 0;
}
@@ -390,15 +431,15 @@ class CallICNexus : public FeedbackNexus {
class LoadICNexus : public FeedbackNexus {
public:
- LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
}
explicit LoadICNexus(Isolate* isolate)
- : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
- TypeFeedbackVector::DummySlot(
- TypeFeedbackVector::kDummyLoadICSlot)) {}
- LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot)) {}
+ LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
}
@@ -415,11 +456,11 @@ class LoadICNexus : public FeedbackNexus {
class KeyedLoadICNexus : public FeedbackNexus {
public:
- KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
- KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
@@ -440,15 +481,15 @@ class KeyedLoadICNexus : public FeedbackNexus {
class StoreICNexus : public FeedbackNexus {
public:
- StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+ StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
}
explicit StoreICNexus(Isolate* isolate)
- : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
- TypeFeedbackVector::DummySlot(
- TypeFeedbackVector::kDummyStoreICSlot)) {}
- StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot)) {}
+ StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
}
@@ -465,16 +506,15 @@ class StoreICNexus : public FeedbackNexus {
class KeyedStoreICNexus : public FeedbackNexus {
public:
- KeyedStoreICNexus(Handle<TypeFeedbackVector> vector,
- FeedbackVectorICSlot slot)
+ KeyedStoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
}
explicit KeyedStoreICNexus(Isolate* isolate)
- : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
- TypeFeedbackVector::DummySlot(
- TypeFeedbackVector::kDummyKeyedStoreICSlot)) {}
- KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+ : FeedbackNexus(
+ TypeFeedbackVector::DummyVector(isolate),
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)) {}
+ KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
}
@@ -497,7 +537,7 @@ class KeyedStoreICNexus : public FeedbackNexus {
InlineCacheState StateFromFeedback() const override;
Name* FindFirstName() const override;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPE_FEEDBACK_VECTOR_H_
diff --git a/chromium/v8/src/type-info.cc b/chromium/v8/src/type-info.cc
index ef5432176ba..a8a406efde9 100644
--- a/chromium/v8/src/type-info.cc
+++ b/chromium/v8/src/type-info.cc
@@ -4,7 +4,7 @@
#include "src/type-info.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/ic/ic.h"
@@ -55,27 +55,8 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
Handle<Object>::cast(isolate()->factory()->undefined_value());
Object* obj = feedback_vector_->Get(slot);
- // Slots do not embed direct pointers to functions. Instead a WeakCell is
- // always used.
- DCHECK(!obj->IsJSFunction());
- if (obj->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(obj);
- if (cell->cleared()) return undefined;
- obj = cell->value();
- }
-
- return Handle<Object>(obj, isolate());
-}
-
-
-Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
- DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
- Handle<Object> undefined =
- Handle<Object>::cast(isolate()->factory()->undefined_value());
- Object* obj = feedback_vector_->Get(slot);
-
- // Vector-based ICs do not embed direct pointers to maps, functions.
- // Instead a WeakCell is always used.
+ // Slots do not embed direct pointers to maps, functions. Instead
+ // a WeakCell is always used.
if (obj->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(obj);
if (cell->cleared()) return undefined;
@@ -91,21 +72,8 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
}
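
The surviving GetInfo() keeps the invariant the rewritten comment describes: a slot never points straight at a map or function, only through a WeakCell that the GC may clear. A hedged model of the unwrap-or-undefined logic, with the heap objects reduced to plain pointers:

    #include <cassert>

    struct WeakCell {
      void* value = nullptr;
      bool cleared() const { return value == nullptr; }
    };

    void* GetInfo(bool is_weak_cell, WeakCell* cell, void* direct,
                  void* undefined) {
      if (is_weak_cell) {
        if (cell->cleared()) return undefined;  // target died; report no info
        return cell->value;                     // unwrap the weak reference
      }
      return direct;  // non-cell feedback is returned as-is
    }

    int main() {
      int target, undef;
      WeakCell live{&target}, dead{nullptr};
      assert(GetInfo(true, &live, nullptr, &undef) == &target);
      assert(GetInfo(true, &dead, nullptr, &undef) == &undef);
    }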
-InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(TypeFeedbackId id) {
- Handle<Object> maybe_code = GetInfo(id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- if (code->is_inline_cache_stub()) return code->ic_state();
- }
-
- // If we can't find an IC, assume we've seen *something*, but we don't know
- // what. PREMONOMORPHIC roughly encodes this meaning.
- return PREMONOMORPHIC;
-}
-
-
InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
if (!slot.IsInvalid()) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::LOAD_IC) {
@@ -123,15 +91,7 @@ InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
}
-bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
- Handle<Object> maybe_code = GetInfo(ast_id);
- if (!maybe_code->IsCode()) return false;
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- return code->ic_state() == UNINITIALIZED;
-}
-
-
-bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorICSlot slot) {
+bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
if (!slot.IsInvalid()) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::STORE_IC) {
@@ -146,7 +106,7 @@ bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorICSlot slot) {
}
-bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorICSlot slot) {
+bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsUndefined() ||
value.is_identical_to(
@@ -154,7 +114,7 @@ bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorICSlot slot) {
}
-bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorICSlot slot) {
+bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsAllocationSite() || value->IsJSFunction();
}
@@ -176,25 +136,7 @@ byte TypeFeedbackOracle::ForInType(FeedbackVectorSlot feedback_vector_slot) {
void TypeFeedbackOracle::GetStoreModeAndKeyType(
- TypeFeedbackId ast_id, KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type) {
- Handle<Object> maybe_code = GetInfo(ast_id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- if (code->kind() == Code::KEYED_STORE_IC) {
- ExtraICState extra_ic_state = code->extra_ic_state();
- *store_mode = KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state);
- *key_type = KeyedStoreIC::GetKeyType(extra_ic_state);
- return;
- }
- }
- *store_mode = STANDARD_STORE;
- *key_type = ELEMENT;
-}
-
-
-void TypeFeedbackOracle::GetStoreModeAndKeyType(
- FeedbackVectorICSlot slot, KeyedAccessStoreMode* store_mode,
+ FeedbackVectorSlot slot, KeyedAccessStoreMode* store_mode,
IcCheckType* key_type) {
if (!slot.IsInvalid() &&
feedback_vector_->GetKind(slot) ==
@@ -209,8 +151,7 @@ void TypeFeedbackOracle::GetStoreModeAndKeyType(
}
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(
- FeedbackVectorICSlot slot) {
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<JSFunction>(isolate()->native_context()->array_function());
@@ -233,7 +174,7 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(
Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
- FeedbackVectorICSlot slot) {
+ FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
if (info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
@@ -252,12 +193,6 @@ Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
}
-bool TypeFeedbackOracle::LoadIsBuiltin(
- TypeFeedbackId id, Builtins::Name builtin) {
- return *GetInfo(id) == isolate()->builtins()->builtin(builtin);
-}
-
-
void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Type** left_type,
Type** right_type,
@@ -310,9 +245,9 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
BinaryOpICState state(isolate(), code->extra_ic_state());
DCHECK_EQ(op, state.op());
- *left = state.GetLeftType(zone());
- *right = state.GetRightType(zone());
- *result = state.GetResultType(zone());
+ *left = state.GetLeftType();
+ *right = state.GetRightType();
+ *result = state.GetResultType();
*fixed_right_arg = state.fixed_right_arg();
AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
@@ -330,7 +265,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(isolate(), code->extra_ic_state());
- return state.GetLeftType(zone());
+ return state.GetLeftType();
}
@@ -343,7 +278,7 @@ bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
}
-void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -356,7 +291,7 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
- FeedbackVectorICSlot slot, SmallMapList* receiver_types, bool* is_string,
+ FeedbackVectorSlot slot, SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type) {
receiver_types->Clear();
if (slot.IsInvalid()) {
@@ -371,16 +306,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
}
-void TypeFeedbackOracle::AssignmentReceiverTypes(TypeFeedbackId id,
- Handle<Name> name,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
- CollectReceiverTypes(id, name, flags, receiver_types);
-}
-
-
-void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -390,16 +316,7 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorICSlot slot,
void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
- TypeFeedbackId id, SmallMapList* receiver_types,
- KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
- receiver_types->Clear();
- CollectReceiverTypes(id, receiver_types);
- GetStoreModeAndKeyType(id, store_mode, key_type);
-}
-
-
-void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
- FeedbackVectorICSlot slot, SmallMapList* receiver_types,
+ FeedbackVectorSlot slot, SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
receiver_types->Clear();
CollectReceiverTypes(slot, receiver_types);
@@ -407,21 +324,14 @@ void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
}
-void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- CollectReceiverTypes(id, receiver_types);
-}
-
-
-void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types) {
receiver_types->Clear();
if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
}
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
Code::Flags flags,
SmallMapList* types) {
@@ -430,19 +340,6 @@ void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
}
-void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
- Handle<Name> name,
- Code::Flags flags,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (object->IsUndefined() || object->IsSmi()) return;
-
- DCHECK(object->IsCode());
- Handle<Code> code(Handle<Code>::cast(object));
- CollectReceiverTypes<Code>(*code, name, flags, types);
-}
-
-
template <class T>
void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
Code::Flags flags,
@@ -458,16 +355,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
}
-void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (!object->IsCode()) return;
- Handle<Code> code = Handle<Code>::cast(object);
- CollectReceiverTypes<Code>(*code, types);
-}
-
-
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* types) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::STORE_IC) {
diff --git a/chromium/v8/src/type-info.h b/chromium/v8/src/type-info.h
index 96cc39f0079..13a7f88b660 100644
--- a/chromium/v8/src/type-info.h
+++ b/chromium/v8/src/type-info.h
@@ -8,7 +8,7 @@
#include "src/allocation.h"
#include "src/contexts.h"
#include "src/globals.h"
-#include "src/token.h"
+#include "src/parsing/token.h"
#include "src/types.h"
#include "src/zone.h"
@@ -25,13 +25,10 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<TypeFeedbackVector> feedback_vector,
Handle<Context> native_context);
- InlineCacheState LoadInlineCacheState(TypeFeedbackId id);
- InlineCacheState LoadInlineCacheState(FeedbackVectorICSlot slot);
- bool StoreIsUninitialized(TypeFeedbackId id);
- bool StoreIsUninitialized(FeedbackVectorICSlot slot);
- bool CallIsUninitialized(FeedbackVectorICSlot slot);
- bool CallIsMonomorphic(FeedbackVectorICSlot slot);
- bool KeyedArrayCallIsHoley(TypeFeedbackId id);
+ InlineCacheState LoadInlineCacheState(FeedbackVectorSlot slot);
+ bool StoreIsUninitialized(FeedbackVectorSlot slot);
+ bool CallIsUninitialized(FeedbackVectorSlot slot);
+ bool CallIsMonomorphic(FeedbackVectorSlot slot);
bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
@@ -40,38 +37,25 @@ class TypeFeedbackOracle: public ZoneObject {
// be possible.
byte ForInType(FeedbackVectorSlot feedback_vector_slot);
- void GetStoreModeAndKeyType(TypeFeedbackId id,
- KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type);
- void GetStoreModeAndKeyType(FeedbackVectorICSlot slot,
+ void GetStoreModeAndKeyType(FeedbackVectorSlot slot,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ void PropertyReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedPropertyReceiverTypes(FeedbackVectorICSlot slot,
+ void KeyedPropertyReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type);
- void AssignmentReceiverTypes(TypeFeedbackId id, Handle<Name> name,
- SmallMapList* receiver_types);
- void AssignmentReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ void AssignmentReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types,
- KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type);
- void KeyedAssignmentReceiverTypes(FeedbackVectorICSlot slot,
+ void KeyedAssignmentReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void CountReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types);
- void CountReceiverTypes(FeedbackVectorICSlot slot,
+ void CountReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types);
- void CollectReceiverTypes(FeedbackVectorICSlot slot, SmallMapList* types);
- void CollectReceiverTypes(TypeFeedbackId id,
- SmallMapList* types);
+ void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
template <class T>
void CollectReceiverTypes(T* obj, SmallMapList* types);
@@ -82,13 +66,11 @@ class TypeFeedbackOracle: public ZoneObject {
native_context;
}
- Handle<JSFunction> GetCallTarget(FeedbackVectorICSlot slot);
- Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorICSlot slot);
+ Handle<JSFunction> GetCallTarget(FeedbackVectorSlot slot);
+ Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorSlot slot);
Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
- bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
-
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
@@ -114,9 +96,7 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
- Code::Flags flags, SmallMapList* types);
- void CollectReceiverTypes(TypeFeedbackId id, Handle<Name> name,
+ void CollectReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
Code::Flags flags, SmallMapList* types);
template <class T>
void CollectReceiverTypes(T* obj, Handle<Name> name, Code::Flags flags,
@@ -143,7 +123,6 @@ class TypeFeedbackOracle: public ZoneObject {
// Returns an element from the type feedback vector. Returns undefined
// if there is no information.
Handle<Object> GetInfo(FeedbackVectorSlot slot);
- Handle<Object> GetInfo(FeedbackVectorICSlot slot);
private:
Handle<Context> native_context_;
@@ -155,6 +134,7 @@ class TypeFeedbackOracle: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPE_INFO_H_
diff --git a/chromium/v8/src/typedarray.js b/chromium/v8/src/typedarray.js
deleted file mode 100644
index b45d304514e..00000000000
--- a/chromium/v8/src/typedarray.js
+++ /dev/null
@@ -1,514 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalArray = global.Array;
-var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalDataView = global.DataView;
-var GlobalObject = global.Object;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-macro TYPED_ARRAYS(FUNCTION)
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-FUNCTION(1, Uint8Array, 1)
-FUNCTION(2, Int8Array, 1)
-FUNCTION(3, Uint16Array, 2)
-FUNCTION(4, Int16Array, 2)
-FUNCTION(5, Uint32Array, 4)
-FUNCTION(6, Int32Array, 4)
-FUNCTION(7, Float32Array, 4)
-FUNCTION(8, Float64Array, 8)
-FUNCTION(9, Uint8ClampedArray, 1)
-endmacro
-
-macro DECLARE_GLOBALS(INDEX, NAME, SIZE)
-var GlobalNAME = global.NAME;
-endmacro
-
-TYPED_ARRAYS(DECLARE_GLOBALS)
-
-var ToNumber;
-
-utils.Import(function(from) {
- ToNumber = from.ToNumber;
-});
-
-var InternalArray = utils.InternalArray;
-
-// --------------- Typed Arrays ---------------------
-
-macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
-function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- if (!IS_UNDEFINED(byteOffset)) {
- byteOffset =
- $toPositiveInteger(byteOffset, kInvalidTypedArrayLength);
- }
- if (!IS_UNDEFINED(length)) {
- length = $toPositiveInteger(length, kInvalidTypedArrayLength);
- }
-
- var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
- var offset;
- if (IS_UNDEFINED(byteOffset)) {
- offset = 0;
- } else {
- offset = byteOffset;
-
- if (offset % ELEMENT_SIZE !== 0) {
- throw MakeRangeError(kInvalidTypedArrayAlignment,
- "start offset", "NAME", ELEMENT_SIZE);
- }
- if (offset > bufferByteLength) {
- throw MakeRangeError(kInvalidTypedArrayOffset);
- }
- }
-
- var newByteLength;
- var newLength;
- if (IS_UNDEFINED(length)) {
- if (bufferByteLength % ELEMENT_SIZE !== 0) {
- throw MakeRangeError(kInvalidTypedArrayAlignment,
- "byte length", "NAME", ELEMENT_SIZE);
- }
- newByteLength = bufferByteLength - offset;
- newLength = newByteLength / ELEMENT_SIZE;
- } else {
- var newLength = length;
- newByteLength = newLength * ELEMENT_SIZE;
- }
- if ((offset + newByteLength > bufferByteLength)
- || (newLength > %_MaxSmi())) {
- throw MakeRangeError(kInvalidTypedArrayLength);
- }
- %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength, true);
-}
-
-function NAMEConstructByLength(obj, length) {
- var l = IS_UNDEFINED(length) ?
- 0 : $toPositiveInteger(length, kInvalidTypedArrayLength);
- if (l > %_MaxSmi()) {
- throw MakeRangeError(kInvalidTypedArrayLength);
- }
- var byteLength = l * ELEMENT_SIZE;
- if (byteLength > %_TypedArrayMaxSizeInHeap()) {
- var buffer = new GlobalArrayBuffer(byteLength);
- %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength, true);
- } else {
- %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength, true);
- }
-}
-
-function NAMEConstructByArrayLike(obj, arrayLike) {
- var length = arrayLike.length;
- var l = $toPositiveInteger(length, kInvalidTypedArrayLength);
-
- if (l > %_MaxSmi()) {
- throw MakeRangeError(kInvalidTypedArrayLength);
- }
- var initialized = false;
- var byteLength = l * ELEMENT_SIZE;
- if (byteLength <= %_TypedArrayMaxSizeInHeap()) {
- %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength, false);
- } else {
- initialized =
- %TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l);
- }
- if (!initialized) {
- for (var i = 0; i < l; i++) {
- // It is crucial that we let any exceptions from arrayLike[i]
- // propagate outside the function.
- obj[i] = arrayLike[i];
- }
- }
-}
-
-function NAMEConstructByIterable(obj, iterable, iteratorFn) {
- var list = new InternalArray();
- // Reading the Symbol.iterator property of iterable twice would be
- // observable with getters, so instead, we call the function which
- // was already looked up, and wrap it in another iterable. The
- // __proto__ of the new iterable is set to null to avoid any chance
- // of modifications to Object.prototype being observable here.
- var iterator = %_Call(iteratorFn, iterable);
- var newIterable = {
- __proto__: null
- };
- // TODO(littledan): Computed properties don't work yet in nosnap.
- // Rephrase when they do.
- newIterable[iteratorSymbol] = function() { return iterator; }
- for (var value of newIterable) {
- list.push(value);
- }
- NAMEConstructByArrayLike(obj, list);
-}
-
-function NAMEConstructor(arg1, arg2, arg3) {
- if (%_IsConstructCall()) {
- if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
- NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
- } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
- IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
- NAMEConstructByLength(this, arg1);
- } else {
- var iteratorFn = arg1[iteratorSymbol];
- if (IS_UNDEFINED(iteratorFn) || iteratorFn === $arrayValues) {
- NAMEConstructByArrayLike(this, arg1);
- } else {
- NAMEConstructByIterable(this, arg1, iteratorFn);
- }
- }
- } else {
- throw MakeTypeError(kConstructorNotFunction, "NAME")
- }
-}
-
-function NAME_GetBuffer() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.buffer", this);
- }
- return %TypedArrayGetBuffer(this);
-}
-
-function NAME_GetByteLength() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.byteLength", this);
- }
- return %_ArrayBufferViewGetByteLength(this);
-}
-
-function NAME_GetByteOffset() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.byteOffset", this);
- }
- return %_ArrayBufferViewGetByteOffset(this);
-}
-
-function NAME_GetLength() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.length", this);
- }
- return %_TypedArrayGetLength(this);
-}
-
-function NAMESubArray(begin, end) {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.subarray", this);
- }
- var beginInt = TO_INTEGER(begin);
- if (!IS_UNDEFINED(end)) {
- var endInt = TO_INTEGER(end);
- var srcLength = %_TypedArrayGetLength(this);
- } else {
- var srcLength = %_TypedArrayGetLength(this);
- var endInt = srcLength;
- }
-
- if (beginInt < 0) {
- beginInt = MAX_SIMPLE(0, srcLength + beginInt);
- } else {
- beginInt = MIN_SIMPLE(beginInt, srcLength);
- }
-
- if (endInt < 0) {
- endInt = MAX_SIMPLE(0, srcLength + endInt);
- } else {
- endInt = MIN_SIMPLE(endInt, srcLength);
- }
-
- if (endInt < beginInt) {
- endInt = beginInt;
- }
-
- var newLength = endInt - beginInt;
- var beginByteOffset =
- %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
- return new GlobalNAME(%TypedArrayGetBuffer(this),
- beginByteOffset, newLength);
-}
-endmacro
-
-TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
-
-
-function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
- if (offset > 0) {
- for (var i = 0; i < sourceLength; i++) {
- target[offset + i] = source[i];
- }
- }
- else {
- for (var i = 0; i < sourceLength; i++) {
- target[i] = source[i];
- }
- }
-}
-
-function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
- var sourceElementSize = source.BYTES_PER_ELEMENT;
- var targetElementSize = target.BYTES_PER_ELEMENT;
- var sourceLength = source.length;
-
- // Copy left part.
- function CopyLeftPart() {
- // First unmutated byte after the next write
- var targetPtr = target.byteOffset + (offset + 1) * targetElementSize;
- // Next read at sourcePtr. We do not care for memory changing before
- // sourcePtr - we have already copied it.
- var sourcePtr = source.byteOffset;
- for (var leftIndex = 0;
- leftIndex < sourceLength && targetPtr <= sourcePtr;
- leftIndex++) {
- target[offset + leftIndex] = source[leftIndex];
- targetPtr += targetElementSize;
- sourcePtr += sourceElementSize;
- }
- return leftIndex;
- }
- var leftIndex = CopyLeftPart();
-
- // Copy right part.
- function CopyRightPart() {
- // First unmutated byte before the next write
- var targetPtr =
- target.byteOffset + (offset + sourceLength - 1) * targetElementSize;
- // Next read before sourcePtr. We do not care for memory changing after
- // sourcePtr - we have already copied it.
- var sourcePtr =
- source.byteOffset + sourceLength * sourceElementSize;
- for(var rightIndex = sourceLength - 1;
- rightIndex >= leftIndex && targetPtr >= sourcePtr;
- rightIndex--) {
- target[offset + rightIndex] = source[rightIndex];
- targetPtr -= targetElementSize;
- sourcePtr -= sourceElementSize;
- }
- return rightIndex;
- }
- var rightIndex = CopyRightPart();
-
- var temp = new GlobalArray(rightIndex + 1 - leftIndex);
- for (var i = leftIndex; i <= rightIndex; i++) {
- temp[i - leftIndex] = source[i];
- }
- for (i = leftIndex; i <= rightIndex; i++) {
- target[offset + i] = temp[i - leftIndex];
- }
-}
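
The deleted TypedArraySetFromOverlappingTypedArray implements set() for two views aliasing one buffer: copy a safe prefix left-to-right, a safe suffix right-to-left, and stash only the contested middle in a temporary. Semantically it must behave as if the source had been snapshotted before any write. A C++ sketch of that baseline semantics (the JS above is an optimization of this; the byte offsets here are invented for the test):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Overlap-safe element copy: snapshot the source range first, then write.
    void OverlappingSet(std::vector<uint8_t>& buffer, size_t src_off,
                        size_t len, size_t dst_off) {
      std::vector<uint8_t> snapshot(buffer.begin() + src_off,
                                    buffer.begin() + src_off + len);
      for (size_t i = 0; i < len; i++) buffer[dst_off + i] = snapshot[i];
    }

    int main() {
      std::vector<uint8_t> buf = {1, 2, 3, 4, 5};
      OverlappingSet(buf, /*src_off=*/0, /*len=*/4, /*dst_off=*/1);  // overlap
      std::vector<uint8_t> expected = {1, 1, 2, 3, 4};
      assert(buf == expected);  // reads see pre-write source values
    }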
-
-function TypedArraySet(obj, offset) {
- var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
- if (intOffset < 0) throw MakeTypeError(kTypedArraySetNegativeOffset);
-
- if (intOffset > %_MaxSmi()) {
- throw MakeRangeError(kTypedArraySetSourceTooLarge);
- }
- switch (%TypedArraySetFastCases(this, obj, intOffset)) {
- // These numbers should be synchronized with runtime.cc.
- case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
- return;
- case 1: // TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING
- TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
- return;
- case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
- TypedArraySetFromArrayLike(this, obj, obj.length, intOffset);
- return;
- case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
- var l = obj.length;
- if (IS_UNDEFINED(l)) {
- if (IS_NUMBER(obj)) {
- // For number as a first argument, throw TypeError
- // instead of silently ignoring the call, so that
- // the user knows (s)he did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- throw MakeTypeError(kInvalidArgument);
- }
- return;
- }
- l = TO_LENGTH(l);
- if (intOffset + l > this.length) {
- throw MakeRangeError(kTypedArraySetSourceTooLarge);
- }
- TypedArraySetFromArrayLike(this, obj, l, intOffset);
- return;
- }
-}
-
-function TypedArrayGetToStringTag() {
- if (!%_IsTypedArray(this)) return;
- var name = %_ClassOf(this);
- if (IS_UNDEFINED(name)) return;
- return name;
-}
-
-// -------------------------------------------------------------------
-
-macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
- %SetCode(GlobalNAME, NAMEConstructor);
- %FunctionSetPrototype(GlobalNAME, new GlobalObject());
-
- %AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
- READ_ONLY | DONT_ENUM | DONT_DELETE);
- %AddNamedProperty(GlobalNAME.prototype,
- "constructor", global.NAME, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype,
- "BYTES_PER_ELEMENT", ELEMENT_SIZE,
- READ_ONLY | DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, "buffer", NAME_GetBuffer);
- utils.InstallGetter(GlobalNAME.prototype, "byteOffset", NAME_GetByteOffset,
- DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, "byteLength", NAME_GetByteLength,
- DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, "length", NAME_GetLength,
- DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, toStringTagSymbol,
- TypedArrayGetToStringTag);
- utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- "subarray", NAMESubArray,
- "set", TypedArraySet
- ]);
-endmacro
-
-TYPED_ARRAYS(SETUP_TYPED_ARRAY)
-
-// --------------------------- DataView -----------------------------
-
-function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
- if (%_IsConstructCall()) {
- // TODO(binji): support SharedArrayBuffers?
- if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
- if (!IS_UNDEFINED(byteOffset)) {
- byteOffset = $toPositiveInteger(byteOffset, kInvalidDataViewOffset);
- }
- if (!IS_UNDEFINED(byteLength)) {
- byteLength = TO_INTEGER(byteLength);
- }
-
- var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
-
- var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
- if (offset > bufferByteLength) throw MakeRangeError(kInvalidDataViewOffset);
-
- var length = IS_UNDEFINED(byteLength)
- ? bufferByteLength - offset
- : byteLength;
- if (length < 0 || offset + length > bufferByteLength) {
- throw new MakeRangeError(kInvalidDataViewLength);
- }
- %_DataViewInitialize(this, buffer, offset, length);
- } else {
- throw MakeTypeError(kConstructorNotFunction, "DataView");
- }
-}
-
-function DataViewGetBufferJS() {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver, 'DataView.buffer', this);
- }
- return %DataViewGetBuffer(this);
-}
-
-function DataViewGetByteOffset() {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'DataView.byteOffset', this);
- }
- return %_ArrayBufferViewGetByteOffset(this);
-}
-
-function DataViewGetByteLength() {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'DataView.byteLength', this);
- }
- return %_ArrayBufferViewGetByteLength(this);
-}
-
-macro DATA_VIEW_TYPES(FUNCTION)
- FUNCTION(Int8)
- FUNCTION(Uint8)
- FUNCTION(Int16)
- FUNCTION(Uint16)
- FUNCTION(Int32)
- FUNCTION(Uint32)
- FUNCTION(Float32)
- FUNCTION(Float64)
-endmacro
-
-
-macro DATA_VIEW_GETTER_SETTER(TYPENAME)
-function DataViewGetTYPENAMEJS(offset, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'DataView.getTYPENAME', this);
- }
- if (%_ArgumentsLength() < 1) throw MakeTypeError(kInvalidArgument);
- offset = $toPositiveInteger(offset, kInvalidDataViewAccessorOffset);
- return %DataViewGetTYPENAME(this, offset, !!little_endian);
-}
-
-function DataViewSetTYPENAMEJS(offset, value, little_endian) {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'DataView.setTYPENAME', this);
- }
- if (%_ArgumentsLength() < 2) throw MakeTypeError(kInvalidArgument);
- offset = $toPositiveInteger(offset, kInvalidDataViewAccessorOffset);
- %DataViewSetTYPENAME(this, offset, TO_NUMBER_INLINE(value), !!little_endian);
-}
-endmacro
-
-DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
-
-// Setup the DataView constructor.
-%SetCode(GlobalDataView, DataViewConstructor);
-%FunctionSetPrototype(GlobalDataView, new GlobalObject);
-
-// Set up constructor property on the DataView prototype.
-%AddNamedProperty(GlobalDataView.prototype, "constructor", GlobalDataView,
- DONT_ENUM);
-%AddNamedProperty(GlobalDataView.prototype, toStringTagSymbol, "DataView",
- READ_ONLY|DONT_ENUM);
-
-utils.InstallGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
-utils.InstallGetter(GlobalDataView.prototype, "byteOffset",
- DataViewGetByteOffset);
-utils.InstallGetter(GlobalDataView.prototype, "byteLength",
- DataViewGetByteLength);
-
-utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
- "getInt8", DataViewGetInt8JS,
- "setInt8", DataViewSetInt8JS,
-
- "getUint8", DataViewGetUint8JS,
- "setUint8", DataViewSetUint8JS,
-
- "getInt16", DataViewGetInt16JS,
- "setInt16", DataViewSetInt16JS,
-
- "getUint16", DataViewGetUint16JS,
- "setUint16", DataViewSetUint16JS,
-
- "getInt32", DataViewGetInt32JS,
- "setInt32", DataViewSetInt32JS,
-
- "getUint32", DataViewGetUint32JS,
- "setUint32", DataViewSetUint32JS,
-
- "getFloat32", DataViewGetFloat32JS,
- "setFloat32", DataViewSetFloat32JS,
-
- "getFloat64", DataViewGetFloat64JS,
- "setFloat64", DataViewSetFloat64JS
-]);
-
-})
diff --git a/chromium/v8/src/types-inl.h b/chromium/v8/src/types-inl.h
index 699d642d590..9af4bccd2ed 100644
--- a/chromium/v8/src/types-inl.h
+++ b/chromium/v8/src/types-inl.h
@@ -481,6 +481,7 @@ void HeapTypeConfig::range_set_double(i::Handle<HeapTypeConfig::Range> range,
i::Handle<Object> number = isolate->factory()->NewNumber(value);
range->set(index + 2, *number);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPES_INL_H_
diff --git a/chromium/v8/src/types.cc b/chromium/v8/src/types.cc
index af1700254bb..92610606d5f 100644
--- a/chromium/v8/src/types.cc
+++ b/chromium/v8/src/types.cc
@@ -173,7 +173,7 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
if (type->IsRange()) return type->AsRange()->Lub();
if (type->IsContext()) return kInternal & kTaggedPointer;
if (type->IsArray()) return kOtherObject;
- if (type->IsFunction()) return kOtherObject; // TODO(rossberg): kFunction
+ if (type->IsFunction()) return kFunction;
UNREACHABLE();
return kNone;
}
@@ -231,7 +231,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
- case JS_BUILTINS_OBJECT_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
@@ -245,14 +244,16 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_ITERATOR_RESULT_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_FUNCTION_TYPE:
- return kOtherObject; // TODO(rossberg): there should be a Function type.
+ if (map->is_undetectable()) return kUndetectable;
+ return kFunction;
case JS_REGEXP_TYPE:
return kOtherObject; // TODO(rossberg): there should be a RegExp type.
case JS_PROXY_TYPE:
- case JS_FUNCTION_PROXY_TYPE:
return kProxy;
case MAP_TYPE:
// When compiling stub templates, the meta map is used as a placeholder
@@ -265,6 +266,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
// We ought to find a cleaner solution for compiling stubs parameterised
// over type or class variables, esp. ones with bounds...
return kDetectable & kTaggedPointer;
+ case ALLOCATION_SITE_TYPE:
case DECLARED_ACCESSOR_INFO_TYPE:
case EXECUTABLE_ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
@@ -273,6 +275,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case FIXED_DOUBLE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
+ case TRANSITION_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
@@ -297,7 +300,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case OBJECT_TEMPLATE_INFO_TYPE:
case SIGNATURE_INFO_TYPE:
case TYPE_SWITCH_INFO_TYPE:
- case ALLOCATION_SITE_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case CODE_CACHE_TYPE:
case POLYMORPHIC_CODE_CACHE_TYPE:
diff --git a/chromium/v8/src/types.h b/chromium/v8/src/types.h
index 3acd5cc842d..9ce650d943d 100644
--- a/chromium/v8/src/types.h
+++ b/chromium/v8/src/types.h
@@ -159,38 +159,29 @@ namespace internal {
// clang-format off
#define MASK_BITSET_TYPE_LIST(V) \
- V(Representation, 0xfff00000u) \
- V(Semantic, 0x000ffffeu)
+ V(Representation, 0xff800000u) \
+ V(Semantic, 0x007ffffeu)
#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
#define SEMANTIC(k) ((k) & BitsetType::kSemantic)
#define REPRESENTATION_BITSET_TYPE_LIST(V) \
V(None, 0) \
- V(UntaggedBit, 1u << 20 | kSemantic) \
- V(UntaggedSigned8, 1u << 21 | kSemantic) \
- V(UntaggedSigned16, 1u << 22 | kSemantic) \
- V(UntaggedSigned32, 1u << 23 | kSemantic) \
- V(UntaggedUnsigned8, 1u << 24 | kSemantic) \
- V(UntaggedUnsigned16, 1u << 25 | kSemantic) \
- V(UntaggedUnsigned32, 1u << 26 | kSemantic) \
+ V(UntaggedBit, 1u << 23 | kSemantic) \
+ V(UntaggedIntegral8, 1u << 24 | kSemantic) \
+ V(UntaggedIntegral16, 1u << 25 | kSemantic) \
+ V(UntaggedIntegral32, 1u << 26 | kSemantic) \
V(UntaggedFloat32, 1u << 27 | kSemantic) \
V(UntaggedFloat64, 1u << 28 | kSemantic) \
V(UntaggedPointer, 1u << 29 | kSemantic) \
V(TaggedSigned, 1u << 30 | kSemantic) \
V(TaggedPointer, 1u << 31 | kSemantic) \
\
- V(UntaggedSigned, kUntaggedSigned8 | kUntaggedSigned16 | \
- kUntaggedSigned32) \
- V(UntaggedUnsigned, kUntaggedUnsigned8 | kUntaggedUnsigned16 | \
- kUntaggedUnsigned32) \
- V(UntaggedIntegral8, kUntaggedSigned8 | kUntaggedUnsigned8) \
- V(UntaggedIntegral16, kUntaggedSigned16 | kUntaggedUnsigned16) \
- V(UntaggedIntegral32, kUntaggedSigned32 | kUntaggedUnsigned32) \
- V(UntaggedIntegral, kUntaggedBit | kUntaggedSigned | kUntaggedUnsigned) \
- V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
- V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
- V(Untagged, kUntaggedNumber | kUntaggedPointer) \
+ V(UntaggedIntegral, kUntaggedBit | kUntaggedIntegral8 | \
+ kUntaggedIntegral16 | kUntaggedIntegral32) \
+ V(UntaggedFloat, kUntaggedFloat32 | kUntaggedFloat64) \
+ V(UntaggedNumber, kUntaggedIntegral | kUntaggedFloat) \
+ V(Untagged, kUntaggedNumber | kUntaggedPointer) \
V(Tagged, kTaggedSigned | kTaggedPointer)
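
The masks move from 0xfff00000/0x000ffffe to 0xff800000/0x007ffffe because merging the separate signed/unsigned untagged variants into UntaggedIntegral8/16/32 frees three representation bits, making room on the semantic side for the new kFunction bit (with kInternal shifted up to bit 20). A quick standalone check of the invariants these constants must satisfy, with the values copied from this hunk:

    #include <cstdint>

    // Constants copied from the new MASK_BITSET_TYPE_LIST.
    constexpr uint32_t kRepresentation = 0xff800000u;
    constexpr uint32_t kSemantic = 0x007ffffeu;

    // The two halves must not overlap...
    static_assert((kRepresentation & kSemantic) == 0u, "masks overlap");
    // ...and together they are exactly kAny (bit 0 stays unused).
    static_assert((kRepresentation | kSemantic) == 0xfffffffeu,
                  "masks do not tile kAny");
    // Lowest representation bit is UntaggedBit at 1u << 23, per the list above.
    static_assert(((1u << 23) & kRepresentation) != 0u, "UntaggedBit misplaced");

    int main() {}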
#define INTERNAL_BITSET_TYPE_LIST(V) \
@@ -214,37 +205,39 @@ namespace internal {
V(Undetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
V(OtherObject, 1u << 17 | REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
- V(Internal, 1u << 19 | REPRESENTATION(kTagged | kUntagged)) \
+ V(Function, 1u << 19 | REPRESENTATION(kTaggedPointer)) \
+ V(Internal, 1u << 20 | REPRESENTATION(kTagged | kUntagged)) \
\
- V(Signed31, kUnsigned30 | kNegative31) \
- V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
- V(Negative32, kNegative31 | kOtherSigned32) \
- V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
- V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | kOtherUnsigned32) \
- V(Integral32, kSigned32 | kUnsigned32) \
- V(PlainNumber, kIntegral32 | kOtherNumber) \
- V(OrderedNumber, kPlainNumber | kMinusZero) \
- V(MinusZeroOrNaN, kMinusZero | kNaN) \
- V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(BooleanOrNumber, kBoolean | kNumber) \
- V(NullOrUndefined, kNull | kUndefined) \
- V(NumberOrString, kNumber | kString) \
- V(NumberOrUndefined, kNumber | kUndefined) \
- V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
- V(DetectableReceiver, kOtherObject | kProxy) \
- V(Detectable, kDetectableReceiver | kNumber | kName) \
- V(Object, kOtherObject | kUndetectable) \
- V(Receiver, kObject | kProxy) \
- V(ReceiverOrUndefined, kReceiver | kUndefined) \
- V(StringOrReceiver, kString | kReceiver) \
- V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
- kReceiver) \
- V(NonNumber, kUnique | kString | kInternal) \
- V(Any, 0xfffffffeu)
+ V(Signed31, kUnsigned30 | kNegative31) \
+ V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
+ V(Negative32, kNegative31 | kOtherSigned32) \
+ V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
+ V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
+ kOtherUnsigned32) \
+ V(Integral32, kSigned32 | kUnsigned32) \
+ V(PlainNumber, kIntegral32 | kOtherNumber) \
+ V(OrderedNumber, kPlainNumber | kMinusZero) \
+ V(MinusZeroOrNaN, kMinusZero | kNaN) \
+ V(Number, kOrderedNumber | kNaN) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(BooleanOrNumber, kBoolean | kNumber) \
+ V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
+ V(NullOrUndefined, kNull | kUndefined) \
+ V(NumberOrString, kNumber | kString) \
+ V(NumberOrUndefined, kNumber | kUndefined) \
+ V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
+ V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
+ V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
+ V(Detectable, kDetectableReceiver | kNumber | kName) \
+ V(Object, kFunction | kOtherObject | kUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(StringOrReceiver, kString | kReceiver) \
+ V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
+ kReceiver) \
+ V(NonNumber, kUnique | kString | kInternal) \
+ V(Any, 0xfffffffeu)
// clang-format on
@@ -1186,6 +1179,7 @@ struct BoundsImpl {
typedef BoundsImpl<ZoneTypeConfig> Bounds;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPES_H_
diff --git a/chromium/v8/src/typing-asm.cc b/chromium/v8/src/typing-asm.cc
index f7688964a57..509ba7b1253 100644
--- a/chromium/v8/src/typing-asm.cc
+++ b/chromium/v8/src/typing-asm.cc
@@ -2,23 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/typing-asm.h"
-#include "src/ast.h"
+#include <limits>
+
+#include "src/v8.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
-#include "src/zone-type-cache.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
-namespace {
-
-base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
#define FAIL(node, msg) \
do { \
@@ -43,22 +39,31 @@ base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root)
- : script_(script),
+ : zone_(zone),
+ isolate_(isolate),
+ script_(script),
root_(root),
valid_(true),
+ allow_simd_(false),
+ property_info_(NULL),
+ intish_(0),
stdlib_types_(zone),
stdlib_heap_types_(zone),
stdlib_math_types_(zone),
- global_variable_type_(HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
+#define V(NAME, Name, name, lane_count, lane_type) \
+ stdlib_simd_##name##_types_(zone),
+ SIMD128_TYPES(V)
+#undef V
+ global_variable_type_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
local_variable_type_(HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
in_function_(false),
building_function_tables_(false),
- cache_(kCache.Get()) {
- InitializeAstVisitor(isolate, zone);
+ cache_(TypeCache::Get()) {
+ InitializeAstVisitor(isolate);
InitializeStdlib();
}
@@ -73,6 +78,13 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
Scope* scope = fun->scope();
if (!scope->is_function_scope()) FAIL(fun, "not at function scope");
+ ExpressionStatement* use_asm = fun->body()->first()->AsExpressionStatement();
+ if (use_asm == NULL) FAIL(fun, "missing \"use asm\"");
+ Literal* use_asm_literal = use_asm->expression()->AsLiteral();
+ if (use_asm_literal == NULL) FAIL(fun, "missing \"use asm\"");
+ if (!use_asm_literal->raw_value()->AsString()->IsOneByteEqualTo("use asm"))
+ FAIL(fun, "missing \"use asm\"");
+
// Module parameters.
for (int i = 0; i < scope->num_parameters(); ++i) {
Variable* param = scope->parameter(i);
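Note on the new "use asm" check above: it requires the module's first statement
to be the literal directive. A minimal sketch of the module shape it accepts,
with the asm.js source held in a C++ string purely for illustration (standard
asm.js form, not taken from this patch):

    // Smallest module the first-statement check is happy with.
    const char* kAsmModule =
        "function Module(stdlib, foreign, heap) {\n"
        "  'use asm';\n"
        "  function f(x) { x = x|0; return (x + 1)|0; }\n"
        "  return { f: f };\n"
        "}\n";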
@@ -96,7 +108,10 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
if (decl != NULL) {
RECURSE(VisitFunctionAnnotation(decl->fun()));
Variable* var = decl->proxy()->var();
- DCHECK(GetType(var) == NULL);
+ if (property_info_ != NULL) {
+ SetVariableInfo(var, property_info_);
+ property_info_ = NULL;
+ }
SetType(var, computed_type_);
DCHECK(GetType(var) != NULL);
}
@@ -121,6 +136,9 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
// Validate exports.
ReturnStatement* stmt = fun->body()->last()->AsReturnStatement();
+ if (stmt == nullptr) {
+ FAIL(fun->body()->last(), "last statement in module is not a return");
+ }
RECURSE(VisitWithExpectation(stmt->expression(), Type::Object(),
"expected object export"));
}
@@ -144,6 +162,10 @@ void AsmTyper::VisitFunctionDeclaration(FunctionDeclaration* decl) {
if (in_function_) {
FAIL(decl, "function declared inside another");
}
+ // Set function type so global references to functions have some type
+ // (so they can give a more useful error).
+ Variable* var = decl->proxy()->var();
+ SetType(var, Type::Function(zone()));
}
@@ -154,7 +176,15 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
if (body->length() > 0) {
ReturnStatement* stmt = body->last()->AsReturnStatement();
if (stmt != NULL) {
- RECURSE(VisitExpressionAnnotation(stmt->expression()));
+ Literal* literal = stmt->expression()->AsLiteral();
+ Type* old_expected = expected_type_;
+ expected_type_ = Type::Any();
+ if (literal) {
+ RECURSE(VisitLiteral(literal, true));
+ } else {
+ RECURSE(VisitExpressionAnnotation(stmt->expression(), NULL, true));
+ }
+ expected_type_ = old_expected;
result_type = computed_type_;
}
}
@@ -176,7 +206,11 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
Variable* var = proxy->var();
if (var->location() != VariableLocation::PARAMETER || var->index() != i)
break;
- RECURSE(VisitExpressionAnnotation(expr->value()));
+ RECURSE(VisitExpressionAnnotation(expr->value(), var, false));
+ if (property_info_ != NULL) {
+ SetVariableInfo(var, property_info_);
+ property_info_ = NULL;
+ }
SetType(var, computed_type_);
type->InitParameter(i, computed_type_);
good = true;
@@ -187,24 +221,38 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
}
-void AsmTyper::VisitExpressionAnnotation(Expression* expr) {
+void AsmTyper::VisitExpressionAnnotation(Expression* expr, Variable* var,
+ bool is_return) {
// Normal +x or x|0 annotations.
BinaryOperation* bin = expr->AsBinaryOperation();
if (bin != NULL) {
+ if (var != NULL) {
+ VariableProxy* proxy = bin->left()->AsVariableProxy();
+ if (proxy == NULL) {
+ FAIL(bin->left(), "expected variable for type annotation");
+ }
+ if (proxy->var() != var) {
+ FAIL(proxy, "annotation source doesn't match destination");
+ }
+ }
Literal* right = bin->right()->AsLiteral();
if (right != NULL) {
switch (bin->op()) {
- case Token::MUL: // We encode +x as 1*x
+ case Token::MUL: // We encode +x as x*1.0
if (right->raw_value()->ContainsDot() &&
right->raw_value()->AsNumber() == 1.0) {
- SetResult(expr, cache_.kFloat64);
+ SetResult(expr, cache_.kAsmDouble);
return;
}
break;
case Token::BIT_OR:
if (!right->raw_value()->ContainsDot() &&
right->raw_value()->AsNumber() == 0.0) {
- SetResult(expr, cache_.kInt32);
+ if (is_return) {
+ SetResult(expr, cache_.kAsmSigned);
+ } else {
+ SetResult(expr, cache_.kAsmInt);
+ }
return;
}
break;
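The annotation forms recognized above map one-to-one onto machine types. A
small C++ sketch of the coercions they encode (helper names are illustrative
and not part of the patch; JS ToInt32 wraps out-of-range values, which the
casts below gloss over):

    #include <cstdint>

    // x|0 -> int32: kAsmSigned when used as a return, kAsmInt on parameters.
    int32_t AnnotateSigned(double x) { return static_cast<int32_t>(x); }
    // +x, encoded as x*1.0 -> kAsmDouble.
    double AnnotateDouble(int32_t x) { return static_cast<double>(x); }
    // fround(x) -> kAsmFloat.
    float AnnotateFloat(double x) { return static_cast<float>(x); }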
@@ -223,19 +271,28 @@ void AsmTyper::VisitExpressionAnnotation(Expression* expr) {
Call* call = expr->AsCall();
if (call != NULL) {
- if (call->expression()->IsVariableProxy()) {
- RECURSE(VisitWithExpectation(
- call->expression(), Type::Any(zone()),
- "only fround allowed on expression annotations"));
- if (!computed_type_->Is(
- Type::Function(cache_.kFloat32, Type::Number(zone()), zone()))) {
- FAIL(call->expression(),
- "only fround allowed on expression annotations");
+ VariableProxy* proxy = call->expression()->AsVariableProxy();
+ if (proxy != NULL) {
+ VariableInfo* info = GetVariableInfo(proxy->var(), false);
+ if (!info ||
+ (!info->is_check_function && !info->is_constructor_function)) {
+ if (allow_simd_) {
+ FAIL(call->expression(),
+ "only fround/SIMD.checks allowed on expression annotations");
+ } else {
+ FAIL(call->expression(),
+ "only fround allowed on expression annotations");
+ }
+ }
+ Type* type = info->type;
+ DCHECK(type->IsFunction());
+ if (info->is_check_function) {
+ DCHECK(type->AsFunction()->Arity() == 1);
}
- if (call->arguments()->length() != 1) {
- FAIL(call, "invalid argument count calling fround");
+ if (call->arguments()->length() != type->AsFunction()->Arity()) {
+ FAIL(call, "invalid argument count calling function");
}
- SetResult(expr, cache_.kFloat32);
+ SetResult(expr, type->AsFunction()->Result());
return;
}
}
@@ -279,7 +336,7 @@ void AsmTyper::VisitIfStatement(IfStatement* stmt) {
if (!in_function_) {
FAIL(stmt, "if statement inside module body");
}
- RECURSE(VisitWithExpectation(stmt->condition(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->condition(), cache_.kAsmSigned,
"if condition expected to be integer"));
RECURSE(Visit(stmt->then_statement()));
RECURSE(Visit(stmt->else_statement()));
@@ -305,9 +362,17 @@ void AsmTyper::VisitReturnStatement(ReturnStatement* stmt) {
if (!in_function_) {
return;
}
- RECURSE(
- VisitWithExpectation(stmt->expression(), return_type_,
- "return expression expected to have return type"));
+ Literal* literal = stmt->expression()->AsLiteral();
+ if (literal) {
+ VisitLiteral(literal, true);
+ } else {
+ RECURSE(
+ VisitWithExpectation(stmt->expression(), Type::Any(),
+ "return expression expected to have return type"));
+ }
+ if (!computed_type_->Is(return_type_) || !return_type_->Is(computed_type_)) {
+ FAIL(stmt->expression(), "return type does not match function signature");
+ }
}
@@ -320,23 +385,40 @@ void AsmTyper::VisitSwitchStatement(SwitchStatement* stmt) {
if (!in_function_) {
FAIL(stmt, "switch statement inside module body");
}
- RECURSE(VisitWithExpectation(stmt->tag(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->tag(), cache_.kAsmSigned,
"switch expression non-integer"));
ZoneList<CaseClause*>* clauses = stmt->cases();
+ ZoneSet<int32_t> cases(zone());
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
- Expression* label = clause->label();
- RECURSE(
- VisitWithExpectation(label, cache_.kInt32, "case label non-integer"));
- if (!label->IsLiteral()) FAIL(label, "non-literal case label");
- Handle<Object> value = label->AsLiteral()->value();
- int32_t value32;
- if (!value->ToInt32(&value32)) FAIL(label, "illegal case label value");
+ if (clause->is_default()) {
+ if (i != clauses->length() - 1) {
+ FAIL(clause, "default case out of order");
+ }
+ } else {
+ Expression* label = clause->label();
+ RECURSE(VisitWithExpectation(label, cache_.kAsmSigned,
+ "case label non-integer"));
+ if (!label->IsLiteral()) FAIL(label, "non-literal case label");
+ Handle<Object> value = label->AsLiteral()->value();
+ int32_t value32;
+ if (!value->ToInt32(&value32)) FAIL(label, "illegal case label value");
+ if (cases.find(value32) != cases.end()) {
+ FAIL(label, "duplicate case value");
+ }
+ cases.insert(value32);
+ }
ZoneList<Statement*>* stmts = clause->statements();
RECURSE(VisitStatements(stmts));
}
+ if (cases.size() > 0) {
+ int64_t min_case = *cases.begin();
+ int64_t max_case = *cases.rbegin();
+ if (max_case - min_case > std::numeric_limits<int32_t>::max()) {
+ FAIL(stmt, "case range too large");
+ }
+ }
}
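The switch checks above boil down to: default clause last, labels distinct
int32 literals, and a bounded label span, presumably so backends can lower to
a dense jump table. A distilled sketch, assuming the labels were already
validated as int32:

    #include <cstdint>
    #include <limits>
    #include <set>

    // Span between smallest and largest case label must fit in an int32.
    bool CaseSpanOk(const std::set<int32_t>& cases) {
      if (cases.empty()) return true;
      int64_t min_case = *cases.begin();
      int64_t max_case = *cases.rbegin();
      return max_case - min_case <= std::numeric_limits<int32_t>::max();
    }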
@@ -348,7 +430,7 @@ void AsmTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
FAIL(stmt, "do statement inside module body");
}
RECURSE(Visit(stmt->body()));
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
"do condition expected to be integer"));
}
@@ -357,7 +439,7 @@ void AsmTyper::VisitWhileStatement(WhileStatement* stmt) {
if (!in_function_) {
FAIL(stmt, "while statement inside module body");
}
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
"while condition expected to be integer"));
RECURSE(Visit(stmt->body()));
}
@@ -371,7 +453,7 @@ void AsmTyper::VisitForStatement(ForStatement* stmt) {
RECURSE(Visit(stmt->init()));
}
if (stmt->cond() != NULL) {
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
"for condition expected to be integer"));
}
if (stmt->next() != NULL) {
@@ -435,56 +517,87 @@ void AsmTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
}
+void AsmTyper::VisitDoExpression(DoExpression* expr) {
+ FAIL(expr, "do-expression encountered");
+}
+
+
void AsmTyper::VisitConditional(Conditional* expr) {
- RECURSE(VisitWithExpectation(expr->condition(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(expr->condition(), Type::Number(),
"condition expected to be integer"));
+ if (!computed_type_->Is(cache_.kAsmInt)) {
+ FAIL(expr->condition(), "condition must be of type int");
+ }
+
RECURSE(VisitWithExpectation(
expr->then_expression(), expected_type_,
"conditional then branch type mismatch with enclosing expression"));
- Type* then_type = computed_type_;
+ Type* then_type = StorageType(computed_type_);
+ if (intish_ != 0 || !then_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->then_expression(), "invalid type in ? then expression");
+ }
+
RECURSE(VisitWithExpectation(
expr->else_expression(), expected_type_,
"conditional else branch type mismatch with enclosing expression"));
- Type* else_type = computed_type_;
- Type* type = Type::Intersect(then_type, else_type, zone());
- if (!(type->Is(cache_.kInt32) || type->Is(cache_.kFloat64))) {
- FAIL(expr, "ill-typed conditional");
+ Type* else_type = StorageType(computed_type_);
+ if (intish_ != 0 || !else_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->else_expression(), "invalid type in ? else expression");
}
- IntersectResult(expr, type);
+
+ if (!then_type->Is(else_type) || !else_type->Is(then_type)) {
+ FAIL(expr, "then and else expressions in ? must have the same type");
+ }
+
+ IntersectResult(expr, then_type);
}
void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->var();
- if (GetType(var) == NULL) {
- FAIL(expr, "unbound variable");
+ VariableInfo* info = GetVariableInfo(var, false);
+ if (info == NULL || info->type == NULL) {
+ if (var->mode() == TEMPORARY) {
+ SetType(var, Type::Any(zone()));
+ info = GetVariableInfo(var, false);
+ } else {
+ FAIL(expr, "unbound variable");
+ }
+ }
+ if (property_info_ != NULL) {
+ SetVariableInfo(var, property_info_);
+ property_info_ = NULL;
}
- Type* type = Type::Intersect(GetType(var), expected_type_, zone());
- if (type->Is(cache_.kInt32)) {
- type = cache_.kInt32;
+ Type* type = Type::Intersect(info->type, expected_type_, zone());
+ if (type->Is(cache_.kAsmInt)) {
+ type = cache_.kAsmInt;
}
- SetType(var, type);
+ info->type = type;
intish_ = 0;
IntersectResult(expr, type);
}
-void AsmTyper::VisitLiteral(Literal* expr) {
+void AsmTyper::VisitLiteral(Literal* expr, bool is_return) {
intish_ = 0;
Handle<Object> value = expr->value();
if (value->IsNumber()) {
int32_t i;
uint32_t u;
if (expr->raw_value()->ContainsDot()) {
- IntersectResult(expr, cache_.kFloat64);
- } else if (value->ToUint32(&u)) {
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmDouble);
+ } else if (!is_return && value->ToUint32(&u)) {
+ if (u <= 0x7fffffff) {
+ IntersectResult(expr, cache_.kAsmFixnum);
+ } else {
+ IntersectResult(expr, cache_.kAsmUnsigned);
+ }
} else if (value->ToInt32(&i)) {
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmSigned);
} else {
FAIL(expr, "illegal number");
}
- } else if (value->IsString()) {
+ } else if (!is_return && value->IsString()) {
IntersectResult(expr, Type::String());
} else if (value->IsUndefined()) {
IntersectResult(expr, Type::Undefined());
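The integer-literal classification above splits the non-dotted range three
ways. A self-contained sketch of the same policy (function name illustrative):

    #include <cstdint>

    // [0, 2^31) is fixnum (valid as signed and unsigned), [2^31, 2^32) is
    // unsigned, negative int32 values are signed; anything else is rejected.
    const char* ClassifyIntLiteral(double v) {
      if (v >= 0 && v <= 4294967295.0) {
        uint32_t u = static_cast<uint32_t>(v);
        if (static_cast<double>(u) == v)
          return u <= 0x7fffffff ? "kAsmFixnum" : "kAsmUnsigned";
      } else if (v >= -2147483648.0 && v < 0) {
        int32_t i = static_cast<int32_t>(v);
        if (static_cast<double>(i) == v) return "kAsmSigned";
      }
      return "illegal";
    }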
@@ -494,6 +607,9 @@ void AsmTyper::VisitLiteral(Literal* expr) {
}
+void AsmTyper::VisitLiteral(Literal* expr) { VisitLiteral(expr, false); }
+
+
void AsmTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
FAIL(expr, "regular expression encountered");
}
@@ -554,15 +670,23 @@ void AsmTyper::VisitAssignment(Assignment* expr) {
Type* type = expected_type_;
RECURSE(VisitWithExpectation(
expr->value(), type, "assignment value expected to match surrounding"));
+ Type* target_type = StorageType(computed_type_);
if (intish_ != 0) {
- FAIL(expr, "value still an intish");
+ FAIL(expr, "intish or floatish assignment");
}
- RECURSE(VisitWithExpectation(expr->target(), computed_type_,
- "assignment target expected to match value"));
- if (intish_ != 0) {
- FAIL(expr, "value still an intish");
+ if (expr->target()->IsVariableProxy()) {
+ RECURSE(VisitWithExpectation(expr->target(), target_type,
+ "assignment target expected to match value"));
+ } else if (expr->target()->IsProperty()) {
+ Property* property = expr->target()->AsProperty();
+ RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
+                                 "bad property object"));
+ if (!computed_type_->IsArray()) {
+ FAIL(property->obj(), "array expected");
+ }
+ VisitHeapAccess(property, true, target_type);
}
- IntersectResult(expr, computed_type_);
+ IntersectResult(expr, target_type);
}
@@ -577,131 +701,206 @@ void AsmTyper::VisitThrow(Throw* expr) {
int AsmTyper::ElementShiftSize(Type* type) {
- if (type->Is(cache_.kInt8) || type->Is(cache_.kUint8)) return 0;
- if (type->Is(cache_.kInt16) || type->Is(cache_.kUint16)) return 1;
- if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
- type->Is(cache_.kFloat32))
- return 2;
- if (type->Is(cache_.kFloat64)) return 3;
+ if (type->Is(cache_.kAsmSize8)) return 0;
+ if (type->Is(cache_.kAsmSize16)) return 1;
+ if (type->Is(cache_.kAsmSize32)) return 2;
+ if (type->Is(cache_.kAsmSize64)) return 3;
return -1;
}
-void AsmTyper::VisitHeapAccess(Property* expr) {
+Type* AsmTyper::StorageType(Type* type) {
+ if (type->Is(cache_.kAsmInt)) {
+ return cache_.kAsmInt;
+ } else {
+ return type;
+ }
+}
+
+
+void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
+ Type* assignment_type) {
Type::ArrayType* array_type = computed_type_->AsArray();
size_t size = array_size_;
Type* type = array_type->AsArray()->Element();
if (type->IsFunction()) {
+ if (assigning) {
+ FAIL(expr, "assigning to function table is illegal");
+ }
BinaryOperation* bin = expr->key()->AsBinaryOperation();
if (bin == NULL || bin->op() != Token::BIT_AND) {
FAIL(expr->key(), "expected & in call");
}
- RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
"array index expected to be integer"));
Literal* right = bin->right()->AsLiteral();
if (right == NULL || right->raw_value()->ContainsDot()) {
FAIL(right, "call mask must be integer");
}
- RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
"call mask expected to be integer"));
if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
FAIL(right, "call mask must match function table");
}
- bin->set_bounds(Bounds(cache_.kInt32));
+ bin->set_bounds(Bounds(cache_.kAsmSigned));
+ IntersectResult(expr, type);
} else {
- BinaryOperation* bin = expr->key()->AsBinaryOperation();
- if (bin == NULL || bin->op() != Token::SAR) {
- FAIL(expr->key(), "expected >> in heap access");
+ Literal* literal = expr->key()->AsLiteral();
+ if (literal) {
+ RECURSE(VisitWithExpectation(literal, cache_.kAsmSigned,
+ "array index expected to be integer"));
+ } else {
+ BinaryOperation* bin = expr->key()->AsBinaryOperation();
+ if (bin == NULL || bin->op() != Token::SAR) {
+ FAIL(expr->key(), "expected >> in heap access");
+ }
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
+ "array index expected to be integer"));
+ Literal* right = bin->right()->AsLiteral();
+ if (right == NULL || right->raw_value()->ContainsDot()) {
+ FAIL(right, "heap access shift must be integer");
+ }
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
+ "array shift expected to be integer"));
+ int n = static_cast<int>(right->raw_value()->AsNumber());
+ int expected_shift = ElementShiftSize(type);
+ if (expected_shift < 0 || n != expected_shift) {
+ FAIL(right, "heap access shift must match element size");
+ }
+ bin->set_bounds(Bounds(cache_.kAsmSigned));
}
- RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
- "array index expected to be integer"));
- Literal* right = bin->right()->AsLiteral();
- if (right == NULL || right->raw_value()->ContainsDot()) {
- FAIL(right, "heap access shift must be integer");
+ Type* result_type;
+ if (type->Is(cache_.kAsmIntArrayElement)) {
+ result_type = cache_.kAsmIntQ;
+ intish_ = kMaxUncombinedAdditiveSteps;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ if (assigning) {
+ result_type = cache_.kAsmFloatDoubleQ;
+ } else {
+ result_type = cache_.kAsmFloatQ;
+ }
+ intish_ = 0;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ if (assigning) {
+ result_type = cache_.kAsmFloatDoubleQ;
+ if (intish_ != 0) {
+ FAIL(expr, "Assignment of floatish to Float64Array");
+ }
+ } else {
+ result_type = cache_.kAsmDoubleQ;
+ }
+ intish_ = 0;
+ } else {
+ UNREACHABLE();
}
- RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
- "array shift expected to be integer"));
- int n = static_cast<int>(right->raw_value()->AsNumber());
- int expected_shift = ElementShiftSize(type);
- if (expected_shift < 0 || n != expected_shift) {
- FAIL(right, "heap access shift must match element size");
+ if (assigning) {
+ if (!assignment_type->Is(result_type)) {
+ FAIL(expr, "illegal type in assignment");
+ }
+ } else {
+ IntersectResult(expr, expected_type_);
+ IntersectResult(expr, result_type);
}
- bin->set_bounds(Bounds(cache_.kInt32));
}
- IntersectResult(expr, type);
}
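For the shift check above: an asm.js heap access such as HEAP32[p >> 2]
converts a byte offset into an element index, so the shift amount must equal
log2 of the element size. A sketch (aliasing rules relaxed for illustration;
real typed arrays are views over one ArrayBuffer):

    #include <cstdint>

    // ElementShiftSize(int32) == 2: p >> 2 divides the byte offset by 4.
    int32_t LoadHeap32(const uint8_t* heap, uint32_t byte_offset) {
      const int32_t* heap32 = reinterpret_cast<const int32_t*>(heap);
      return heap32[byte_offset >> 2];
    }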
-void AsmTyper::VisitProperty(Property* expr) {
- // stdlib.Math.x
- Property* inner_prop = expr->obj()->AsProperty();
- if (inner_prop != NULL) {
- // Get property name.
- Literal* key = expr->key()->AsLiteral();
- if (key == NULL || !key->IsPropertyName())
- FAIL(expr, "invalid type annotation on property 2");
- Handle<String> name = key->AsPropertyName();
-
- // Check that inner property name is "Math".
- Literal* math_key = inner_prop->key()->AsLiteral();
- if (math_key == NULL || !math_key->IsPropertyName() ||
- !math_key->AsPropertyName()->IsUtf8EqualTo(CStrVector("Math")))
- FAIL(expr, "invalid type annotation on stdlib (a1)");
-
- // Check that object is stdlib.
- VariableProxy* proxy = inner_prop->obj()->AsVariableProxy();
- if (proxy == NULL) FAIL(expr, "invalid type annotation on stdlib (a2)");
- Variable* var = proxy->var();
- if (var->location() != VariableLocation::PARAMETER || var->index() != 0)
- FAIL(expr, "invalid type annotation on stdlib (a3)");
+bool AsmTyper::IsStdlibObject(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy == NULL) {
+ return false;
+ }
+ Variable* var = proxy->var();
+ VariableInfo* info = GetVariableInfo(var, false);
+ if (info) {
+ if (info->standard_member == kStdlib) return true;
+ }
+ if (var->location() != VariableLocation::PARAMETER || var->index() != 0) {
+ return false;
+ }
+ info = GetVariableInfo(var, true);
+ info->type = Type::Object();
+ info->standard_member = kStdlib;
+ return true;
+}
+
+
+Expression* AsmTyper::GetReceiverOfPropertyAccess(Expression* expr,
+ const char* name) {
+ Property* property = expr->AsProperty();
+ if (property == NULL) {
+ return NULL;
+ }
+ Literal* key = property->key()->AsLiteral();
+ if (key == NULL || !key->IsPropertyName() ||
+ !key->AsPropertyName()->IsUtf8EqualTo(CStrVector(name))) {
+ return NULL;
+ }
+ return property->obj();
+}
+
+
+bool AsmTyper::IsMathObject(Expression* expr) {
+ Expression* obj = GetReceiverOfPropertyAccess(expr, "Math");
+ return obj && IsStdlibObject(obj);
+}
+
+
+bool AsmTyper::IsSIMDObject(Expression* expr) {
+ Expression* obj = GetReceiverOfPropertyAccess(expr, "SIMD");
+ return obj && IsStdlibObject(obj);
+}
- // Look up library type.
- Type* type = LibType(stdlib_math_types_, name);
- if (type == NULL) FAIL(expr, "unknown standard function 3 ");
- SetResult(expr, type);
+
+bool AsmTyper::IsSIMDTypeObject(Expression* expr, const char* name) {
+ Expression* obj = GetReceiverOfPropertyAccess(expr, name);
+ return obj && IsSIMDObject(obj);
+}
+
+
+void AsmTyper::VisitProperty(Property* expr) {
+ if (IsMathObject(expr->obj())) {
+ VisitLibraryAccess(&stdlib_math_types_, expr);
+ return;
+ }
+#define V(NAME, Name, name, lane_count, lane_type) \
+ if (IsSIMDTypeObject(expr->obj(), #Name)) { \
+ VisitLibraryAccess(&stdlib_simd_##name##_types_, expr); \
+ return; \
+ } \
+ if (IsSIMDTypeObject(expr, #Name)) { \
+ VariableInfo* info = stdlib_simd_##name##_constructor_type_; \
+ SetResult(expr, info->type); \
+ property_info_ = info; \
+ return; \
+ }
+ SIMD128_TYPES(V)
+#undef V
+ if (IsStdlibObject(expr->obj())) {
+ VisitLibraryAccess(&stdlib_types_, expr);
return;
}
+ property_info_ = NULL;
+
// Only recurse at this point so that we avoid needing
// stdlib.Math to have a real type.
- RECURSE(VisitWithExpectation(expr->obj(), Type::Any(),
- "property holder expected to be object"));
+  RECURSE(VisitWithExpectation(expr->obj(), Type::Any(), "bad property object"));
// For heap view or function table access.
if (computed_type_->IsArray()) {
- VisitHeapAccess(expr);
+ VisitHeapAccess(expr, false, NULL);
return;
}
- // Get property name.
- Literal* key = expr->key()->AsLiteral();
- if (key == NULL || !key->IsPropertyName())
- FAIL(expr, "invalid type annotation on property 3");
- Handle<String> name = key->AsPropertyName();
-
// stdlib.x or foreign.x
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->location() != VariableLocation::PARAMETER) {
- FAIL(expr, "invalid type annotation on variable");
- }
- switch (var->index()) {
- case 0: {
- // Object is stdlib, look up library type.
- Type* type = LibType(stdlib_types_, name);
- if (type == NULL) {
- FAIL(expr, "unknown standard function 4");
- }
- SetResult(expr, type);
- return;
- }
- case 1:
- // Object is foreign lib.
- SetResult(expr, expected_type_);
- return;
- default:
- FAIL(expr, "invalid type annotation on parameter");
+ if (var->location() == VariableLocation::PARAMETER && var->index() == 1) {
+ // foreign.x is ok.
+ SetResult(expr, expected_type_);
+ return;
}
}
@@ -712,8 +911,20 @@ void AsmTyper::VisitProperty(Property* expr) {
void AsmTyper::VisitCall(Call* expr) {
RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
"callee expected to be any"));
+ StandardMember standard_member = kNone;
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy) {
+ standard_member = VariableAsStandardMember(proxy->var());
+ }
+ if (!in_function_ && (proxy == NULL || standard_member != kMathFround)) {
+ FAIL(expr, "calls forbidden outside function bodies");
+ }
+ if (proxy == NULL && !expr->expression()->IsProperty()) {
+ FAIL(expr, "calls must be to bound variables or function tables");
+ }
if (computed_type_->IsFunction()) {
Type::FunctionType* fun_type = computed_type_->AsFunction();
+ Type* result_type = fun_type->Result();
ZoneList<Expression*>* args = expr->arguments();
if (fun_type->Arity() != args->length()) {
FAIL(expr, "call with wrong arity");
@@ -723,8 +934,36 @@ void AsmTyper::VisitCall(Call* expr) {
RECURSE(VisitWithExpectation(
arg, fun_type->Parameter(i),
"call argument expected to match callee parameter"));
+ if (standard_member != kNone && standard_member != kMathFround &&
+ i == 0) {
+ result_type = computed_type_;
+ }
}
- IntersectResult(expr, fun_type->Result());
+ // Handle polymorphic stdlib functions specially.
+ if (standard_member == kMathCeil || standard_member == kMathFloor ||
+ standard_member == kMathSqrt) {
+ if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
+ FAIL(expr, "illegal function argument type");
+ }
+ } else if (standard_member == kMathAbs || standard_member == kMathMin ||
+ standard_member == kMathMax) {
+ if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
+ FAIL(expr, "illegal function argument type");
+ }
+ if (args->length() > 1) {
+ Type* other = Type::Intersect(args->at(0)->bounds().upper,
+ args->at(1)->bounds().upper, zone());
+ if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
+ !other->Is(cache_.kAsmSigned)) {
+            FAIL(expr, "function argument types don't match");
+ }
+ }
+ }
+ intish_ = 0;
+ IntersectResult(expr, result_type);
} else if (computed_type_->Is(Type::Any())) {
// For foreign calls.
ZoneList<Expression*>* args = expr->arguments();
@@ -733,6 +972,7 @@ void AsmTyper::VisitCall(Call* expr) {
RECURSE(VisitWithExpectation(arg, Type::Any(),
"foreign call argument expected to be any"));
}
+ intish_ = kMaxUncombinedAdditiveSteps;
IntersectResult(expr, Type::Number());
} else {
FAIL(expr, "invalid callee");
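The Math.min/Math.max handling above intersects the two argument types and
requires the result to be float, double, or signed. A condensed sketch using
plain tags where the real code intersects full types:

    enum class AsmType { kFloat, kDouble, kSigned, kOther };

    // Both operands must land in the same allowed category.
    bool MinMaxArgsOk(AsmType a, AsmType b) {
      if (a != b) return false;
      return a == AsmType::kFloat || a == AsmType::kDouble ||
             a == AsmType::kSigned;
    }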
@@ -773,9 +1013,9 @@ void AsmTyper::VisitCallRuntime(CallRuntime* expr) {
void AsmTyper::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT: // Used to encode != and !==
- RECURSE(VisitWithExpectation(expr->expression(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(expr->expression(), cache_.kAsmInt,
"operand expected to be integer"));
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmSigned);
return;
case Token::DELETE:
FAIL(expr, "delete operator encountered");
@@ -794,6 +1034,50 @@ void AsmTyper::VisitCountOperation(CountOperation* expr) {
}
+void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
+ Type* left_expected,
+ Type* right_expected,
+ Type* result_type, bool conversion) {
+ RECURSE(VisitWithExpectation(expr->left(), Type::Number(),
+ "left bitwise operand expected to be a number"));
+ int left_intish = intish_;
+ Type* left_type = computed_type_;
+ if (!left_type->Is(left_expected)) {
+ FAIL(expr->left(), "left bitwise operand expected to be an integer");
+ }
+ if (left_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr->left(), "too many consecutive additive ops");
+ }
+
+ RECURSE(
+ VisitWithExpectation(expr->right(), Type::Number(),
+ "right bitwise operand expected to be a number"));
+ int right_intish = intish_;
+ Type* right_type = computed_type_;
+ if (!right_type->Is(right_expected)) {
+ FAIL(expr->right(), "right bitwise operand expected to be an integer");
+ }
+ if (right_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr->right(), "too many consecutive additive ops");
+ }
+
+ intish_ = 0;
+
+ if (left_type->Is(cache_.kAsmFixnum) && right_type->Is(cache_.kAsmInt)) {
+ left_type = right_type;
+ }
+ if (right_type->Is(cache_.kAsmFixnum) && left_type->Is(cache_.kAsmInt)) {
+ right_type = left_type;
+ }
+ if (!conversion) {
+ if (!left_type->Is(right_type) || !right_type->Is(left_type)) {
+ FAIL(expr, "ill-typed bitwise operation");
+ }
+ }
+ IntersectResult(expr, result_type);
+}
+
+
void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA: {
@@ -806,35 +1090,42 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
}
case Token::OR:
case Token::AND:
- FAIL(expr, "logical operator encountered");
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
+ FAIL(expr, "illegal logical operator");
+ case Token::BIT_OR: {
// BIT_OR allows Any since it is used as a type coercion.
- // BIT_XOR allows Number since it is used as a type coercion (encoding ~).
- Type* expectation =
- expr->op() == Token::BIT_OR
- ? Type::Any()
- : expr->op() == Token::BIT_XOR ? Type::Number() : cache_.kInt32;
- Type* result =
- expr->op() == Token::SHR ? Type::Unsigned32() : cache_.kInt32;
- RECURSE(VisitWithExpectation(expr->left(), expectation,
- "left bit operand expected to be integer"));
- int left_intish = intish_;
- RECURSE(VisitWithExpectation(expr->right(), expectation,
- "right bit operand expected to be integer"));
- int right_intish = intish_;
- if (left_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
- }
- if (right_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
+ VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmInt,
+ cache_.kAsmSigned, true);
+ return;
+ }
+ case Token::BIT_XOR: {
+      // Handle booleans specially to support de-sugared !
+ Literal* left = expr->left()->AsLiteral();
+ if (left && left->value()->IsBoolean()) {
+ if (left->ToBooleanIsTrue()) {
+ left->set_bounds(Bounds(cache_.kSingletonOne));
+ RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmInt,
+ "not operator expects an integer"));
+ IntersectResult(expr, cache_.kAsmSigned);
+ return;
+ } else {
+ FAIL(left, "unexpected false");
+ }
}
- intish_ = 0;
- IntersectResult(expr, result);
+ // BIT_XOR allows Number since it is used as a type coercion (via ~~).
+ VisitIntegerBitwiseOperator(expr, Type::Number(), cache_.kAsmInt,
+ cache_.kAsmSigned, true);
+ return;
+ }
+ case Token::SHR: {
+ VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
+ cache_.kAsmUnsigned, false);
+ return;
+ }
+ case Token::SHL:
+ case Token::SAR:
+ case Token::BIT_AND: {
+ VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
+ cache_.kAsmSigned, false);
return;
}
case Token::ADD:
@@ -853,13 +1144,25 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
Type* right_type = computed_type_;
int right_intish = intish_;
Type* type = Type::Union(left_type, right_type, zone());
- if (type->Is(cache_.kInt32)) {
+ if (type->Is(cache_.kAsmInt)) {
if (expr->op() == Token::MUL) {
- if (!expr->left()->IsLiteral() && !expr->right()->IsLiteral()) {
+ Literal* right = expr->right()->AsLiteral();
+ if (!right) {
FAIL(expr, "direct integer multiply forbidden");
}
- intish_ = 0;
- IntersectResult(expr, cache_.kInt32);
+ if (!right->value()->IsNumber()) {
+ FAIL(expr, "multiply must be by an integer");
+ }
+ int32_t i;
+ if (!right->value()->ToInt32(&i)) {
+ FAIL(expr, "multiply must be a signed integer");
+ }
+ i = abs(i);
+ if (i >= 1 << 20) {
+ FAIL(expr, "multiply must be by value in -2^20 < n < 2^20");
+ }
+ intish_ = i;
+ IntersectResult(expr, cache_.kAsmInt);
return;
} else {
intish_ = left_intish + right_intish + 1;
@@ -872,11 +1175,23 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
FAIL(expr, "too many consecutive multiplicative ops");
}
}
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmInt);
return;
}
- } else if (type->Is(Type::Number())) {
- IntersectResult(expr, cache_.kFloat64);
+ } else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
+ right_type->Is(cache_.kAsmDouble)) {
+ // For unary +, expressed as x * 1.0
+ IntersectResult(expr, cache_.kAsmDouble);
+ return;
+ } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
+ if (left_intish != 0 || right_intish != 0) {
+ FAIL(expr, "float operation before required fround");
+ }
+ IntersectResult(expr, cache_.kAsmFloat);
+ intish_ = 1;
+ return;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ IntersectResult(expr, cache_.kAsmDouble);
return;
} else {
FAIL(expr, "ill-typed arithmetic operation");
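On the new multiply rule above: general int*int must go through Math.imul, but
multiplication by a literal is allowed when its magnitude stays below 2^20,
presumably so intermediate products remain exact in a double; the magnitude is
then carried in intish_. The guard, in isolation:

    #include <cstdint>
    #include <cstdlib>

    // Literal multiplier must satisfy -2^20 < n < 2^20. (Sketch assumes
    // n != INT32_MIN, where std::abs would overflow.)
    bool MultiplierAllowed(int32_t n) { return std::abs(n) < (1 << 20); }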
@@ -889,21 +1204,33 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
+ Token::Value op = expr->op();
+ if (op != Token::EQ && op != Token::NE && op != Token::LT &&
+ op != Token::LTE && op != Token::GT && op != Token::GTE) {
+ FAIL(expr, "illegal comparison operator");
+ }
+
RECURSE(
VisitWithExpectation(expr->left(), Type::Number(),
"left comparison operand expected to be number"));
Type* left_type = computed_type_;
+ if (!left_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->left(), "bad type on left side of comparison");
+ }
+
RECURSE(
VisitWithExpectation(expr->right(), Type::Number(),
"right comparison operand expected to be number"));
Type* right_type = computed_type_;
- Type* type = Type::Union(left_type, right_type, zone());
- expr->set_combined_type(type);
- if (type->Is(Type::Integral32()) || type->Is(Type::UntaggedFloat64())) {
- IntersectResult(expr, cache_.kInt32);
- } else {
- FAIL(expr, "ill-typed comparison operation");
+ if (!right_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->right(), "bad type on right side of comparison");
}
+
+ if (!left_type->Is(right_type) && !right_type->Is(left_type)) {
+ FAIL(expr, "left and right side of comparison must match");
+ }
+
+ IntersectResult(expr, cache_.kAsmSigned);
}
@@ -948,64 +1275,115 @@ void AsmTyper::VisitSuperCallReference(SuperCallReference* expr) {
}
+void AsmTyper::InitializeStdlibSIMD() {
+#define V(NAME, Name, name, lane_count, lane_type) \
+ { \
+ Type* type = Type::Function(Type::Name(isolate_, zone()), Type::Any(), \
+ lane_count, zone()); \
+ for (int i = 0; i < lane_count; ++i) { \
+ type->AsFunction()->InitParameter(i, Type::Number()); \
+ } \
+ stdlib_simd_##name##_constructor_type_ = new (zone()) VariableInfo(type); \
+ stdlib_simd_##name##_constructor_type_->is_constructor_function = true; \
+ }
+ SIMD128_TYPES(V)
+#undef V
+}
+
+
void AsmTyper::InitializeStdlib() {
+ if (allow_simd_) {
+ InitializeStdlibSIMD();
+ }
Type* number_type = Type::Number(zone());
- Type* double_type = cache_.kFloat64;
+ Type* double_type = cache_.kAsmDouble;
Type* double_fn1_type = Type::Function(double_type, double_type, zone());
Type* double_fn2_type =
Type::Function(double_type, double_type, double_type, zone());
- Type* fround_type = Type::Function(cache_.kFloat32, number_type, zone());
+ Type* fround_type = Type::Function(cache_.kAsmFloat, number_type, zone());
Type* imul_type =
- Type::Function(cache_.kInt32, cache_.kInt32, cache_.kInt32, zone());
+ Type::Function(cache_.kAsmSigned, cache_.kAsmInt, cache_.kAsmInt, zone());
// TODO(bradnelson): currently only approximating the proper intersection type
// (which we cannot currently represent).
- Type* abs_type = Type::Function(number_type, number_type, zone());
+ Type* number_fn1_type = Type::Function(number_type, number_type, zone());
+ Type* number_fn2_type =
+ Type::Function(number_type, number_type, number_type, zone());
struct Assignment {
const char* name;
+ StandardMember standard_member;
Type* type;
};
- const Assignment math[] = {
- {"PI", double_type}, {"E", double_type},
- {"LN2", double_type}, {"LN10", double_type},
- {"LOG2E", double_type}, {"LOG10E", double_type},
- {"SQRT2", double_type}, {"SQRT1_2", double_type},
- {"imul", imul_type}, {"abs", abs_type},
- {"ceil", double_fn1_type}, {"floor", double_fn1_type},
- {"fround", fround_type}, {"pow", double_fn2_type},
- {"exp", double_fn1_type}, {"log", double_fn1_type},
- {"min", double_fn2_type}, {"max", double_fn2_type},
- {"sqrt", double_fn1_type}, {"cos", double_fn1_type},
- {"sin", double_fn1_type}, {"tan", double_fn1_type},
- {"acos", double_fn1_type}, {"asin", double_fn1_type},
- {"atan", double_fn1_type}, {"atan2", double_fn2_type}};
+ const Assignment math[] = {{"PI", kMathPI, double_type},
+ {"E", kMathE, double_type},
+ {"LN2", kMathLN2, double_type},
+ {"LN10", kMathLN10, double_type},
+ {"LOG2E", kMathLOG2E, double_type},
+ {"LOG10E", kMathLOG10E, double_type},
+ {"SQRT2", kMathSQRT2, double_type},
+ {"SQRT1_2", kMathSQRT1_2, double_type},
+ {"imul", kMathImul, imul_type},
+ {"abs", kMathAbs, number_fn1_type},
+ {"ceil", kMathCeil, number_fn1_type},
+ {"floor", kMathFloor, number_fn1_type},
+ {"fround", kMathFround, fround_type},
+ {"pow", kMathPow, double_fn2_type},
+ {"exp", kMathExp, double_fn1_type},
+ {"log", kMathLog, double_fn1_type},
+ {"min", kMathMin, number_fn2_type},
+ {"max", kMathMax, number_fn2_type},
+ {"sqrt", kMathSqrt, number_fn1_type},
+ {"cos", kMathCos, double_fn1_type},
+ {"sin", kMathSin, double_fn1_type},
+ {"tan", kMathTan, double_fn1_type},
+ {"acos", kMathAcos, double_fn1_type},
+ {"asin", kMathAsin, double_fn1_type},
+ {"atan", kMathAtan, double_fn1_type},
+ {"atan2", kMathAtan2, double_fn2_type}};
for (unsigned i = 0; i < arraysize(math); ++i) {
- stdlib_math_types_[math[i].name] = math[i].type;
+ stdlib_math_types_[math[i].name] = new (zone()) VariableInfo(math[i].type);
+ stdlib_math_types_[math[i].name]->standard_member = math[i].standard_member;
}
+ stdlib_math_types_["fround"]->is_check_function = true;
- stdlib_types_["Infinity"] = double_type;
- stdlib_types_["NaN"] = double_type;
+ stdlib_types_["Infinity"] = new (zone()) VariableInfo(double_type);
+ stdlib_types_["Infinity"]->standard_member = kInfinity;
+ stdlib_types_["NaN"] = new (zone()) VariableInfo(double_type);
+ stdlib_types_["NaN"]->standard_member = kNaN;
Type* buffer_type = Type::Any(zone());
#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- stdlib_types_[#TypeName "Array"] = \
- Type::Function(cache_.k##TypeName##Array, buffer_type, zone());
+ stdlib_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
+ Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
TYPED_ARRAYS(TYPED_ARRAY)
#undef TYPED_ARRAY
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- stdlib_heap_types_[#TypeName "Array"] = \
- Type::Function(cache_.k##TypeName##Array, buffer_type, zone());
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ stdlib_heap_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
+ Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
TYPED_ARRAYS(TYPED_ARRAY)
#undef TYPED_ARRAY
}
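For reference on imul_type above (kAsmSigned result from two kAsmInt
operands): Math.imul is a 32-bit wrapping multiply. Its standard semantics in
C++ (general JS/asm.js behavior, not something this patch defines):

    #include <cstdint>

    // Unsigned arithmetic gives wrap-around without signed-overflow UB.
    int32_t Imul(int32_t a, int32_t b) {
      return static_cast<int32_t>(static_cast<uint32_t>(a) *
                                  static_cast<uint32_t>(b));
    }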
-Type* AsmTyper::LibType(ObjectTypeMap map, Handle<String> name) {
+void AsmTyper::VisitLibraryAccess(ObjectTypeMap* map, Property* expr) {
+ Literal* key = expr->key()->AsLiteral();
+ if (key == NULL || !key->IsPropertyName())
+ FAIL(expr, "invalid key used on stdlib member");
+ Handle<String> name = key->AsPropertyName();
+ VariableInfo* info = LibType(map, name);
+ if (info == NULL || info->type == NULL) FAIL(expr, "unknown stdlib function");
+ SetResult(expr, info->type);
+ property_info_ = info;
+}
+
+
+AsmTyper::VariableInfo* AsmTyper::LibType(ObjectTypeMap* map,
+ Handle<String> name) {
base::SmartArrayPointer<char> aname = name->ToCString();
- ObjectTypeMap::iterator i = map.find(std::string(aname.get()));
- if (i == map.end()) {
+ ObjectTypeMap::iterator i = map->find(std::string(aname.get()));
+ if (i == map->end()) {
return NULL;
}
return i->second;
@@ -1013,32 +1391,62 @@ Type* AsmTyper::LibType(ObjectTypeMap map, Handle<String> name) {
void AsmTyper::SetType(Variable* variable, Type* type) {
- ZoneHashMap::Entry* entry;
- if (in_function_) {
- entry = local_variable_type_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
- } else {
- entry = global_variable_type_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
- }
- entry->value = reinterpret_cast<void*>(type);
+ VariableInfo* info = GetVariableInfo(variable, true);
+ info->type = type;
}
Type* AsmTyper::GetType(Variable* variable) {
- i::ZoneHashMap::Entry* entry = NULL;
+ VariableInfo* info = GetVariableInfo(variable, false);
+ if (!info) return NULL;
+ return info->type;
+}
+
+
+AsmTyper::VariableInfo* AsmTyper::GetVariableInfo(Variable* variable,
+ bool setting) {
+ ZoneHashMap::Entry* entry;
+ ZoneHashMap* map;
if (in_function_) {
- entry = local_variable_type_.Lookup(variable, ComputePointerHash(variable));
- }
- if (entry == NULL) {
- entry =
- global_variable_type_.Lookup(variable, ComputePointerHash(variable));
+ map = &local_variable_type_;
+ } else {
+ map = &global_variable_type_;
}
- if (entry == NULL) {
- return NULL;
+ if (setting) {
+ entry = map->LookupOrInsert(variable, ComputePointerHash(variable),
+ ZoneAllocationPolicy(zone()));
} else {
- return reinterpret_cast<Type*>(entry->value);
+ entry = map->Lookup(variable, ComputePointerHash(variable));
+ if (!entry && in_function_) {
+ entry =
+ global_variable_type_.Lookup(variable, ComputePointerHash(variable));
+ }
}
+ if (!entry) return NULL;
+ if (!entry->value) {
+ if (!setting) return NULL;
+ entry->value = new (zone()) VariableInfo;
+ }
+ return reinterpret_cast<VariableInfo*>(entry->value);
+}
+
+
+void AsmTyper::SetVariableInfo(Variable* variable, const VariableInfo* info) {
+ VariableInfo* dest = GetVariableInfo(variable, true);
+ dest->type = info->type;
+ dest->is_check_function = info->is_check_function;
+ dest->is_constructor_function = info->is_constructor_function;
+ dest->standard_member = info->standard_member;
+}
+
+
+AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(
+ Variable* variable) {
+ VariableInfo* info = GetVariableInfo(variable, false);
+ if (!info) return kNone;
+ return info->standard_member;
}
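The consolidated lookup above keeps the old policy: function-local bindings
shadow module-level ones, and reads fall back outward. A standalone sketch
with std::unordered_map standing in for ZoneHashMap:

    #include <string>
    #include <unordered_map>

    struct VariableInfo { int type = 0; };
    using Scope = std::unordered_map<std::string, VariableInfo>;

    // Reads probe the function scope first, then the module scope; writes
    // (setting == true in the real code) touch only the innermost scope.
    VariableInfo* LookupInfo(Scope& locals, Scope& globals,
                             const std::string& name, bool in_function) {
      if (in_function) {
        auto it = locals.find(name);
        if (it != locals.end()) return &it->second;
      }
      auto it = globals.find(name);
      return it == globals.end() ? nullptr : &it->second;
    }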
@@ -1072,5 +1480,13 @@ void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
}
expected_type_ = save;
}
+
+
+void AsmTyper::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ RECURSE(Visit(expr->expression()));
}
-} // namespace v8::internal
+
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/typing-asm.h b/chromium/v8/src/typing-asm.h
index 74c28fb3cf1..b7f53831e62 100644
--- a/chromium/v8/src/typing-asm.h
+++ b/chromium/v8/src/typing-asm.h
@@ -6,7 +6,7 @@
#define V8_TYPING_ASM_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/effects.h"
#include "src/type-info.h"
#include "src/types.h"
@@ -15,34 +15,97 @@
namespace v8 {
namespace internal {
-class ZoneTypeCache;
+class TypeCache;
class AsmTyper : public AstVisitor {
public:
explicit AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root);
bool Validate();
+ void set_allow_simd(bool simd);
const char* error_message() { return error_message_; }
+ enum StandardMember {
+ kNone = 0,
+ kStdlib,
+ kInfinity,
+ kNaN,
+ kMathAcos,
+ kMathAsin,
+ kMathAtan,
+ kMathCos,
+ kMathSin,
+ kMathTan,
+ kMathExp,
+ kMathLog,
+ kMathCeil,
+ kMathFloor,
+ kMathSqrt,
+ kMathAbs,
+ kMathMin,
+ kMathMax,
+ kMathAtan2,
+ kMathPow,
+ kMathImul,
+ kMathFround,
+ kMathE,
+ kMathLN10,
+ kMathLN2,
+ kMathLOG2E,
+ kMathLOG10E,
+ kMathPI,
+ kMathSQRT1_2,
+ kMathSQRT2,
+ };
+
+ StandardMember VariableAsStandardMember(Variable* variable);
+
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
+ Zone* zone_;
+ Isolate* isolate_;
Script* script_;
FunctionLiteral* root_;
bool valid_;
+ bool allow_simd_;
+
+ struct VariableInfo : public ZoneObject {
+ Type* type;
+ bool is_check_function;
+ bool is_constructor_function;
+ StandardMember standard_member;
+
+ VariableInfo()
+ : type(NULL),
+ is_check_function(false),
+ is_constructor_function(false),
+ standard_member(kNone) {}
+ explicit VariableInfo(Type* t)
+ : type(t),
+ is_check_function(false),
+ is_constructor_function(false),
+ standard_member(kNone) {}
+ };
// Information for bi-directional typing with a cap on nesting depth.
Type* expected_type_;
Type* computed_type_;
+ VariableInfo* property_info_;
int intish_; // How many ops we've gone without a x|0.
Type* return_type_; // Return type of last function.
size_t array_size_; // Array size of last ArrayLiteral.
- typedef ZoneMap<std::string, Type*> ObjectTypeMap;
+ typedef ZoneMap<std::string, VariableInfo*> ObjectTypeMap;
ObjectTypeMap stdlib_types_;
ObjectTypeMap stdlib_heap_types_;
ObjectTypeMap stdlib_math_types_;
+#define V(NAME, Name, name, lane_count, lane_type) \
+ ObjectTypeMap stdlib_simd_##name##_types_; \
+ VariableInfo* stdlib_simd_##name##_constructor_type_;
+ SIMD128_TYPES(V)
+#undef V
// Map from Variable* to global/local variable Type*.
ZoneHashMap global_variable_type_;
@@ -51,7 +114,7 @@ class AsmTyper : public AstVisitor {
bool in_function_; // In module function?
bool building_function_tables_;
- ZoneTypeCache const& cache_;
+ TypeCache const& cache_;
static const int kErrorMessageLimit = 100;
char error_message_[kErrorMessageLimit];
@@ -60,22 +123,35 @@ class AsmTyper : public AstVisitor {
static const int kMaxUncombinedMultiplicativeSteps = 1;
void InitializeStdlib();
+ void InitializeStdlibSIMD();
void VisitDeclarations(ZoneList<Declaration*>* d) override;
void VisitStatements(ZoneList<Statement*>* s) override;
- void VisitExpressionAnnotation(Expression* e);
+ void VisitExpressionAnnotation(Expression* e, Variable* var, bool is_return);
void VisitFunctionAnnotation(FunctionLiteral* f);
void VisitAsmModule(FunctionLiteral* f);
- void VisitHeapAccess(Property* expr);
+ void VisitHeapAccess(Property* expr, bool assigning, Type* assignment_type);
+
+ Expression* GetReceiverOfPropertyAccess(Expression* expr, const char* name);
+ bool IsMathObject(Expression* expr);
+ bool IsSIMDObject(Expression* expr);
+ bool IsSIMDTypeObject(Expression* expr, const char* name);
+ bool IsStdlibObject(Expression* expr);
+
+ void VisitSIMDProperty(Property* expr);
int ElementShiftSize(Type* type);
+ Type* StorageType(Type* type);
void SetType(Variable* variable, Type* type);
Type* GetType(Variable* variable);
+ VariableInfo* GetVariableInfo(Variable* variable, bool setting);
+ void SetVariableInfo(Variable* variable, const VariableInfo* info);
- Type* LibType(ObjectTypeMap map, Handle<String> name);
+ VariableInfo* LibType(ObjectTypeMap* map, Handle<String> name);
+ void VisitLibraryAccess(ObjectTypeMap* map, Property* expr);
void SetResult(Expression* expr, Type* type);
void IntersectResult(Expression* expr, Type* type);
@@ -83,13 +159,21 @@ class AsmTyper : public AstVisitor {
void VisitWithExpectation(Expression* expr, Type* expected_type,
const char* msg);
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+ void VisitLiteral(Literal* expr, bool is_return);
+
+ void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
+ Type* right_expected, Type* result_type,
+ bool conversion);
+
+ Zone* zone() const { return zone_; }
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(AsmTyper);
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPING_ASM_H_
diff --git a/chromium/v8/src/typing-reset.cc b/chromium/v8/src/typing-reset.cc
index af7641b4855..c22f7a92763 100644
--- a/chromium/v8/src/typing-reset.cc
+++ b/chromium/v8/src/typing-reset.cc
@@ -6,21 +6,20 @@
#include "src/typing-reset.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
-TypingReseter::TypingReseter(Isolate* isolate, Zone* zone,
- FunctionLiteral* root)
- : AstExpressionVisitor(isolate, zone, root) {}
+TypingReseter::TypingReseter(Isolate* isolate, FunctionLiteral* root)
+ : AstExpressionVisitor(isolate, root) {}
void TypingReseter::VisitExpression(Expression* expression) {
expression->set_bounds(Bounds::Unbounded());
}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/typing-reset.h b/chromium/v8/src/typing-reset.h
index b809eb21618..3e1969d9ed8 100644
--- a/chromium/v8/src/typing-reset.h
+++ b/chromium/v8/src/typing-reset.h
@@ -5,7 +5,7 @@
#ifndef V8_TYPING_RESET_H_
#define V8_TYPING_RESET_H_
-#include "src/ast-expression-visitor.h"
+#include "src/ast/ast-expression-visitor.h"
namespace v8 {
namespace internal {
@@ -15,12 +15,12 @@ namespace internal {
class TypingReseter : public AstExpressionVisitor {
public:
- TypingReseter(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ TypingReseter(Isolate* isolate, FunctionLiteral* root);
protected:
void VisitExpression(Expression* expression) override;
};
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TYPING_RESET_H_
diff --git a/chromium/v8/src/utils.cc b/chromium/v8/src/utils.cc
index bbfdc74ad20..c46028f0594 100644
--- a/chromium/v8/src/utils.cc
+++ b/chromium/v8/src/utils.cc
@@ -77,6 +77,11 @@ char* SimpleStringBuilder::Finalize() {
}
+std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot slot) {
+ return os << "#" << slot.id_;
+}
+
+
size_t hash_value(BailoutId id) {
base::hash<int> h;
return h(id.id_);
@@ -363,7 +368,7 @@ static void MemMoveWrapper(void* dest, const void* src, size_t size) {
static MemMoveFunction memmove_function = &MemMoveWrapper;
// Defined in codegen-ia32.cc.
-MemMoveFunction CreateMemMoveFunction();
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate);
// Copy memory area to disjoint memory area.
void MemMove(void* dest, const void* src, size_t size) {
@@ -387,29 +392,38 @@ MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
&MemCopyUint16Uint8Wrapper;
// Defined in codegen-arm.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub);
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- MemCopyUint16Uint8Function stub);
+ Isolate* isolate, MemCopyUint16Uint8Function stub);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
// Defined in codegen-mips.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub);
#endif
-void init_memcopy_functions() {
+static bool g_memcopy_functions_initialized = false;
+
+
+void init_memcopy_functions(Isolate* isolate) {
+ if (g_memcopy_functions_initialized) return;
+ g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
- MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
- memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
+ memcopy_uint8_function =
+ CreateMemCopyUint8Function(isolate, &MemCopyUint8Wrapper);
memcopy_uint16_uint8_function =
- CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
+ CreateMemCopyUint16Uint8Function(isolate, &MemCopyUint16Uint8Wrapper);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
- memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
+ memcopy_uint8_function =
+ CreateMemCopyUint8Function(isolate, &MemCopyUint8Wrapper);
#endif
}
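The g_memcopy_functions_initialized flag above makes repeat calls a no-op but
is not thread-safe. If concurrent isolate startup mattered, std::call_once
would be the usual alternative; a sketch of that variant (not what the patch
does):

    #include <mutex>

    static std::once_flag g_memcopy_once;

    void InitMemcopyFunctionsOnce() {
      std::call_once(g_memcopy_once, [] {
        // install the generated memmove/memcopy stubs here
      });
    }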
diff --git a/chromium/v8/src/utils.h b/chromium/v8/src/utils.h
index ef35f969648..1ea2d56fbfd 100644
--- a/chromium/v8/src/utils.h
+++ b/chromium/v8/src/utils.h
@@ -26,6 +26,16 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
+// Returns the value (0 .. 15) of a hexadecimal character c.
+// If c is not a legal hexadecimal character, returns a value < 0.
+inline int HexValue(uc32 c) {
+ c -= '0';
+ if (static_cast<unsigned>(c) <= 9) return c;
+ c = (c | 0x20) - ('a' - '0'); // detect 0x11..0x16 and 0x31..0x36.
+ if (static_cast<unsigned>(c) <= 5) return c + 10;
+ return -1;
+}
+
inline int BoolToInt(bool b) { return b ? 1 : 0; }
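A quick sanity check of the HexValue bit trick, with a local copy of the
helper so the snippet compiles on its own (the assertions are illustrative,
not part of the patch):

    #include <cassert>

    inline int HexValue(unsigned c) {
      c -= '0';                      // '0'..'9' land in 0..9
      if (c <= 9) return static_cast<int>(c);
      c = (c | 0x20) - ('a' - '0');  // fold case; 'a'..'f' land in 0..5
      if (c <= 5) return static_cast<int>(c) + 10;
      return -1;
    }

    int main() {
      assert(HexValue('7') == 7);
      assert(HexValue('b') == 11);
      assert(HexValue('F') == 15);
      assert(HexValue('g') == -1);
      return 0;
    }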
@@ -366,9 +376,8 @@ inline uint32_t ComputePointerHash(void* ptr) {
// ----------------------------------------------------------------------------
// Generated memcpy/memmove
-// Initializes the codegen support that depends on CPU features. This is
-// called after CPU initialization.
-void init_memcopy_functions();
+// Initializes the codegen support that depends on CPU features.
+void init_memcopy_functions(Isolate* isolate);
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// Limit below which the extra overhead of the MemCopy function is likely
@@ -1042,24 +1051,31 @@ class TypeFeedbackId {
int id_;
};
+inline bool operator<(TypeFeedbackId lhs, TypeFeedbackId rhs) {
+ return lhs.ToInt() < rhs.ToInt();
+}
+inline bool operator>(TypeFeedbackId lhs, TypeFeedbackId rhs) {
+ return lhs.ToInt() > rhs.ToInt();
+}
+
-template <int dummy_parameter>
-class VectorSlot {
+class FeedbackVectorSlot {
public:
- explicit VectorSlot(int id) : id_(id) {}
+ FeedbackVectorSlot() : id_(kInvalidSlot) {}
+ explicit FeedbackVectorSlot(int id) : id_(id) {}
int ToInt() const { return id_; }
- static VectorSlot Invalid() { return VectorSlot(kInvalidSlot); }
+ static FeedbackVectorSlot Invalid() { return FeedbackVectorSlot(); }
bool IsInvalid() const { return id_ == kInvalidSlot; }
- VectorSlot next() const {
- DCHECK_NE(kInvalidSlot, id_);
- return VectorSlot(id_ + 1);
+ bool operator==(FeedbackVectorSlot that) const {
+ return this->id_ == that.id_;
}
+ bool operator!=(FeedbackVectorSlot that) const { return !(*this == that); }
- bool operator==(VectorSlot that) const { return this->id_ == that.id_; }
- bool operator!=(VectorSlot that) const { return !(*this == that); }
+ friend size_t hash_value(FeedbackVectorSlot slot) { return slot.ToInt(); }
+ friend std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot);
private:
static const int kInvalidSlot = -1;
@@ -1068,23 +1084,14 @@ class VectorSlot {
};
-template <int dummy_parameter>
-size_t hash_value(VectorSlot<dummy_parameter> slot) {
- return slot.ToInt();
-}
-
-
-typedef VectorSlot<0> FeedbackVectorSlot;
-typedef VectorSlot<1> FeedbackVectorICSlot;
-
-
class BailoutId {
public:
explicit BailoutId(int id) : id_(id) { }
int ToInt() const { return id_; }
static BailoutId None() { return BailoutId(kNoneId); }
- static BailoutId Prologue() { return BailoutId(kPrologueId); }
+ static BailoutId ScriptContext() { return BailoutId(kScriptContextId); }
+ static BailoutId FunctionContext() { return BailoutId(kFunctionContextId); }
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
@@ -1100,19 +1107,20 @@ class BailoutId {
static const int kNoneId = -1;
// Using 0 could disguise errors.
- static const int kPrologueId = 1;
- static const int kFunctionEntryId = 2;
+ static const int kScriptContextId = 1;
+ static const int kFunctionContextId = 2;
+ static const int kFunctionEntryId = 3;
// This AST id identifies the point after the declarations have been visited.
// We need it to capture the environment effects of declarations that emit
// code (function declarations).
- static const int kDeclarationsId = 3;
+ static const int kDeclarationsId = 4;
// Every FunctionState starts with this id.
- static const int kFirstUsableId = 4;
+ static const int kFirstUsableId = 5;
// Every compiled stub starts with this id.
- static const int kStubEntryId = 5;
+ static const int kStubEntryId = 6;
int id_;
};
@@ -1742,6 +1750,42 @@ static inline void WriteDoubleValue(void* p, double value) {
#endif // V8_TARGET_ARCH_MIPS
}
+
+static inline uint16_t ReadUnalignedUInt16(const void* p) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+ return *reinterpret_cast<const uint16_t*>(p);
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ // Prevent compiler from using load-half (mips lh) on (possibly)
+ // non-16-bit aligned address.
+ union conversion {
+ uint16_t h;
+ uint8_t b[2];
+ } c;
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(p);
+ c.b[0] = *ptr;
+ c.b[1] = *(ptr + 1);
+ return c.h;
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
+
+static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+ *(reinterpret_cast<uint16_t*>(p)) = value;
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ // Prevent compiler from using store-half (mips sh) on (possibly)
+ // non-16-bit aligned address.
+ union conversion {
+ uint16_t h;
+ uint8_t b[2];
+ } c;
+ c.h = value;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
+ *ptr = c.b[0];
+ *(ptr + 1) = c.b[1];
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
} // namespace internal
} // namespace v8
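The union trick above keeps the compiler from emitting mips lh/sh on possibly
unaligned addresses. The other common portable spelling is memcpy, which
compilers lower to the same byte moves; an alternative sketch, not the
approach this patch takes:

    #include <cstdint>
    #include <cstring>

    inline uint16_t ReadUnalignedUInt16Portable(const void* p) {
      uint16_t v;
      std::memcpy(&v, p, sizeof(v));  // well-defined for any alignment
      return v;
    }

    inline void WriteUnalignedUInt16Portable(void* p, uint16_t value) {
      std::memcpy(p, &value, sizeof(value));
    }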
diff --git a/chromium/v8/src/v8.cc b/chromium/v8/src/v8.cc
index 760a9b564d0..31b48780e4f 100644
--- a/chromium/v8/src/v8.cc
+++ b/chromium/v8/src/v8.cc
@@ -8,13 +8,12 @@
#include "src/base/once.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
+#include "src/crankshaft/lithium-allocator.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames.h"
-#include "src/hydrogen.h"
#include "src/isolate.h"
-#include "src/lithium-allocator.h"
#include "src/objects.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/sampler.h"
@@ -80,13 +79,6 @@ void V8::InitializeOncePerProcessImpl() {
Sampler::SetUp();
CpuFeatures::Probe(false);
- init_memcopy_functions();
- // The custom exp implementation needs 16KB of lookup data; initialize it
- // on demand.
- init_fast_sqrt_function();
-#ifdef _WIN64
- init_modulo_function();
-#endif
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
diff --git a/chromium/v8/src/v8.h b/chromium/v8/src/v8.h
index f5b3b84735c..6016ef1419f 100644
--- a/chromium/v8/src/v8.h
+++ b/chromium/v8/src/v8.h
@@ -41,6 +41,7 @@ class V8 : public AllStatic {
static v8::Platform* platform_;
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_V8_H_
diff --git a/chromium/v8/src/v8memory.h b/chromium/v8/src/v8memory.h
index 615ec4fe87f..b1ae939f845 100644
--- a/chromium/v8/src/v8memory.h
+++ b/chromium/v8/src/v8memory.h
@@ -66,6 +66,7 @@ class Memory {
}
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_MEMORY_H_
diff --git a/chromium/v8/src/v8threads.h b/chromium/v8/src/v8threads.h
index 3e189d5cb49..db0ed070fae 100644
--- a/chromium/v8/src/v8threads.h
+++ b/chromium/v8/src/v8threads.h
@@ -118,6 +118,7 @@ class ThreadManager {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_V8THREADS_H_
diff --git a/chromium/v8/src/vector.h b/chromium/v8/src/vector.h
index 4f3128b9185..e4637c91c98 100644
--- a/chromium/v8/src/vector.h
+++ b/chromium/v8/src/vector.h
@@ -202,6 +202,7 @@ inline Vector<char> MutableCStrVector(char* data, int max) {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VECTOR_H_
diff --git a/chromium/v8/src/version.h b/chromium/v8/src/version.h
index 2596beeb8a4..3395d7f4fe5 100644
--- a/chromium/v8/src/version.h
+++ b/chromium/v8/src/version.h
@@ -47,6 +47,7 @@ class Version {
bool candidate, const char* soname);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VERSION_H_
diff --git a/chromium/v8/src/vm-state-inl.h b/chromium/v8/src/vm-state-inl.h
index ac3941ea84b..d60548d27d1 100644
--- a/chromium/v8/src/vm-state-inl.h
+++ b/chromium/v8/src/vm-state-inl.h
@@ -78,6 +78,7 @@ Address ExternalCallbackScope::scope_address() {
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VM_STATE_INL_H_
diff --git a/chromium/v8/src/vm-state.h b/chromium/v8/src/vm-state.h
index 9838b8732b5..7e723a5282b 100644
--- a/chromium/v8/src/vm-state.h
+++ b/chromium/v8/src/vm-state.h
@@ -33,7 +33,14 @@ class ExternalCallbackScope BASE_EMBEDDED {
inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
Address callback() { return callback_; }
- Address* callback_address() { return &callback_; }
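+  // On ABIs that use function descriptors (e.g. AIX, big-endian PPC64), a C
+  // function pointer points at a descriptor rather than at code, so callers
+  // (e.g. the sampling profiler) need the entrypoint slot out of the
+  // descriptor instead of the address of the raw pointer.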
+ Address* callback_entrypoint_address() {
+ if (callback_ == nullptr) return nullptr;
+#if USES_FUNCTION_DESCRIPTORS
+ return FUNCTION_ENTRYPOINT_ADDRESS(callback_);
+#else
+ return &callback_;
+#endif
+ }
ExternalCallbackScope* previous() { return previous_scope_; }
inline Address scope_address();
@@ -46,7 +53,8 @@ class ExternalCallbackScope BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_VM_STATE_H_
diff --git a/chromium/v8/src/wasm/OWNERS b/chromium/v8/src/wasm/OWNERS
new file mode 100644
index 00000000000..a9d24ade28a
--- /dev/null
+++ b/chromium/v8/src/wasm/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/chromium/v8/src/wasm/asm-wasm-builder.cc b/chromium/v8/src/wasm/asm-wasm-builder.cc
new file mode 100644
index 00000000000..30f84642f82
--- /dev/null
+++ b/chromium/v8/src/wasm/asm-wasm-builder.cc
@@ -0,0 +1,1045 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/codegen.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
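+// RECURSE guards every recursive AST visit: if visiting a subexpression
+// flags a stack overflow, the traversal unwinds immediately instead of
+// emitting further code from an inconsistent state.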
+#define RECURSE(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ call; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+
+class AsmWasmBuilderImpl : public AstVisitor {
+ public:
+ AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal)
+ : local_variables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ functions_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ global_variables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ in_function_(false),
+ is_set_op_(false),
+ marking_exported(false),
+ builder_(new (zone) WasmModuleBuilder(zone)),
+ current_function_builder_(nullptr),
+ literal_(literal),
+ isolate_(isolate),
+ zone_(zone),
+ cache_(TypeCache::Get()),
+ breakable_blocks_(zone),
+ block_size_(0),
+ init_function_index(0) {
+ InitializeAstVisitor(isolate);
+ }
+
+ void InitializeInitFunction() {
+ unsigned char init[] = "__init__";
+ init_function_index = builder_->AddFunction();
+ current_function_builder_ = builder_->FunctionAt(init_function_index);
+ current_function_builder_->SetName(init, 8);
+ current_function_builder_->ReturnType(kAstStmt);
+ current_function_builder_->Exported(1);
+ current_function_builder_ = nullptr;
+ }
+
+ void Compile() {
+ InitializeInitFunction();
+ RECURSE(VisitFunctionLiteral(literal_));
+ }
+
+ void VisitVariableDeclaration(VariableDeclaration* decl) {}
+
+ void VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ DCHECK(!in_function_);
+ DCHECK(current_function_builder_ == nullptr);
+ uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
+ current_function_builder_ = builder_->FunctionAt(index);
+ in_function_ = true;
+ RECURSE(Visit(decl->fun()));
+ in_function_ = false;
+ current_function_builder_ = nullptr;
+ local_variables_.Clear();
+ }
+
+ void VisitImportDeclaration(ImportDeclaration* decl) {}
+
+ void VisitExportDeclaration(ExportDeclaration* decl) {}
+
+ void VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
+ }
+ }
+
+ void VisitBlock(Block* stmt) {
+ if (stmt->statements()->length() == 1) {
+ ExpressionStatement* expr =
+ stmt->statements()->at(0)->AsExpressionStatement();
+ if (expr != nullptr) {
+ if (expr->expression()->IsAssignment()) {
+ RECURSE(VisitExpressionStatement(expr));
+ return;
+ }
+ }
+ }
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
+ static_cast<byte>(stmt->statements()->length()));
+ RECURSE(VisitStatements(stmt->statements()));
+ DCHECK(block_size_ >= 0);
+ }
+
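+  // RAII helper: the constructor emits the block opcode with a placeholder
+  // statement count, and the destructor patches in the final block_size_
+  // once the enclosed statements have been visited.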
+ class BlockVisitor {
+ private:
+ int prev_block_size_;
+ uint32_t index_;
+ AsmWasmBuilderImpl* builder_;
+
+ public:
+ BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
+ WasmOpcode opcode, bool is_loop, int initial_block_size)
+ : builder_(builder) {
+ builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
+ builder_->current_function_builder_->Emit(opcode);
+ index_ = builder_->current_function_builder_->EmitEditableImmediate(0);
+ prev_block_size_ = builder_->block_size_;
+ builder_->block_size_ = initial_block_size;
+ }
+ ~BlockVisitor() {
+ builder_->current_function_builder_->EditImmediate(index_,
+ builder_->block_size_);
+ builder_->block_size_ = prev_block_size_;
+ builder_->breakable_blocks_.pop_back();
+ }
+ };
+
+ void VisitExpressionStatement(ExpressionStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+ }
+
+ void VisitEmptyStatement(EmptyStatement* stmt) {}
+
+ void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
+
+ void VisitIfStatement(IfStatement* stmt) {
+ DCHECK(in_function_);
+ if (stmt->HasElseStatement()) {
+ current_function_builder_->Emit(kExprIfElse);
+ } else {
+ current_function_builder_->Emit(kExprIf);
+ }
+ RECURSE(Visit(stmt->condition()));
+ if (stmt->HasThenStatement()) {
+ RECURSE(Visit(stmt->then_statement()));
+ } else {
+ current_function_builder_->Emit(kExprNop);
+ }
+ if (stmt->HasElseStatement()) {
+ RECURSE(Visit(stmt->else_statement()));
+ }
+ }
+
+ void VisitContinueStatement(ContinueStatement* stmt) {
+ DCHECK(in_function_);
+ DCHECK(stmt->target() != NULL);
+ int i = static_cast<int>(breakable_blocks_.size()) - 1;
+ int block_distance = 0;
+ for (; i >= 0; i--) {
+ auto elem = breakable_blocks_.at(i);
+ if (elem.first == stmt->target()) {
+ DCHECK(elem.second);
+ break;
+ } else if (elem.second) {
+ block_distance += 2;
+ } else {
+ block_distance += 1;
+ }
+ }
+ DCHECK(i >= 0);
+ current_function_builder_->EmitWithU8(kExprBr, block_distance);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitBreakStatement(BreakStatement* stmt) {
+ DCHECK(in_function_);
+ DCHECK(stmt->target() != NULL);
+ int i = static_cast<int>(breakable_blocks_.size()) - 1;
+ int block_distance = 0;
+ for (; i >= 0; i--) {
+ auto elem = breakable_blocks_.at(i);
+ if (elem.first == stmt->target()) {
+ if (elem.second) {
+ block_distance++;
+ }
+ break;
+ } else if (elem.second) {
+ block_distance += 2;
+ } else {
+ block_distance += 1;
+ }
+ }
+ DCHECK(i >= 0);
+ current_function_builder_->EmitWithU8(kExprBr, block_distance);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitReturnStatement(ReturnStatement* stmt) {
+ if (in_function_) {
+ current_function_builder_->Emit(kExprReturn);
+ } else {
+ marking_exported = true;
+ }
+ RECURSE(Visit(stmt->expression()));
+ if (!in_function_) {
+ marking_exported = false;
+ }
+ }
+
+ void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
+
+ void SetLocalTo(uint16_t index, int value) {
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index, true);
+ byte code[] = {WASM_I32(value)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ block_size_++;
+ }
+
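+  // Lowering sketch: each non-default case becomes, roughly,
+  //   if (tag == label || fall_through) { fall_through = 1; <case body> }
+  // so a body without a break sets fall_through and is carried into the next
+  // case's test, matching JavaScript switch fall-through semantics.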
+ void CompileCase(CaseClause* clause, uint16_t fall_through,
+ VariableProxy* tag) {
+ Literal* label = clause->label()->AsLiteral();
+ DCHECK(label != nullptr);
+ block_size_++;
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->Emit(kExprI32Ior);
+ current_function_builder_->Emit(kExprI32Eq);
+ VisitVariableProxy(tag);
+ VisitLiteral(label);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(fall_through, true);
+ BlockVisitor visitor(this, nullptr, kExprBlock, false, 0);
+ SetLocalTo(fall_through, 1);
+ ZoneList<Statement*>* stmts = clause->statements();
+ block_size_ += stmts->length();
+ RECURSE(VisitStatements(stmts));
+ }
+
+ void VisitSwitchStatement(SwitchStatement* stmt) {
+ VariableProxy* tag = stmt->tag()->AsVariableProxy();
+ DCHECK(tag != NULL);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
+ 0);
+ uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
+ SetLocalTo(fall_through, 0);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default()) {
+ CompileCase(clause, fall_through, tag);
+ } else {
+ ZoneList<Statement*>* stmts = clause->statements();
+ block_size_ += stmts->length();
+ RECURSE(VisitStatements(stmts));
+ }
+ }
+ }
+
+ void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
+
+ void VisitDoWhileStatement(DoWhileStatement* stmt) {
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 2);
+ RECURSE(Visit(stmt->body()));
+ current_function_builder_->Emit(kExprIf);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitWhileStatement(WhileStatement* stmt) {
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 1);
+ current_function_builder_->Emit(kExprIf);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ RECURSE(Visit(stmt->body()));
+ }
+
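+  // Lowers to, roughly:
+  //   <init>; loop { if (!cond) br 1; <body>; <next>; br 0 }
+  // where br 0 starts the next iteration and br 1 exits the loop.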
+ void VisitForStatement(ForStatement* stmt) {
+ DCHECK(in_function_);
+ if (stmt->init() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->init()));
+ }
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 0);
+ if (stmt->cond() != nullptr) {
+ block_size_++;
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->Emit(kExprBoolNot);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 1);
+ current_function_builder_->Emit(kExprNop);
+ }
+ if (stmt->body() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->body()));
+ }
+ if (stmt->next() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->next()));
+ }
+ block_size_++;
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
+
+ void VisitForOfStatement(ForOfStatement* stmt) { UNREACHABLE(); }
+
+ void VisitTryCatchStatement(TryCatchStatement* stmt) { UNREACHABLE(); }
+
+ void VisitTryFinallyStatement(TryFinallyStatement* stmt) { UNREACHABLE(); }
+
+ void VisitDebuggerStatement(DebuggerStatement* stmt) { UNREACHABLE(); }
+
+ void VisitFunctionLiteral(FunctionLiteral* expr) {
+ Scope* scope = expr->scope();
+ if (in_function_) {
+ if (expr->bounds().lower->IsFunction()) {
+ Type::FunctionType* func_type = expr->bounds().lower->AsFunction();
+ LocalType return_type = TypeFrom(func_type->Result());
+ current_function_builder_->ReturnType(return_type);
+ for (int i = 0; i < expr->parameter_count(); i++) {
+ LocalType type = TypeFrom(func_type->Parameter(i));
+ DCHECK(type != kAstStmt);
+ LookupOrInsertLocal(scope->parameter(i), type);
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
+ RECURSE(VisitDeclarations(scope->declarations()));
+ RECURSE(VisitStatements(expr->body()));
+ }
+
+ void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitConditional(Conditional* expr) {
+ DCHECK(in_function_);
+ current_function_builder_->Emit(kExprIfElse);
+ RECURSE(Visit(expr->condition()));
+ RECURSE(Visit(expr->then_expression()));
+ RECURSE(Visit(expr->else_expression()));
+ }
+
+ void VisitVariableProxy(VariableProxy* expr) {
+ if (in_function_) {
+ Variable* var = expr->var();
+ if (var->is_function()) {
+ DCHECK(!is_set_op_);
+ std::vector<uint8_t> index =
+ UnsignedLEB128From(LookupOrInsertFunction(var));
+ current_function_builder_->EmitCode(
+ &index[0], static_cast<uint32_t>(index.size()));
+ } else {
+ if (is_set_op_) {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprStoreGlobal);
+ } else {
+ current_function_builder_->Emit(kExprSetLocal);
+ }
+ is_set_op_ = false;
+ } else {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprLoadGlobal);
+ } else {
+ current_function_builder_->Emit(kExprGetLocal);
+ }
+ }
+ LocalType var_type = TypeOf(expr);
+ DCHECK(var_type != kAstStmt);
+ if (var->IsContextSlot()) {
+ AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+ } else {
+ AddLeb128(LookupOrInsertLocal(var, var_type), true);
+ }
+ }
+ }
+ }
+
+ void VisitLiteral(Literal* expr) {
+ if (in_function_) {
+ if (expr->raw_value()->IsNumber()) {
+ LocalType type = TypeOf(expr);
+ switch (type) {
+ case kAstI32: {
+ int val = static_cast<int>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ case kAstF32: {
+ float val = static_cast<float>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_F32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ case kAstF64: {
+ double val = static_cast<double>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_F64(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ void VisitRegExpLiteral(RegExpLiteral* expr) { UNREACHABLE(); }
+
+ void VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ DCHECK(marking_exported);
+ VariableProxy* expr = prop->value()->AsVariableProxy();
+ DCHECK(expr != nullptr);
+ Variable* var = expr->var();
+ Literal* name = prop->key()->AsLiteral();
+ DCHECK(name != nullptr);
+ DCHECK(name->IsPropertyName());
+ const AstRawString* raw_name = name->AsRawPropertyName();
+ if (var->is_function()) {
+ uint16_t index = LookupOrInsertFunction(var);
+ builder_->FunctionAt(index)->Exported(1);
+ builder_->FunctionAt(index)
+ ->SetName(raw_name->raw_data(), raw_name->length());
+ }
+ }
+ }
+
+ void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
+
+ void LoadInitFunction() {
+ current_function_builder_ = builder_->FunctionAt(init_function_index);
+ in_function_ = true;
+ }
+
+ void UnLoadInitFunction() {
+ in_function_ = false;
+ current_function_builder_ = nullptr;
+ }
+
+ void VisitAssignment(Assignment* expr) {
+ bool in_init = false;
+ if (!in_function_) {
+ // TODO(bradnelson): Get rid of this.
+ if (TypeOf(expr->value()) == kAstStmt) {
+ return;
+ }
+ in_init = true;
+ LoadInitFunction();
+ }
+ BinaryOperation* value_op = expr->value()->AsBinaryOperation();
+ if (value_op != nullptr && MatchBinaryOperation(value_op) == kAsIs) {
+ VariableProxy* target_var = expr->target()->AsVariableProxy();
+ VariableProxy* effective_value_var = GetLeft(value_op)->AsVariableProxy();
+ if (target_var != nullptr && effective_value_var != nullptr &&
+ target_var->var() == effective_value_var->var()) {
+ block_size_--;
+ return;
+ }
+ }
+ is_set_op_ = true;
+ RECURSE(Visit(expr->target()));
+ DCHECK(!is_set_op_);
+ RECURSE(Visit(expr->value()));
+ if (in_init) {
+ UnLoadInitFunction();
+ }
+ }
+
+ void VisitYield(Yield* expr) { UNREACHABLE(); }
+
+ void VisitThrow(Throw* expr) { UNREACHABLE(); }
+
+ void VisitProperty(Property* expr) {
+ Expression* obj = expr->obj();
+ DCHECK(obj->bounds().lower == obj->bounds().upper);
+ TypeImpl<ZoneTypeConfig>* type = obj->bounds().lower;
+ MachineType mtype;
+ int size;
+ if (type->Is(cache_.kUint8Array)) {
+ mtype = MachineType::Uint8();
+ size = 1;
+ } else if (type->Is(cache_.kInt8Array)) {
+ mtype = MachineType::Int8();
+ size = 1;
+ } else if (type->Is(cache_.kUint16Array)) {
+ mtype = MachineType::Uint16();
+ size = 2;
+ } else if (type->Is(cache_.kInt16Array)) {
+ mtype = MachineType::Int16();
+ size = 2;
+ } else if (type->Is(cache_.kUint32Array)) {
+ mtype = MachineType::Uint32();
+ size = 4;
+ } else if (type->Is(cache_.kInt32Array)) {
+ mtype = MachineType::Int32();
+ size = 4;
+ } else if (type->Is(cache_.kFloat32Array)) {
+ mtype = MachineType::Float32();
+ size = 4;
+ } else if (type->Is(cache_.kFloat64Array)) {
+ mtype = MachineType::Float64();
+ size = 8;
+ } else {
+ UNREACHABLE();
+ }
+ current_function_builder_->EmitWithU8(
+ WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
+ WasmOpcodes::LoadStoreAccessOf(false));
+ is_set_op_ = false;
+ Literal* value = expr->key()->AsLiteral();
+ if (value) {
+ DCHECK(value->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(value));
+ int val = static_cast<int>(value->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val * size)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return;
+ }
+ BinaryOperation* binop = expr->key()->AsBinaryOperation();
+ if (binop) {
+ DCHECK(Token::SAR == binop->op());
+ DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+ DCHECK(size ==
+ 1 << static_cast<int>(
+ binop->right()->AsLiteral()->raw_value()->AsNumber()));
+ // Mask bottom bits to match asm.js behavior.
+ current_function_builder_->Emit(kExprI32And);
+ byte code[] = {WASM_I8(~(size - 1))};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ RECURSE(Visit(binop->left()));
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void VisitCall(Call* expr) {
+ Call::CallType call_type = expr->GetCallType(isolate_);
+ switch (call_type) {
+ case Call::OTHER_CALL: {
+ DCHECK(in_function_);
+ current_function_builder_->Emit(kExprCallFunction);
+ RECURSE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
+
+ void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
+
+ void VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::NOT: {
+ DCHECK(TypeOf(expr->expression()) == kAstI32);
+ current_function_builder_->Emit(kExprBoolNot);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->expression()));
+ }
+
+ void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
+
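+  // The Match* helpers below recognize asm.js coercion idioms: (x | 0) and
+  // (x >>> 0) mark int32 results, a double BIT_XOR with -1 (how the parser
+  // represents ~~x) truncates to int32, and (x * 1.0) coerces to double, so
+  // each idiom can be folded into at most one wasm conversion.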
+ bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
+ int32_t val) {
+ DCHECK(expr->right() != nullptr);
+ if (expr->op() == op && expr->right()->IsLiteral() &&
+ TypeOf(expr) == kAstI32) {
+ Literal* right = expr->right()->AsLiteral();
+ DCHECK(right->raw_value()->IsNumber());
+ if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool MatchDoubleBinaryOperation(BinaryOperation* expr, Token::Value op,
+ double val) {
+ DCHECK(expr->right() != nullptr);
+ if (expr->op() == op && expr->right()->IsLiteral() &&
+ TypeOf(expr) == kAstF64) {
+ Literal* right = expr->right()->AsLiteral();
+ DCHECK(right->raw_value()->IsNumber());
+ if (right->raw_value()->AsNumber() == val) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
+
+ ConvertOperation MatchOr(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0)) {
+ return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchShr(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
+ // TODO(titzer): this probably needs to be kToUint
+ return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchXor(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
+ DCHECK(TypeOf(expr->left()) == kAstI32);
+ DCHECK(TypeOf(expr->right()) == kAstI32);
+ BinaryOperation* op = expr->left()->AsBinaryOperation();
+ if (op != nullptr) {
+ if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
+ DCHECK(TypeOf(op->right()) == kAstI32);
+ if (TypeOf(op->left()) != kAstI32) {
+ return kToInt;
+ } else {
+ return kAsIs;
+ }
+ }
+ }
+ }
+ return kNone;
+ }
+
+ ConvertOperation MatchMul(BinaryOperation* expr) {
+ if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
+ DCHECK(TypeOf(expr->right()) == kAstF64);
+ if (TypeOf(expr->left()) != kAstF64) {
+ return kToDouble;
+ } else {
+ return kAsIs;
+ }
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchBinaryOperation(BinaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::BIT_OR:
+ return MatchOr(expr);
+ case Token::SHR:
+ return MatchShr(expr);
+ case Token::BIT_XOR:
+ return MatchXor(expr);
+ case Token::MUL:
+ return MatchMul(expr);
+ default:
+ return kNone;
+ }
+ }
+
+// Work around Mul + Div being defined in PPC assembler.
+#ifdef Mul
+#undef Mul
+#endif
+#ifdef Div
+#undef Div
+#endif
+
+#define NON_SIGNED_BINOP(op) \
+ static WasmOpcode opcodes[] = { \
+ kExprI32##op, \
+ kExprI32##op, \
+ kExprF32##op, \
+ kExprF64##op \
+ }
+
+#define SIGNED_BINOP(op) \
+ static WasmOpcode opcodes[] = { \
+ kExprI32##op##S, \
+ kExprI32##op##U, \
+ kExprF32##op, \
+ kExprF64##op \
+ }
+
+#define NON_SIGNED_INT_BINOP(op) \
+ static WasmOpcode opcodes[] = { kExprI32##op, kExprI32##op }
+
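+// BINOP_CASE picks the opcode by operand TypeIndex; e.g.
+// BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true) selects kExprI32Add,
+// kExprI32Add, kExprF32Add or kExprF64Add for int32, uint32, float32 and
+// float64 operands respectively.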
+#define BINOP_CASE(token, op, V, ignore_sign) \
+ case token: { \
+ V(op); \
+ int type = TypeIndexOf(expr->left(), expr->right(), ignore_sign); \
+ current_function_builder_->Emit(opcodes[type]); \
+ break; \
+ }
+
+ Expression* GetLeft(BinaryOperation* expr) {
+ if (expr->op() == Token::BIT_XOR) {
+ return expr->left()->AsBinaryOperation()->left();
+ } else {
+ return expr->left();
+ }
+ }
+
+ void VisitBinaryOperation(BinaryOperation* expr) {
+ ConvertOperation convertOperation = MatchBinaryOperation(expr);
+ if (convertOperation == kToDouble) {
+ TypeIndex type = TypeIndexOf(expr->left());
+ if (type == kInt32 || type == kFixnum) {
+ current_function_builder_->Emit(kExprF64SConvertI32);
+ } else if (type == kUint32) {
+ current_function_builder_->Emit(kExprF64UConvertI32);
+ } else if (type == kFloat32) {
+ current_function_builder_->Emit(kExprF64ConvertF32);
+ } else {
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ } else if (convertOperation == kToInt) {
+ TypeIndex type = TypeIndexOf(GetLeft(expr));
+ if (type == kFloat32) {
+ current_function_builder_->Emit(kExprI32SConvertF32);
+ } else if (type == kFloat64) {
+ current_function_builder_->Emit(kExprI32SConvertF64);
+ } else {
+ UNREACHABLE();
+ }
+ RECURSE(Visit(GetLeft(expr)));
+ } else if (convertOperation == kAsIs) {
+ RECURSE(Visit(GetLeft(expr)));
+ } else {
+ switch (expr->op()) {
+ BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
+ BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SHR, ShrU, NON_SIGNED_INT_BINOP, true);
+ case Token::MOD: {
+ TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
+ if (type == kInt32) {
+ current_function_builder_->Emit(kExprI32RemS);
+ } else if (type == kUint32) {
+ current_function_builder_->Emit(kExprI32RemU);
+ } else if (type == kFloat64) {
+ ModF64(expr);
+ return;
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ }
+ }
+
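+  // Emits x - y * F64SConvertI32(I32SConvertF64(x / y)), i.e.
+  // x - y * trunc(x / y) == fmod(x, y), with trunc computed via an int32
+  // round trip (only exact while the quotient fits in int32).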
+ void ModF64(BinaryOperation* expr) {
+ current_function_builder_->EmitWithU8(kExprBlock, 3);
+ uint16_t index_0 = current_function_builder_->AddLocal(kAstF64);
+ uint16_t index_1 = current_function_builder_->AddLocal(kAstF64);
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index_0, true);
+ RECURSE(Visit(expr->left()));
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index_1, true);
+ RECURSE(Visit(expr->right()));
+ current_function_builder_->Emit(kExprF64Sub);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_0, true);
+ current_function_builder_->Emit(kExprF64Mul);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_1, true);
+    // TODO: use trunc here instead of the two casts below.
+ current_function_builder_->Emit(kExprF64SConvertI32);
+ current_function_builder_->Emit(kExprI32SConvertF64);
+ current_function_builder_->Emit(kExprF64Div);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_0, true);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_1, true);
+ }
+
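+  // LEB128 example: UnsignedLEB128From(300) yields {0xAC, 0x02} -- the low
+  // seven bits 0101100 with the continuation bit set (0xAC), then the
+  // remaining bits (0x02).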
+ void AddLeb128(uint32_t index, bool is_local) {
+ std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
+ if (is_local) {
+ uint32_t pos_of_index[1] = {0};
+ current_function_builder_->EmitCode(
+ &index_vec[0], static_cast<uint32_t>(index_vec.size()), pos_of_index,
+ 1);
+ } else {
+ current_function_builder_->EmitCode(
+ &index_vec[0], static_cast<uint32_t>(index_vec.size()));
+ }
+ }
+
+ void VisitCompareOperation(CompareOperation* expr) {
+ switch (expr->op()) {
+ BINOP_CASE(Token::EQ, Eq, NON_SIGNED_BINOP, false);
+ BINOP_CASE(Token::LT, Lt, SIGNED_BINOP, false);
+ BINOP_CASE(Token::LTE, Le, SIGNED_BINOP, false);
+ BINOP_CASE(Token::GT, Gt, SIGNED_BINOP, false);
+ BINOP_CASE(Token::GTE, Ge, SIGNED_BINOP, false);
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ }
+
+#undef BINOP_CASE
+#undef NON_SIGNED_INT_BINOP
+#undef SIGNED_BINOP
+#undef NON_SIGNED_BINOP
+
+ enum TypeIndex {
+ kInt32 = 0,
+ kUint32 = 1,
+ kFloat32 = 2,
+ kFloat64 = 3,
+ kFixnum = 4
+ };
+
+ TypeIndex TypeIndexOf(Expression* left, Expression* right, bool ignore_sign) {
+ TypeIndex left_index = TypeIndexOf(left);
+ TypeIndex right_index = TypeIndexOf(right);
+ if (left_index == kFixnum) {
+ left_index = right_index;
+ }
+ if (right_index == kFixnum) {
+ right_index = left_index;
+ }
+ if (left_index == kFixnum && right_index == kFixnum) {
+ left_index = kInt32;
+ right_index = kInt32;
+ }
+ DCHECK((left_index == right_index) ||
+ (ignore_sign && (left_index <= 1) && (right_index <= 1)));
+ return left_index;
+ }
+
+ TypeIndex TypeIndexOf(Expression* expr) {
+ DCHECK(expr->bounds().lower == expr->bounds().upper);
+ TypeImpl<ZoneTypeConfig>* type = expr->bounds().lower;
+ if (type->Is(cache_.kAsmFixnum)) {
+ return kFixnum;
+ } else if (type->Is(cache_.kAsmSigned)) {
+ return kInt32;
+ } else if (type->Is(cache_.kAsmUnsigned)) {
+ return kUint32;
+ } else if (type->Is(cache_.kAsmInt)) {
+ return kInt32;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ return kFloat32;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ return kFloat64;
+ } else {
+ UNREACHABLE();
+ return kInt32;
+ }
+ }
+
+ void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
+
+ void VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+ }
+
+ void VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
+
+ void VisitSpread(Spread* expr) { UNREACHABLE(); }
+
+ void VisitSuperPropertyReference(SuperPropertyReference* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitSuperCallReference(SuperCallReference* expr) { UNREACHABLE(); }
+
+ void VisitSloppyBlockFunctionStatement(SloppyBlockFunctionStatement* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
+
+ void VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ UNREACHABLE();
+ }
+
+ struct IndexContainer : public ZoneObject {
+ uint16_t index;
+ };
+
+ uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
+ DCHECK(current_function_builder_ != nullptr);
+ ZoneHashMap::Entry* entry =
+ local_variables_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index;
+ if (v->IsParameter()) {
+ index = current_function_builder_->AddParam(type);
+ } else {
+ index = current_function_builder_->AddLocal(type);
+ }
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ uint16_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+ ZoneHashMap::Entry* entry =
+ global_variables_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index =
+ builder_->AddGlobal(WasmOpcodes::MachineTypeFor(type), 0);
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = global_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ uint16_t LookupOrInsertFunction(Variable* v) {
+ DCHECK(builder_ != nullptr);
+ ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index = builder_->AddFunction();
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ LocalType TypeOf(Expression* expr) {
+ DCHECK(expr->bounds().lower == expr->bounds().upper);
+ return TypeFrom(expr->bounds().lower);
+ }
+
+ LocalType TypeFrom(TypeImpl<ZoneTypeConfig>* type) {
+ if (type->Is(cache_.kAsmInt)) {
+ return kAstI32;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ return kAstF32;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ return kAstF64;
+ } else {
+ return kAstStmt;
+ }
+ }
+
+ Zone* zone() { return zone_; }
+
+ ZoneHashMap local_variables_;
+ ZoneHashMap functions_;
+ ZoneHashMap global_variables_;
+ bool in_function_;
+ bool is_set_op_;
+ bool marking_exported;
+ WasmModuleBuilder* builder_;
+ WasmFunctionBuilder* current_function_builder_;
+ FunctionLiteral* literal_;
+ Isolate* isolate_;
+ Zone* zone_;
+ TypeCache const& cache_;
+ ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
+ int block_size_;
+ uint16_t init_function_index;
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
+};
+
+AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
+ FunctionLiteral* literal)
+ : isolate_(isolate), zone_(zone), literal_(literal) {}
+
+// TODO(aseemgarg): this should probably take a zone (to write the wasm to) as
+// input, so that the zone given to the constructor can be thrown away once
+// the wasm module has been written.
+WasmModuleIndex* AsmWasmBuilder::Run() {
+ AsmWasmBuilderImpl impl(isolate_, zone_, literal_);
+ impl.Compile();
+ WasmModuleWriter* writer = impl.builder_->Build(zone_);
+ return writer->WriteTo(zone_);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/asm-wasm-builder.h b/chromium/v8/src/wasm/asm-wasm-builder.h
new file mode 100644
index 00000000000..cb568db77c6
--- /dev/null
+++ b/chromium/v8/src/wasm/asm-wasm-builder.h
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_ASM_WASM_BUILDER_H_
+#define V8_WASM_ASM_WASM_BUILDER_H_
+
+#include "src/allocation.h"
+#include "src/wasm/encoder.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class FunctionLiteral;
+
+namespace wasm {
+
+class AsmWasmBuilder {
+ public:
+ explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ WasmModuleIndex* Run();
+
+ private:
+ Isolate* isolate_;
+ Zone* zone_;
+ FunctionLiteral* literal_;
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_ASM_WASM_BUILDER_H_
diff --git a/chromium/v8/src/wasm/ast-decoder.cc b/chromium/v8/src/wasm/ast-decoder.cc
new file mode 100644
index 00000000000..ffb815771a7
--- /dev/null
+++ b/chromium/v8/src/wasm/ast-decoder.cc
@@ -0,0 +1,1583 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/signature.h"
+
+#include "src/flags.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+// The root of a decoded tree.
+struct Tree {
+ LocalType type; // tree type.
+ uint32_t count; // number of children.
+ const byte* pc; // start of the syntax tree.
+ TFNode* node; // node in the TurboFan graph.
+ Tree* children[1]; // pointers to children.
+
+ WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
+};
+
+
+// A production represents an incomplete decoded tree in the LR decoder.
+struct Production {
+ Tree* tree; // the root of the syntax tree.
+ int index; // the current index into the children of the tree.
+
+ WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc()); }
+ const byte* pc() const { return tree->pc; }
+ bool done() const { return index >= static_cast<int>(tree->count); }
+ Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
+};
+
+
+// An SsaEnv environment carries the current local variable renaming
+// as well as the current effect and control dependency in the TF graph.
+// It maintains a control state that tracks whether the environment
+// is reachable, has reached a control end, or has been merged.
+struct SsaEnv {
+ enum State { kControlEnd, kUnreachable, kReached, kMerged };
+
+ State state;
+ TFNode* control;
+ TFNode* effect;
+ TFNode** locals;
+
+ bool go() { return state >= kReached; }
+ void Kill(State new_state = kControlEnd) {
+ state = new_state;
+ locals = nullptr;
+ control = nullptr;
+ effect = nullptr;
+ }
+};
+
+
+// An entry in the stack of blocks during decoding.
+struct Block {
+ SsaEnv* ssa_env; // SSA renaming environment.
+ int stack_depth; // production stack depth.
+};
+
+
+// An entry in the stack of ifs during decoding.
+struct IfEnv {
+ SsaEnv* false_env;
+ SsaEnv* merge_env;
+ SsaEnv** case_envs;
+};
+
+
+// Macros that build nodes only if there is a graph and the current SSA
+// environment is reachable from start. This avoids problems with malformed
+// TF graphs when decoding inputs that have unreachable code.
+#define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
+#define BUILD0(func) (build() ? builder_->func() : nullptr)
+
+
+// A decoder for Wasm code that uses an explicit shift-reduce parsing
+// strategy with multiple internal stacks.
+class LR_WasmDecoder : public Decoder {
+ public:
+ LR_WasmDecoder(Zone* zone, TFBuilder* builder)
+ : Decoder(nullptr, nullptr),
+ zone_(zone),
+ builder_(builder),
+ trees_(zone),
+ stack_(zone),
+ blocks_(zone),
+ ifs_(zone) {}
+
+ TreeResult Decode(FunctionEnv* function_env, const byte* base, const byte* pc,
+ const byte* end) {
+ base::ElapsedTimer decode_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ decode_timer.Start();
+ }
+ trees_.clear();
+ stack_.clear();
+ blocks_.clear();
+ ifs_.clear();
+
+ if (end < pc) {
+ error(pc, "function body end < start");
+ return result_;
+ }
+
+ base_ = base;
+ Reset(pc, end);
+ function_env_ = function_env;
+
+ InitSsaEnv();
+ DecodeFunctionBody();
+
+ Tree* tree = nullptr;
+ if (ok()) {
+ if (ssa_env_->go()) {
+ if (stack_.size() > 0) {
+ error(stack_.back().pc(), end, "fell off end of code");
+ }
+ AddImplicitReturnAtEnd();
+ }
+ if (trees_.size() == 0) {
+ if (function_env_->sig->return_count() > 0) {
+ error(start_, "no trees created");
+ }
+ } else {
+ tree = trees_[0];
+ }
+ }
+
+ if (ok()) {
+ if (FLAG_trace_wasm_decode_time) {
+ double ms = decode_timer.Elapsed().InMillisecondsF();
+ PrintF(" - decoding took %0.3f ms\n", ms);
+ }
+ TRACE("wasm-decode ok\n\n");
+ } else {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
+ startrel(error_pc_), error_msg_.get());
+ }
+ return toResult(tree);
+ }
+
+ private:
+ static const size_t kErrorMsgSize = 128;
+
+ Zone* zone_;
+ TFBuilder* builder_;
+ const byte* base_;
+ TreeResult result_;
+
+ SsaEnv* ssa_env_;
+ FunctionEnv* function_env_;
+
+ ZoneVector<Tree*> trees_;
+ ZoneVector<Production> stack_;
+ ZoneVector<Block> blocks_;
+ ZoneVector<IfEnv> ifs_;
+
+ inline bool build() { return builder_ && ssa_env_->go(); }
+
+ void InitSsaEnv() {
+ FunctionSig* sig = function_env_->sig;
+ int param_count = static_cast<int>(sig->parameter_count());
+ TFNode* start = nullptr;
+ SsaEnv* ssa_env = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * EnvironmentCount();
+ ssa_env->state = SsaEnv::kReached;
+ ssa_env->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
+
+ int pos = 0;
+ if (builder_) {
+ start = builder_->Start(param_count + 1);
+ // Initialize parameters.
+ for (int i = 0; i < param_count; i++) {
+ ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
+ }
+ // Initialize int32 locals.
+ if (function_env_->local_int32_count > 0) {
+ TFNode* zero = builder_->Int32Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_int32_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize int64 locals.
+ if (function_env_->local_int64_count > 0) {
+ TFNode* zero = builder_->Int64Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_int64_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize float32 locals.
+ if (function_env_->local_float32_count > 0) {
+ TFNode* zero = builder_->Float32Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_float32_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize float64 locals.
+ if (function_env_->local_float64_count > 0) {
+ TFNode* zero = builder_->Float64Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_float64_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ DCHECK_EQ(function_env_->total_locals, pos);
+ DCHECK_EQ(EnvironmentCount(), pos);
+ builder_->set_module(function_env_->module);
+ }
+ ssa_env->control = start;
+ ssa_env->effect = start;
+ SetEnv("initial", ssa_env);
+ }
+
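+  // Shift/Reduce sketch: for the prefix bytecode
+  //   kExprI32Add, kExprI8Const 1, kExprI8Const 2
+  // Shift pushes the add expecting two children; each constant then becomes
+  // a Leaf that Reduce folds into the open production until the add is done.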
+ void Leaf(LocalType type, TFNode* node = nullptr) {
+ size_t size = sizeof(Tree);
+ Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
+ tree->type = type;
+ tree->count = 0;
+ tree->pc = pc_;
+ tree->node = node;
+ tree->children[0] = nullptr;
+ Reduce(tree);
+ }
+
+ void Shift(LocalType type, uint32_t count) {
+ size_t size =
+ sizeof(Tree) + (count == 0 ? 0 : ((count - 1) * sizeof(Tree*)));
+ Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
+ tree->type = type;
+ tree->count = count;
+ tree->pc = pc_;
+ tree->node = nullptr;
+ for (uint32_t i = 0; i < count; i++) tree->children[i] = nullptr;
+ if (count == 0) {
+ Production p = {tree, 0};
+ Reduce(&p);
+ Reduce(tree);
+ } else {
+ stack_.push_back({tree, 0});
+ }
+ }
+
+ void Reduce(Tree* tree) {
+ while (true) {
+ if (stack_.size() == 0) {
+ trees_.push_back(tree);
+ break;
+ }
+ Production* p = &stack_.back();
+ p->tree->children[p->index++] = tree;
+ Reduce(p);
+ if (p->done()) {
+ tree = p->tree;
+ stack_.pop_back();
+ } else {
+ break;
+ }
+ }
+ }
+
+ char* indentation() {
+ static const int kMaxIndent = 64;
+ static char bytes[kMaxIndent + 1];
+ for (int i = 0; i < kMaxIndent; i++) bytes[i] = ' ';
+ bytes[kMaxIndent] = 0;
+ if (stack_.size() < kMaxIndent / 2) {
+ bytes[stack_.size() * 2] = 0;
+ }
+ return bytes;
+ }
+
+  // Decodes the body of a function, producing reduced trees into {trees_}.
+ void DecodeFunctionBody() {
+ TRACE("wasm-decode %p...%p (%d bytes) %s\n",
+ reinterpret_cast<const void*>(start_),
+ reinterpret_cast<const void*>(limit_),
+ static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
+
+ if (pc_ >= limit_) return; // Nothing to do.
+
+ while (true) { // decoding loop.
+ int len = 1;
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
+ TRACE("wasm-decode module+%-6d %s func+%d: 0x%02x %s\n", baserel(pc_),
+ indentation(), startrel(pc_), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ // A simple expression with a fixed signature.
+ Shift(sig->GetReturn(), static_cast<uint32_t>(sig->parameter_count()));
+ pc_ += len;
+ if (pc_ >= limit_) {
+ // End of code reached or exceeded.
+ if (pc_ > limit_ && ok()) {
+ error("Beyond end of code");
+ }
+ return;
+ }
+ continue; // back to decoding loop.
+ }
+
+ switch (opcode) {
+ case kExprNop:
+ Leaf(kAstStmt);
+ break;
+ case kExprBlock: {
+ int length = Operand<uint8_t>(pc_);
+ if (length < 1) {
+ Leaf(kAstStmt);
+ } else {
+ Shift(kAstEnd, length);
+ // The break environment is the outer environment.
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SetEnv("block:start", Steal(break_env));
+ }
+ len = 2;
+ break;
+ }
+ case kExprLoop: {
+ int length = Operand<uint8_t>(pc_);
+ if (length < 1) {
+ Leaf(kAstStmt);
+ } else {
+ Shift(kAstEnd, length);
+ // The break environment is the outer environment.
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SsaEnv* cont_env = Steal(break_env);
+ // The continue environment is the inner environment.
+ PrepareForLoop(cont_env);
+ SetEnv("loop:start", Split(cont_env));
+ if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
+ PushBlock(cont_env);
+ blocks_.back().stack_depth = -1; // no production for inner block.
+ }
+ len = 2;
+ break;
+ }
+ case kExprIf:
+ Shift(kAstStmt, 2);
+ break;
+ case kExprIfElse:
+ Shift(kAstEnd, 3); // Result type is typeof(x) in {c ? x : y}.
+ break;
+ case kExprSelect:
+ Shift(kAstStmt, 3); // Result type is typeof(x) in {c ? x : y}.
+ break;
+ case kExprBr: {
+ uint32_t depth = Operand<uint8_t>(pc_);
+ Shift(kAstEnd, 1);
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ }
+ len = 2;
+ break;
+ }
+ case kExprBrIf: {
+ uint32_t depth = Operand<uint8_t>(pc_);
+ Shift(kAstStmt, 2);
+ if (depth >= blocks_.size()) {
+ error("improperly nested conditional branch");
+ }
+ len = 2;
+ break;
+ }
+ case kExprTableSwitch: {
+ if (!checkAvailable(5)) {
+ error("expected #tableswitch <cases> <table>, fell off end");
+ break;
+ }
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc_ + 1);
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc_ + 3);
+ len = 5 + table_count * 2;
+
+ if (table_count == 0) {
+ error("tableswitch with 0 entries");
+ break;
+ }
+
+ if (!checkAvailable(len)) {
+ error("expected #tableswitch <cases> <table>, fell off end");
+ break;
+ }
+
+ Shift(kAstEnd, 1 + case_count);
+
+ // Verify table.
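+          // Entries >= 0x8000 encode branches to enclosing blocks (entry
+          // minus 0x8000 is the block depth); smaller entries index the
+          // cases of this tableswitch.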
+ for (int i = 0; i < table_count; i++) {
+ uint16_t target =
+ *reinterpret_cast<const uint16_t*>(pc_ + 5 + i * 2);
+ if (target >= 0x8000) {
+ size_t depth = target - 0x8000;
+ if (depth > blocks_.size()) {
+ error(pc_ + 5 + i * 2, "improper branch in tableswitch");
+ }
+ } else {
+ if (target >= case_count) {
+ error(pc_ + 5 + i * 2, "invalid case target in tableswitch");
+ }
+ }
+ }
+ break;
+ }
+ case kExprReturn: {
+ int count = static_cast<int>(function_env_->sig->return_count());
+ if (count == 0) {
+ BUILD(Return, 0, builder_->Buffer(0));
+ ssa_env_->Kill();
+ Leaf(kAstEnd);
+ } else {
+ Shift(kAstEnd, count);
+ }
+ break;
+ }
+ case kExprUnreachable: {
+ BUILD0(Unreachable);
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ Leaf(kAstEnd, nullptr);
+ break;
+ }
+ case kExprI8Const: {
+ int32_t value = Operand<int8_t>(pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, value));
+ len = 2;
+ break;
+ }
+ case kExprI32Const: {
+ int32_t value = Operand<int32_t>(pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, value));
+ len = 5;
+ break;
+ }
+ case kExprI64Const: {
+ int64_t value = Operand<int64_t>(pc_);
+ Leaf(kAstI64, BUILD(Int64Constant, value));
+ len = 9;
+ break;
+ }
+ case kExprF32Const: {
+ float value = Operand<float>(pc_);
+ Leaf(kAstF32, BUILD(Float32Constant, value));
+ len = 5;
+ break;
+ }
+ case kExprF64Const: {
+ double value = Operand<double>(pc_);
+ Leaf(kAstF64, BUILD(Float64Constant, value));
+ len = 9;
+ break;
+ }
+ case kExprGetLocal: {
+ uint32_t index;
+ LocalType type = LocalOperand(pc_, &index, &len);
+ TFNode* val =
+ build() && type != kAstStmt ? ssa_env_->locals[index] : nullptr;
+ Leaf(type, val);
+ break;
+ }
+ case kExprSetLocal: {
+ uint32_t index;
+ LocalType type = LocalOperand(pc_, &index, &len);
+ Shift(type, 1);
+ break;
+ }
+ case kExprLoadGlobal: {
+ uint32_t index;
+ LocalType type = GlobalOperand(pc_, &index, &len);
+ Leaf(type, BUILD(LoadGlobal, index));
+ break;
+ }
+ case kExprStoreGlobal: {
+ uint32_t index;
+ LocalType type = GlobalOperand(pc_, &index, &len);
+ Shift(type, 1);
+ break;
+ }
+ case kExprI32LoadMem8S:
+ case kExprI32LoadMem8U:
+ case kExprI32LoadMem16S:
+ case kExprI32LoadMem16U:
+ case kExprI32LoadMem:
+ len = DecodeLoadMem(pc_, kAstI32);
+ break;
+ case kExprI64LoadMem8S:
+ case kExprI64LoadMem8U:
+ case kExprI64LoadMem16S:
+ case kExprI64LoadMem16U:
+ case kExprI64LoadMem32S:
+ case kExprI64LoadMem32U:
+ case kExprI64LoadMem:
+ len = DecodeLoadMem(pc_, kAstI64);
+ break;
+ case kExprF32LoadMem:
+ len = DecodeLoadMem(pc_, kAstF32);
+ break;
+ case kExprF64LoadMem:
+ len = DecodeLoadMem(pc_, kAstF64);
+ break;
+ case kExprI32StoreMem8:
+ case kExprI32StoreMem16:
+ case kExprI32StoreMem:
+ len = DecodeStoreMem(pc_, kAstI32);
+ break;
+ case kExprI64StoreMem8:
+ case kExprI64StoreMem16:
+ case kExprI64StoreMem32:
+ case kExprI64StoreMem:
+ len = DecodeStoreMem(pc_, kAstI64);
+ break;
+ case kExprF32StoreMem:
+ len = DecodeStoreMem(pc_, kAstF32);
+ break;
+ case kExprF64StoreMem:
+ len = DecodeStoreMem(pc_, kAstF64);
+ break;
+ case kExprMemorySize:
+ Leaf(kAstI32, BUILD(MemSize, 0));
+ break;
+ case kExprGrowMemory:
+ Shift(kAstI32, 1);
+ break;
+ case kExprCallFunction: {
+ uint32_t unused;
+ FunctionSig* sig = FunctionSigOperand(pc_, &unused, &len);
+ if (sig) {
+ LocalType type =
+ sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ Shift(type, static_cast<int>(sig->parameter_count()));
+ } else {
+ Leaf(kAstI32); // error
+ }
+ break;
+ }
+ case kExprCallIndirect: {
+ uint32_t unused;
+ FunctionSig* sig = SigOperand(pc_, &unused, &len);
+ if (sig) {
+ LocalType type =
+ sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ Shift(type, static_cast<int>(1 + sig->parameter_count()));
+ } else {
+ Leaf(kAstI32); // error
+ }
+ break;
+ }
+ default:
+ error("Invalid opcode");
+ return;
+ }
+ pc_ += len;
+ if (pc_ >= limit_) {
+ // End of code reached or exceeded.
+ if (pc_ > limit_ && ok()) {
+ error("Beyond end of code");
+ }
+ return;
+ }
+ }
+ }
+
+ void PushBlock(SsaEnv* ssa_env) {
+ blocks_.push_back({ssa_env, static_cast<int>(stack_.size() - 1)});
+ }
+
+ int DecodeLoadMem(const byte* pc, LocalType type) {
+ int length = 2;
+ uint32_t offset;
+ MemoryAccessOperand(pc, &length, &offset);
+ Shift(type, 1);
+ return length;
+ }
+
+ int DecodeStoreMem(const byte* pc, LocalType type) {
+ int length = 2;
+ uint32_t offset;
+ MemoryAccessOperand(pc, &length, &offset);
+ Shift(type, 2);
+ return length;
+ }
+
+ void AddImplicitReturnAtEnd() {
+ int retcount = static_cast<int>(function_env_->sig->return_count());
+ if (retcount == 0) {
+ BUILD0(ReturnVoid);
+ return;
+ }
+
+ if (static_cast<int>(trees_.size()) < retcount) {
+ error(limit_, nullptr,
+ "ImplicitReturn expects %d arguments, only %d remain", retcount,
+ static_cast<int>(trees_.size()));
+ return;
+ }
+
+ TRACE("wasm-decode implicit return of %d args\n", retcount);
+
+ TFNode** buffer = BUILD(Buffer, retcount);
+ for (int index = 0; index < retcount; index++) {
+ Tree* tree = trees_[trees_.size() - 1 - index];
+ if (buffer) buffer[index] = tree->node;
+ LocalType expected = function_env_->sig->GetReturn(index);
+ if (tree->type != expected) {
+ error(limit_, tree->pc,
+ "ImplicitReturn[%d] expected type %s, found %s of type %s", index,
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::OpcodeName(tree->opcode()),
+ WasmOpcodes::TypeName(tree->type));
+ return;
+ }
+ }
+
+ BUILD(Return, retcount, buffer);
+ }
+
+ int baserel(const byte* ptr) {
+ return base_ ? static_cast<int>(ptr - base_) : 0;
+ }
+
+ int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
+
+ void Reduce(Production* p) {
+ WasmOpcode opcode = p->opcode();
+ TRACE("-----reduce module+%-6d %s func+%d: 0x%02x %s\n", baserel(p->pc()),
+ indentation(), startrel(p->pc()), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ // A simple expression with a fixed signature.
+ TypeCheckLast(p, sig->GetParam(p->index - 1));
+ if (p->done() && build()) {
+ if (sig->parameter_count() == 2) {
+ p->tree->node = builder_->Binop(opcode, p->tree->children[0]->node,
+ p->tree->children[1]->node);
+ } else if (sig->parameter_count() == 1) {
+ p->tree->node = builder_->Unop(opcode, p->tree->children[0]->node);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ return;
+ }
+
+ switch (opcode) {
+ case kExprBlock: {
+ if (p->done()) {
+ Block* last = &blocks_.back();
+ DCHECK_EQ(stack_.size() - 1, last->stack_depth);
+ // fallthrough with the last expression.
+ ReduceBreakToExprBlock(p, last);
+ SetEnv("block:end", last->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+ case kExprLoop: {
+ if (p->done()) {
+ // Pop the continue environment.
+ blocks_.pop_back();
+ // Get the break environment.
+ Block* last = &blocks_.back();
+ DCHECK_EQ(stack_.size() - 1, last->stack_depth);
+ // fallthrough with the last expression.
+ ReduceBreakToExprBlock(p, last);
+ SetEnv("loop:end", last->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+ case kExprIf: {
+ if (p->index == 1) {
+ // Condition done. Split environment for true branch.
+ TypeCheckLast(p, kAstI32);
+ SsaEnv* false_env = ssa_env_;
+ SsaEnv* true_env = Split(ssa_env_);
+ ifs_.push_back({nullptr, false_env, nullptr});
+ BUILD(Branch, p->last()->node, &true_env->control,
+ &false_env->control);
+ SetEnv("if:true", true_env);
+ } else if (p->index == 2) {
+ // True block done. Merge true and false environments.
+ IfEnv* env = &ifs_.back();
+ SsaEnv* merge = env->merge_env;
+ if (merge->go()) {
+ merge->state = SsaEnv::kReached;
+ Goto(ssa_env_, merge);
+ }
+ SetEnv("if:merge", merge);
+ ifs_.pop_back();
+ }
+ break;
+ }
+ case kExprIfElse: {
+ if (p->index == 1) {
+ // Condition done. Split environment for true and false branches.
+ TypeCheckLast(p, kAstI32);
+ SsaEnv* merge_env = ssa_env_;
+ TFNode* if_true = nullptr;
+ TFNode* if_false = nullptr;
+ BUILD(Branch, p->last()->node, &if_true, &if_false);
+ SsaEnv* false_env = Split(ssa_env_);
+ SsaEnv* true_env = Steal(ssa_env_);
+ false_env->control = if_false;
+ true_env->control = if_true;
+ ifs_.push_back({false_env, merge_env, nullptr});
+ SetEnv("if_else:true", true_env);
+ } else if (p->index == 2) {
+ // True expr done.
+ IfEnv* env = &ifs_.back();
+ MergeIntoProduction(p, env->merge_env, p->last());
+ // Switch to environment for false branch.
+ SsaEnv* false_env = ifs_.back().false_env;
+ SetEnv("if_else:false", false_env);
+ } else if (p->index == 3) {
+ // False expr done.
+ IfEnv* env = &ifs_.back();
+ MergeIntoProduction(p, env->merge_env, p->last());
+ SetEnv("if_else:merge", env->merge_env);
+ ifs_.pop_back();
+ }
+ break;
+ }
+ case kExprSelect: {
+ if (p->index == 1) {
+ // Condition done.
+ TypeCheckLast(p, kAstI32);
+ } else if (p->index == 2) {
+ // True expression done.
+ p->tree->type = p->last()->type;
+ if (p->tree->type == kAstStmt) {
+ error(p->pc(), p->tree->children[1]->pc,
+ "select operand should be expression");
+ }
+ } else {
+ // False expression done.
+ DCHECK(p->done());
+ TypeCheckLast(p, p->tree->type);
+ if (build()) {
+ TFNode* controls[2];
+ builder_->Branch(p->tree->children[0]->node, &controls[0],
+ &controls[1]);
+ TFNode* merge = builder_->Merge(2, controls);
+ TFNode* vals[2] = {p->tree->children[1]->node,
+ p->tree->children[2]->node};
+ TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
+ p->tree->node = phi;
+ ssa_env_->control = merge;
+ }
+ }
+ break;
+ }
+ case kExprBr: {
+ uint32_t depth = Operand<uint8_t>(p->pc());
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ break;
+ }
+ Block* block = &blocks_[blocks_.size() - depth - 1];
+ ReduceBreakToExprBlock(p, block);
+ break;
+ }
+ case kExprBrIf: {
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32);
+ } else if (p->done()) {
+ uint32_t depth = Operand<uint8_t>(p->pc());
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ break;
+ }
+ Block* block = &blocks_[blocks_.size() - depth - 1];
+ SsaEnv* fenv = ssa_env_;
+ SsaEnv* tenv = Split(fenv);
+ BUILD(Branch, p->tree->children[0]->node, &tenv->control,
+ &fenv->control);
+ ssa_env_ = tenv;
+ ReduceBreakToExprBlock(p, block);
+ ssa_env_ = fenv;
+ }
+ break;
+ }
+ case kExprTableSwitch: {
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(p->pc() + 3);
+ if (table_count == 1) {
+ // Degenerate switch with only a default target.
+ if (p->index == 1) {
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SetEnv("switch:default", Steal(break_env));
+ }
+ if (p->done()) {
+ Block* block = &blocks_.back();
+ // Fall through to the end.
+ ReduceBreakToExprBlock(p, block);
+ SetEnv("switch:end", block->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+
+ if (p->index == 1) {
+ // Switch key finished.
+ TypeCheckLast(p, kAstI32);
+
+ TFNode* sw = BUILD(Switch, table_count, p->last()->node);
+
+ // Allocate environments for each case.
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(p->pc() + 1);
+ SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(case_count);
+ for (int i = 0; i < case_count; i++) {
+ case_envs[i] = UnreachableEnv();
+ }
+
+ ifs_.push_back({nullptr, nullptr, case_envs});
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SsaEnv* copy = Steal(break_env);
+ ssa_env_ = copy;
+
+ // Build the environments for each case based on the table.
+ const uint16_t* table =
+ reinterpret_cast<const uint16_t*>(p->pc() + 5);
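+ // Table entries below 0x8000 select a case; entries at or above 0x8000
+ // encode a break to an enclosing block, biased by 0x8000.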
+ for (int i = 0; i < table_count; i++) {
+ uint16_t target = table[i];
+ SsaEnv* env = Split(copy);
+ env->control = (i == table_count - 1) ? BUILD(IfDefault, sw)
+ : BUILD(IfValue, i, sw);
+ if (target >= 0x8000) {
+ // Targets an outer block.
+ int depth = target - 0x8000;
+ SsaEnv* tenv = blocks_[blocks_.size() - depth - 1].ssa_env;
+ Goto(env, tenv);
+ } else {
+ // Targets a case.
+ Goto(env, case_envs[target]);
+ }
+ }
+
+ // Switch to the environment for the first case.
+ SetEnv("switch:case", case_envs[0]);
+ } else {
+ // Switch case finished.
+ if (p->done()) {
+ // Last case. Fall through to the end.
+ Block* block = &blocks_.back();
+ ReduceBreakToExprBlock(p, block);
+ SsaEnv* next = block->ssa_env;
+ blocks_.pop_back();
+ ifs_.pop_back();
+ SetEnv("switch:end", next);
+ } else {
+ // Interior case. Maybe fall through to the next case.
+ SsaEnv* next = ifs_.back().case_envs[p->index - 1];
+ if (ssa_env_->go()) Goto(ssa_env_, next);
+ SetEnv("switch:case", next);
+ }
+ }
+ break;
+ }
+ case kExprReturn: {
+ TypeCheckLast(p, function_env_->sig->GetReturn(p->index - 1));
+ if (p->done()) {
+ if (build()) {
+ int count = p->tree->count;
+ TFNode** buffer = builder_->Buffer(count);
+ for (int i = 0; i < count; i++) {
+ buffer[i] = p->tree->children[i]->node;
+ }
+ BUILD(Return, count, buffer);
+ }
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ }
+ break;
+ }
+ case kExprSetLocal: {
+ int unused = 0;
+ uint32_t index;
+ LocalType type = LocalOperand(p->pc(), &index, &unused);
+ Tree* val = p->last();
+ if (type == val->type) {
+ if (build()) ssa_env_->locals[index] = val->node;
+ p->tree->node = val->node;
+ } else {
+ error(p->pc(), val->pc, "Typecheck failed in SetLocal");
+ }
+ break;
+ }
+ case kExprStoreGlobal: {
+ int unused = 0;
+ uint32_t index;
+ LocalType type = GlobalOperand(p->pc(), &index, &unused);
+ Tree* val = p->last();
+ if (type == val->type) {
+ BUILD(StoreGlobal, index, val->node);
+ p->tree->node = val->node;
+ } else {
+ error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
+ }
+ break;
+ }
+
+ case kExprI32LoadMem8S:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int8());
+ case kExprI32LoadMem8U:
+ return ReduceLoadMem(p, kAstI32, MachineType::Uint8());
+ case kExprI32LoadMem16S:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int16());
+ case kExprI32LoadMem16U:
+ return ReduceLoadMem(p, kAstI32, MachineType::Uint16());
+ case kExprI32LoadMem:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int32());
+
+ case kExprI64LoadMem8S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int8());
+ case kExprI64LoadMem8U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint8());
+ case kExprI64LoadMem16S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int16());
+ case kExprI64LoadMem16U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint16());
+ case kExprI64LoadMem32S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int32());
+ case kExprI64LoadMem32U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint32());
+ case kExprI64LoadMem:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int64());
+
+ case kExprF32LoadMem:
+ return ReduceLoadMem(p, kAstF32, MachineType::Float32());
+
+ case kExprF64LoadMem:
+ return ReduceLoadMem(p, kAstF64, MachineType::Float64());
+
+ case kExprI32StoreMem8:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int8());
+ case kExprI32StoreMem16:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int16());
+ case kExprI32StoreMem:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int32());
+
+ case kExprI64StoreMem8:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int8());
+ case kExprI64StoreMem16:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int16());
+ case kExprI64StoreMem32:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int32());
+ case kExprI64StoreMem:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int64());
+
+ case kExprF32StoreMem:
+ return ReduceStoreMem(p, kAstF32, MachineType::Float32());
+
+ case kExprF64StoreMem:
+ return ReduceStoreMem(p, kAstF64, MachineType::Float64());
+
+ case kExprGrowMemory:
+ TypeCheckLast(p, kAstI32);
+ // TODO(titzer): build node for GrowMemory
+ p->tree->node = BUILD(Int32Constant, 0);
+ return;
+
+ case kExprCallFunction: {
+ int len;
+ uint32_t index;
+ FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
+ if (!sig) break;
+ if (p->index > 0) {
+ TypeCheckLast(p, sig->GetParam(p->index - 1));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count + 1;
+ TFNode** buffer = builder_->Buffer(count);
+ FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
+ USE(sig);
+ buffer[0] = nullptr; // reserved for code object.
+ for (uint32_t i = 1; i < count; i++) {
+ buffer[i] = p->tree->children[i - 1]->node;
+ }
+ p->tree->node = builder_->CallDirect(index, buffer);
+ }
+ break;
+ }
+ case kExprCallIndirect: {
+ int len;
+ uint32_t index;
+ FunctionSig* sig = SigOperand(p->pc(), &index, &len);
+ if (!sig) break;
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32);
+ } else {
+ TypeCheckLast(p, sig->GetParam(p->index - 2));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count;
+ TFNode** buffer = builder_->Buffer(count);
+ for (uint32_t i = 0; i < count; i++) {
+ buffer[i] = p->tree->children[i]->node;
+ }
+ p->tree->node = builder_->CallIndirect(index, buffer);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
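+ // Routes a break (or fallthrough) from {p} to the target {block}, merging
+ // the value unless the target is a loop header (stack_depth < 0).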
+ void ReduceBreakToExprBlock(Production* p, Block* block) {
+ if (block->stack_depth < 0) {
+ // This is the inner loop block, which does not have a value.
+ Goto(ssa_env_, block->ssa_env);
+ } else {
+ // Merge the value into the production for the block.
+ Production* bp = &stack_[block->stack_depth];
+ MergeIntoProduction(bp, block->ssa_env, p->last());
+ }
+ }
+
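+ // Merges the current environment and the value of {expr} into {target};
+ // the first merge sets the production's type and node, later merges build
+ // phis or degrade the type to kAstStmt on a mismatch.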
+ void MergeIntoProduction(Production* p, SsaEnv* target, Tree* expr) {
+ if (!ssa_env_->go()) return;
+
+ bool first = target->state == SsaEnv::kUnreachable;
+ Goto(ssa_env_, target);
+ if (expr->type == kAstEnd) return;
+
+ if (first) {
+ // First merge into this environment; set the type and the node.
+ p->tree->type = expr->type;
+ p->tree->node = expr->node;
+ } else {
+ // Merge with the existing value for this block.
+ LocalType type = p->tree->type;
+ if (expr->type != type) {
+ type = kAstStmt;
+ p->tree->type = kAstStmt;
+ p->tree->node = nullptr;
+ } else if (type != kAstStmt) {
+ p->tree->node = CreateOrMergeIntoPhi(type, target->control,
+ p->tree->node, expr->node);
+ }
+ }
+ }
+
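+ // Reduces a load: type-checks the index operand and builds a LoadMem node
+ // at the decoded offset.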
+ void ReduceLoadMem(Production* p, LocalType type, MachineType mem_type) {
+ DCHECK_EQ(1, p->index);
+ TypeCheckLast(p, kAstI32); // index
+ if (build()) {
+ int length = 0;
+ uint32_t offset = 0;
+ MemoryAccessOperand(p->pc(), &length, &offset);
+ p->tree->node =
+ builder_->LoadMem(type, mem_type, p->last()->node, offset);
+ }
+ }
+
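+ // Reduces a store: type-checks the index and value operands, builds a
+ // StoreMem node, and yields the stored value as the result.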
+ void ReduceStoreMem(Production* p, LocalType type, MachineType mem_type) {
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32); // index
+ } else {
+ DCHECK_EQ(2, p->index);
+ TypeCheckLast(p, type);
+ if (build()) {
+ int length = 0;
+ uint32_t offset = 0;
+ MemoryAccessOperand(p->pc(), &length, &offset);
+ TFNode* val = p->tree->children[1]->node;
+ builder_->StoreMem(mem_type, p->tree->children[0]->node, offset, val);
+ p->tree->node = val;
+ }
+ }
+ }
+
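+ // Checks that the last-reduced operand of {p} has the {expected} type and
+ // reports an error otherwise; kAstEnd operands and kAstStmt expectations
+ // pass unchecked.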
+ void TypeCheckLast(Production* p, LocalType expected) {
+ LocalType result = p->last()->type;
+ if (result == expected) return;
+ if (result == kAstEnd) return;
+ if (expected != kAstStmt) {
+ error(p->pc(), p->last()->pc,
+ "%s[%d] expected type %s, found %s of type %s",
+ WasmOpcodes::OpcodeName(p->opcode()), p->index - 1,
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::OpcodeName(p->last()->opcode()),
+ WasmOpcodes::TypeName(p->last()->type));
+ }
+ }
+
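+ // Makes {env} the current SSA environment and redirects the graph
+ // builder's control and effect dependencies to it.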
+ void SetEnv(const char* reason, SsaEnv* env) {
+ TRACE(" env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
+ static_cast<int>(blocks_.size()), reason);
+ if (env->control != nullptr && FLAG_trace_wasm_decoder) {
+ TRACE(", control = ");
+ compiler::WasmGraphBuilder::PrintDebugName(env->control);
+ }
+ TRACE("\n");
+ ssa_env_ = env;
+ if (builder_) {
+ builder_->set_control_ptr(&env->control);
+ builder_->set_effect_ptr(&env->effect);
+ }
+ }
+
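+ // Transfers control from {from} to {to}, merging control, effect, and
+ // local SSA values according to the current state of {to}; {from} is
+ // killed afterwards.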
+ void Goto(SsaEnv* from, SsaEnv* to) {
+ DCHECK_NOT_NULL(to);
+ if (!from->go()) return;
+ switch (to->state) {
+ case SsaEnv::kUnreachable: { // Overwrite destination.
+ to->state = SsaEnv::kReached;
+ to->locals = from->locals;
+ to->control = from->control;
+ to->effect = from->effect;
+ break;
+ }
+ case SsaEnv::kReached: { // Create a new merge.
+ to->state = SsaEnv::kMerged;
+ if (!builder_) break;
+ // Merge control.
+ TFNode* controls[] = {to->control, from->control};
+ TFNode* merge = builder_->Merge(2, controls);
+ to->control = merge;
+ // Merge effects.
+ if (from->effect != to->effect) {
+ TFNode* effects[] = {to->effect, from->effect, merge};
+ to->effect = builder_->EffectPhi(2, effects, merge);
+ }
+ // Merge SSA values.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ TFNode* a = to->locals[i];
+ TFNode* b = from->locals[i];
+ if (a != b) {
+ TFNode* vals[] = {a, b};
+ to->locals[i] =
+ builder_->Phi(function_env_->GetLocalType(i), 2, vals, merge);
+ }
+ }
+ break;
+ }
+ case SsaEnv::kMerged: {
+ if (!builder_) break;
+ TFNode* merge = to->control;
+ // Extend the existing merge.
+ builder_->AppendToMerge(merge, from->control);
+ // Merge effects.
+ if (builder_->IsPhiWithMerge(to->effect, merge)) {
+ builder_->AppendToPhi(merge, to->effect, from->effect);
+ } else if (to->effect != from->effect) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** effects = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ effects[j] = to->effect;
+ }
+ effects[count - 1] = from->effect;
+ to->effect = builder_->EffectPhi(count, effects, merge);
+ }
+ // Merge locals.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ TFNode* tnode = to->locals[i];
+ TFNode* fnode = from->locals[i];
+ if (builder_->IsPhiWithMerge(tnode, merge)) {
+ builder_->AppendToPhi(merge, tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** vals = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ vals[j] = tnode;
+ }
+ vals[count - 1] = fnode;
+ to->locals[i] = builder_->Phi(function_env_->GetLocalType(i), count,
+ vals, merge);
+ }
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return from->Kill();
+ }
+
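+ // Appends {fnode} to an existing phi on {merge}, or creates a new phi that
+ // yields {tnode} on all prior control inputs and {fnode} on the newest.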
+ TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
+ TFNode* fnode) {
+ if (builder_->IsPhiWithMerge(tnode, merge)) {
+ builder_->AppendToPhi(merge, tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** vals = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
+ vals[count - 1] = fnode;
+ return builder_->Phi(type, count, vals, merge);
+ }
+ return tnode;
+ }
+
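+ // Builds an infinite loop: the current environment becomes a loop header
+ // and control immediately transfers back to it.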
+ void BuildInfiniteLoop() {
+ if (ssa_env_->go()) {
+ PrepareForLoop(ssa_env_);
+ SsaEnv* cont_env = ssa_env_;
+ ssa_env_ = Split(ssa_env_);
+ ssa_env_->state = SsaEnv::kReached;
+ Goto(ssa_env_, cont_env);
+ }
+ }
+
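+ // Turns {env} into a loop header: wraps its control in a loop node and
+ // creates single-input phis for the effect and every local so that back
+ // edges can be appended later.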
+ void PrepareForLoop(SsaEnv* env) {
+ if (env->go()) {
+ env->state = SsaEnv::kMerged;
+ if (builder_) {
+ env->control = builder_->Loop(env->control);
+ env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ builder_->Terminate(env->effect, env->control);
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ env->locals[i] = builder_->Phi(function_env_->GetLocalType(i), 1,
+ &env->locals[i], env->control);
+ }
+ }
+ }
+ }
+
+ // Create a complete copy of {from}.
+ SsaEnv* Split(SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * EnvironmentCount();
+ result->control = from->control;
+ result->effect = from->effect;
+ result->state = from->state == SsaEnv::kUnreachable ? SsaEnv::kUnreachable
+ : SsaEnv::kReached;
+
+ if (from->go()) {
+ result->state = SsaEnv::kReached;
+ result->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
+ if (size > 0) memcpy(result->locals, from->locals, size);
+ } else {
+ result->state = SsaEnv::kUnreachable;
+ result->locals = nullptr;
+ }
+
+ return result;
+ }
+
+ // Create a copy of {from} that steals its state and leaves {from}
+ // unreachable.
+ SsaEnv* Steal(SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ if (!from->go()) return UnreachableEnv();
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kReached;
+ result->locals = from->locals;
+ result->control = from->control;
+ result->effect = from->effect;
+ from->Kill(SsaEnv::kUnreachable);
+ return result;
+ }
+
+ // Create an unreachable environment.
+ SsaEnv* UnreachableEnv() {
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kUnreachable;
+ result->control = nullptr;
+ result->effect = nullptr;
+ result->locals = nullptr;
+ return result;
+ }
+
+ // Load an operand at [pc + 1].
+ template <typename V>
+ V Operand(const byte* pc) {
+ if ((limit_ - pc) < static_cast<int>(1 + sizeof(V))) {
+ const char* msg = "Expected operand following opcode";
+ switch (sizeof(V)) {
+ case 1:
+ msg = "Expected 1-byte operand following opcode";
+ break;
+ case 2:
+ msg = "Expected 2-byte operand following opcode";
+ break;
+ case 4:
+ msg = "Expected 4-byte operand following opcode";
+ break;
+ default:
+ break;
+ }
+ error(pc, msg);
+ return -1;
+ }
+ return *reinterpret_cast<const V*>(pc + 1);
+ }
+
+ int EnvironmentCount() {
+ if (builder_) return static_cast<int>(function_env_->GetLocalCount());
+ return 0; // if we aren't building a graph, don't bother with SSA renaming.
+ }
+
+ LocalType LocalOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->IsValidLocal(*index)) {
+ return function_env_->GetLocalType(*index);
+ }
+ error(pc, "invalid local variable index");
+ return kAstStmt;
+ }
+
+ LocalType GlobalOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidGlobal(*index)) {
+ return WasmOpcodes::LocalTypeFor(
+ function_env_->module->GetGlobalType(*index));
+ }
+ error(pc, "invalid global variable index");
+ return kAstStmt;
+ }
+
+ FunctionSig* FunctionSigOperand(const byte* pc, uint32_t* index,
+ int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidFunction(*index)) {
+ return function_env_->module->GetFunctionSignature(*index);
+ }
+ error(pc, "invalid function index");
+ return nullptr;
+ }
+
+ FunctionSig* SigOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidSignature(*index)) {
+ return function_env_->module->GetSignature(*index);
+ }
+ error(pc, "invalid signature index");
+ return nullptr;
+ }
+
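+ // Reads an unsigned LEB128 operand that follows the opcode byte at {pc};
+ // the returned {length} includes the opcode byte itself.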
+ uint32_t UnsignedLEB128Operand(const byte* pc, int* length) {
+ uint32_t result = 0;
+ ReadUnsignedLEB128ErrorCode error_code =
+ ReadUnsignedLEB128Operand(pc + 1, limit_, length, &result);
+ if (error_code == kInvalidLEB128) error(pc, "invalid LEB128 varint");
+ if (error_code == kMissingLEB128) error(pc, "expected LEB128 varint");
+ (*length)++; // to account for the opcode byte.
+ return result;
+ }
+
+ void MemoryAccessOperand(const byte* pc, int* length, uint32_t* offset) {
+ byte bitfield = Operand<uint8_t>(pc);
+ if (MemoryAccess::OffsetField::decode(bitfield)) {
+ *offset = UnsignedLEB128Operand(pc + 1, length);
+ (*length)++; // to account for the memory access byte
+ } else {
+ *offset = 0;
+ *length = 2;
+ }
+ }
+
+ virtual void onFirstError() {
+ limit_ = start_; // Terminate decoding loop.
+ builder_ = nullptr; // Don't build any more nodes.
+#if DEBUG
+ PrintStackForDebugging();
+#endif
+ }
+
+#if DEBUG
+ void PrintStackForDebugging() { PrintProduction(0); }
+
+ void PrintProduction(size_t depth) {
+ if (depth >= stack_.size()) return;
+ Production* p = &stack_[depth];
+ for (size_t d = 0; d < depth; d++) PrintF(" ");
+
+ PrintF("@%d %s [%d]\n", static_cast<int>(p->tree->pc - start_),
+ WasmOpcodes::OpcodeName(p->opcode()), p->tree->count);
+ for (int i = 0; i < p->index; i++) {
+ Tree* child = p->tree->children[i];
+ for (size_t d = 0; d <= depth; d++) PrintF(" ");
+ PrintF("@%d %s [%d]", static_cast<int>(child->pc - start_),
+ WasmOpcodes::OpcodeName(child->opcode()), child->count);
+ if (child->node) {
+ PrintF(" => TF");
+ compiler::WasmGraphBuilder::PrintDebugName(child->node);
+ }
+ PrintF("\n");
+ }
+ PrintProduction(depth + 1);
+ }
+#endif
+};
+
+
+TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
+ const byte* end) {
+ Zone zone;
+ LR_WasmDecoder decoder(&zone, nullptr);
+ TreeResult result = decoder.Decode(env, base, start, end);
+ return result;
+}
+
+
+TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
+ const byte* start, const byte* end) {
+ Zone zone;
+ LR_WasmDecoder decoder(&zone, builder);
+ TreeResult result = decoder.Decode(env, base, start, end);
+ return result;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Tree& tree) {
+ if (tree.pc == nullptr) {
+ os << "null";
+ return os;
+ }
+ PrintF("%s", WasmOpcodes::OpcodeName(tree.opcode()));
+ if (tree.count > 0) os << "(";
+ for (uint32_t i = 0; i < tree.count; i++) {
+ if (i > 0) os << ", ";
+ os << *tree.children[i];
+ }
+ if (tree.count > 0) os << ")";
+ return os;
+}
+
+
+ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
+ const byte* limit,
+ int* length,
+ uint32_t* result) {
+ *result = 0;
+ const byte* ptr = pc;
+ const byte* end = pc + 5; // maximum 5 bytes.
+ if (end > limit) end = limit;
+ int shift = 0;
+ byte b = 0;
+ while (ptr < end) {
+ b = *ptr++;
+ *result = *result | (static_cast<uint32_t>(b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+ DCHECK_LE(ptr - pc, 5);
+ *length = static_cast<int>(ptr - pc);
+ if (ptr == end && (b & 0x80)) {
+ return kInvalidLEB128;
+ } else if (*length == 0) {
+ return kMissingLEB128;
+ } else {
+ return kNoError;
+ }
+}
+
+
+int OpcodeLength(const byte* pc) {
+ switch (static_cast<WasmOpcode>(*pc)) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+
+ case kExprI8Const:
+ case kExprBlock:
+ case kExprLoop:
+ case kExprBr:
+ case kExprBrIf:
+ return 2;
+ case kExprI32Const:
+ case kExprF32Const:
+ return 5;
+ case kExprI64Const:
+ case kExprF64Const:
+ return 9;
+ case kExprStoreGlobal:
+ case kExprSetLocal:
+ case kExprLoadGlobal:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ case kExprGetLocal: {
+ int length;
+ uint32_t result = 0;
+ ReadUnsignedLEB128Operand(pc + 1, pc + 6, &length, &result);
+ return 1 + length;
+ }
+ case kExprTableSwitch: {
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc + 3);
+ return 5 + table_count * 2;
+ }
+
+ default:
+ return 1;
+ }
+}
+
+
+int OpcodeArity(FunctionEnv* env, const byte* pc) {
+#define DECLARE_ARITY(name, ...) \
+ static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static const int kArity_##name = \
+ static_cast<int>(arraysize(kTypes_##name) - 1);
+
+ FOREACH_SIGNATURE(DECLARE_ARITY);
+#undef DECLARE_ARITY
+
+ switch (static_cast<WasmOpcode>(*pc)) {
+ case kExprI8Const:
+ case kExprI32Const:
+ case kExprI64Const:
+ case kExprF64Const:
+ case kExprF32Const:
+ case kExprGetLocal:
+ case kExprLoadGlobal:
+ case kExprNop:
+ case kExprUnreachable:
+ return 0;
+
+ case kExprBr:
+ case kExprStoreGlobal:
+ case kExprSetLocal:
+ return 1;
+
+ case kExprIf:
+ case kExprBrIf:
+ return 2;
+ case kExprIfElse:
+ case kExprSelect:
+ return 3;
+ case kExprBlock:
+ case kExprLoop:
+ return *(pc + 1);
+
+ case kExprCallFunction: {
+ int index = *(pc + 1);
+ return static_cast<int>(
+ env->module->GetFunctionSignature(index)->parameter_count());
+ }
+ case kExprCallIndirect: {
+ int index = *(pc + 1);
+ return 1 + static_cast<int>(
+ env->module->GetSignature(index)->parameter_count());
+ }
+ case kExprReturn:
+ return static_cast<int>(env->sig->return_count());
+ case kExprTableSwitch: {
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc + 1);
+ return 1 + case_count;
+ }
+
+#define DECLARE_OPCODE_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return kArity_##sig;
+
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/ast-decoder.h b/chromium/v8/src/wasm/ast-decoder.h
new file mode 100644
index 00000000000..5b95ad9f870
--- /dev/null
+++ b/chromium/v8/src/wasm/ast-decoder.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_AST_DECODER_H_
+#define V8_WASM_AST_DECODER_H_
+
+#include "src/signature.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler { // external declarations from compiler.
+class WasmGraphBuilder;
+}
+
+namespace wasm {
+
+typedef compiler::WasmGraphBuilder TFBuilder;
+struct ModuleEnv; // forward declaration of module interface.
+
+// Interface to the function environment during decoding, including the
+// signature and number of locals.
+struct FunctionEnv {
+ ModuleEnv* module; // module environment
+ FunctionSig* sig; // signature of this function
+ uint32_t local_int32_count; // number of int32 locals
+ uint32_t local_int64_count; // number of int64 locals
+ uint32_t local_float32_count; // number of float32 locals
+ uint32_t local_float64_count; // number of float64 locals
+ uint32_t total_locals; // sum of parameters and all locals
+
+ bool IsValidLocal(uint32_t index) { return index < total_locals; }
+ uint32_t GetLocalCount() { return total_locals; }
+ LocalType GetLocalType(uint32_t index) {
+ if (index < static_cast<uint32_t>(sig->parameter_count())) {
+ return sig->GetParam(index);
+ }
+ index -= static_cast<uint32_t>(sig->parameter_count());
+ if (index < local_int32_count) return kAstI32;
+ index -= local_int32_count;
+ if (index < local_int64_count) return kAstI64;
+ index -= local_int64_count;
+ if (index < local_float32_count) return kAstF32;
+ index -= local_float32_count;
+ if (index < local_float64_count) return kAstF64;
+ return kAstStmt;
+ }
+
+ void AddLocals(LocalType type, uint32_t count) {
+ switch (type) {
+ case kAstI32:
+ local_int32_count += count;
+ break;
+ case kAstI64:
+ local_int64_count += count;
+ break;
+ case kAstF32:
+ local_float32_count += count;
+ break;
+ case kAstF64:
+ local_float64_count += count;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ total_locals += count;
+ DCHECK(total_locals ==
+ (sig->parameter_count() + local_int32_count + local_int64_count +
+ local_float32_count + local_float64_count));
+ }
+
+ void SumLocals() {
+ total_locals = static_cast<uint32_t>(sig->parameter_count()) +
+ local_int32_count + local_int64_count + local_float32_count +
+ local_float64_count;
+ }
+};
+
+struct Tree;
+typedef Result<Tree*> TreeResult;
+
+std::ostream& operator<<(std::ostream& os, const Tree& tree);
+
+TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
+ const byte* end);
+TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
+ const byte* start, const byte* end);
+
+inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
+ const byte* end) {
+ return VerifyWasmCode(env, nullptr, start, end);
+}
+
+inline TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env,
+ const byte* start, const byte* end) {
+ return BuildTFGraph(builder, env, nullptr, start, end);
+}
+
+enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
+
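+// Reads an unsigned LEB128-encoded integer from [pc, limit), storing the
+// number of bytes consumed and the decoded value.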
+ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
+ int*, uint32_t*);
+
+// Computes the length of the opcode at the given address.
+int OpcodeLength(const byte* pc);
+
+// Computes the arity (number of sub-nodes) of the opcode at the given address.
+int OpcodeArity(FunctionEnv* env, const byte* pc);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_AST_DECODER_H_
diff --git a/chromium/v8/src/wasm/decoder.h b/chromium/v8/src/wasm/decoder.h
new file mode 100644
index 00000000000..698919d6a02
--- /dev/null
+++ b/chromium/v8/src/wasm/decoder.h
@@ -0,0 +1,233 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_DECODER_H_
+#define V8_WASM_DECODER_H_
+
+#include "src/base/smart-pointers.h"
+#include "src/flags.h"
+#include "src/signature.h"
+#include "src/wasm/wasm-result.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+// A helper utility to decode bytes, integers, fields, varints, etc., from
+// a buffer of bytes.
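+//
+// A short usage sketch (methods as declared below):
+//
+//   Decoder decoder(start, end);
+//   uint8_t flags = decoder.u8("flags");
+//   int length = 0;
+//   uint32_t count = decoder.u32v(&length, "count");
+//   if (decoder.failed()) return;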
+class Decoder {
+ public:
+ Decoder(const byte* start, const byte* end)
+ : start_(start),
+ pc_(start),
+ limit_(end),
+ error_pc_(nullptr),
+ error_pt_(nullptr) {}
+
+ virtual ~Decoder() {}
+
+ // Reads an 8-bit unsigned integer (byte) and advances {pc_}.
+ uint8_t u8(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint8_t");
+ if (checkAvailable(1)) {
+ byte val = *(pc_++);
+ TRACE("%02x = %d\n", val, val);
+ return val;
+ } else {
+ error("expected 1 byte, but fell off end");
+ return traceOffEnd<uint8_t>();
+ }
+ }
+
+ // Reads a 16-bit unsigned integer (little endian) and advances {pc_}.
+ uint16_t u16(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint16_t");
+ if (checkAvailable(2)) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ byte b0 = pc_[0];
+ byte b1 = pc_[1];
+#else
+ byte b1 = pc_[0];
+ byte b0 = pc_[1];
+#endif
+ uint16_t val = static_cast<uint16_t>(b1 << 8) | b0;
+ TRACE("%02x %02x = %d\n", pc_[0], pc_[1], val);
+ pc_ += 2;
+ return val;
+ } else {
+ error("expected 2 bytes, but fell off end");
+ return traceOffEnd<uint16_t>();
+ }
+ }
+
+ // Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
+ uint32_t u32(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint32_t");
+ if (checkAvailable(4)) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ byte b0 = pc_[0];
+ byte b1 = pc_[1];
+ byte b2 = pc_[2];
+ byte b3 = pc_[3];
+#else
+ byte b3 = pc_[0];
+ byte b2 = pc_[1];
+ byte b1 = pc_[2];
+ byte b0 = pc_[3];
+#endif
+ uint32_t val = static_cast<uint32_t>(b3 << 24) |
+ static_cast<uint32_t>(b2 << 16) |
+ static_cast<uint32_t>(b1 << 8) | b0;
+ TRACE("%02x %02x %02x %02x = %u\n", pc_[0], pc_[1], pc_[2], pc_[3], val);
+ pc_ += 4;
+ return val;
+ } else {
+ error("expected 4 bytes, but fell off end");
+ return traceOffEnd<uint32_t>();
+ }
+ }
+
+ // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
+ uint32_t u32v(int* length, const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "varint");
+
+ if (!checkAvailable(1)) {
+ error("expected at least 1 byte, but fell off end");
+ return traceOffEnd<uint32_t>();
+ }
+
+ const byte* pos = pc_;
+ const byte* end = pc_ + 5;
+ if (end > limit_) end = limit_;
+
+ uint32_t result = 0;
+ int shift = 0;
+ byte b = 0;
+ while (pc_ < end) {
+ b = *pc_++;
+ TRACE("%02x ", b);
+ result = result | (static_cast<uint32_t>(b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+
+ *length = static_cast<int>(pc_ - pos);
+ if (pc_ == end && (b & 0x80)) {
+ error(pc_ - 1, "varint too large");
+ } else {
+ TRACE("= %u\n", result);
+ }
+ return result;
+ }
+
+ // Checks that at least {size} bytes exist between {pc_} and {limit_}.
+ bool checkAvailable(int size) {
+ if (pc_ < start_ || (pc_ + size) > limit_) {
+ error(pc_, nullptr, "expected %d bytes, fell off end", size);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void error(const char* msg) { error(pc_, nullptr, msg); }
+
+ void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
+
+ // Sets internal error state.
+ void error(const byte* pc, const byte* pt, const char* format, ...) {
+ if (ok()) {
+#if DEBUG
+ if (FLAG_wasm_break_on_decoder_error) {
+ base::OS::DebugBreak();
+ }
+#endif
+ const int kMaxErrorMsg = 256;
+ char* buffer = new char[kMaxErrorMsg];
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VSNPrintF(buffer, kMaxErrorMsg - 1, format, arguments);
+ va_end(arguments);
+ error_msg_.Reset(buffer);
+ error_pc_ = pc;
+ error_pt_ = pt;
+ onFirstError();
+ }
+ }
+
+ // Behavior triggered on first error, overridden in subclasses.
+ virtual void onFirstError() {}
+
+ // Debugging helper to print bytes up to the end.
+ template <typename T>
+ T traceOffEnd() {
+ T t = 0;
+ for (const byte* ptr = pc_; ptr < limit_; ptr++) {
+ TRACE("%02x ", *ptr);
+ }
+ TRACE("<end>\n");
+ pc_ = limit_;
+ return t;
+ }
+
+ // Converts the given value to a {Result}, copying the error if necessary.
+ template <typename T>
+ Result<T> toResult(T val) {
+ Result<T> result;
+ if (error_pc_) {
+ result.error_code = kError;
+ result.start = start_;
+ result.error_pc = error_pc_;
+ result.error_pt = error_pt_;
+ result.error_msg = error_msg_;
+ error_msg_.Reset(nullptr);
+ } else {
+ result.error_code = kSuccess;
+ }
+ result.val = val;
+ return result;
+ }
+
+ // Resets the boundaries of this decoder.
+ void Reset(const byte* start, const byte* end) {
+ start_ = start;
+ pc_ = start;
+ limit_ = end;
+ error_pc_ = nullptr;
+ error_pt_ = nullptr;
+ error_msg_.Reset(nullptr);
+ }
+
+ bool ok() const { return error_pc_ == nullptr; }
+ bool failed() const { return error_pc_ != nullptr; }
+
+ protected:
+ const byte* start_;
+ const byte* pc_;
+ const byte* limit_;
+ const byte* error_pc_;
+ const byte* error_pt_;
+ base::SmartArrayPointer<char> error_msg_;
+};
+
+#undef TRACE
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_DECODER_H_
diff --git a/chromium/v8/src/wasm/encoder.cc b/chromium/v8/src/wasm/encoder.cc
new file mode 100644
index 00000000000..d8d36338b12
--- /dev/null
+++ b/chromium/v8/src/wasm/encoder.cc
@@ -0,0 +1,592 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/signature.h"
+
+#include "src/handles.h"
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/v8memory.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO: Add error cases for adding too many locals, too many functions, and
+// bad indices in the body.
+
+namespace {
+void EmitUint8(byte** b, uint8_t x) {
+ Memory::uint8_at(*b) = x;
+ *b += 1;
+}
+
+
+void EmitUint16(byte** b, uint16_t x) {
+ Memory::uint16_at(*b) = x;
+ *b += 2;
+}
+
+
+void EmitUint32(byte** b, uint32_t x) {
+ Memory::uint32_at(*b) = x;
+ *b += 4;
+}
+
+
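+// Emits {val} in unsigned LEB128 format: seven bits per byte, with the top
+// bit set on every byte except the last.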
+void EmitVarInt(byte** b, size_t val) {
+ while (true) {
+ size_t next = val >> 7;
+ byte out = static_cast<byte>(val & 0x7f);
+ if (next) {
+ *((*b)++) = 0x80 | out;
+ val = next;
+ } else {
+ *((*b)++) = out;
+ break;
+ }
+ }
+}
+} // namespace
+
+
+struct WasmFunctionBuilder::Type {
+ bool param_;
+ LocalType type_;
+};
+
+
+WasmFunctionBuilder::WasmFunctionBuilder(Zone* zone)
+ : return_type_(kAstI32),
+ locals_(zone),
+ exported_(0),
+ external_(0),
+ body_(zone),
+ local_indices_(zone),
+ name_(zone) {}
+
+
+uint16_t WasmFunctionBuilder::AddParam(LocalType type) {
+ return AddVar(type, true);
+}
+
+
+uint16_t WasmFunctionBuilder::AddLocal(LocalType type) {
+ return AddVar(type, false);
+}
+
+
+uint16_t WasmFunctionBuilder::AddVar(LocalType type, bool param) {
+ locals_.push_back({param, type});
+ return static_cast<uint16_t>(locals_.size() - 1);
+}
+
+
+void WasmFunctionBuilder::ReturnType(LocalType type) { return_type_ = type; }
+
+
+void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
+ EmitCode(code, code_size, nullptr, 0);
+}
+
+
+void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size,
+ const uint32_t* local_indices,
+ uint32_t indices_size) {
+ size_t size = body_.size();
+ for (size_t i = 0; i < code_size; i++) {
+ body_.push_back(code[i]);
+ }
+ for (size_t i = 0; i < indices_size; i++) {
+ local_indices_.push_back(local_indices[i] + static_cast<uint32_t>(size));
+ }
+}
+
+
+void WasmFunctionBuilder::Emit(WasmOpcode opcode) {
+ body_.push_back(static_cast<byte>(opcode));
+}
+
+
+void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
+ body_.push_back(static_cast<byte>(opcode));
+ body_.push_back(immediate);
+}
+
+
+void WasmFunctionBuilder::EmitWithLocal(WasmOpcode opcode) {
+ body_.push_back(static_cast<byte>(opcode));
+ local_indices_.push_back(static_cast<uint32_t>(body_.size()) - 1);
+}
+
+
+uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
+ body_.push_back(immediate);
+ return static_cast<uint32_t>(body_.size()) - 1;
+}
+
+
+void WasmFunctionBuilder::EditImmediate(uint32_t offset, const byte immediate) {
+ DCHECK(offset < body_.size());
+ body_[offset] = immediate;
+}
+
+
+void WasmFunctionBuilder::Exported(uint8_t flag) { exported_ = flag; }
+
+
+void WasmFunctionBuilder::External(uint8_t flag) { external_ = flag; }
+
+void WasmFunctionBuilder::SetName(const unsigned char* name, int name_length) {
+ name_.clear();
+ if (name_length > 0) {
+ for (int i = 0; i < name_length; i++) {
+ name_.push_back(*(name + i));
+ }
+ name_.push_back('\0');
+ }
+}
+
+
+WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
+ WasmModuleBuilder* mb) const {
+ WasmFunctionEncoder* e =
+ new (zone) WasmFunctionEncoder(zone, return_type_, exported_, external_);
+ uint16_t* var_index = zone->NewArray<uint16_t>(locals_.size());
+ IndexVars(e, var_index);
+ if (body_.size() > 0) {
+ // TODO(titzer): iterate over local indexes, not the bytes.
+ const byte* start = &body_[0];
+ const byte* end = start + body_.size();
+ size_t local_index = 0;
+ for (size_t i = 0; i < body_.size();) {
+ if (local_index < local_indices_.size() &&
+ i == local_indices_[local_index]) {
+ int length = 0;
+ uint32_t index;
+ ReadUnsignedLEB128Operand(start + i, end, &length, &index);
+ uint16_t new_index = var_index[index];
+ const std::vector<uint8_t>& index_vec = UnsignedLEB128From(new_index);
+ for (size_t j = 0; j < index_vec.size(); j++) {
+ e->body_.push_back(index_vec.at(j));
+ }
+ i += length;
+ local_index++;
+ } else {
+ e->body_.push_back(*(start + i));
+ i++;
+ }
+ }
+ }
+ FunctionSig::Builder sig(zone, return_type_ == kAstStmt ? 0 : 1,
+ e->params_.size());
+ if (return_type_ != kAstStmt) {
+ sig.AddReturn(static_cast<LocalType>(return_type_));
+ }
+ for (size_t i = 0; i < e->params_.size(); i++) {
+ sig.AddParam(static_cast<LocalType>(e->params_[i]));
+ }
+ e->signature_index_ = mb->AddSignature(sig.Build());
+ e->name_.insert(e->name_.begin(), name_.begin(), name_.end());
+ return e;
+}
+
+
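+// Renumbers variables so that parameters come first, followed by the int32,
+// int64, float32, and float64 locals, matching the layout that
+// FunctionEnv::GetLocalType assumes during decoding.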
+void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
+ uint16_t* var_index) const {
+ uint16_t param = 0;
+ uint16_t int32 = 0;
+ uint16_t int64 = 0;
+ uint16_t float32 = 0;
+ uint16_t float64 = 0;
+ for (size_t i = 0; i < locals_.size(); i++) {
+ if (locals_.at(i).param_) {
+ param++;
+ } else if (locals_.at(i).type_ == kAstI32) {
+ int32++;
+ } else if (locals_.at(i).type_ == kAstI64) {
+ int64++;
+ } else if (locals_.at(i).type_ == kAstF32) {
+ float32++;
+ } else if (locals_.at(i).type_ == kAstF64) {
+ float64++;
+ }
+ }
+ e->local_int32_count_ = int32;
+ e->local_int64_count_ = int64;
+ e->local_float32_count_ = float32;
+ e->local_float64_count_ = float64;
+ float64 = param + int32 + int64 + float32;
+ float32 = param + int32 + int64;
+ int64 = param + int32;
+ int32 = param;
+ param = 0;
+ for (size_t i = 0; i < locals_.size(); i++) {
+ if (locals_.at(i).param_) {
+ e->params_.push_back(locals_.at(i).type_);
+ var_index[i] = param++;
+ } else if (locals_.at(i).type_ == kAstI32) {
+ var_index[i] = int32++;
+ } else if (locals_.at(i).type_ == kAstI64) {
+ var_index[i] = int64++;
+ } else if (locals_.at(i).type_ == kAstF32) {
+ var_index[i] = float32++;
+ } else if (locals_.at(i).type_ == kAstF64) {
+ var_index[i] = float64++;
+ }
+ }
+}
+
+
+WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalType return_type,
+ bool exported, bool external)
+ : params_(zone),
+ exported_(exported),
+ external_(external),
+ body_(zone),
+ name_(zone) {}
+
+
+uint32_t WasmFunctionEncoder::HeaderSize() const {
+ uint32_t size = 3;
+ if (HasLocals()) size += 8;
+ if (!external_) size += 2;
+ if (HasName()) size += 4;
+ return size;
+}
+
+
+uint32_t WasmFunctionEncoder::BodySize() const {
+ return external_ ? 0 : static_cast<uint32_t>(body_.size());
+}
+
+
+uint32_t WasmFunctionEncoder::NameSize() const {
+ return exported_ ? static_cast<uint32_t>(name_.size()) : 0;
+}
+
+
+void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
+ byte** body) const {
+ uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
+ (external_ ? kDeclFunctionImport : 0) |
+ (HasLocals() ? kDeclFunctionLocals : 0) |
+ (HasName() ? kDeclFunctionName : 0);
+
+ EmitUint8(header, decl_bits);
+ EmitUint16(header, signature_index_);
+
+ if (HasName()) {
+ uint32_t name_offset = static_cast<uint32_t>(*body - buffer);
+ EmitUint32(header, name_offset);
+ std::memcpy(*body, &name_[0], name_.size());
+ (*body) += name_.size();
+ }
+
+ if (HasLocals()) {
+ EmitUint16(header, local_int32_count_);
+ EmitUint16(header, local_int64_count_);
+ EmitUint16(header, local_float32_count_);
+ EmitUint16(header, local_float64_count_);
+ }
+
+ if (!external_) {
+ EmitUint16(header, static_cast<uint16_t>(body_.size()));
+ if (body_.size() > 0) {
+ std::memcpy(*header, &body_[0], body_.size());
+ (*header) += body_.size();
+ }
+ }
+}
+
+
+WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
+ uint32_t size, uint32_t dest)
+ : data_(zone), dest_(dest) {
+ for (size_t i = 0; i < size; i++) {
+ data_.push_back(data[i]);
+ }
+}
+
+
+uint32_t WasmDataSegmentEncoder::HeaderSize() const {
+ static const int kDataSegmentSize = 13;
+ return kDataSegmentSize;
+}
+
+
+uint32_t WasmDataSegmentEncoder::BodySize() const {
+ return static_cast<uint32_t>(data_.size());
+}
+
+
+void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
+ byte** body) const {
+ uint32_t body_offset = static_cast<uint32_t>(*body - buffer);
+ EmitUint32(header, dest_);
+ EmitUint32(header, body_offset);
+ EmitUint32(header, static_cast<uint32_t>(data_.size()));
+ EmitUint8(header, 1); // init
+
+ std::memcpy(*body, &data_[0], data_.size());
+ (*body) += data_.size();
+}
+
+
+WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
+ : zone_(zone),
+ signatures_(zone),
+ functions_(zone),
+ data_segments_(zone),
+ indirect_functions_(zone),
+ globals_(zone),
+ signature_map_(zone) {}
+
+
+uint16_t WasmModuleBuilder::AddFunction() {
+ functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
+ return static_cast<uint16_t>(functions_.size() - 1);
+}
+
+
+WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
+ if (functions_.size() > index) {
+ return functions_.at(index);
+ } else {
+ return nullptr;
+ }
+}
+
+
+void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
+ data_segments_.push_back(data);
+}
+
+
+int WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
+ FunctionSig* b) const {
+ if (a->return_count() < b->return_count()) return -1;
+ if (a->return_count() > b->return_count()) return 1;
+ if (a->parameter_count() < b->parameter_count()) return -1;
+ if (a->parameter_count() > b->parameter_count()) return 1;
+ for (size_t r = 0; r < a->return_count(); r++) {
+ if (a->GetReturn(r) < b->GetReturn(r)) return -1;
+ if (a->GetReturn(r) > b->GetReturn(r)) return 1;
+ }
+ for (size_t p = 0; p < a->parameter_count(); p++) {
+ if (a->GetParam(p) < b->GetParam(p)) return -1;
+ if (a->GetParam(p) > b->GetParam(p)) return 1;
+ }
+ return 0;
+}
+
+
+uint16_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
+ SignatureMap::iterator pos = signature_map_.find(sig);
+ if (pos != signature_map_.end()) {
+ return pos->second;
+ } else {
+ uint16_t index = static_cast<uint16_t>(signatures_.size());
+ signature_map_[sig] = index;
+ signatures_.push_back(sig);
+ return index;
+ }
+}
+
+
+void WasmModuleBuilder::AddIndirectFunction(uint16_t index) {
+ indirect_functions_.push_back(index);
+}
+
+
+WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
+ WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
+ for (auto function : functions_) {
+ writer->functions_.push_back(function->Build(zone, this));
+ }
+ for (auto segment : data_segments_) {
+ writer->data_segments_.push_back(segment);
+ }
+ for (auto sig : signatures_) {
+ writer->signatures_.push_back(sig);
+ }
+ for (auto index : indirect_functions_) {
+ writer->indirect_functions_.push_back(index);
+ }
+ for (auto global : globals_) {
+ writer->globals_.push_back(global);
+ }
+ return writer;
+}
+
+
+uint32_t WasmModuleBuilder::AddGlobal(MachineType type, bool exported) {
+ globals_.push_back(std::make_pair(type, exported));
+ return static_cast<uint32_t>(globals_.size() - 1);
+}
+
+
+WasmModuleWriter::WasmModuleWriter(Zone* zone)
+ : functions_(zone),
+ data_segments_(zone),
+ signatures_(zone),
+ indirect_functions_(zone),
+ globals_(zone) {}
+
+
+struct Sizes {
+ size_t header_size;
+ size_t body_size;
+
+ size_t total() { return header_size + body_size; }
+
+ void Add(size_t header, size_t body) {
+ header_size += header;
+ body_size += body;
+ }
+
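+ // Accounts for a section header: one byte for the section code plus one
+ // byte per seven bits of the LEB128-encoded entry count.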
+ void AddSection(size_t size) {
+ if (size > 0) {
+ Add(1, 0);
+ while (size > 0) {
+ Add(1, 0);
+ size = size >> 7;
+ }
+ }
+ }
+};
+
+
+WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
+ Sizes sizes = {0, 0};
+
+ sizes.Add(1, 0);
+ sizes.Add(kDeclMemorySize, 0);
+
+ sizes.AddSection(signatures_.size());
+ for (auto sig : signatures_) {
+ sizes.Add(2 + sig->parameter_count(), 0);
+ }
+
+ sizes.AddSection(globals_.size());
+ if (globals_.size() > 0) {
+ sizes.Add(kDeclGlobalSize * globals_.size(), 0);
+ }
+
+ sizes.AddSection(functions_.size());
+ for (auto function : functions_) {
+ sizes.Add(function->HeaderSize() + function->BodySize(),
+ function->NameSize());
+ }
+
+ sizes.AddSection(data_segments_.size());
+ for (auto segment : data_segments_) {
+ sizes.Add(segment->HeaderSize(), segment->BodySize());
+ }
+
+ sizes.AddSection(indirect_functions_.size());
+ sizes.Add(2 * static_cast<uint32_t>(indirect_functions_.size()), 0);
+
+ if (sizes.body_size > 0) sizes.Add(1, 0);
+
+ ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + sizes.header_size;
+
+ // -- emit memory declaration ------------------------------------------------
+ EmitUint8(&header, kDeclMemory);
+ EmitUint8(&header, 16); // min memory size
+ EmitUint8(&header, 16); // max memory size
+ EmitUint8(&header, 0); // memory export
+
+ // -- emit globals -----------------------------------------------------------
+ if (globals_.size() > 0) {
+ EmitUint8(&header, kDeclGlobals);
+ EmitVarInt(&header, globals_.size());
+
+ for (auto global : globals_) {
+ EmitUint32(&header, 0);
+ EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
+ EmitUint8(&header, global.second);
+ }
+ }
+
+ // -- emit signatures --------------------------------------------------------
+ if (signatures_.size() > 0) {
+ EmitUint8(&header, kDeclSignatures);
+ EmitVarInt(&header, signatures_.size());
+
+ for (FunctionSig* sig : signatures_) {
+ EmitUint8(&header, static_cast<byte>(sig->parameter_count()));
+ if (sig->return_count() > 0) {
+ EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
+ } else {
+ EmitUint8(&header, kLocalVoid);
+ }
+ for (size_t j = 0; j < sig->parameter_count(); j++) {
+ EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
+ }
+ }
+ }
+
+ // -- emit functions ---------------------------------------------------------
+ if (functions_.size() > 0) {
+ EmitUint8(&header, kDeclFunctions);
+ EmitVarInt(&header, functions_.size());
+
+ for (auto func : functions_) {
+ func->Serialize(buffer, &header, &body);
+ }
+ }
+
+ // -- emit data segments -----------------------------------------------------
+ if (data_segments_.size() > 0) {
+ EmitUint8(&header, kDeclDataSegments);
+ EmitVarInt(&header, data_segments_.size());
+
+ for (auto segment : data_segments_) {
+ segment->Serialize(buffer, &header, &body);
+ }
+ }
+
+ // -- emit function table ----------------------------------------------------
+ if (indirect_functions_.size() > 0) {
+ EmitUint8(&header, kDeclFunctionTable);
+ EmitVarInt(&header, indirect_functions_.size());
+
+ for (auto index : indirect_functions_) {
+ EmitUint16(&header, index);
+ }
+ }
+
+ if (sizes.body_size > 0) EmitUint8(&header, kDeclEnd);
+
+ return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
+}
+
+
+std::vector<uint8_t> UnsignedLEB128From(uint32_t result) {
+ std::vector<uint8_t> output;
+ uint8_t next = 0;
+ int shift = 0;
+ do {
+ next = static_cast<uint8_t>(result >> shift);
+ if (((result >> shift) & 0xFFFFFF80) != 0) {
+ next = next | 0x80;
+ }
+ output.push_back(next);
+ shift += 7;
+ } while ((next & 0x80) != 0);
+ return output;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/encoder.h b/chromium/v8/src/wasm/encoder.h
new file mode 100644
index 00000000000..f0fabe998a0
--- /dev/null
+++ b/chromium/v8/src/wasm/encoder.h
@@ -0,0 +1,157 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_ENCODER_H_
+#define V8_WASM_ENCODER_H_
+
+#include "src/signature.h"
+#include "src/zone-containers.h"
+
+#include "src/base/smart-pointers.h"
+
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleBuilder;
+
+class WasmFunctionEncoder : public ZoneObject {
+ public:
+ uint32_t HeaderSize() const;
+ uint32_t BodySize() const;
+ uint32_t NameSize() const;
+ void Serialize(byte* buffer, byte** header, byte** body) const;
+
+ private:
+ WasmFunctionEncoder(Zone* zone, LocalType return_type, bool exported,
+ bool external);
+ friend class WasmFunctionBuilder;
+ uint16_t signature_index_;
+ ZoneVector<LocalType> params_;
+ uint16_t local_int32_count_;
+ uint16_t local_int64_count_;
+ uint16_t local_float32_count_;
+ uint16_t local_float64_count_;
+ bool exported_;
+ bool external_;
+ ZoneVector<uint8_t> body_;
+ ZoneVector<char> name_;
+
+ bool HasLocals() const {
+ return (local_int32_count_ + local_int64_count_ + local_float32_count_ +
+ local_float64_count_) > 0;
+ }
+
+ bool HasName() const { return exported_ && name_.size() > 0; }
+};
+
+class WasmFunctionBuilder : public ZoneObject {
+ public:
+ uint16_t AddParam(LocalType type);
+ uint16_t AddLocal(LocalType type);
+ void ReturnType(LocalType type);
+ void EmitCode(const byte* code, uint32_t code_size);
+ void EmitCode(const byte* code, uint32_t code_size,
+ const uint32_t* local_indices, uint32_t indices_size);
+ void Emit(WasmOpcode opcode);
+ void EmitWithU8(WasmOpcode opcode, const byte immediate);
+ void EmitWithLocal(WasmOpcode opcode);
+ uint32_t EmitEditableImmediate(const byte immediate);
+ void EditImmediate(uint32_t offset, const byte immediate);
+ void Exported(uint8_t flag);
+ void External(uint8_t flag);
+ void SetName(const unsigned char* name, int name_length);
+ WasmFunctionEncoder* Build(Zone* zone, WasmModuleBuilder* mb) const;
+
+ private:
+ explicit WasmFunctionBuilder(Zone* zone);
+ friend class WasmModuleBuilder;
+ LocalType return_type_;
+ struct Type;
+ ZoneVector<Type> locals_;
+ uint8_t exported_;
+ uint8_t external_;
+ ZoneVector<uint8_t> body_;
+ ZoneVector<uint32_t> local_indices_;
+ ZoneVector<char> name_;
+ uint16_t AddVar(LocalType type, bool param);
+ void IndexVars(WasmFunctionEncoder* e, uint16_t* var_index) const;
+};
+
+class WasmDataSegmentEncoder : public ZoneObject {
+ public:
+ WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
+ uint32_t dest);
+ uint32_t HeaderSize() const;
+ uint32_t BodySize() const;
+ void Serialize(byte* buffer, byte** header, byte** body) const;
+
+ private:
+ ZoneVector<byte> data_;
+ uint32_t dest_;
+};
+
+class WasmModuleIndex : public ZoneObject {
+ public:
+ const byte* Begin() const { return begin_; }
+ const byte* End() const { return end_; }
+
+ private:
+ friend class WasmModuleWriter;
+ WasmModuleIndex(const byte* begin, const byte* end)
+ : begin_(begin), end_(end) {}
+ const byte* begin_;
+ const byte* end_;
+};
+
+class WasmModuleWriter : public ZoneObject {
+ public:
+ WasmModuleIndex* WriteTo(Zone* zone) const;
+
+ private:
+ friend class WasmModuleBuilder;
+ explicit WasmModuleWriter(Zone* zone);
+ ZoneVector<WasmFunctionEncoder*> functions_;
+ ZoneVector<WasmDataSegmentEncoder*> data_segments_;
+ ZoneVector<FunctionSig*> signatures_;
+ ZoneVector<uint16_t> indirect_functions_;
+ ZoneVector<std::pair<MachineType, bool>> globals_;
+};
+
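+// A builder for entire wasm modules. A minimal usage sketch ({code} and
+// {code_size} stand in for the caller's raw function body bytes):
+//
+//   WasmModuleBuilder builder(&zone);
+//   uint16_t f = builder.AddFunction();
+//   builder.FunctionAt(f)->ReturnType(kAstI32);
+//   builder.FunctionAt(f)->EmitCode(code, code_size);
+//   WasmModuleIndex* module = builder.Build(&zone)->WriteTo(&zone);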
+class WasmModuleBuilder : public ZoneObject {
+ public:
+ explicit WasmModuleBuilder(Zone* zone);
+ uint16_t AddFunction();
+ uint32_t AddGlobal(MachineType type, bool exported);
+ WasmFunctionBuilder* FunctionAt(size_t index);
+ void AddDataSegment(WasmDataSegmentEncoder* data);
+ uint16_t AddSignature(FunctionSig* sig);
+ void AddIndirectFunction(uint16_t index);
+ WasmModuleWriter* Build(Zone* zone);
+
+ private:
+ struct CompareFunctionSigs {
+ int operator()(FunctionSig* a, FunctionSig* b) const;
+ };
+ typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;
+
+ Zone* zone_;
+ ZoneVector<FunctionSig*> signatures_;
+ ZoneVector<WasmFunctionBuilder*> functions_;
+ ZoneVector<WasmDataSegmentEncoder*> data_segments_;
+ ZoneVector<uint16_t> indirect_functions_;
+ ZoneVector<std::pair<MachineType, bool>> globals_;
+ SignatureMap signature_map_;
+};
+
+std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_ENCODER_H_
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
new file mode 100644
index 00000000000..0b922475581
--- /dev/null
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -0,0 +1,547 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/module-decoder.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+
+// The main logic for decoding the bytes of a module.
+class ModuleDecoder : public Decoder {
+ public:
+ ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
+ bool asm_js)
+ : Decoder(module_start, module_end), module_zone(zone), asm_js_(asm_js) {
+ result_.start = start_;
+ if (limit_ < start_) {
+ error(start_, "end is less than start");
+ limit_ = start_;
+ }
+ }
+
+ virtual void onFirstError() {
+ pc_ = limit_; // On error, terminate section decoding loop.
+ }
+
+ // Decodes an entire module.
+ ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
+ pc_ = start_;
+ module->module_start = start_;
+ module->module_end = limit_;
+ module->min_mem_size_log2 = 0;
+ module->max_mem_size_log2 = 0;
+ module->mem_export = false;
+ module->mem_external = false;
+ module->globals = new std::vector<WasmGlobal>();
+ module->signatures = new std::vector<FunctionSig*>();
+ module->functions = new std::vector<WasmFunction>();
+ module->data_segments = new std::vector<WasmDataSegment>();
+ module->function_table = new std::vector<uint16_t>();
+
+ bool sections[kMaxModuleSectionCode];
+ memset(sections, 0, sizeof(sections));
+
+ // Decode the module sections.
+ while (pc_ < limit_) {
+ TRACE("DecodeSection\n");
+ WasmSectionDeclCode section =
+ static_cast<WasmSectionDeclCode>(u8("section"));
+ // Each section should appear at most once.
+ if (section < kMaxModuleSectionCode) {
+ CheckForPreviousSection(sections, section, false);
+ sections[section] = true;
+ }
+
+ switch (section) {
+ case kDeclEnd:
+ // Terminate section decoding.
+ limit_ = pc_;
+ break;
+ case kDeclMemory:
+ module->min_mem_size_log2 = u8("min memory");
+ module->max_mem_size_log2 = u8("max memory");
+ module->mem_export = u8("export memory") != 0;
+ break;
+ case kDeclSignatures: {
+ int length;
+ uint32_t signatures_count = u32v(&length, "signatures count");
+ module->signatures->reserve(SafeReserve(signatures_count));
+ // Decode signatures.
+ for (uint32_t i = 0; i < signatures_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ FunctionSig* s = sig(); // read function sig.
+ module->signatures->push_back(s);
+ }
+ break;
+ }
+ case kDeclFunctions: {
+ // Functions require a signature table first.
+ CheckForPreviousSection(sections, kDeclSignatures, true);
+ int length;
+ uint32_t functions_count = u32v(&length, "functions count");
+ module->functions->reserve(SafeReserve(functions_count));
+ // Set up module environment for verification.
+ ModuleEnv menv;
+ menv.module = module;
+ menv.globals_area = 0;
+ menv.mem_start = 0;
+ menv.mem_end = 0;
+ menv.function_code = nullptr;
+ menv.asm_js = asm_js_;
+ // Decode functions.
+ for (uint32_t i = 0; i < functions_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeFunction[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->functions->push_back(
+ {nullptr, 0, 0, 0, 0, 0, 0, false, false});
+ WasmFunction* function = &module->functions->back();
+ DecodeFunctionInModule(module, function, false);
+ }
+ if (ok() && verify_functions) {
+ for (uint32_t i = 0; i < functions_count; i++) {
+ if (failed()) break;
+ WasmFunction* function = &module->functions->at(i);
+ if (!function->external) {
+ VerifyFunctionBody(i, &menv, function);
+ if (result_.failed())
+ error(result_.error_pc, result_.error_msg.get());
+ }
+ }
+ }
+ break;
+ }
+ case kDeclGlobals: {
+ int length;
+ uint32_t globals_count = u32v(&length, "globals count");
+ module->globals->reserve(SafeReserve(globals_count));
+ // Decode globals.
+ for (uint32_t i = 0; i < globals_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeGlobal[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->globals->push_back({0, MachineType::Int32(), 0, false});
+ WasmGlobal* global = &module->globals->back();
+ DecodeGlobalInModule(global);
+ }
+ break;
+ }
+ case kDeclDataSegments: {
+ int length;
+ uint32_t data_segments_count = u32v(&length, "data segments count");
+ module->data_segments->reserve(SafeReserve(data_segments_count));
+ // Decode data segments.
+ for (uint32_t i = 0; i < data_segments_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeDataSegment[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->data_segments->push_back({0, 0, 0});
+ WasmDataSegment* segment = &module->data_segments->back();
+ DecodeDataSegmentInModule(segment);
+ }
+ break;
+ }
+ case kDeclFunctionTable: {
+ // An indirect function table requires functions first.
+ CheckForPreviousSection(sections, kDeclFunctions, true);
+ int length;
+ uint32_t function_table_count = u32v(&length, "function table count");
+ module->function_table->reserve(SafeReserve(function_table_count));
+ // Decode function table.
+ for (uint32_t i = 0; i < function_table_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeFunctionTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ uint16_t index = u16();
+ if (index >= module->functions->size()) {
+ error(pc_ - 2, "invalid function index");
+ break;
+ }
+ module->function_table->push_back(index);
+ }
+ break;
+ }
+ case kDeclWLL: {
+ // Reserved for experimentation by the Web Low-level Language project
+ // which is augmenting the binary encoding with source code meta
+ // information. This section does not affect the semantics of the code
+ // and can be ignored by the runtime. https://github.com/JSStats/wll
+ int length;
+ uint32_t section_size = u32v(&length, "section size");
+ if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
+ error(pc_ - length, "invalid section size");
+ break;
+ }
+ pc_ += section_size;
+ break;
+ }
+ default:
+ error(pc_ - 1, nullptr, "unrecognized section 0x%02x", section);
+ break;
+ }
+ }
+
+ return toResult(module);
+ }
+
+ uint32_t SafeReserve(uint32_t count) {
+ // Avoid OOM by only reserving up to a certain size.
+ const uint32_t kMaxReserve = 20000;
+ return count < kMaxReserve ? count : kMaxReserve;
+ }
+
+ void CheckForPreviousSection(bool* sections, WasmSectionDeclCode section,
+ bool present) {
+ if (section >= kMaxModuleSectionCode) return;
+ if (sections[section] == present) return;
+ const char* name = "";
+ switch (section) {
+ case kDeclMemory:
+ name = "memory";
+ break;
+ case kDeclSignatures:
+ name = "signatures";
+ break;
+ case kDeclFunctions:
+ name = "function declaration";
+ break;
+ case kDeclGlobals:
+ name = "global variable";
+ break;
+ case kDeclDataSegments:
+ name = "data segment";
+ break;
+ case kDeclFunctionTable:
+ name = "function table";
+ break;
+ default:
+ name = "";
+ break;
+ }
+ if (present) {
+ error(pc_ - 1, nullptr, "required %s section missing", name);
+ } else {
+ error(pc_ - 1, nullptr, "%s section already present", name);
+ }
+ }
+
+ // Decodes a single anonymous function starting at {start_}.
+ FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
+ WasmFunction* function) {
+ pc_ = start_;
+ function->sig = sig(); // read signature
+ function->name_offset = 0; // ---- name
+ // Code starts right after the four u16 local counts read below.
+ function->code_start_offset = off(pc_ + 8); // ---- code start
+ function->code_end_offset = off(limit_); // ---- code end
+ function->local_int32_count = u16(); // read u16
+ function->local_int64_count = u16(); // read u16
+ function->local_float32_count = u16(); // read u16
+ function->local_float64_count = u16(); // read u16
+ function->exported = false; // ---- exported
+ function->external = false; // ---- external
+
+ if (ok()) VerifyFunctionBody(0, module_env, function);
+
+ FunctionResult result;
+ result.CopyFrom(result_); // Copy error code and location.
+ result.val = function;
+ return result;
+ }
+
+ // Decodes a single function signature at {start}.
+ FunctionSig* DecodeFunctionSignature(const byte* start) {
+ pc_ = start;
+ FunctionSig* result = sig();
+ return ok() ? result : nullptr;
+ }
+
+ private:
+ Zone* module_zone;
+ ModuleResult result_;
+ bool asm_js_;
+
+ uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
+
+ // Decodes a single global entry inside a module starting at {pc_}.
+ void DecodeGlobalInModule(WasmGlobal* global) {
+ global->name_offset = string("global name");
+ global->type = mem_type();
+ global->offset = 0;
+ global->exported = u8("exported") != 0;
+ }
+
+ // Decodes a single function entry inside a module starting at {pc_}.
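+ // The first byte is a bitfield of WasmFunctionDeclBit flags; for example,
+ // (kDeclFunctionName | kDeclFunctionExport) == 0x09 marks a named, exported
+ // function with default local counts and a body (illustrative).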
+ void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
+ bool verify_body = true) {
+ byte decl_bits = u8("function decl");
+
+ const byte* sigpos = pc_;
+ function->sig_index = u16("signature index");
+
+ if (function->sig_index >= module->signatures->size()) {
+ return error(sigpos, "invalid signature index");
+ } else {
+ function->sig = module->signatures->at(function->sig_index);
+ }
+
+ TRACE(" +%d <function attributes:%s%s%s%s%s>\n",
+ static_cast<int>(pc_ - start_),
+ decl_bits & kDeclFunctionName ? " name" : "",
+ decl_bits & kDeclFunctionImport ? " imported" : "",
+ decl_bits & kDeclFunctionLocals ? " locals" : "",
+ decl_bits & kDeclFunctionExport ? " exported" : "",
+ (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
+
+ if (decl_bits & kDeclFunctionName) {
+ function->name_offset = string("function name");
+ }
+
+ function->exported = decl_bits & kDeclFunctionExport;
+
+ // Imported functions have no locals or body.
+ if (decl_bits & kDeclFunctionImport) {
+ function->external = true;
+ return;
+ }
+
+ if (decl_bits & kDeclFunctionLocals) {
+ function->local_int32_count = u16("int32 count");
+ function->local_int64_count = u16("int64 count");
+ function->local_float32_count = u16("float32 count");
+ function->local_float64_count = u16("float64 count");
+ }
+
+ uint16_t size = u16("body size");
+ if (ok()) {
+ if ((pc_ + size) > limit_) {
+ return error(pc_, limit_,
+ "expected %d bytes for function body, fell off end", size);
+ }
+ function->code_start_offset = static_cast<uint32_t>(pc_ - start_);
+ function->code_end_offset = function->code_start_offset + size;
+ TRACE(" +%d %-20s: (%d bytes)\n", static_cast<int>(pc_ - start_),
+ "function body", size);
+ pc_ += size;
+ }
+ }
+
+ // Decodes a single data segment entry inside a module starting at {pc_}.
+ void DecodeDataSegmentInModule(WasmDataSegment* segment) {
+ segment->dest_addr =
+ u32("destination"); // TODO(titzer): check it's within the memory size.
+ segment->source_offset = offset("source offset");
+ segment->source_size =
+ u32("source size"); // TODO(titzer): check the size is reasonable.
+ segment->init = u8("init");
+ }
+
+ // Verifies the body (code) of a given function.
+ void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
+ WasmFunction* function) {
+ if (FLAG_trace_wasm_decode_time) {
+ // TODO(titzer): clean me up a bit.
+ OFStream os(stdout);
+ os << "Verifying WASM function:";
+ if (function->name_offset > 0) {
+ os << menv->module->GetName(function->name_offset);
+ }
+ os << std::endl;
+ }
+ FunctionEnv fenv;
+ fenv.module = menv;
+ fenv.sig = function->sig;
+ fenv.local_int32_count = function->local_int32_count;
+ fenv.local_int64_count = function->local_int64_count;
+ fenv.local_float32_count = function->local_float32_count;
+ fenv.local_float64_count = function->local_float64_count;
+ fenv.SumLocals();
+
+ TreeResult result =
+ VerifyWasmCode(&fenv, start_, start_ + function->code_start_offset,
+ start_ + function->code_end_offset);
+ if (result.failed()) {
+ // Wrap the error message from the function decoder.
+ std::ostringstream str;
+ str << "in function #" << func_num << ": ";
+ // TODO(titzer): add function name for the user?
+ str << result;
+ std::string strval = str.str();
+ const char* raw = strval.c_str();
+ size_t len = strlen(raw);
+ char* buffer = new char[len + 1];
+ strncpy(buffer, raw, len);
+ buffer[len] = 0;
+
+ // Copy error code and location.
+ result_.CopyFrom(result);
+ result_.error_msg.Reset(buffer);
+ }
+ }
+
+ // Reads a single 32-bit unsigned integer interpreted as an offset, checks
+ // that the offset is within the bounds of the module, and advances {pc_}.
+ uint32_t offset(const char* name = nullptr) {
+ uint32_t offset = u32(name ? name : "offset");
+ if (offset > static_cast<uint32_t>(limit_ - start_)) {
+ error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
+ }
+ return offset;
+ }
+
+ // Reads a single 32-bit unsigned integer interpreted as an offset into the
+ // module data, intended to also validate the string found there, and
+ // advances {pc_}.
+ uint32_t string(const char* name = nullptr) {
+ return offset(name ? name : "string"); // TODO(titzer): validate string
+ }
+
+ // Reads a single 8-bit integer, interpreting it as a local type.
+ LocalType local_type() {
+ byte val = u8("local type");
+ LocalTypeCode t = static_cast<LocalTypeCode>(val);
+ switch (t) {
+ case kLocalVoid:
+ return kAstStmt;
+ case kLocalI32:
+ return kAstI32;
+ case kLocalI64:
+ return kAstI64;
+ case kLocalF32:
+ return kAstF32;
+ case kLocalF64:
+ return kAstF64;
+ default:
+ error(pc_ - 1, "invalid local type");
+ return kAstStmt;
+ }
+ }
+
+ // Reads a single 8-bit integer, interpreting it as a memory type.
+ MachineType mem_type() {
+ byte val = u8("memory type");
+ MemTypeCode t = static_cast<MemTypeCode>(val);
+ switch (t) {
+ case kMemI8:
+ return MachineType::Int8();
+ case kMemU8:
+ return MachineType::Uint8();
+ case kMemI16:
+ return MachineType::Int16();
+ case kMemU16:
+ return MachineType::Uint16();
+ case kMemI32:
+ return MachineType::Int32();
+ case kMemU32:
+ return MachineType::Uint32();
+ case kMemI64:
+ return MachineType::Int64();
+ case kMemU64:
+ return MachineType::Uint64();
+ case kMemF32:
+ return MachineType::Float32();
+ case kMemF64:
+ return MachineType::Float64();
+ default:
+ error(pc_ - 1, "invalid memory type");
+ return MachineType::None();
+ }
+ }
+
+ // Parses an inline function signature.
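+ // For example, the byte sequence {2, kLocalI32, kLocalF64, kLocalF64}
+ // decodes to a signature taking two f64 parameters and returning a single
+ // i32 (illustrative).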
+ FunctionSig* sig() {
+ byte count = u8("param count");
+ LocalType ret = local_type();
+ FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
+ if (ret != kAstStmt) builder.AddReturn(ret);
+
+ for (int i = 0; i < count; i++) {
+ LocalType param = local_type();
+ if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
+ builder.AddParam(param);
+ }
+ return builder.Build();
+ }
+};
+
+
+// Helpers for nice error messages.
+class ModuleError : public ModuleResult {
+ public:
+ explicit ModuleError(const char* msg) {
+ error_code = kError;
+ size_t len = strlen(msg) + 1;
+ char* result = new char[len];
+ strncpy(result, msg, len);
+ result[len - 1] = 0;
+ error_msg.Reset(result);
+ }
+};
+
+
+// Helpers for nice error messages.
+class FunctionError : public FunctionResult {
+ public:
+ explicit FunctionError(const char* msg) {
+ error_code = kError;
+ size_t len = strlen(msg) + 1;
+ char* result = new char[len];
+ strncpy(result, msg, len);
+ result[len - 1] = 0;
+ error_msg.Reset(result);
+ }
+};
+
+
+ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+ const byte* module_start, const byte* module_end,
+ bool verify_functions, bool asm_js) {
+ size_t size = module_end - module_start;
+ if (module_start > module_end) return ModuleError("start > end");
+ if (size >= kMaxModuleSize) return ModuleError("size >= maximum module size");
+ WasmModule* module = new WasmModule();
+ ModuleDecoder decoder(zone, module_start, module_end, asm_js);
+ return decoder.DecodeModule(module, verify_functions);
+}
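+
+ // Illustrative usage sketch ({bytes_begin}/{bytes_end} are hypothetical;
+ // the caller owns {result.val}):
+ //
+ //   Zone zone;
+ //   ModuleResult result = DecodeWasmModule(isolate, &zone, bytes_begin,
+ //                                          bytes_end, true, false);
+ //   if (result.failed()) { /* report the error */ }
+ //   delete result.val;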
+
+
+FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
+ const byte* end) {
+ ModuleDecoder decoder(zone, start, end, false);
+ return decoder.DecodeFunctionSignature(start);
+}
+
+
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
+ ModuleEnv* module_env,
+ const byte* function_start,
+ const byte* function_end) {
+ size_t size = function_end - function_start;
+ if (function_start > function_end) return FunctionError("start > end");
+ if (size > kMaxFunctionSize)
+ return FunctionError("size > maximum function size");
+ WasmFunction* function = new WasmFunction();
+ ModuleDecoder decoder(zone, function_start, function_end, false);
+ return decoder.DecodeSingleFunction(module_env, function);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/module-decoder.h b/chromium/v8/src/wasm/module-decoder.h
new file mode 100644
index 00000000000..3f469a500e1
--- /dev/null
+++ b/chromium/v8/src/wasm/module-decoder.h
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_DECODER_H_
+#define V8_WASM_MODULE_DECODER_H_
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+// Decodes the bytes of a WASM module between {module_start} and {module_end}.
+ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+ const byte* module_start, const byte* module_end,
+ bool verify_functions, bool asm_js);
+
+// Exposed for testing. Decodes a single function signature, allocating it
+// in the given zone. Returns {nullptr} upon failure.
+FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
+ const byte* end);
+
+// Decodes the bytes of a WASM function between
+// {function_start} and {function_end}.
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
+ const byte* function_start,
+ const byte* function_end);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_DECODER_H_
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
new file mode 100644
index 00000000000..80d8bdb2369
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -0,0 +1,345 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
+#include "src/api-natives.h"
+#include "src/assert-scope.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/factory.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/parsing/parser.h"
+#include "src/typing-asm.h"
+
+#include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/encoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+typedef uint8_t byte;
+
+using v8::internal::wasm::ErrorThrower;
+
+namespace v8 {
+
+namespace {
+struct RawBuffer {
+ const byte* start;
+ const byte* end;
+ size_t size() { return static_cast<size_t>(end - start); }
+};
+
+
+RawBuffer GetRawBufferArgument(
+ ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
+ thrower.Error("Argument 0 must be an array buffer");
+ return {nullptr, nullptr};
+ }
+ Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ // TODO(titzer): allow offsets into buffers, views, etc.
+
+ const byte* start = reinterpret_cast<const byte*>(contents.Data());
+ const byte* end = start + contents.ByteLength();
+
+ if (start == nullptr) {
+ thrower.Error("ArrayBuffer argument is empty");
+ }
+ return {start, end};
+}
+
+
+void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.verifyModule()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, true, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.verifyFunction()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ internal::wasm::FunctionResult result;
+ {
+ // Verification of a single function shouldn't allocate.
+ i::DisallowHeapAllocation no_allocation;
+ i::Zone zone;
+ result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
+ buffer.start, buffer.end);
+ }
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void CompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.compileRun()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ // Decode and pre-verify the functions before compiling and running.
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, true, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Compile and run!
+ int32_t retval = i::wasm::CompileAndRunWasmModule(isolate, result.val);
+ args.GetReturnValue().Set(retval);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(i::ParseInfo* info) {
+ info->set_global();
+ info->set_lazy(false);
+ info->set_allow_lazy_parsing(false);
+ info->set_toplevel(true);
+
+ if (!i::Compiler::ParseAndAnalyze(info)) {
+ return nullptr;
+ }
+
+ info->set_literal(
+ info->scope()->declarations()->at(0)->AsFunctionDeclaration()->fun());
+
+ v8::internal::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
+ info->literal());
+ if (!typer.Validate()) {
+ return nullptr;
+ }
+
+ auto module = v8::internal::wasm::AsmWasmBuilder(
+ info->isolate(), info->zone(), info->literal())
+ .Run();
+ return module;
+}
+
+
+void AsmCompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.asmCompileRun()");
+
+ if (args.Length() != 1) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+ if (!args[0]->IsString()) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+
+ i::Factory* factory = isolate->factory();
+ i::Zone zone;
+ Local<String> source = Local<String>::Cast(args[0]);
+ i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+ i::ParseInfo info(&zone, script);
+
+ auto module = TranslateAsmModule(&info);
+ if (module == nullptr) {
+ thrower.Error("Asm.js validation failed");
+ return;
+ }
+
+ int32_t result = v8::internal::wasm::CompileAndRunWasmModule(
+ isolate, module->Begin(), module->End(), true);
+ args.GetReturnValue().Set(result);
+}
+
+
+// TODO(aseemgarg): deal with arraybuffer and foreign functions
+void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
+
+ if (args.Length() != 1) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+ if (!args[0]->IsString()) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+
+ i::Factory* factory = isolate->factory();
+ i::Zone zone;
+ Local<String> source = Local<String>::Cast(args[0]);
+ i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+ i::ParseInfo info(&zone, script);
+
+ auto module = TranslateAsmModule(&info);
+ if (module == nullptr) {
+ thrower.Error("Asm.js validation failed");
+ return;
+ }
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, module->Begin(), module->End(), false, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.instantiateModule()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ Local<Object> obj = Local<Object>::Cast(args[2]);
+ i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+ memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+ }
+
+ // Decode but avoid a redundant pass over function bodies for verification.
+ // Verification will happen during compilation.
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, false, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> obj = Local<Object>::Cast(args[1]);
+ ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+ }
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
+ }
+
+ if (result.val) delete result.val;
+}
+} // namespace
+
+
+// TODO(titzer): we use the API to create the function template because the
+// internal guts are too ugly to replicate here.
+static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
+ FunctionCallback func) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ Local<FunctionTemplate> local = FunctionTemplate::New(isolate, func);
+ return v8::Utils::OpenHandle(*local);
+}
+
+
+namespace internal {
+static Handle<String> v8_str(Isolate* isolate, const char* str) {
+ return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+
+
+static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback func) {
+ Handle<String> name = v8_str(isolate, str);
+ Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ Handle<JSFunction> function =
+ ApiNatives::InstantiateFunction(temp).ToHandleChecked();
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ JSObject::AddProperty(object, name, function, attributes);
+}
+
+
+void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
+ // Set up the wasm function map.
+ Handle<Context> context(global->native_context(), isolate);
+ InstallWasmFunctionMap(isolate, context);
+
+ // Bind the WASM object.
+ Factory* factory = isolate->factory();
+ Handle<String> name = v8_str(isolate, "_WASMEXP_");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons, Handle<Object>(context->initial_object_prototype(), isolate));
+ cons->shared()->set_instance_class_name(*name);
+ Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+ JSObject::AddProperty(global, name, wasm_object, attributes);
+
+ // Install functions on the WASM object.
+ InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
+ InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
+ InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
+ InstallFunc(isolate, wasm_object, "compileRun", CompileRun);
+ InstallFunc(isolate, wasm_object, "asmCompileRun", AsmCompileRun);
+ InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
+ InstantiateModuleFromAsm);
+}
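+
+ // Illustrative JavaScript usage once the WASM object is installed
+ // ({bytes}, {ffi} and {memory} are hypothetical caller-provided values):
+ //   _WASMEXP_.verifyModule(bytes);
+ //   var instance = _WASMEXP_.instantiateModule(bytes, ffi, memory);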
+
+
+void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
+ if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
+ Handle<Map> wasm_function_map = isolate->factory()->NewMap(
+ JS_FUNCTION_TYPE, JSFunction::kSize + kPointerSize);
+ wasm_function_map->set_is_callable();
+ context->set_wasm_function_map(*wasm_function_map);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-js.h b/chromium/v8/src/wasm/wasm-js.h
new file mode 100644
index 00000000000..e7305aa164a
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-js.h
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_JS_H_
+#define V8_WASM_JS_H_
+
+#ifndef V8_SHARED
+#include "src/allocation.h"
+#include "src/hashmap.h"
+#else
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#endif // !V8_SHARED
+
+namespace v8 {
+namespace internal {
+// Exposes a WASM API to JavaScript through the V8 API.
+class WasmJs {
+ public:
+ static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
+ static void InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context);
+};
+
+} // namespace internal
+} // namespace v8
+#endif  // V8_WASM_JS_H_
diff --git a/chromium/v8/src/wasm/wasm-macro-gen.h b/chromium/v8/src/wasm/wasm-macro-gen.h
new file mode 100644
index 00000000000..470804a73dc
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-macro-gen.h
@@ -0,0 +1,265 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MACRO_GEN_H_
+#define V8_WASM_MACRO_GEN_H_
+
+#include "src/wasm/wasm-opcodes.h"
+
+// Convenience macros for building Wasm bytecode directly into a byte array.
+
+//------------------------------------------------------------------------------
+// Control.
+//------------------------------------------------------------------------------
+#define WASM_NOP kExprNop
+
+#define WASM_BLOCK(count, ...) kExprBlock, static_cast<byte>(count), __VA_ARGS__
+#define WASM_INFINITE_LOOP kExprLoop, 1, kExprBr, 0, kExprNop
+#define WASM_LOOP(count, ...) kExprLoop, static_cast<byte>(count), __VA_ARGS__
+#define WASM_IF(cond, tstmt) kExprIf, cond, tstmt
+#define WASM_IF_ELSE(cond, tstmt, fstmt) kExprIfElse, cond, tstmt, fstmt
+#define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
+#define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define WASM_BR_IF(depth, cond) \
+ kExprBrIf, static_cast<byte>(depth), cond, kExprNop
+#define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
+#define WASM_BRV_IF(depth, cond, val) \
+ kExprBrIf, static_cast<byte>(depth), cond, val
+#define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
+#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
+#define WASM_RETURN0 kExprReturn
+#define WASM_RETURN(...) kExprReturn, __VA_ARGS__
+#define WASM_UNREACHABLE kExprUnreachable
+
+#define WASM_TABLESWITCH_OP(case_count, table_count, ...) \
+ kExprTableSwitch, static_cast<byte>(case_count), \
+ static_cast<byte>(case_count >> 8), static_cast<byte>(table_count), \
+ static_cast<byte>(table_count >> 8), __VA_ARGS__
+
+#define WASM_TABLESWITCH_BODY0(key) key
+
+#define WASM_TABLESWITCH_BODY(key, ...) key, __VA_ARGS__
+
+#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
+#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
+
+//------------------------------------------------------------------------------
+// Misc expressions.
+//------------------------------------------------------------------------------
+#define WASM_ID(...) __VA_ARGS__
+#define WASM_ZERO kExprI8Const, 0
+#define WASM_ONE kExprI8Const, 1
+#define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
+#define WASM_I32(val) \
+ kExprI32Const, static_cast<byte>(val), static_cast<byte>(val >> 8), \
+ static_cast<byte>(val >> 16), static_cast<byte>(val >> 24)
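+ // e.g. WASM_I32(0x12345678) expands to kExprI32Const followed by the
+ // little-endian immediate bytes 0x78, 0x56, 0x34, 0x12.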
+#define WASM_I64(val) \
+ kExprI64Const, static_cast<byte>(static_cast<uint64_t>(val)), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 8), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 16), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 24), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 32), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 40), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 48), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 56)
+#define WASM_F32(val) \
+ kExprF32Const, \
+ static_cast<byte>(bit_cast<int32_t>(static_cast<float>(val))), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 8), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 16), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 24)
+#define WASM_F64(val) \
+ kExprF64Const, static_cast<byte>(bit_cast<uint64_t>(val)), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 8), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 16), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 24), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 32), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 40), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 48), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
+#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
+#define WASM_SET_LOCAL(index, val) kExprSetLocal, static_cast<byte>(index), val
+#define WASM_LOAD_GLOBAL(index) kExprLoadGlobal, static_cast<byte>(index)
+#define WASM_STORE_GLOBAL(index, val) \
+ kExprStoreGlobal, static_cast<byte>(index), val
+#define WASM_LOAD_MEM(type, index) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index
+#define WASM_STORE_MEM(type, index, val) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index, val
+#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
+ static_cast<byte>(offset), index
+#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
+ static_cast<byte>(offset), index, val
+#define WASM_CALL_FUNCTION(index, ...) \
+ kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
+#define WASM_CALL_INDIRECT(index, func, ...) \
+ kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
+#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT0(index, func) \
+ kExprCallIndirect, static_cast<byte>(index), func
+#define WASM_NOT(x) kExprBoolNot, x
+
+//------------------------------------------------------------------------------
+// Constructs that are composed of multiple bytecodes.
+//------------------------------------------------------------------------------
+#define WASM_WHILE(x, y) kExprLoop, 1, kExprIf, x, kExprBr, 0, y
+#define WASM_INC_LOCAL(index) \
+ kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
+ static_cast<byte>(index), kExprI8Const, 1
+#define WASM_INC_LOCAL_BY(index, count) \
+ kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
+ static_cast<byte>(index), kExprI8Const, static_cast<int8_t>(count)
+
+#define WASM_UNOP(opcode, x) static_cast<byte>(opcode), x
+#define WASM_BINOP(opcode, x, y) static_cast<byte>(opcode), x, y
+
+//------------------------------------------------------------------------------
+// Int32 operations
+//------------------------------------------------------------------------------
+#define WASM_I32_ADD(x, y) kExprI32Add, x, y
+#define WASM_I32_SUB(x, y) kExprI32Sub, x, y
+#define WASM_I32_MUL(x, y) kExprI32Mul, x, y
+#define WASM_I32_DIVS(x, y) kExprI32DivS, x, y
+#define WASM_I32_DIVU(x, y) kExprI32DivU, x, y
+#define WASM_I32_REMS(x, y) kExprI32RemS, x, y
+#define WASM_I32_REMU(x, y) kExprI32RemU, x, y
+#define WASM_I32_AND(x, y) kExprI32And, x, y
+#define WASM_I32_IOR(x, y) kExprI32Ior, x, y
+#define WASM_I32_XOR(x, y) kExprI32Xor, x, y
+#define WASM_I32_SHL(x, y) kExprI32Shl, x, y
+#define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
+#define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
+#define WASM_I32_EQ(x, y) kExprI32Eq, x, y
+#define WASM_I32_NE(x, y) kExprI32Ne, x, y
+#define WASM_I32_LTS(x, y) kExprI32LtS, x, y
+#define WASM_I32_LES(x, y) kExprI32LeS, x, y
+#define WASM_I32_LTU(x, y) kExprI32LtU, x, y
+#define WASM_I32_LEU(x, y) kExprI32LeU, x, y
+#define WASM_I32_GTS(x, y) kExprI32GtS, x, y
+#define WASM_I32_GES(x, y) kExprI32GeS, x, y
+#define WASM_I32_GTU(x, y) kExprI32GtU, x, y
+#define WASM_I32_GEU(x, y) kExprI32GeU, x, y
+#define WASM_I32_CLZ(x) kExprI32Clz, x
+#define WASM_I32_CTZ(x) kExprI32Ctz, x
+#define WASM_I32_POPCNT(x) kExprI32Popcnt, x
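+
+ // Illustrative usage (in a hypothetical test): the expression (11 + 44)
+ // can be encoded as
+ //   static const byte kBody[] = {WASM_I32_ADD(WASM_I8(11), WASM_I8(44))};
+ // which expands to {kExprI32Add, kExprI8Const, 11, kExprI8Const, 44}.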
+
+//------------------------------------------------------------------------------
+// Int64 operations
+//------------------------------------------------------------------------------
+#define WASM_I64_ADD(x, y) kExprI64Add, x, y
+#define WASM_I64_SUB(x, y) kExprI64Sub, x, y
+#define WASM_I64_MUL(x, y) kExprI64Mul, x, y
+#define WASM_I64_DIVS(x, y) kExprI64DivS, x, y
+#define WASM_I64_DIVU(x, y) kExprI64DivU, x, y
+#define WASM_I64_REMS(x, y) kExprI64RemS, x, y
+#define WASM_I64_REMU(x, y) kExprI64RemU, x, y
+#define WASM_I64_AND(x, y) kExprI64And, x, y
+#define WASM_I64_IOR(x, y) kExprI64Ior, x, y
+#define WASM_I64_XOR(x, y) kExprI64Xor, x, y
+#define WASM_I64_SHL(x, y) kExprI64Shl, x, y
+#define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
+#define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
+#define WASM_I64_EQ(x, y) kExprI64Eq, x, y
+#define WASM_I64_NE(x, y) kExprI64Ne, x, y
+#define WASM_I64_LTS(x, y) kExprI64LtS, x, y
+#define WASM_I64_LES(x, y) kExprI64LeS, x, y
+#define WASM_I64_LTU(x, y) kExprI64LtU, x, y
+#define WASM_I64_LEU(x, y) kExprI64LeU, x, y
+#define WASM_I64_GTS(x, y) kExprI64GtS, x, y
+#define WASM_I64_GES(x, y) kExprI64GeS, x, y
+#define WASM_I64_GTU(x, y) kExprI64GtU, x, y
+#define WASM_I64_GEU(x, y) kExprI64GeU, x, y
+#define WASM_I64_CLZ(x) kExprI64Clz, x
+#define WASM_I64_CTZ(x) kExprI64Ctz, x
+#define WASM_I64_POPCNT(x) kExprI64Popcnt, x
+
+//------------------------------------------------------------------------------
+// Float32 operations
+//------------------------------------------------------------------------------
+#define WASM_F32_ADD(x, y) kExprF32Add, x, y
+#define WASM_F32_SUB(x, y) kExprF32Sub, x, y
+#define WASM_F32_MUL(x, y) kExprF32Mul, x, y
+#define WASM_F32_DIV(x, y) kExprF32Div, x, y
+#define WASM_F32_MIN(x, y) kExprF32Min, x, y
+#define WASM_F32_MAX(x, y) kExprF32Max, x, y
+#define WASM_F32_ABS(x) kExprF32Abs, x
+#define WASM_F32_NEG(x) kExprF32Neg, x
+#define WASM_F32_COPYSIGN(x, y) kExprF32CopySign, x, y
+#define WASM_F32_CEIL(x) kExprF32Ceil, x
+#define WASM_F32_FLOOR(x) kExprF32Floor, x
+#define WASM_F32_TRUNC(x) kExprF32Trunc, x
+#define WASM_F32_NEARESTINT(x) kExprF32NearestInt, x
+#define WASM_F32_SQRT(x) kExprF32Sqrt, x
+#define WASM_F32_EQ(x, y) kExprF32Eq, x, y
+#define WASM_F32_NE(x, y) kExprF32Ne, x, y
+#define WASM_F32_LT(x, y) kExprF32Lt, x, y
+#define WASM_F32_LE(x, y) kExprF32Le, x, y
+#define WASM_F32_GT(x, y) kExprF32Gt, x, y
+#define WASM_F32_GE(x, y) kExprF32Ge, x, y
+
+//------------------------------------------------------------------------------
+// Float64 operations
+//------------------------------------------------------------------------------
+#define WASM_F64_ADD(x, y) kExprF64Add, x, y
+#define WASM_F64_SUB(x, y) kExprF64Sub, x, y
+#define WASM_F64_MUL(x, y) kExprF64Mul, x, y
+#define WASM_F64_DIV(x, y) kExprF64Div, x, y
+#define WASM_F64_MIN(x, y) kExprF64Min, x, y
+#define WASM_F64_MAX(x, y) kExprF64Max, x, y
+#define WASM_F64_ABS(x) kExprF64Abs, x
+#define WASM_F64_NEG(x) kExprF64Neg, x
+#define WASM_F64_COPYSIGN(x, y) kExprF64CopySign, x, y
+#define WASM_F64_CEIL(x) kExprF64Ceil, x
+#define WASM_F64_FLOOR(x) kExprF64Floor, x
+#define WASM_F64_TRUNC(x) kExprF64Trunc, x
+#define WASM_F64_NEARESTINT(x) kExprF64NearestInt, x
+#define WASM_F64_SQRT(x) kExprF64Sqrt, x
+#define WASM_F64_EQ(x, y) kExprF64Eq, x, y
+#define WASM_F64_NE(x, y) kExprF64Ne, x, y
+#define WASM_F64_LT(x, y) kExprF64Lt, x, y
+#define WASM_F64_LE(x, y) kExprF64Le, x, y
+#define WASM_F64_GT(x, y) kExprF64Gt, x, y
+#define WASM_F64_GE(x, y) kExprF64Ge, x, y
+
+//------------------------------------------------------------------------------
+// Type conversions.
+//------------------------------------------------------------------------------
+#define WASM_I32_SCONVERT_F32(x) kExprI32SConvertF32, x
+#define WASM_I32_SCONVERT_F64(x) kExprI32SConvertF64, x
+#define WASM_I32_UCONVERT_F32(x) kExprI32UConvertF32, x
+#define WASM_I32_UCONVERT_F64(x) kExprI32UConvertF64, x
+#define WASM_I32_CONVERT_I64(x) kExprI32ConvertI64, x
+#define WASM_I64_SCONVERT_F32(x) kExprI64SConvertF32, x
+#define WASM_I64_SCONVERT_F64(x) kExprI64SConvertF64, x
+#define WASM_I64_UCONVERT_F32(x) kExprI64UConvertF32, x
+#define WASM_I64_UCONVERT_F64(x) kExprI64UConvertF64, x
+#define WASM_I64_SCONVERT_I32(x) kExprI64SConvertI32, x
+#define WASM_I64_UCONVERT_I32(x) kExprI64UConvertI32, x
+#define WASM_F32_SCONVERT_I32(x) kExprF32SConvertI32, x
+#define WASM_F32_UCONVERT_I32(x) kExprF32UConvertI32, x
+#define WASM_F32_SCONVERT_I64(x) kExprF32SConvertI64, x
+#define WASM_F32_UCONVERT_I64(x) kExprF32UConvertI64, x
+#define WASM_F32_CONVERT_F64(x) kExprF32ConvertF64, x
+#define WASM_F32_REINTERPRET_I32(x) kExprF32ReinterpretI32, x
+#define WASM_F64_SCONVERT_I32(x) kExprF64SConvertI32, x
+#define WASM_F64_UCONVERT_I32(x) kExprF64UConvertI32, x
+#define WASM_F64_SCONVERT_I64(x) kExprF64SConvertI64, x
+#define WASM_F64_UCONVERT_I64(x) kExprF64UConvertI64, x
+#define WASM_F64_CONVERT_F32(x) kExprF64ConvertF32, x
+#define WASM_F64_REINTERPRET_I64(x) kExprF64ReinterpretI64, x
+#define WASM_I32_REINTERPRET_F32(x) kExprI32ReinterpretF32, x
+#define WASM_I64_REINTERPRET_F64(x) kExprI64ReinterpretF64, x
+
+#endif // V8_WASM_MACRO_GEN_H_
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
new file mode 100644
index 00000000000..fd2428080be
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -0,0 +1,511 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "src/simulator.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+ os << "WASM module with ";
+ os << (1 << module.min_mem_size_log2) << " min mem";
+ os << (1 << module.max_mem_size_log2) << " max mem";
+ if (module.functions) os << module.functions->size() << " functions";
+ if (module.globals) os << module.functions->size() << " globals";
+ if (module.data_segments) os << module.functions->size() << " data segments";
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+ os << "WASM function with signature ";
+
+ // TODO(titzer): factor out rendering of signatures.
+ if (function.sig->return_count() == 0) os << "v";
+ for (size_t i = 0; i < function.sig->return_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(function.sig->GetReturn(i));
+ }
+ os << "_";
+ if (function.sig->parameter_count() == 0) os << "v";
+ for (size_t i = 0; i < function.sig->parameter_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(function.sig->GetParam(i));
+ }
+ os << " locals: ";
+ if (function.local_int32_count)
+ os << function.local_int32_count << " int32s ";
+ if (function.local_int64_count)
+ os << function.local_int64_count << " int64s ";
+ if (function.local_float32_count)
+ os << function.local_float32_count << " float32s ";
+ if (function.local_float64_count)
+ os << function.local_float64_count << " float64s ";
+
+ os << " code bytes: "
+ << (function.code_end_offset - function.code_start_offset);
+ return os;
+}
+
+
+ // A helper class for compiling multiple wasm functions; it hands out
+ // placeholder code objects for calls to functions that are not yet compiled.
+class WasmLinker {
+ public:
+ WasmLinker(Isolate* isolate, size_t size)
+ : isolate_(isolate), placeholder_code_(size), function_code_(size) {}
+
+ // Get the code object for a function, allocating a placeholder if it has
+ // not yet been compiled.
+ Handle<Code> GetFunctionCode(uint32_t index) {
+ DCHECK(index < function_code_.size());
+ if (function_code_[index].is_null()) {
+ // Create a placeholder code object and encode the corresponding index in
+ // the {constant_pool_offset} field of the code object.
+ // TODO(titzer): placeholder code objects are somewhat dangerous.
+ Handle<Code> self(nullptr, isolate_);
+ byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fake instructions.
+ CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr};
+ Handle<Code> code = isolate_->factory()->NewCode(
+ desc, Code::KindField::encode(Code::WASM_FUNCTION), self);
+ code->set_constant_pool_offset(index + kPlaceholderMarker);
+ placeholder_code_[index] = code;
+ function_code_[index] = code;
+ }
+ return function_code_[index];
+ }
+
+ void Finish(uint32_t index, Handle<Code> code) {
+ DCHECK(index < function_code_.size());
+ function_code_[index] = code;
+ }
+
+ void Link(Handle<FixedArray> function_table,
+ std::vector<uint16_t>* functions) {
+ for (size_t i = 0; i < function_code_.size(); i++) {
+ LinkFunction(function_code_[i]);
+ }
+ if (functions && !function_table.is_null()) {
+ int table_size = static_cast<int>(functions->size());
+ DCHECK_EQ(function_table->length(), table_size * 2);
+ for (int i = 0; i < table_size; i++) {
+ function_table->set(i + table_size, *function_code_[functions->at(i)]);
+ }
+ }
+ }
+
+ private:
+ static const int kPlaceholderMarker = 1000000000;
+
+ Isolate* isolate_;
+ std::vector<Handle<Code>> placeholder_code_;
+ std::vector<Handle<Code>> function_code_;
+
+ void LinkFunction(Handle<Code> code) {
+ bool modified = false;
+ int mode_mask = RelocInfo::kCodeTargetMask;
+ AllowDeferredHandleDereference embedding_raw_address;
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsCodeTarget(mode)) {
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION &&
+ target->constant_pool_offset() >= kPlaceholderMarker) {
+ // Patch direct calls to placeholder code objects.
+ uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+ CHECK(index < function_code_.size());
+ Handle<Code> new_target = function_code_[index];
+ if (target != *new_target) {
+ CHECK_EQ(*placeholder_code_[index], target);
+ it.rinfo()->set_target_address(new_target->instruction_start(),
+ SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ modified = true;
+ }
+ }
+ }
+ }
+ if (modified) {
+ Assembler::FlushICache(isolate_, code->instruction_start(),
+ code->instruction_size());
+ }
+ }
+};
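+
+ // Illustrative flow: callers compile each function (GetFunctionCode() hands
+ // out placeholders for forward references, Finish() records the real code),
+ // then call Link() once to patch placeholder call targets; see the two
+ // passes in WasmModule::Instantiate() below.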
+
+namespace {
+// Internal constants for the layout of the module object.
+const int kWasmModuleInternalFieldCount = 4;
+const int kWasmModuleFunctionTable = 0;
+const int kWasmModuleCodeTable = 1;
+const int kWasmMemArrayBuffer = 2;
+const int kWasmGlobalsArrayBuffer = 3;
+
+
+size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>* globals) {
+ uint32_t offset = 0;
+ if (!globals) return 0;
+ for (WasmGlobal& global : *globals) {
+ byte size = WasmOpcodes::MemSize(global.type);
+ offset = (offset + size - 1) & ~(size - 1); // align
+ global.offset = offset;
+ offset += size;
+ }
+ return offset;
+}
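+
+ // Example: for globals of types {I32, F64} the assigned offsets are 0 and 8
+ // (the F64 slot is aligned up from 4 to 8), for a 16-byte globals area
+ // (illustrative).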
+
+
+void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
+ for (const WasmDataSegment& segment : *module->data_segments) {
+ if (!segment.init) continue;
+ CHECK_LT(segment.dest_addr, mem_size);
+ CHECK_LE(segment.source_size, mem_size);
+ CHECK_LE(segment.dest_addr + segment.source_size, mem_size);
+ byte* addr = mem_addr + segment.dest_addr;
+ memcpy(addr, module->module_start + segment.source_offset,
+ segment.source_size);
+ }
+}
+
+
+Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
+ if (!module->function_table || module->function_table->size() == 0) {
+ return Handle<FixedArray>::null();
+ }
+ int table_size = static_cast<int>(module->function_table->size());
+ Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
+ for (int i = 0; i < table_size; i++) {
+ WasmFunction* function =
+ &module->functions->at(module->function_table->at(i));
+ fixed->set(i, Smi::FromInt(function->sig_index));
+ }
+ return fixed;
+}
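+
+ // The resulting array has a two-part layout: entries [0, table_size) hold
+ // the signature indices as Smis, and entries [table_size, 2 * table_size)
+ // are later filled with the code objects by WasmLinker::Link().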
+
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, int size,
+ byte** backing_store) {
+ void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ if (!memory) return Handle<JSArrayBuffer>::null();
+ *backing_store = reinterpret_cast<byte*>(memory);
+
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ for (int i = 0; i < size; i++) {
+ DCHECK_EQ(0, (*backing_store)[i]);
+ }
+#endif
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, false, memory, size);
+ buffer->set_is_neuterable(false);
+ return buffer;
+}
+} // namespace
+
+
+WasmModule::WasmModule()
+ : globals(nullptr),
+ signatures(nullptr),
+ functions(nullptr),
+ data_segments(nullptr),
+ function_table(nullptr) {}
+
+
+WasmModule::~WasmModule() {
+ if (globals) delete globals;
+ if (signatures) delete signatures;
+ if (functions) delete functions;
+ if (data_segments) delete data_segments;
+ if (function_table) delete function_table;
+}
+
+
+ // Instantiates a wasm module as a JSObject.
+ // * allocates a backing store of {mem_size} bytes.
+ // * installs a named property "memory" for that buffer if exported.
+ // * installs named properties on the object for exported functions.
+ // * compiles wasm code to machine code.
+MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
+ Handle<JSObject> ffi,
+ Handle<JSArrayBuffer> memory) {
+ this->shared_isolate = isolate; // TODO(titzer): have a real shared isolate.
+ ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
+
+ Factory* factory = isolate->factory();
+ // Memory is bigger than maximum supported size.
+ if (memory.is_null() && min_mem_size_log2 > kMaxMemSize) {
+ thrower.Error("Out of memory: wasm memory too large");
+ return MaybeHandle<JSObject>();
+ }
+
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
+
+ //-------------------------------------------------------------------------
+ // Allocate the module object.
+ //-------------------------------------------------------------------------
+ Handle<JSObject> module = factory->NewJSObjectFromMap(map, TENURED);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+
+ //-------------------------------------------------------------------------
+ // Allocate the linear memory.
+ //-------------------------------------------------------------------------
+ uint32_t mem_size = 1 << min_mem_size_log2;
+ byte* mem_addr = nullptr;
+ Handle<JSArrayBuffer> mem_buffer;
+ if (!memory.is_null()) {
+ memory->set_is_neuterable(false);
+ mem_addr = reinterpret_cast<byte*>(memory->backing_store());
+ mem_size = memory->byte_length()->Number();
+ mem_buffer = memory;
+ } else {
+ mem_buffer = NewArrayBuffer(isolate, mem_size, &mem_addr);
+ if (!mem_addr) {
+ // Not enough space for backing store of memory
+ thrower.Error("Out of memory: wasm memory");
+ return MaybeHandle<JSObject>();
+ }
+ }
+
+ // Load initialized data segments.
+ LoadDataSegments(this, mem_addr, mem_size);
+
+ module->SetInternalField(kWasmMemArrayBuffer, *mem_buffer);
+
+ if (mem_export) {
+ // Export the memory as a named property.
+ Handle<String> name = factory->InternalizeUtf8String("memory");
+ JSObject::AddProperty(module, name, mem_buffer, READ_ONLY);
+ }
+
+ //-------------------------------------------------------------------------
+ // Allocate the globals area if necessary.
+ //-------------------------------------------------------------------------
+ size_t globals_size = AllocateGlobalsOffsets(globals);
+ byte* globals_addr = nullptr;
+ if (globals_size > 0) {
+ Handle<JSArrayBuffer> globals_buffer =
+ NewArrayBuffer(isolate, static_cast<int>(globals_size), &globals_addr);
+ if (!globals_addr) {
+ // Not enough space for backing store of globals.
+ thrower.Error("Out of memory: wasm globals");
+ return MaybeHandle<JSObject>();
+ }
+
+ module->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
+ } else {
+ module->SetInternalField(kWasmGlobalsArrayBuffer, Smi::FromInt(0));
+ }
+
+ //-------------------------------------------------------------------------
+ // Compile all functions in the module.
+ //-------------------------------------------------------------------------
+ int index = 0;
+ WasmLinker linker(isolate, functions->size());
+ ModuleEnv module_env;
+ module_env.module = this;
+ module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr);
+ module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr) + mem_size;
+ module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr);
+ module_env.linker = &linker;
+ module_env.function_code = nullptr;
+ module_env.function_table = BuildFunctionTable(isolate, this);
+ module_env.memory = memory;
+ module_env.context = isolate->native_context();
+ module_env.asm_js = false;
+
+ // First pass: compile each function and initialize the code table.
+ for (const WasmFunction& func : *functions) {
+ if (thrower.error()) break;
+
+ const char* cstr = GetName(func.name_offset);
+ Handle<String> name = factory->InternalizeUtf8String(cstr);
+ Handle<Code> code = Handle<Code>::null();
+ Handle<JSFunction> function = Handle<JSFunction>::null();
+ if (func.external) {
+ // Lookup external function in FFI object.
+ if (!ffi.is_null()) {
+ MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+ if (!result.is_null()) {
+ Handle<Object> obj = result.ToHandleChecked();
+ if (obj->IsJSFunction()) {
+ function = Handle<JSFunction>::cast(obj);
+ code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
+ function, index);
+ } else {
+ thrower.Error("FFI function #%d:%s is not a JSFunction.", index,
+ cstr);
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ thrower.Error("FFI function #%d:%s not found.", index, cstr);
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ thrower.Error("FFI table is not an object.");
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ // Compile the function.
+ code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func,
+ index);
+ if (code.is_null()) {
+ thrower.Error("Compilation of #%d:%s failed.", index, cstr);
+ return MaybeHandle<JSObject>();
+ }
+ if (func.exported) {
+ function = compiler::CompileJSToWasmWrapper(isolate, &module_env, name,
+ code, module, index);
+ }
+ }
+ if (!code.is_null()) {
+ // Install the code into the linker table.
+ linker.Finish(index, code);
+ code_table->set(index, *code);
+ }
+ if (func.exported) {
+ // Exported functions are installed as read-only properties on the module.
+ JSObject::AddProperty(module, name, function, READ_ONLY);
+ }
+ index++;
+ }
+
+ // Second pass: patch all direct call sites.
+ linker.Link(module_env.function_table, this->function_table);
+
+ module->SetInternalField(kWasmModuleFunctionTable, Smi::FromInt(0));
+ module->SetInternalField(kWasmModuleCodeTable, *code_table);
+ return module;
+}
+
+
+Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ if (linker) return linker->GetFunctionCode(index);
+ if (function_code) return function_code->at(index);
+ return Handle<Code>::null();
+}
+
+
+compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
+ uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ // Always make a direct call to whatever is in the table at that location.
+ // A wrapper will be generated for FFI calls.
+ WasmFunction* function = &module->functions->at(index);
+ return GetWasmCallDescriptor(zone, function->sig);
+}
+
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js) {
+ HandleScope scope(isolate);
+ Zone zone;
+ // Decode the module, but don't verify function bodies, since we'll
+ // be compiling them anyway.
+ ModuleResult result = DecodeWasmModule(isolate, &zone, module_start,
+ module_end, false, asm_js);
+ if (result.failed()) {
+ // Module decoding failed; throw an error.
+ std::ostringstream str;
+ str << "WASM.compileRun() failed: " << result;
+ isolate->Throw(
+ *isolate->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+ return -1;
+ }
+
+ int32_t retval = CompileAndRunWasmModule(isolate, result.val);
+ delete result.val;
+ return retval;
+}
+
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+
+ // Allocate temporary linear memory and globals.
+ size_t mem_size = 1 << module->min_mem_size_log2;
+ size_t globals_size = AllocateGlobalsOffsets(module->globals);
+
+ base::SmartArrayPointer<byte> mem_addr(new byte[mem_size]);
+ base::SmartArrayPointer<byte> globals_addr(new byte[globals_size]);
+
+ memset(mem_addr.get(), 0, mem_size);
+ memset(globals_addr.get(), 0, globals_size);
+
+ // Create module environment.
+ WasmLinker linker(isolate, module->functions->size());
+ ModuleEnv module_env;
+ module_env.module = module;
+ module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr.get());
+ module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr.get()) + mem_size;
+ module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr.get());
+ module_env.linker = &linker;
+ module_env.function_code = nullptr;
+ module_env.function_table = BuildFunctionTable(isolate, module);
+ module_env.asm_js = false;
+
+ // Load data segments.
+ // TODO(titzer): throw instead of crashing if segments don't fit in memory?
+ LoadDataSegments(module, mem_addr.get(), mem_size);
+
+ // Compile all functions.
+ Handle<Code> main_code = Handle<Code>::null(); // last exported function.
+ int index = 0;
+ for (const WasmFunction& func : *module->functions) {
+ if (!func.external) {
+ // Compile the function and install it in the code table.
+ Handle<Code> code = compiler::CompileWasmFunction(
+ thrower, isolate, &module_env, func, index);
+ if (!code.is_null()) {
+ if (func.exported) main_code = code;
+ linker.Finish(index, code);
+ }
+ if (thrower.error()) return -1;
+ }
+ index++;
+ }
+
+ if (!main_code.is_null()) {
+ linker.Link(module_env.function_table, module->function_table);
+#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
+ // Run the main code on arm64 simulator.
+ Simulator* simulator = Simulator::current(isolate);
+ Simulator::CallArgument args[] = {Simulator::CallArgument(0),
+ Simulator::CallArgument::End()};
+ return static_cast<int32_t>(simulator->CallInt64(main_code->entry(), args));
+#elif USE_SIMULATOR
+ // Run the main code on simulator.
+ Simulator* simulator = Simulator::current(isolate);
+ return static_cast<int32_t>(
+ simulator->Call(main_code->entry(), 4, 0, 0, 0, 0));
+#else
+ // Run the main code as raw machine code.
+ int32_t (*raw_func)() = reinterpret_cast<int32_t (*)()>(
+ reinterpret_cast<uintptr_t>(main_code->entry()));
+ return raw_func();
+#endif
+ } else {
+ // No main code was found.
+ isolate->Throw(*isolate->factory()->NewStringFromStaticChars(
+ "WASM.compileRun() failed: no valid main code produced."));
+ }
+ return -1;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
new file mode 100644
index 00000000000..5e2ba58a441
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -0,0 +1,192 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_H_
+#define V8_WASM_MODULE_H_
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/api.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CallDescriptor;
+}
+
+namespace wasm {
+const size_t kMaxModuleSize = 1024 * 1024 * 1024;
+const size_t kMaxFunctionSize = 128 * 1024;
+const size_t kMaxStringSize = 256;
+
+enum WasmSectionDeclCode {
+ kDeclMemory = 0x00,
+ kDeclSignatures = 0x01,
+ kDeclFunctions = 0x02,
+ kDeclGlobals = 0x03,
+ kDeclDataSegments = 0x04,
+ kDeclFunctionTable = 0x05,
+ kDeclWLL = 0x11,
+ kDeclEnd = 0x06,
+};
+
+static const int kMaxModuleSectionCode = 6;
+
+enum WasmFunctionDeclBit {
+ kDeclFunctionName = 0x01,
+ kDeclFunctionImport = 0x02,
+ kDeclFunctionLocals = 0x04,
+ kDeclFunctionExport = 0x08
+};
+
+// Constants for fixed-size elements within a module.
+static const size_t kDeclMemorySize = 3;
+static const size_t kDeclGlobalSize = 6;
+static const size_t kDeclDataSegmentSize = 13;
+
+// Static representation of a wasm function.
+struct WasmFunction {
+ FunctionSig* sig; // signature of the function.
+ uint16_t sig_index; // index into the signature table.
+ uint32_t name_offset; // offset in the module bytes of the name, if any.
+ uint32_t code_start_offset; // offset in the module bytes of code start.
+ uint32_t code_end_offset; // offset in the module bytes of code end.
+ uint16_t local_int32_count; // number of int32 local variables.
+ uint16_t local_int64_count; // number of int64 local variables.
+ uint16_t local_float32_count; // number of float32 local variables.
+ uint16_t local_float64_count; // number of float64 local variables.
+ bool exported; // true if this function is exported.
+ bool external; // true if this function is externally supplied.
+};
+
+struct ModuleEnv; // forward declaration of decoder interface.
+
+// Static representation of a wasm global variable.
+struct WasmGlobal {
+ uint32_t name_offset; // offset in the module bytes of the name, if any.
+ MachineType type; // type of the global.
+ uint32_t offset; // offset from beginning of globals area.
+ bool exported; // true if this global is exported.
+};
+
+// Static representation of a wasm data segment.
+struct WasmDataSegment {
+ uint32_t dest_addr; // destination memory address of the data.
+ uint32_t source_offset; // start offset in the module bytes.
+  uint32_t source_size; // size of the segment in the module bytes.
+ bool init; // true if loaded upon instantiation.
+};
+
+// Static representation of a module.
+struct WasmModule {
+  static const uint8_t kMinMemSize = 12; // Minimum memory size = 4 KB
+  static const uint8_t kMaxMemSize = 30; // Maximum memory size = 1 GB
+
+ Isolate* shared_isolate; // isolate for storing shared code.
+ const byte* module_start; // starting address for the module bytes.
+ const byte* module_end; // end address for the module bytes.
+ uint8_t min_mem_size_log2; // minimum size of the memory (log base 2).
+ uint8_t max_mem_size_log2; // maximum size of the memory (log base 2).
+ bool mem_export; // true if the memory is exported.
+ bool mem_external; // true if the memory is external.
+
+ std::vector<WasmGlobal>* globals; // globals in this module.
+ std::vector<FunctionSig*>* signatures; // signatures in this module.
+ std::vector<WasmFunction>* functions; // functions in this module.
+ std::vector<WasmDataSegment>* data_segments; // data segments in this module.
+ std::vector<uint16_t>* function_table; // function table.
+
+ WasmModule();
+ ~WasmModule();
+
+ // Get a pointer to a string stored in the module bytes representing a name.
+ const char* GetName(uint32_t offset) {
+    if (offset == 0) return "<?>"; // no name.
+    CHECK(BoundsCheck(offset, offset + 1));
+ return reinterpret_cast<const char*>(module_start + offset);
+ }
+
+  // Checks that the given offset range is contained within the module bytes.
+ bool BoundsCheck(uint32_t start, uint32_t end) {
+ size_t size = module_end - module_start;
+ return start < size && end < size;
+ }
+
+ // Creates a new instantiation of the module in the given isolate.
+ MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSObject> ffi,
+ Handle<JSArrayBuffer> memory);
+};
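+
+// Illustrative (not normative): a decoder holding a WasmModule can resolve a
+// function's debug name through the offsets above, e.g.
+//
+//   const WasmFunction& func = module->functions->at(0);
+//   const char* name = module->GetName(func.name_offset); // "<?>" if unnamed.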
+
+// forward declaration.
+class WasmLinker;
+
+// Interface provided to the decoder/graph builder which contains only
+// minimal information about the globals, functions, and function tables.
+struct ModuleEnv {
+ uintptr_t globals_area; // address of the globals area.
+ uintptr_t mem_start; // address of the start of linear memory.
+ uintptr_t mem_end; // address of the end of linear memory.
+
+ WasmModule* module;
+ WasmLinker* linker;
+ std::vector<Handle<Code>>* function_code;
+ Handle<FixedArray> function_table;
+ Handle<JSArrayBuffer> memory;
+ Handle<Context> context;
+ bool asm_js; // true if the module originated from asm.js.
+
+ bool IsValidGlobal(uint32_t index) {
+ return module && index < module->globals->size();
+ }
+ bool IsValidFunction(uint32_t index) {
+ return module && index < module->functions->size();
+ }
+ bool IsValidSignature(uint32_t index) {
+ return module && index < module->signatures->size();
+ }
+ MachineType GetGlobalType(uint32_t index) {
+ DCHECK(IsValidGlobal(index));
+ return module->globals->at(index).type;
+ }
+ FunctionSig* GetFunctionSignature(uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ return module->functions->at(index).sig;
+ }
+ FunctionSig* GetSignature(uint32_t index) {
+ DCHECK(IsValidSignature(index));
+ return module->signatures->at(index);
+ }
+ size_t FunctionTableSize() {
+ return module ? module->function_table->size() : 0;
+ }
+
+ Handle<Code> GetFunctionCode(uint32_t index);
+ Handle<FixedArray> GetFunctionTable();
+
+ compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone, FunctionSig* sig);
+ compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
+};
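+
+// Illustrative query pattern (a sketch, not part of the API contract): callers
+// are expected to validate an index before asking for details, e.g.
+//
+//   if (env->IsValidFunction(index)) {
+//     FunctionSig* sig = env->GetFunctionSignature(index);
+//   }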
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module);
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
+
+typedef Result<WasmModule*> ModuleResult;
+typedef Result<WasmFunction*> FunctionResult;
+
+// For testing. Decode, verify, and run the last exported function in the
+// given encoded module.
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js = false);
+
+// For testing. Decode, verify, and run the last exported function in the
+// given decoded module.
+int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_H_
diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc
new file mode 100644
index 00000000000..25eef034d7f
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-opcodes.cc
@@ -0,0 +1,133 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+typedef Signature<LocalType> FunctionSig;
+
+const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return "Expr" #name;
+ FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+ default:
+ break;
+ }
+ return "Unknown";
+}
+
+
+#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
+
+
+enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
+
+
+// TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
+#define DECLARE_SIG(name, ...) \
+ static LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static const FunctionSig kSig_##name( \
+ 1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
+
+FOREACH_SIGNATURE(DECLARE_SIG)
+
+#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
+
+static const FunctionSig* kSimpleExprSigs[] = {
+ nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+
+static byte kSimpleExprSigTable[256];
+
+
+// Initialize the signature table.
+static void InitSigTable() {
+#define SET_SIG_TABLE(name, opcode, sig) \
+ kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
+ FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
+#undef SET_SIG_TABLE
+}
+
+
+FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
+ // TODO(titzer): use LazyInstance to make this thread safe.
+ if (kSimpleExprSigTable[kExprI32Add] == 0) InitSigTable();
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+}
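+
+// Illustrative check (follows from the opcode table): kExprI32Add carries the
+// i_ii signature, i.e. one i32 return value and two i32 parameters.
+//
+//   FunctionSig* sig = WasmOpcodes::Signature(kExprI32Add);
+//   DCHECK_EQ(1u, sig->return_count());
+//   DCHECK_EQ(2u, sig->parameter_count());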
+
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+
+bool WasmOpcodes::IsSupported(WasmOpcode opcode) {
+#if !WASM_64
+ switch (opcode) {
+ // Opcodes not supported on 32-bit platforms.
+ case kExprI64Add:
+ case kExprI64Sub:
+ case kExprI64Mul:
+ case kExprI64DivS:
+ case kExprI64DivU:
+ case kExprI64RemS:
+ case kExprI64RemU:
+ case kExprI64And:
+ case kExprI64Ior:
+ case kExprI64Xor:
+ case kExprI64Shl:
+ case kExprI64ShrU:
+ case kExprI64ShrS:
+ case kExprI64Eq:
+ case kExprI64Ne:
+ case kExprI64LtS:
+ case kExprI64LeS:
+ case kExprI64LtU:
+ case kExprI64LeU:
+ case kExprI64GtS:
+ case kExprI64GeS:
+ case kExprI64GtU:
+ case kExprI64GeU:
+
+ case kExprI32ConvertI64:
+ case kExprI64SConvertI32:
+ case kExprI64UConvertI32:
+
+ case kExprF64ReinterpretI64:
+ case kExprI64ReinterpretF64:
+
+ case kExprI64Clz:
+ case kExprI64Ctz:
+ case kExprI64Popcnt:
+
+ case kExprF32SConvertI64:
+ case kExprF32UConvertI64:
+ case kExprF64SConvertI64:
+ case kExprF64UConvertI64:
+ case kExprI64SConvertF32:
+ case kExprI64SConvertF64:
+ case kExprI64UConvertF32:
+ case kExprI64UConvertF64:
+
+ return false;
+ default:
+ return true;
+ }
+#else
+ return true;
+#endif
+}
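+
+// For example, on a 32-bit target (WASM_64 == 0) IsSupported(kExprI64Add)
+// returns false, while IsSupported(kExprI32Add) is always true.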
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
new file mode 100644
index 00000000000..ae2843a6c15
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -0,0 +1,476 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OPCODES_H_
+#define V8_WASM_OPCODES_H_
+
+#include "src/machine-type.h"
+#include "src/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Binary encoding of local types.
+enum LocalTypeCode {
+ kLocalVoid = 0,
+ kLocalI32 = 1,
+ kLocalI64 = 2,
+ kLocalF32 = 3,
+ kLocalF64 = 4
+};
+
+// Binary encoding of memory types.
+enum MemTypeCode {
+ kMemI8 = 0,
+ kMemU8 = 1,
+ kMemI16 = 2,
+ kMemU16 = 3,
+ kMemI32 = 4,
+ kMemU32 = 5,
+ kMemI64 = 6,
+ kMemU64 = 7,
+ kMemF32 = 8,
+ kMemF64 = 9
+};
+
+// We reuse the internal machine type to represent WebAssembly AST types.
+// A typedef improves readability without adding a whole new type system.
+typedef MachineRepresentation LocalType;
+const LocalType kAstStmt = MachineRepresentation::kNone;
+const LocalType kAstI32 = MachineRepresentation::kWord32;
+const LocalType kAstI64 = MachineRepresentation::kWord64;
+const LocalType kAstF32 = MachineRepresentation::kFloat32;
+const LocalType kAstF64 = MachineRepresentation::kFloat64;
+// We use kTagged here because kNone is already used by kAstStmt.
+const LocalType kAstEnd = MachineRepresentation::kTagged;
+
+// Functionality related to encoding memory accesses.
+struct MemoryAccess {
+ // Atomicity annotations for access to the memory and globals.
+ enum Atomicity {
+ kNone = 0, // non-atomic
+ kSequential = 1, // sequential consistency
+ kAcquire = 2, // acquire semantics
+ kRelease = 3 // release semantics
+ };
+
+ // Alignment annotations for memory accesses.
+ enum Alignment { kAligned = 0, kUnaligned = 1 };
+
+ // Bitfields for the various annotations for memory accesses.
+ typedef BitField<Alignment, 7, 1> AlignmentField;
+ typedef BitField<Atomicity, 5, 2> AtomicityField;
+ typedef BitField<bool, 4, 1> OffsetField;
+};
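+
+// A hedged encoding example (illustrative only): the annotation bitfields
+// compose into a single access byte, e.g.
+//
+//   byte access =
+//       MemoryAccess::AlignmentField::encode(MemoryAccess::kUnaligned) |
+//       MemoryAccess::AtomicityField::encode(MemoryAccess::kNone) |
+//       MemoryAccess::OffsetField::encode(true);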
+
+typedef Signature<LocalType> FunctionSig;
+
+// Control expressions and blocks.
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Nop, 0x00, _) \
+ V(Block, 0x01, _) \
+ V(Loop, 0x02, _) \
+ V(If, 0x03, _) \
+ V(IfElse, 0x04, _) \
+ V(Select, 0x05, _) \
+ V(Br, 0x06, _) \
+ V(BrIf, 0x07, _) \
+ V(TableSwitch, 0x08, _) \
+ V(Return, 0x14, _) \
+ V(Unreachable, 0x15, _)
+// TODO(titzer): numbering
+
+// Constants, locals, globals, and calls.
+#define FOREACH_MISC_OPCODE(V) \
+ V(I8Const, 0x09, _) \
+ V(I32Const, 0x0a, _) \
+ V(I64Const, 0x0b, _) \
+ V(F64Const, 0x0c, _) \
+ V(F32Const, 0x0d, _) \
+ V(GetLocal, 0x0e, _) \
+ V(SetLocal, 0x0f, _) \
+ V(LoadGlobal, 0x10, _) \
+ V(StoreGlobal, 0x11, _) \
+ V(CallFunction, 0x12, _) \
+ V(CallIndirect, 0x13, _)
+
+// Load memory expressions.
+#define FOREACH_LOAD_MEM_OPCODE(V) \
+ V(I32LoadMem8S, 0x20, i_i) \
+ V(I32LoadMem8U, 0x21, i_i) \
+ V(I32LoadMem16S, 0x22, i_i) \
+ V(I32LoadMem16U, 0x23, i_i) \
+ V(I64LoadMem8S, 0x24, l_i) \
+ V(I64LoadMem8U, 0x25, l_i) \
+ V(I64LoadMem16S, 0x26, l_i) \
+ V(I64LoadMem16U, 0x27, l_i) \
+ V(I64LoadMem32S, 0x28, l_i) \
+ V(I64LoadMem32U, 0x29, l_i) \
+ V(I32LoadMem, 0x2a, i_i) \
+ V(I64LoadMem, 0x2b, l_i) \
+ V(F32LoadMem, 0x2c, f_i) \
+ V(F64LoadMem, 0x2d, d_i)
+
+// Store memory expressions.
+#define FOREACH_STORE_MEM_OPCODE(V) \
+ V(I32StoreMem8, 0x2e, i_ii) \
+ V(I32StoreMem16, 0x2f, i_ii) \
+ V(I64StoreMem8, 0x30, l_il) \
+ V(I64StoreMem16, 0x31, l_il) \
+ V(I64StoreMem32, 0x32, l_il) \
+ V(I32StoreMem, 0x33, i_ii) \
+ V(I64StoreMem, 0x34, l_il) \
+ V(F32StoreMem, 0x35, f_if) \
+ V(F64StoreMem, 0x36, d_id)
+
+// Miscellaneous memory expressions.
+#define FOREACH_MISC_MEM_OPCODE(V) \
+ V(MemorySize, 0x3b, i_v) \
+ V(GrowMemory, 0x39, i_i)
+
+// Expressions with signatures.
+#define FOREACH_SIMPLE_OPCODE(V) \
+ V(I32Add, 0x40, i_ii) \
+ V(I32Sub, 0x41, i_ii) \
+ V(I32Mul, 0x42, i_ii) \
+ V(I32DivS, 0x43, i_ii) \
+ V(I32DivU, 0x44, i_ii) \
+ V(I32RemS, 0x45, i_ii) \
+ V(I32RemU, 0x46, i_ii) \
+ V(I32And, 0x47, i_ii) \
+ V(I32Ior, 0x48, i_ii) \
+ V(I32Xor, 0x49, i_ii) \
+ V(I32Shl, 0x4a, i_ii) \
+ V(I32ShrU, 0x4b, i_ii) \
+ V(I32ShrS, 0x4c, i_ii) \
+ V(I32Eq, 0x4d, i_ii) \
+ V(I32Ne, 0x4e, i_ii) \
+ V(I32LtS, 0x4f, i_ii) \
+ V(I32LeS, 0x50, i_ii) \
+ V(I32LtU, 0x51, i_ii) \
+ V(I32LeU, 0x52, i_ii) \
+ V(I32GtS, 0x53, i_ii) \
+ V(I32GeS, 0x54, i_ii) \
+ V(I32GtU, 0x55, i_ii) \
+ V(I32GeU, 0x56, i_ii) \
+ V(I32Clz, 0x57, i_i) \
+ V(I32Ctz, 0x58, i_i) \
+ V(I32Popcnt, 0x59, i_i) \
+ V(BoolNot, 0x5a, i_i) \
+ V(I64Add, 0x5b, l_ll) \
+ V(I64Sub, 0x5c, l_ll) \
+ V(I64Mul, 0x5d, l_ll) \
+ V(I64DivS, 0x5e, l_ll) \
+ V(I64DivU, 0x5f, l_ll) \
+ V(I64RemS, 0x60, l_ll) \
+ V(I64RemU, 0x61, l_ll) \
+ V(I64And, 0x62, l_ll) \
+ V(I64Ior, 0x63, l_ll) \
+ V(I64Xor, 0x64, l_ll) \
+ V(I64Shl, 0x65, l_ll) \
+ V(I64ShrU, 0x66, l_ll) \
+ V(I64ShrS, 0x67, l_ll) \
+ V(I64Eq, 0x68, i_ll) \
+ V(I64Ne, 0x69, i_ll) \
+ V(I64LtS, 0x6a, i_ll) \
+ V(I64LeS, 0x6b, i_ll) \
+ V(I64LtU, 0x6c, i_ll) \
+ V(I64LeU, 0x6d, i_ll) \
+ V(I64GtS, 0x6e, i_ll) \
+ V(I64GeS, 0x6f, i_ll) \
+ V(I64GtU, 0x70, i_ll) \
+ V(I64GeU, 0x71, i_ll) \
+ V(I64Clz, 0x72, l_l) \
+ V(I64Ctz, 0x73, l_l) \
+ V(I64Popcnt, 0x74, l_l) \
+ V(F32Add, 0x75, f_ff) \
+ V(F32Sub, 0x76, f_ff) \
+ V(F32Mul, 0x77, f_ff) \
+ V(F32Div, 0x78, f_ff) \
+ V(F32Min, 0x79, f_ff) \
+ V(F32Max, 0x7a, f_ff) \
+ V(F32Abs, 0x7b, f_f) \
+ V(F32Neg, 0x7c, f_f) \
+ V(F32CopySign, 0x7d, f_ff) \
+ V(F32Ceil, 0x7e, f_f) \
+ V(F32Floor, 0x7f, f_f) \
+ V(F32Trunc, 0x80, f_f) \
+ V(F32NearestInt, 0x81, f_f) \
+ V(F32Sqrt, 0x82, f_f) \
+ V(F32Eq, 0x83, i_ff) \
+ V(F32Ne, 0x84, i_ff) \
+ V(F32Lt, 0x85, i_ff) \
+ V(F32Le, 0x86, i_ff) \
+ V(F32Gt, 0x87, i_ff) \
+ V(F32Ge, 0x88, i_ff) \
+ V(F64Add, 0x89, d_dd) \
+ V(F64Sub, 0x8a, d_dd) \
+ V(F64Mul, 0x8b, d_dd) \
+ V(F64Div, 0x8c, d_dd) \
+ V(F64Min, 0x8d, d_dd) \
+ V(F64Max, 0x8e, d_dd) \
+ V(F64Abs, 0x8f, d_d) \
+ V(F64Neg, 0x90, d_d) \
+ V(F64CopySign, 0x91, d_dd) \
+ V(F64Ceil, 0x92, d_d) \
+ V(F64Floor, 0x93, d_d) \
+ V(F64Trunc, 0x94, d_d) \
+ V(F64NearestInt, 0x95, d_d) \
+ V(F64Sqrt, 0x96, d_d) \
+ V(F64Eq, 0x97, i_dd) \
+ V(F64Ne, 0x98, i_dd) \
+ V(F64Lt, 0x99, i_dd) \
+ V(F64Le, 0x9a, i_dd) \
+ V(F64Gt, 0x9b, i_dd) \
+ V(F64Ge, 0x9c, i_dd) \
+ V(I32SConvertF32, 0x9d, i_f) \
+ V(I32SConvertF64, 0x9e, i_d) \
+ V(I32UConvertF32, 0x9f, i_f) \
+ V(I32UConvertF64, 0xa0, i_d) \
+ V(I32ConvertI64, 0xa1, i_l) \
+ V(I64SConvertF32, 0xa2, l_f) \
+ V(I64SConvertF64, 0xa3, l_d) \
+ V(I64UConvertF32, 0xa4, l_f) \
+ V(I64UConvertF64, 0xa5, l_d) \
+ V(I64SConvertI32, 0xa6, l_i) \
+ V(I64UConvertI32, 0xa7, l_i) \
+ V(F32SConvertI32, 0xa8, f_i) \
+ V(F32UConvertI32, 0xa9, f_i) \
+ V(F32SConvertI64, 0xaa, f_l) \
+ V(F32UConvertI64, 0xab, f_l) \
+ V(F32ConvertF64, 0xac, f_d) \
+ V(F32ReinterpretI32, 0xad, f_i) \
+ V(F64SConvertI32, 0xae, d_i) \
+ V(F64UConvertI32, 0xaf, d_i) \
+ V(F64SConvertI64, 0xb0, d_l) \
+ V(F64UConvertI64, 0xb1, d_l) \
+ V(F64ConvertF32, 0xb2, d_f) \
+ V(F64ReinterpretI64, 0xb3, d_l) \
+ V(I32ReinterpretF32, 0xb4, i_f) \
+ V(I64ReinterpretF64, 0xb5, l_d)
+
+// All opcodes.
+#define FOREACH_OPCODE(V) \
+ FOREACH_CONTROL_OPCODE(V) \
+ FOREACH_MISC_OPCODE(V) \
+ FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_STORE_MEM_OPCODE(V) \
+ FOREACH_LOAD_MEM_OPCODE(V) \
+ FOREACH_MISC_MEM_OPCODE(V)
+
+// All signatures.
+#define FOREACH_SIGNATURE(V) \
+ V(i_ii, kAstI32, kAstI32, kAstI32) \
+ V(i_i, kAstI32, kAstI32) \
+ V(i_v, kAstI32) \
+ V(i_ff, kAstI32, kAstF32, kAstF32) \
+ V(i_f, kAstI32, kAstF32) \
+ V(i_dd, kAstI32, kAstF64, kAstF64) \
+ V(i_d, kAstI32, kAstF64) \
+ V(i_l, kAstI32, kAstI64) \
+ V(l_ll, kAstI64, kAstI64, kAstI64) \
+ V(i_ll, kAstI32, kAstI64, kAstI64) \
+ V(l_l, kAstI64, kAstI64) \
+ V(l_i, kAstI64, kAstI32) \
+ V(l_f, kAstI64, kAstF32) \
+ V(l_d, kAstI64, kAstF64) \
+ V(f_ff, kAstF32, kAstF32, kAstF32) \
+ V(f_f, kAstF32, kAstF32) \
+ V(f_d, kAstF32, kAstF64) \
+ V(f_i, kAstF32, kAstI32) \
+ V(f_l, kAstF32, kAstI64) \
+ V(d_dd, kAstF64, kAstF64, kAstF64) \
+ V(d_d, kAstF64, kAstF64) \
+ V(d_f, kAstF64, kAstF32) \
+ V(d_i, kAstF64, kAstI32) \
+ V(d_l, kAstF64, kAstI64) \
+ V(d_id, kAstF64, kAstI32, kAstF64) \
+ V(f_if, kAstF32, kAstI32, kAstF32) \
+ V(l_il, kAstI64, kAstI32, kAstI64)
+
+enum WasmOpcode {
+// Declare expression opcodes.
+#define DECLARE_NAMED_ENUM(name, opcode, sig) kExpr##name = opcode,
+ FOREACH_OPCODE(DECLARE_NAMED_ENUM)
+#undef DECLARE_NAMED_ENUM
+};
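+
+// For example, the expansion above yields kExprI32Add = 0x40 and
+// kExprF64Sqrt = 0x96, so an opcode's enum value is exactly its byte
+// encoding in a module.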
+
+// A collection of opcode-related static methods.
+class WasmOpcodes {
+ public:
+ static bool IsSupported(WasmOpcode opcode);
+ static const char* OpcodeName(WasmOpcode opcode);
+ static FunctionSig* Signature(WasmOpcode opcode);
+
+ static byte MemSize(MachineType type) {
+ return 1 << ElementSizeLog2Of(type.representation());
+ }
+
+ static LocalTypeCode LocalTypeCodeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return kLocalI32;
+ case kAstI64:
+ return kLocalI64;
+ case kAstF32:
+ return kLocalF32;
+ case kAstF64:
+ return kLocalF64;
+ case kAstStmt:
+ return kLocalVoid;
+ default:
+ UNREACHABLE();
+ return kLocalVoid;
+ }
+ }
+
+ static MemTypeCode MemTypeCodeFor(MachineType type) {
+ if (type == MachineType::Int8()) {
+ return kMemI8;
+ } else if (type == MachineType::Uint8()) {
+ return kMemU8;
+ } else if (type == MachineType::Int16()) {
+ return kMemI16;
+ } else if (type == MachineType::Uint16()) {
+ return kMemU16;
+ } else if (type == MachineType::Int32()) {
+ return kMemI32;
+ } else if (type == MachineType::Uint32()) {
+ return kMemU32;
+ } else if (type == MachineType::Int64()) {
+ return kMemI64;
+ } else if (type == MachineType::Uint64()) {
+ return kMemU64;
+ } else if (type == MachineType::Float32()) {
+ return kMemF32;
+ } else if (type == MachineType::Float64()) {
+ return kMemF64;
+ } else {
+ UNREACHABLE();
+ return kMemI32;
+ }
+ }
+
+ static MachineType MachineTypeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return MachineType::Int32();
+ case kAstI64:
+ return MachineType::Int64();
+ case kAstF32:
+ return MachineType::Float32();
+ case kAstF64:
+ return MachineType::Float64();
+ case kAstStmt:
+ return MachineType::None();
+ default:
+ UNREACHABLE();
+ return MachineType::None();
+ }
+ }
+
+ static LocalType LocalTypeFor(MachineType type) {
+ if (type == MachineType::Int8()) {
+ return kAstI32;
+ } else if (type == MachineType::Uint8()) {
+ return kAstI32;
+ } else if (type == MachineType::Int16()) {
+ return kAstI32;
+ } else if (type == MachineType::Uint16()) {
+ return kAstI32;
+ } else if (type == MachineType::Int32()) {
+ return kAstI32;
+ } else if (type == MachineType::Uint32()) {
+ return kAstI32;
+ } else if (type == MachineType::Int64()) {
+ return kAstI64;
+ } else if (type == MachineType::Uint64()) {
+ return kAstI64;
+ } else if (type == MachineType::Float32()) {
+ return kAstF32;
+ } else if (type == MachineType::Float64()) {
+ return kAstF64;
+ } else {
+ UNREACHABLE();
+ return kAstI32;
+ }
+ }
+
+ // TODO(titzer): remove this method
+ static WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
+ if (type == MachineType::Int8()) {
+ return store ? kExprI32StoreMem8 : kExprI32LoadMem8S;
+ } else if (type == MachineType::Uint8()) {
+ return store ? kExprI32StoreMem8 : kExprI32LoadMem8U;
+ } else if (type == MachineType::Int16()) {
+ return store ? kExprI32StoreMem16 : kExprI32LoadMem16S;
+ } else if (type == MachineType::Uint16()) {
+ return store ? kExprI32StoreMem16 : kExprI32LoadMem16U;
+ } else if (type == MachineType::Int32()) {
+ return store ? kExprI32StoreMem : kExprI32LoadMem;
+ } else if (type == MachineType::Uint32()) {
+ return store ? kExprI32StoreMem : kExprI32LoadMem;
+ } else if (type == MachineType::Int64()) {
+ return store ? kExprI64StoreMem : kExprI64LoadMem;
+ } else if (type == MachineType::Uint64()) {
+ return store ? kExprI64StoreMem : kExprI64LoadMem;
+ } else if (type == MachineType::Float32()) {
+ return store ? kExprF32StoreMem : kExprF32LoadMem;
+ } else if (type == MachineType::Float64()) {
+ return store ? kExprF64StoreMem : kExprF64LoadMem;
+ } else {
+ UNREACHABLE();
+ return kExprNop;
+ }
+ }
+
+ static byte LoadStoreAccessOf(bool with_offset) {
+ return MemoryAccess::OffsetField::encode(with_offset);
+ }
+
+ static char ShortNameOf(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return 'i';
+ case kAstI64:
+ return 'l';
+ case kAstF32:
+ return 'f';
+ case kAstF64:
+ return 'd';
+ case kAstStmt:
+ return 'v';
+ case kAstEnd:
+ return 'x';
+ default:
+ UNREACHABLE();
+ return '?';
+ }
+ }
+
+ static const char* TypeName(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return "i32";
+ case kAstI64:
+ return "i64";
+ case kAstF32:
+ return "f32";
+ case kAstF64:
+ return "f64";
+ case kAstStmt:
+ return "<stmt>";
+ case kAstEnd:
+ return "<end>";
+ default:
+ return "<unknown>";
+ }
+ }
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OPCODES_H_
diff --git a/chromium/v8/src/wasm/wasm-result.cc b/chromium/v8/src/wasm/wasm-result.cc
new file mode 100644
index 00000000000..4fd17ee364f
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-result.cc
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-result.h"
+
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects.h"
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code) {
+ switch (error_code) {
+ case kSuccess:
+ os << "Success";
+ break;
+ default: // TODO(titzer): render error codes
+ os << "Error";
+ break;
+ }
+ return os;
+}
+
+
+void ErrorThrower::Error(const char* format, ...) {
+ if (error_) return; // only report the first error.
+ error_ = true;
+ char buffer[256];
+
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VSNPrintF(buffer, 255, format, arguments);
+ va_end(arguments);
+
+ std::ostringstream str;
+ if (context_ != nullptr) {
+ str << context_ << ": ";
+ }
+ str << buffer;
+
+ isolate_->ScheduleThrow(
+ *isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-result.h b/chromium/v8/src/wasm/wasm-result.h
new file mode 100644
index 00000000000..59ab29ebe40
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-result.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_RESULT_H_
+#define V8_WASM_RESULT_H_
+
+#include "src/base/smart-pointers.h"
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace wasm {
+
+// Error codes for programmatic checking of the decoder's verification.
+enum ErrorCode {
+ kSuccess,
+ kError, // TODO(titzer): remove me
+ kOutOfMemory, // decoder ran out of memory
+ kEndOfCode, // end of code reached prematurely
+ kInvalidOpcode, // found invalid opcode
+ kUnreachableCode, // found unreachable code
+ kImproperContinue, // improperly nested continue
+ kImproperBreak, // improperly nested break
+ kReturnCount, // return count mismatch
+ kTypeError, // type mismatch
+ kInvalidLocalIndex, // invalid local
+ kInvalidGlobalIndex, // invalid global
+ kInvalidFunctionIndex, // invalid function
+ kInvalidMemType // invalid memory type
+};
+
+// The overall result of decoding a function or a module.
+template <typename T>
+struct Result {
+  Result()
+      : val(nullptr),
+        error_code(kSuccess),
+        start(nullptr),
+        error_pc(nullptr),
+        error_pt(nullptr) {
+    error_msg.Reset(nullptr);
+  }
+
+ T val;
+ ErrorCode error_code;
+ const byte* start;
+ const byte* error_pc;
+ const byte* error_pt;
+ base::SmartArrayPointer<char> error_msg;
+
+ bool ok() const { return error_code == kSuccess; }
+ bool failed() const { return error_code != kSuccess; }
+
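+  // Copies the error state from |that|; |val| is not copied because the
+  // value types may differ.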
+ template <typename V>
+ void CopyFrom(Result<V>& that) {
+ error_code = that.error_code;
+ start = that.start;
+ error_pc = that.error_pc;
+ error_pt = that.error_pt;
+ error_msg = that.error_msg;
+ }
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const Result<T>& result) {
+ os << "Result = ";
+ if (result.ok()) {
+ if (result.val != nullptr) {
+ os << *result.val;
+ } else {
+ os << "success (no value)";
+ }
+ } else if (result.error_msg.get() != nullptr) {
+ ptrdiff_t offset = result.error_pc - result.start;
+ if (offset < 0) {
+ os << result.error_msg.get() << " @" << offset;
+ } else {
+ os << result.error_msg.get() << " @+" << offset;
+ }
+ } else {
+ os << result.error_code;
+ }
+ os << std::endl;
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
+
+// A helper for generating error messages that bubble up to JS exceptions.
+class ErrorThrower {
+ public:
+ ErrorThrower(Isolate* isolate, const char* context)
+ : isolate_(isolate), context_(context), error_(false) {}
+
+ void Error(const char* fmt, ...);
+
+ template <typename T>
+ void Failed(const char* error, Result<T>& result) {
+ std::ostringstream str;
+ str << error << result;
+ return Error(str.str().c_str());
+ }
+
+ bool error() const { return error_; }
+
+ private:
+ Isolate* isolate_;
+ const char* context_;
+ bool error_;
+};
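+
+// Minimal usage sketch (`condition` and `offset` are illustrative names):
+//
+//   ErrorThrower thrower(isolate, "WasmExample");
+//   if (condition) thrower.Error("invalid byte at offset %d", offset);
+//   if (thrower.error()) return;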
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_RESULT_H_
diff --git a/chromium/v8/src/x64/assembler-x64-inl.h b/chromium/v8/src/x64/assembler-x64-inl.h
index c66e86df3f9..bfec51c462f 100644
--- a/chromium/v8/src/x64/assembler-x64-inl.h
+++ b/chromium/v8/src/x64/assembler-x64-inl.h
@@ -272,18 +272,18 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc, sizeof(int32_t));
+ Assembler::FlushICache(isolate, pc, sizeof(int32_t));
}
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -354,7 +354,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -404,7 +405,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -451,7 +452,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
@@ -469,7 +470,8 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -512,8 +514,8 @@ void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
}
@@ -527,8 +529,9 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
- Assembler::FlushICacheWithoutIsolate(
- pc_ + Assembler::kPatchDebugBreakSlotAddressOffset, sizeof(Address));
+ Assembler::FlushICache(isolate_,
+ pc_ + Assembler::kPatchDebugBreakSlotAddressOffset,
+ sizeof(Address));
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -541,7 +544,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -629,6 +632,7 @@ void Operand::set_disp64(int64_t disp) {
*p = disp;
len_ += sizeof(disp);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/chromium/v8/src/x64/assembler-x64.cc b/chromium/v8/src/x64/assembler-x64.cc
index cb93ab878b9..9626efc4a76 100644
--- a/chromium/v8/src/x64/assembler-x64.cc
+++ b/chromium/v8/src/x64/assembler-x64.cc
@@ -116,20 +116,6 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
-// Register constants.
-
-const int
- Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r12, r14, r15
- 0, 3, 2, 1, 6, 7, 8, 9, 11, 12, 14, 15
-};
-
-const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, 11
-};
-
-
-// -----------------------------------------------------------------------------
// Implementation of Operand
Operand::Operand(Register base, int32_t disp) : rex_(0) {
@@ -306,6 +292,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -402,6 +389,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
@@ -759,6 +747,60 @@ void Assembler::bsrl(Register dst, const Operand& src) {
}
+void Assembler::bsrq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::bsrq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::bsfl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::bsfl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::bsfq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::bsfq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBC);
+ emit_operand(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -2445,6 +2487,7 @@ void Assembler::orps(XMMRegister dst, const Operand& src) {
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2454,6 +2497,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
void Assembler::xorps(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
@@ -2537,6 +2581,7 @@ void Assembler::divps(XMMRegister dst, const Operand& src) {
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2547,6 +2592,7 @@ void Assembler::movd(XMMRegister dst, Register src) {
void Assembler::movd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -2557,6 +2603,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
void Assembler::movd(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
@@ -2567,6 +2614,7 @@ void Assembler::movd(Register dst, XMMRegister src) {
void Assembler::movq(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(dst, src);
@@ -2577,6 +2625,7 @@ void Assembler::movq(XMMRegister dst, Register src) {
void Assembler::movq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_rex_64(src, dst);
@@ -2587,6 +2636,7 @@ void Assembler::movq(Register dst, XMMRegister src) {
void Assembler::movq(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (dst.low_bits() == 4) {
// Avoid unnecessary SIB byte.
@@ -2699,6 +2749,7 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
void Assembler::movsd(const Operand& dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
emit_optional_rex_32(src, dst);
@@ -2709,6 +2760,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
emit_optional_rex_32(dst, src);
@@ -2719,6 +2771,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2); // double
emit_optional_rex_32(dst, src);
@@ -2729,6 +2782,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
// Try to avoid an unnecessary SIB byte.
@@ -2757,6 +2811,7 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
// Try to avoid an unnecessary SIB byte.
@@ -2916,6 +2971,7 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -2925,6 +2981,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0f);
@@ -2933,7 +2990,19 @@ void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
}
+void Assembler::movss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3); // single
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10); // load
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::movss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
emit_optional_rex_32(dst, src);
@@ -2944,6 +3013,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) {
void Assembler::movss(const Operand& src, XMMRegister dst) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3); // single
emit_optional_rex_32(dst, src);
@@ -2954,6 +3024,7 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
void Assembler::psllq(XMMRegister reg, byte imm8) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
@@ -2965,6 +3036,7 @@ void Assembler::psllq(XMMRegister reg, byte imm8) {
void Assembler::psrlq(XMMRegister reg, byte imm8) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
@@ -2998,6 +3070,7 @@ void Assembler::psrld(XMMRegister reg, byte imm8) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3008,6 +3081,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttss2si(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3018,6 +3092,7 @@ void Assembler::cvttss2si(Register dst, XMMRegister src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3028,6 +3103,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3037,7 +3113,30 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
}
+void Assembler::cvttss2siq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvttss2siq(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3048,6 +3147,7 @@ void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
void Assembler::cvttsd2siq(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3058,6 +3158,7 @@ void Assembler::cvttsd2siq(Register dst, const Operand& src) {
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3068,6 +3169,7 @@ void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3087,7 +3189,30 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
}
+void Assembler::cvtqsi2ss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtqsi2ss(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3098,6 +3223,7 @@ void Assembler::cvtqsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3108,6 +3234,7 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3118,6 +3245,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_optional_rex_32(dst, src);
@@ -3128,6 +3256,7 @@ void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3138,6 +3267,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3148,6 +3278,7 @@ void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3158,6 +3289,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_rex_64(dst, src);
@@ -3308,6 +3440,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3318,6 +3451,7 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3328,6 +3462,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3338,6 +3473,7 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3348,6 +3484,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3368,7 +3505,23 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x3a);
+ emit(0x0a);
+ emit_sse_operand(dst, src);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3377,7 +3530,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(0x3a);
emit(0x0b);
emit_sse_operand(dst, src);
- // Mask precision exeption.
+ // Mask precision exception.
emit(static_cast<byte>(mode) | 0x8);
}
@@ -3402,6 +3555,7 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
@@ -3472,39 +3626,80 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
}
-void Assembler::vucomisd(XMMRegister dst, XMMRegister src) {
+void Assembler::vmovd(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, k66, k0F, kWIG);
- emit(0x2e);
+ XMMRegister isrc = {src.code()};
+ emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW0);
+ emit(0x6e);
emit_sse_operand(dst, src);
}
-void Assembler::vucomisd(XMMRegister dst, const Operand& src) {
+void Assembler::vmovd(XMMRegister dst, const Operand& src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, xmm0, src, kLIG, k66, k0F, kWIG);
- emit(0x2e);
+ emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW0);
+ emit(0x6e);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::vmovd(Register dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ XMMRegister idst = {dst.code()};
+ emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW0);
+ emit(0x7e);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::vmovq(XMMRegister dst, Register src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ XMMRegister isrc = {src.code()};
+ emit_vex_prefix(dst, xmm0, isrc, kL128, k66, k0F, kW1);
+ emit(0x6e);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::vmovq(XMMRegister dst, const Operand& src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, k66, k0F, kW1);
+ emit(0x6e);
emit_sse_operand(dst, src);
}
+void Assembler::vmovq(Register dst, XMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ XMMRegister idst = {dst.code()};
+ emit_vex_prefix(src, xmm0, idst, kL128, k66, k0F, kW1);
+ emit(0x7e);
+ emit_sse_operand(src, dst);
+}
+
+
void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
+ XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
emit(op);
emit_sse_operand(dst, src2);
}
void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
- const Operand& src2) {
+ const Operand& src2, SIMDPrefix pp, LeadingOpcode m,
+ VexW w) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
- emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+ emit_vex_prefix(dst, src1, src2, kLIG, pp, m, w);
emit(op);
emit_sse_operand(dst, src2);
}
@@ -3922,7 +4117,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// Don't record pseudo relocation info for code age sequence mode.
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/chromium/v8/src/x64/assembler-x64.h b/chromium/v8/src/x64/assembler-x64.h
index 47e4d2bddae..799fa6fe9d7 100644
--- a/chromium/v8/src/x64/assembler-x64.h
+++ b/chromium/v8/src/x64/assembler-x64.h
@@ -40,13 +40,45 @@
#include <deque>
#include "src/assembler.h"
-#include "src/compiler.h"
namespace v8 {
namespace internal {
// Utility functions
+#define GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rcx) \
+ V(rdx) \
+ V(rbx) \
+ V(rsp) \
+ V(rbp) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r10) \
+ V(r11) \
+ V(r12) \
+ V(r13) \
+ V(r14) \
+ V(r15)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(rax) \
+ V(rbx) \
+ V(rdx) \
+ V(rcx) \
+ V(rsi) \
+ V(rdi) \
+ V(r8) \
+ V(r9) \
+ V(r11) \
+ V(r12) \
+ V(r14) \
+ V(r15)
+
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,226 +100,153 @@ namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
//
-
struct Register {
- // The non-allocatable registers are:
- // rsp - stack pointer
- // rbp - frame pointer
- // r10 - fixed scratch register
- // r13 - root register
- static const int kMaxNumAllocatableRegisters = 12;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 16;
-
- static int ToAllocationIndex(Register reg) {
- return kAllocationIndexByRegisterCode[reg.code()];
- }
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- Register result = { kRegisterCodeByAllocationIndex[index] };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "rax",
- "rbx",
- "rdx",
- "rcx",
- "rsi",
- "rdi",
- "r8",
- "r9",
- "r11",
- "r12",
- "r14",
- "r15"
- };
- return names[index];
- }
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
- Register r = { code };
+ DCHECK(code >= 0);
+ DCHECK(code < kNumRegisters);
+ Register r = {code};
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // rax, rbx, rcx and rdx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
- return 1 << code_;
+ DCHECK(is_valid());
+ return 1 << reg_code;
}
+ bool is_byte_register() const { return reg_code <= 3; }
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
+ int high_bit() const { return reg_code >> 3; }
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
+ int low_bits() const { return reg_code & 0x7; }
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
- int code_;
-
- private:
- static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
- static const int kAllocationIndexByRegisterCode[kNumRegisters];
+ int reg_code;
};
-const int kRegister_rax_Code = 0;
-const int kRegister_rcx_Code = 1;
-const int kRegister_rdx_Code = 2;
-const int kRegister_rbx_Code = 3;
-const int kRegister_rsp_Code = 4;
-const int kRegister_rbp_Code = 5;
-const int kRegister_rsi_Code = 6;
-const int kRegister_rdi_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11;
-const int kRegister_r12_Code = 12;
-const int kRegister_r13_Code = 13;
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-const int kRegister_no_reg_Code = -1;
-
-const Register rax = { kRegister_rax_Code };
-const Register rcx = { kRegister_rcx_Code };
-const Register rdx = { kRegister_rdx_Code };
-const Register rbx = { kRegister_rbx_Code };
-const Register rsp = { kRegister_rsp_Code };
-const Register rbp = { kRegister_rbp_Code };
-const Register rsi = { kRegister_rsi_Code };
-const Register rdi = { kRegister_rdi_Code };
-const Register r8 = { kRegister_r8_Code };
-const Register r9 = { kRegister_r9_Code };
-const Register r10 = { kRegister_r10_Code };
-const Register r11 = { kRegister_r11_Code };
-const Register r12 = { kRegister_r12_Code };
-const Register r13 = { kRegister_r13_Code };
-const Register r14 = { kRegister_r14_Code };
-const Register r15 = { kRegister_r15_Code };
-const Register no_reg = { kRegister_no_reg_Code };
+
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
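+// For example, DECLARE_REGISTER(rax) expands to
+//   const Register rax = {Register::kCode_rax};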
+
#ifdef _WIN64
// Windows calling convention
- const Register arg_reg_1 = { kRegister_rcx_Code };
- const Register arg_reg_2 = { kRegister_rdx_Code };
- const Register arg_reg_3 = { kRegister_r8_Code };
- const Register arg_reg_4 = { kRegister_r9_Code };
+const Register arg_reg_1 = {Register::kCode_rcx};
+const Register arg_reg_2 = {Register::kCode_rdx};
+const Register arg_reg_3 = {Register::kCode_r8};
+const Register arg_reg_4 = {Register::kCode_r9};
#else
// AMD64 calling convention
- const Register arg_reg_1 = { kRegister_rdi_Code };
- const Register arg_reg_2 = { kRegister_rsi_Code };
- const Register arg_reg_3 = { kRegister_rdx_Code };
- const Register arg_reg_4 = { kRegister_rcx_Code };
+const Register arg_reg_1 = {Register::kCode_rdi};
+const Register arg_reg_2 = {Register::kCode_rsi};
+const Register arg_reg_3 = {Register::kCode_rdx};
+const Register arg_reg_4 = {Register::kCode_rcx};
#endif // _WIN64
-struct XMMRegister {
- static const int kMaxNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 15;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
-
- // TODO(turbofan): Proper support for float32.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
- static int ToAllocationIndex(XMMRegister reg) {
- DCHECK(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static XMMRegister FromAllocationIndex(int index) {
- DCHECK(0 <= index && index < kMaxNumAllocatableRegisters);
- XMMRegister result = { index + 1 };
+#define DOUBLE_REGISTERS(V) \
+ V(xmm0) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7) \
+ V(xmm8) \
+ V(xmm9) \
+ V(xmm10) \
+ V(xmm11) \
+ V(xmm12) \
+ V(xmm13) \
+ V(xmm14) \
+ V(xmm15)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm1) \
+ V(xmm2) \
+ V(xmm3) \
+ V(xmm4) \
+ V(xmm5) \
+ V(xmm6) \
+ V(xmm7) \
+ V(xmm8) \
+ V(xmm9) \
+ V(xmm10) \
+ V(xmm11) \
+ V(xmm12) \
+ V(xmm13) \
+ V(xmm14) \
+ V(xmm15)
+
+
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kMaxNumRegisters = Code::kAfterLast;
+
+ static DoubleRegister from_code(int code) {
+ DoubleRegister result = {code};
return result;
}
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7",
- "xmm8",
- "xmm9",
- "xmm10",
- "xmm11",
- "xmm12",
- "xmm13",
- "xmm14",
- "xmm15"
- };
- return names[index];
- }
-
- static XMMRegister from_code(int code) {
- DCHECK(code >= 0);
- DCHECK(code < kMaxNumRegisters);
- XMMRegister r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
+ int high_bit() const { return reg_code >> 3; }
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
+ int low_bits() const { return reg_code & 0x7; }
- int code_;
+ // Unfortunately we can't make this private in a struct when initializing
+ // by assignment.
+ int reg_code;
};
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister xmm8 = { 8 };
-const XMMRegister xmm9 = { 9 };
-const XMMRegister xmm10 = { 10 };
-const XMMRegister xmm11 = { 11 };
-const XMMRegister xmm12 = { 12 };
-const XMMRegister xmm13 = { 13 };
-const XMMRegister xmm14 = { 14 };
-const XMMRegister xmm15 = { 15 };
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef XMMRegister DoubleRegister;
+typedef DoubleRegister XMMRegister;
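Register and DoubleRegister now share the same 4-bit code split used throughout the encoder: high_bit() supplies the REX/VEX extension bit and low_bits() the 3-bit field in ModR/M, SIB and opcode bytes. A standalone sketch of that arithmetic (not V8 code):

#include <cstdio>

int main() {
  for (int code = 0; code < 16; ++code) {
    int rex_bit = code >> 3;  // high_bit(): 0 for xmm0..xmm7, 1 for xmm8..xmm15
    int low = code & 0x7;     // low_bits(): 3-bit field in ModR/M, SIB, opcode
    std::printf("code %2d -> prefix bit %d, low bits %d\n", code, rex_bit, low);
  }
  return 0;
}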
enum Condition {
// any value < 0 is considered no_condition
@@ -537,19 +496,18 @@ class Assembler : public AssemblerBase {
// the relative displacements stored in the code.
static inline Address target_address_at(Address pc, Address constant_pool);
static inline void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) {
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -559,13 +517,14 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static inline RelocInfo::Mode RelocInfoNone() {
@@ -617,6 +576,11 @@ class Assembler : public AssemblerBase {
static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
static const byte kJzShortOpcode = kJccShortPrefix | zero;
+ // VEX prefix encodings.
+ enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+ enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
+ enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+ enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
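Moving these enums up from the private section (their old definitions are deleted further down) lets the new vsd/vss/vps/vpd overloads accept them as parameters. Their values are the raw VEX field encodings; assuming the standard two-byte (C5) prefix layout of inverted R, inverted vvvv, L and pp, the second prefix byte could be assembled like this hypothetical helper (the assembler's real emit_vex2_byte1() is declared below):

#include <cstdint>

// Hypothetical helper, not the assembler's own implementation.
uint8_t Vex2Byte1(int reg_code, int vvvv_code, uint8_t l, uint8_t pp) {
  uint8_t r = ((reg_code >> 3) & 1) ? 0x00 : 0x80;   // inverted REX.R at bit 7
  uint8_t vvvv = (~vvvv_code & 0xF) << 3;            // inverted src2 code, bits 6..3
  return r | vvvv | (l & 0x04) | (pp & 0x03);        // kL128/kL256 and SIMDPrefix fit as-is
}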
// ---------------------------------------------------------------------------
// Code generation
@@ -883,8 +847,14 @@ class Assembler : public AssemblerBase {
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrq(Register dst, Register src);
+ void bsrq(Register dst, const Operand& src);
void bsrl(Register dst, Register src);
void bsrl(Register dst, const Operand& src);
+ void bsfq(Register dst, Register src);
+ void bsfq(Register dst, const Operand& src);
+ void bsfl(Register dst, Register src);
+ void bsfl(Register dst, const Operand& src);
// Miscellaneous
void clc();
@@ -1042,6 +1012,13 @@ class Assembler : public AssemblerBase {
void ucomiss(XMMRegister dst, XMMRegister src);
void ucomiss(XMMRegister dst, const Operand& src);
void movaps(XMMRegister dst, XMMRegister src);
+
+ // Don't use this unless it's important to keep the
+ // top half of the destination register unchanged.
+ // Use movaps when moving float values and movd for integer
+ // values in xmm registers.
+ void movss(XMMRegister dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
@@ -1078,7 +1055,7 @@ class Assembler : public AssemblerBase {
// Don't use this unless it's important to keep the
// top half of the destination register unchanged.
- // Used movaps when moving double values and movq for integer
+ // Use movapd when moving double values and movq for integer
// values in xmm registers.
void movsd(XMMRegister dst, XMMRegister src);
@@ -1100,11 +1077,17 @@ class Assembler : public AssemblerBase {
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
+ void cvttss2siq(Register dst, XMMRegister src);
+ void cvttss2siq(Register dst, const Operand& src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, const Operand& src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
void cvtlsi2sd(XMMRegister dst, Register src);
+
+ void cvtqsi2ss(XMMRegister dst, const Operand& src);
+ void cvtqsi2ss(XMMRegister dst, Register src);
+
void cvtqsi2sd(XMMRegister dst, const Operand& src);
void cvtqsi2sd(XMMRegister dst, Register src);
@@ -1155,6 +1138,7 @@ class Assembler : public AssemblerBase {
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
// AVX instruction
@@ -1308,88 +1292,180 @@ class Assembler : public AssemblerBase {
void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
- void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x58, dst, src1, src2);
+ void vmovd(XMMRegister dst, Register src);
+ void vmovd(XMMRegister dst, const Operand& src);
+ void vmovd(Register dst, XMMRegister src);
+ void vmovq(XMMRegister dst, Register src);
+ void vmovq(XMMRegister dst, const Operand& src);
+ void vmovq(Register dst, XMMRegister src);
+
+ void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x10, dst, src1, src2);
}
- void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x58, dst, src1, src2);
+ void vmovsd(XMMRegister dst, const Operand& src) {
+ vsd(0x10, dst, xmm0, src);
}
- void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5c, dst, src1, src2);
+ void vmovsd(const Operand& dst, XMMRegister src) {
+ vsd(0x11, src, xmm0, dst);
}
- void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5c, dst, src1, src2);
+
+#define AVX_SP_3(instr, opcode) \
+ AVX_S_3(instr, opcode) \
+ AVX_P_3(instr, opcode)
+
+#define AVX_S_3(instr, opcode) \
+ AVX_3(instr##ss, opcode, vss) \
+ AVX_3(instr##sd, opcode, vsd)
+
+#define AVX_P_3(instr, opcode) \
+ AVX_3(instr##ps, opcode, vps) \
+ AVX_3(instr##pd, opcode, vpd)
+
+#define AVX_3(instr, opcode, impl) \
+ void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ impl(opcode, dst, src1, src2); \
+ } \
+ void instr(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ impl(opcode, dst, src1, src2); \
}
- void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x59, dst, src1, src2);
+
+ AVX_SP_3(vsqrt, 0x51);
+ AVX_SP_3(vadd, 0x58);
+ AVX_SP_3(vsub, 0x5c);
+ AVX_SP_3(vmul, 0x59);
+ AVX_SP_3(vdiv, 0x5e);
+ AVX_SP_3(vmin, 0x5d);
+ AVX_SP_3(vmax, 0x5f);
+ AVX_P_3(vand, 0x54);
+ AVX_P_3(vor, 0x56);
+ AVX_P_3(vxor, 0x57);
+ AVX_3(vpcmpeqd, 0x76, vpd);
+ AVX_3(vcvtsd2ss, 0x5a, vsd);
+
+#undef AVX_3
+#undef AVX_S_3
+#undef AVX_P_3
+#undef AVX_SP_3
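Read bottom-up, the macro tower turns each AVX_SP_3 line into eight overloads: ss/sd/ps/pd forms, each with a register and an Operand variant. AVX_SP_3(vadd, 0x58), for instance, expands mechanically to the same code the patch deletes below; the vaddsd pair comes out as:

void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  vsd(0x58, dst, src1, src2);
}
void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
  vsd(0x58, dst, src1, src2);
}
// ...plus vaddss via vss(0x58, ...), vaddps via vps(0x58, ...), and
// vaddpd via vpd(0x58, ...).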
+
+ void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
+ XMMRegister iop = {2};
+ vpd(0x73, iop, dst, src);
+ emit(imm8);
}
- void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x59, dst, src1, src2);
+ void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
+ XMMRegister iop = {6};
+ vpd(0x73, iop, dst, src);
+ emit(imm8);
}
- void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5e, dst, src1, src2);
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
- void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5e, dst, src1, src2);
+ void vcvtss2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
- void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5f, dst, src1, src2);
+ void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
}
- void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5f, dst, src1, src2);
+ void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
- void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vsd(0x5d, dst, src1, src2);
+ void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
}
- void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vsd(0x5d, dst, src1, src2);
+ void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF3, k0F, kW1);
}
- void vucomisd(XMMRegister dst, XMMRegister src);
- void vucomisd(XMMRegister dst, const Operand& src);
- void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
-
- void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x58, dst, src1, src2);
+ void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
+ }
+ void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF2, k0F, kW1);
+ }
+ void vcvttsd2si(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
+ }
+ void vcvttsd2si(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
- void vaddss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x58, dst, src1, src2);
+ void vcvttss2siq(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
- void vsubss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5c, dst, src1, src2);
+ void vcvttss2siq(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
- void vsubss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5c, dst, src1, src2);
+ void vcvttsd2siq(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
- void vmulss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x59, dst, src1, src2);
+ void vcvttsd2siq(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
}
- void vmulss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x59, dst, src1, src2);
+ void vcvtsd2si(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2d, idst, xmm0, src, kF2, k0F, kW0);
}
- void vdivss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5e, dst, src1, src2);
+ void vucomisd(XMMRegister dst, XMMRegister src) {
+ vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
- void vdivss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5e, dst, src1, src2);
+ void vucomisd(XMMRegister dst, const Operand& src) {
+ vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
+ }
+ void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vsd(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
+ void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vsd(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
+
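vroundss/vroundsd (and roundss/roundsd above) build the SSE4.1 rounding immediate directly: bits 1:0 select the mode and OR-ing 0x8 sets the precision-mask bit, suppressing the inexact exception. A sketch of the resulting imm8 values, assuming V8's RoundingMode matches the hardware encoding:

#include <cstdio>

enum RoundingMode {  // assumed to match the SSE4.1 imm bits
  kRoundToNearest = 0x0, kRoundDown = 0x1, kRoundUp = 0x2, kRoundToZero = 0x3
};

int main() {
  for (int mode = kRoundToNearest; mode <= kRoundToZero; ++mode)
    std::printf("mode %d -> imm8 0x%x\n", mode, mode | 0x8);  // 0x8 masks precision
  return 0;
}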
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vmaxss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5f, dst, src1, src2);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(op, dst, src1, src2, kF2, k0F, kWIG);
}
- void vmaxss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5f, dst, src1, src2);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w);
+ void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2,
+ SIMDPrefix pp, LeadingOpcode m, VexW w);
+
+ void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vss(0x10, dst, src1, src2);
}
- void vminss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vss(0x5d, dst, src1, src2);
+ void vmovss(XMMRegister dst, const Operand& src) {
+ vss(0x10, dst, xmm0, src);
}
- void vminss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
- vss(0x5d, dst, src1, src2);
+ void vmovss(const Operand& dst, XMMRegister src) {
+ vss(0x11, src, xmm0, dst);
}
void vucomiss(XMMRegister dst, XMMRegister src);
void vucomiss(XMMRegister dst, const Operand& src);
void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
+ void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
+ void vmovmskpd(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vpd(0x50, idst, xmm0, src);
+ }
+
+ void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
// BMI instruction
void andnq(Register dst, Register src1, Register src2) {
bmi1q(0xf2, dst, src1, src2);
@@ -1567,33 +1643,6 @@ class Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, const Operand& src, byte imm8);
-#define PACKED_OP_LIST(V) \
- V(and, 0x54) \
- V(xor, 0x57)
-
-#define AVX_PACKED_OP_DECLARE(name, opcode) \
- void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vps(opcode, dst, src1, src2); \
- } \
- void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vps(opcode, dst, src1, src2); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vpd(opcode, dst, src1, src2); \
- } \
- void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
- vpd(opcode, dst, src1, src2); \
- }
-
- PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
- void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
-
- // Debugging
- void Print();
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1603,7 +1652,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1790,11 +1839,6 @@ class Assembler : public AssemblerBase {
}
// Emit vex prefix
- enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
- enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
- enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
- enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
-
void emit_vex2_byte0() { emit(0xc5); }
inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
SIMDPrefix pp);
@@ -2148,6 +2192,7 @@ class EnsureSpace BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_ASSEMBLER_X64_H_
diff --git a/chromium/v8/src/x64/builtins-x64.cc b/chromium/v8/src/x64/builtins-x64.cc
index 38d7e5abeb6..cb092f2f2d7 100644
--- a/chromium/v8/src/x64/builtins-x64.cc
+++ b/chromium/v8/src/x64/builtins-x64.cc
@@ -21,12 +21,12 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
+ // -- rdi : target
+ // -- rdx : new.target
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * argc] : first argument
// -- rsp[8 * (argc + 1)] : receiver
// -----------------------------------
__ AssertFunction(rdi);
@@ -35,37 +35,48 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
+ if (extra_args != BuiltinExtraArguments::kNone) {
__ PopReturnAddressTo(kScratchRegister);
- __ Push(rdi);
+ if (extra_args & BuiltinExtraArguments::kTarget) {
+ ++num_extra_args;
+ __ Push(rdi);
+ }
+ if (extra_args & BuiltinExtraArguments::kNewTarget) {
+ ++num_extra_args;
+ __ Push(rdx);
+ }
__ PushReturnAddressFrom(kScratchRegister);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
__ addp(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
+
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
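For extra_args to be tested bit by bit like this, BuiltinExtraArguments has to be a flag set rather than the old NEEDS_CALLED_FUNCTION two-state enum. A sketch of the shape this code implies (the actual definition lives in builtins.h):

enum class BuiltinExtraArguments : int {
  kNone = 0,
  kTarget = 1 << 0,
  kNewTarget = 1 << 1,
  kTargetAndNewTarget = kTarget | kNewTarget
};

// Makes `extra_args & BuiltinExtraArguments::kTarget` usable in an if().
inline int operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
  return static_cast<int>(lhs) & static_cast<int>(rhs);
}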
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
__ Push(rdi);
+ __ Push(rdx);
// Function is also the parameter to the runtime call.
__ Push(rdi);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ Pop(rdx);
__ Pop(rdi);
}
@@ -105,12 +116,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
// -- rbx: allocation site or undefined
- // -- rdx: original constructor
+ // -- rdx: new target
// -----------------------------------
// Enter a construct frame.
@@ -120,175 +132,167 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(rbx);
__ Push(rbx);
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
- __ Push(rdi);
- __ Push(rdx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ Move(kScratchRegister, debug_step_in_fp);
- __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmpp(rdx, rdi);
- __ j(not_equal, &rt_call);
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- DCHECK(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
- if (!is_api_function) {
- Label allocate;
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
- // Check if slack tracking is enabled.
- __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
- __ shrl(rsi, Immediate(Map::Counter::kShift));
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(less, &allocate);
- // Decrease generous allocation count.
- __ subl(FieldOperand(rax, Map::kBitField3Offset),
- Immediate(1 << Map::Counter::kShift));
-
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(not_equal, &allocate);
-
- __ Push(rax);
- __ Push(rdx);
- __ Push(rdi);
-
- __ Push(rdi); // constructor
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ Integer32ToSmi(rcx, rax);
+ __ Push(rcx);
+
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // rdx: new target
+ __ movp(rax,
+ FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
+ // A NULL pointer and a Smi are both caught by the JumpIfSmi below.
+ DCHECK(kSmiTag == 0);
+ __ JumpIfSmi(rax, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Fall back to the runtime if the constructor function in rdi does not
+ // match the constructor recorded on the initial map.
+ __ cmpp(rdi, FieldOperand(rax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ __ movzxbp(r9, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shlp(r9, Immediate(kPointerSizeLog2));
+ // r9: size of new object
+ __ Allocate(r9, rbx, r9, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+ // Allocated the JSObject, now initialize the fields.
+ // rdi: constructor
+ // rdx: new target
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // r9: start of next object
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ orp(rbx, Immediate(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // rbx: JSObject (tagged)
+ // rcx: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ // Check if slack tracking is enabled.
+ __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
+ __ shrl(rsi, Immediate(Map::ConstructionCounter::kShift));
+ __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
+ __ Push(rsi); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ subl(FieldOperand(rax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Allocate object with a slack.
+ __ movzxbp(rsi, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ negp(rsi);
+ __ leap(rsi, Operand(r9, rsi, times_pointer_size, 0));
+ // rsi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmpp(rcx, rsi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(rcx, rsi, r11);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(rcx, r9, r11);
+
+ __ Pop(rsi); // Restore allocation count value before decreasing.
+ __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
+ __ j(not_equal, &allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(rdi);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ __ Push(rax); // initial map
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+
+ // Continue with JSObject being successfully allocated.
+ // rdi: constructor
+ // rdx: new target
+ // rbx: JSObject (tagged)
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
+ }
- __ Pop(rdi);
- __ Pop(rdx);
- __ Pop(rax);
- __ movl(rsi, Immediate(Map::kSlackTrackingCounterEnd - 1));
+ __ InitializeFieldsWithFiller(rcx, r9, r11);
- __ bind(&allocate);
+ // Continue with JSObject being successfully allocated
+ // rdi: constructor
+ // rdx: new target
+ // rbx: JSObject (tagged)
+ __ jmp(&allocated);
}
- // Now allocate the JSObject on the heap.
- __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shlp(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ Allocate(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movp(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // rsi: slack tracking counter (non-API function case)
- __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(less, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ movzxbp(
- rsi,
- FieldOperand(
- rax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ movzxbp(rax, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ subp(rsi, rax);
- __ leap(rsi,
- Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
- // rsi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmpp(rsi, rdi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(rcx, rsi, rdx);
- __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
+ // Allocate the new receiver object using the runtime call.
+ // rdi: constructor
+ // rdx: new target
+ __ bind(&rt_call);
- __ bind(&no_inobject_slack_tracking);
- }
+ // Must restore rsi (context) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(rdi);
+ __ Push(rdx);
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ movp(rbx, rax); // store result in rbx
+ __ Pop(rdx);
+ __ Pop(rdi);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- // rbx: JSObject (untagged)
- __ orp(rbx, Immediate(kHeapObjectTag));
+ // Receiver for constructor call allocated.
+ // rdi: constructor
+ // rdx: new target
+ // rbx: newly allocated object
+ __ bind(&allocated);
- // Continue with JSObject being successfully allocated
- // rbx: JSObject (tagged)
- __ jmp(&allocated);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movp(rax, Operand(rsp, 0));
+ __ SmiToInteger32(rax, rax);
}
- // Allocate the new receiver object using the runtime call.
- // rdx: original constructor
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore rsi (context) and rdi (constructor) before calling runtime.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movp(rdi, Operand(rsp, offset));
- __ Push(rdi); // argument 2/1: constructor function
- __ Push(rdx); // argument 3/2: original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ movp(rbx, rax); // store result in rbx
-
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(rdx);
- __ Pop(rdi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ movp(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
-
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ Push(rdx);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(rbx);
- __ Push(rbx);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(rbx);
+ __ Push(rbx);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -311,39 +315,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movp(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame. The arguments
- // count is stored below the reciever and the new.target.
- __ bind(&exit);
- __ movp(rbx, Operand(rsp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(rax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movp(rax, Operand(rsp, 0));
+
+ // Restore the arguments count and leave the construct frame. The
+ // arguments count is stored below the receiver.
+ __ bind(&exit);
+ __ movp(rbx, Operand(rsp, 1 * kPointerSize));
+ } else {
+ __ movp(rbx, Operand(rsp, 0));
+ }
// Leave construct frame.
}
@@ -353,95 +362,33 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
+ if (create_implicit_receiver) {
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
+ }
__ ret(0);
}
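The use_receiver/exit sequence implements the cited ECMA-262 rule: a constructor's return value replaces the freshly allocated receiver only when it is an object. The same decision as plain C++ (a sketch; the boolean fields stand in for the Smi and FIRST_JS_RECEIVER_TYPE checks):

struct Value { bool is_smi; bool is_js_receiver; };

// Mirrors the stub: Smis and non-receiver heap objects are discarded in
// favor of the receiver that was allocated before the call.
const Value& ConstructResult(const Value& call_result, const Value& receiver) {
  if (call_result.is_smi) return receiver;           // JumpIfSmi -> use_receiver
  if (!call_result.is_js_receiver) return receiver;  // type below receiver range
  return call_result;                                // result is an object: keep it
}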
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rdi: constructor function
- // -- rbx: allocation site or undefined
- // -- rdx: original constructor
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve allocation site.
- __ AssertUndefinedOrAllocationSite(rbx);
- __ Push(rbx);
-
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
- __ SmiToInteger32(rax, rax);
-
- // Push new.target
- __ Push(rdx);
-
- // receiver is the hole.
- __ Push(masm->isolate()->factory()->the_hole_value());
-
- // Set up pointer to last argument.
- __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movp(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ Push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decp(rcx);
- __ j(greater_equal, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ Move(kScratchRegister, debug_step_in_fp);
- __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
- __ j(equal, &skip_step_in);
-
- __ Push(rax);
- __ Push(rdi);
- __ Push(rdi);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(rdi);
- __ Pop(rax);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Get arguments count, skipping over new.target.
- __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
- } // Leave construct frame.
- // Remove caller arguments from the stack and return.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ ret(0);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -476,7 +423,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -632,6 +579,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o rdi: the JS function object being called
+// o rdx: the new target
// o rsi: our context
// o rbp: the caller's frame pointer
// o rsp: stack pointer (pointing to return address)
@@ -649,6 +597,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(rbp, rsp);
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS function.
+ __ Push(rdx); // Callee's new target.
+
+ // Push zero for bytecode array offset.
+ __ Push(Immediate(0));
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeArrayRegister.
@@ -676,7 +628,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ subp(rdx, rcx);
__ CompareRoot(rdx, Heap::kRealStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -698,28 +650,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ Push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ Pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -727,9 +667,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ movp(kInterpreterRegisterFileRegister, rbp);
- __ subp(
- kInterpreterRegisterFileRegister,
- Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ addp(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -771,36 +710,188 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ bool push_receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ movp(rcx, rax);
+ if (push_receiver) {
+ __ addp(rcx, Immediate(1)); // Add one for receiver.
+ }
+
+ __ shlp(rcx, Immediate(kPointerSizeLog2));
+ __ negp(rcx);
+ __ addp(rcx, rbx);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ j(always, &loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(rbx, 0));
+ __ subp(rbx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmpp(rbx, rcx);
+ __ j(greater, &loop_header, Label::kNear);
}
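The push loop is a downward copy: rcx ends up one slot past the last argument, and values are pushed from rbx toward it. Equivalent standalone logic (a sketch, with a vector standing in for the machine stack):

#include <cstdint>
#include <vector>

void PushArgs(const intptr_t* first_arg, int count, bool push_receiver,
              std::vector<intptr_t>* stack) {
  const intptr_t* p = first_arg;
  const intptr_t* end = first_arg - (count + (push_receiver ? 1 : 0));
  while (p > end) {        // cmpp(rbx, rcx); j(greater, &loop_header)
    stack->push_back(*p);  // Push(Operand(rbx, 0))
    --p;                   // subp(rbx, Immediate(kPointerSize))
  }
}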
-static void CallCompileOptimized(MacroAssembler* masm,
- bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ Push(rdi);
- // Function is also the parameter to the runtime call.
- __ Push(rdi);
- // Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- rdi : the target to call (can be any Object).
+ // -----------------------------------
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(rdi);
+ // Pop return address to allow tail-call after pushing arguments.
+ __ PopReturnAddressTo(kScratchRegister);
+
+ Generate_InterpreterPushArgs(masm, true);
+
+ // Call the target.
+ __ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (can be any Object)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ PopReturnAddressTo(kScratchRegister);
+
+ // Push slot for the receiver to be constructed.
+ __ Push(Immediate(0));
+
+ Generate_InterpreterPushArgs(masm, false);
+
+ // Push return address in preparation for the tail-call.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ // Call the constructor (rax, rdx, rdi passed on).
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts) and push PC at top
+ // of stack (to simulate initial call to bytecode handler in interpreter entry
+ // trampoline).
+ __ Pop(rbx);
+ __ Drop(1);
+ __ Push(rbx);
+
+ // Initialize register file register and dispatch table register.
+ __ movp(kInterpreterRegisterFileRegister, rbp);
+ __ addp(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addp(kInterpreterDispatchTableRegister,
+ Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ movp(kContextRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ movp(rbx,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(kInterpreterBytecodeArrayRegister,
+ FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ rbx);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ movp(
+ kInterpreterBytecodeOffsetRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
+ times_pointer_size, 0));
+ __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rbx);
+}
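Once the frame state is rebuilt, resuming the interpreter is a plain table dispatch; the final four instructions amount to the following sketch (the real code additionally skips the Code object header before jumping):

#include <cstdint>

using BytecodeHandler = void (*)();

void Dispatch(const uint8_t* bytecode_array, int offset,
              BytecodeHandler* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];  // movzxbp
  dispatch_table[bytecode]();                 // movp from table + jmp
}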
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -893,7 +984,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ Popad();
// Tear down internal frame.
}
@@ -922,7 +1013,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
@@ -962,7 +1053,138 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into rax and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(rax, args.GetReceiverOperand());
+ __ JumpIfSmi(rax, &receiver_not_date);
+ __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
+ __ j(not_equal, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ movp(rax, FieldOperand(rax, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ Load(rdx, ExternalReference::date_cache_stamp(masm->isolate()));
+ __ cmpp(rdx, FieldOperand(rax, JSDate::kCacheStampOffset));
+ __ j(not_equal, &stamp_mismatch, Label::kNear);
+ __ movp(rax, FieldOperand(
+ rax, JSDate::kValueOffset + field_index * kPointerSize));
+ __ ret(1 * kPointerSize);
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2);
+ __ Move(arg_reg_1, rax);
+ __ Move(arg_reg_2, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ ret(1 * kPointerSize);
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowNotDateError);
+ }
+}
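The stamp comparison is what makes cached date fields cheap: any cache-invalidating event bumps an isolate-wide stamp, and a JSDate's cached fields are trusted only while its recorded stamp still matches. The fast path as standalone C++ (a sketch; the names and the cutoff are illustrative):

#include <array>

constexpr int kFirstUncachedField = 6;  // illustrative cutoff

struct DateLike {
  int cache_stamp;                      // stamp at the time fields were cached
  std::array<double, 8> cached_fields;  // year, month, day, ...
};

double SlowDateField(const DateLike&, int) { return 0; }  // stand-in runtime call

double GetDateField(const DateLike& date, int field_index, int isolate_stamp) {
  if (field_index < kFirstUncachedField && date.cache_stamp == isolate_stamp) {
    return date.cached_fields[field_index];  // stamp matches: cached value valid
  }
  return SlowDateField(date, field_index);   // mismatch or uncached: recompute
}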
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : argArray
+ // -- rsp[16] : thisArg
+ // -- rsp[24] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into rdi, argArray into rax (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg_array, no_this_arg;
+ StackArgumentsAccessor args(rsp, rax);
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ movp(rbx, rdx);
+ __ movp(rdi, args.GetReceiverOperand());
+ __ testp(rax, rax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ {
+ __ movp(rdx, args.GetArgumentOperand(1));
+ __ cmpp(rax, Immediate(1));
+ __ j(equal, &no_arg_array, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(2));
+ __ bind(&no_arg_array);
+ }
+ __ bind(&no_this_arg);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
+
+ // ----------- S t a t e -------------
+ // -- rax : argArray
+ // -- rdi : receiver
+ // -- rsp[0] : return address
+ // -- rsp[8] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(rdi, &receiver_not_callable, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
+ __ j(zero, &receiver_not_callable, Label::kNear);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(rax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ Label::kNear);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Set(rax, 0);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
// rsp[8] : Argument n
@@ -1012,202 +1234,150 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movp(key, Operand(rbp, indexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movp(receiver, Operand(rbp, argumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ Move(slot, Smi::FromInt(slot_index));
- __ movp(vector, Operand(rbp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ Push(rax);
-
- // Update the index on the stack and in register key.
- __ movp(key, Operand(rbp, indexOffset));
- __ SmiAddConstant(key, key, Smi::FromInt(1));
- __ movp(Operand(rbp, indexOffset), key);
-
- __ bind(&entry);
- __ cmpp(key, Operand(rbp, limitOffset));
- __ j(not_equal, &loop);
-
- // On exit, the pushed arguments count is in rax, untagged
- __ SmiToInteger64(rax, key);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
-
- // Stack at entry:
- // rsp : return address
- // rsp[8] : arguments
- // rsp[16] : receiver ("this")
- // rsp[24] : function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp : Old base pointer
- // rbp[8] : return address
- // rbp[16] : function arguments
- // rbp[24] : receiver
- // rbp[32] : function
- static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ movp(rdi, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdi, FieldOperand(rdi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(rdi);
-
- __ Push(Operand(rbp, kFunctionOffset));
- __ Push(Operand(rbp, kArgumentsOffset));
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : argumentsList
+ // -- rsp[16] : thisArgument
+ // -- rsp[24] : target
+ // -- rsp[32] : receiver
+ // -----------------------------------
- Generate_CheckStackOverflow(masm, kRaxIsSmiTagged);
+ // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label done;
+ StackArgumentsAccessor args(rsp, rax);
+ __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+ __ movp(rdx, rdi);
+ __ movp(rbx, rdi);
+ __ cmpp(rax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ movp(rdi, args.GetArgumentOperand(1)); // target
+ __ j(equal, &done, Label::kNear);
+ __ movp(rdx, args.GetArgumentOperand(2)); // thisArgument
+ __ cmpp(rax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(3)); // argumentsList
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
- // Push current index and limit, and receiver.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(rax); // limit
- __ Push(Immediate(0)); // index
- __ Push(Operand(rbp, kReceiverOffset)); // receiver
+ // ----------- S t a t e -------------
+ // -- rax : argumentsList
+ // -- rdi : target
+ // -- rsp[0] : return address
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
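+
+ // JS-level sketch of the remaining steps (Reflect.apply semantics):
+ //   if (!IsCallable(target)) throw TypeError;           // step 2
+ //   return apply(target, thisArgument, argumentsList);  // step 3a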
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(rdi, &target_not_callable, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
+ __ j(zero, &target_not_callable, Label::kNear);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ movp(rdi, Operand(rbp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Leave internal frame.
+ // 3b. The target is not callable, so throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
}
-// Used by ReflectConstruct
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : new.target (optional)
+ // -- rsp[16] : argumentsList
+ // -- rsp[24] : target
+ // -- rsp[32] : receiver
+ // -----------------------------------
- // Stack at entry:
- // rsp : return address
- // rsp[8] : original constructor (new.target)
- // rsp[16] : arguments
- // rsp[24] : constructor
+ // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // new.target into rdx (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push undefined as
+ // the receiver instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp : Old base pointer
- // rbp[8] : return address
- // rbp[16] : original constructor (new.target)
- // rbp[24] : arguments
- // rbp[32] : constructor
- static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
-
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ movp(rdi, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdi, FieldOperand(rdi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(rdi);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ movp(rax, Operand(rbp, kNewTargetOffset));
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &validate_arguments, Label::kNear);
- __ movp(rax, Operand(rbp, kFunctionOffset));
- __ movp(Operand(rbp, kNewTargetOffset), rax);
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ Push(Operand(rbp, kFunctionOffset));
- __ Push(Operand(rbp, kArgumentsOffset));
- __ Push(Operand(rbp, kNewTargetOffset));
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, kRaxIsSmiTagged);
-
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(rax); // limit
- __ Push(Immediate(0)); // index
- // Push the constructor function as callee.
- __ Push(Operand(rbp, kFunctionOffset));
-
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ movp(rdi, Operand(rbp, kFunctionOffset));
- __ movp(rcx, Operand(rbp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label done;
+ StackArgumentsAccessor args(rsp, rax);
+ __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+ __ movp(rdx, rdi);
+ __ movp(rbx, rdi);
+ __ cmpp(rax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ movp(rdi, args.GetArgumentOperand(1)); // target
+ __ movp(rdx, rdi); // new.target defaults to target
+ __ j(equal, &done, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(2)); // argumentsList
+ __ cmpp(rax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ movp(rdx, args.GetArgumentOperand(3)); // new.target
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
}
- // remove this, target, arguments and newTarget
- __ ret(kStackSize * kPointerSize);
-}
+ // ----------- S t a t e -------------
+ // -- rax : argumentsList
+ // -- rdx : new.target
+ // -- rdi : target
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver (undefined)
+ // -----------------------------------
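+
+ // JS-level sketch of the remaining steps (Reflect.construct semantics;
+ // new.target already defaulted to target in step 1):
+ //   if (!IsConstructor(target)) throw TypeError;          // step 2
+ //   if (!IsConstructor(newTarget)) throw TypeError;       // step 3
+ //   return construct(target, argumentsList, newTarget);   // step 4a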
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(rdi, &target_not_constructor, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &target_not_constructor, Label::kNear);
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &new_target_not_constructor, Label::kNear);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, so throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, so throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdx);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1220,7 +1390,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
Label generic_array_code;
// Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
@@ -1250,7 +1420,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
Label generic_array_code;
// Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
@@ -1273,6 +1443,115 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : constructor function
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
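+
+ // Sketch of the semantics below (Number called as a function, not as a
+ // constructor): Number() returns +0 and Number(value) returns
+ // ToNumber(value).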
+
+ // 1. Load the first argument into rax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ StackArgumentsAccessor args(rsp, rax);
+ __ testp(rax, rax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in rax).
+ __ bind(&no_arguments);
+ __ ret(1 * kPointerSize);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : constructor function
+ // -- rdx : new target
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
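+
+ // Sketch of the semantics below ('new Number(value)'): coerce the first
+ // argument with ToNumber and wrap it in a JSValue. When new.target differs
+ // from the constructor (subclassing, e.g. 'class D extends Number'), defer
+ // to the runtime so the instance gets new.target's initial map.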
+
+ // 1. Make sure we operate in the context of the called function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into rbx and get rid of the rest (including the
+ // receiver).
+ {
+ StackArgumentsAccessor args(rsp, rax);
+ Label no_arguments, done;
+ __ testp(rax, rax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ Move(rbx, Smi::FromInt(0));
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ }
+
+ // 3. Make sure rbx is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(rbx, &done_convert);
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &done_convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdx);
+ __ Push(rdi);
+ __ Move(rax, rbx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(rbx, rax);
+ __ Pop(rdi);
+ __ Pop(rdx);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmpp(rdx, rdi);
+ __ j(not_equal, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx); // the first argument
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(rax, JSValue::kValueOffset));
+ }
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
@@ -1328,7 +1607,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx);
__ Push(rax);
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -1338,12 +1617,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
+ // -- rdx : new target
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- // 1. Load the first argument into rbx and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into rbx and get rid of the rest (including the
// receiver).
{
StackArgumentsAccessor args(rsp, rax);
@@ -1360,59 +1643,47 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
- // 2. Make sure rbx is a string.
+ // 3. Make sure rbx is a string.
{
Label convert, done_convert;
__ JumpIfSmi(rbx, &convert, Label::kNear);
- __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rdx);
+ __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rcx);
__ j(below, &done_convert);
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
+ __ Push(rdx);
__ Push(rdi);
__ Move(rax, rbx);
__ CallStub(&stub);
__ Move(rbx, rax);
__ Pop(rdi);
+ __ Pop(rdx);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- rbx : the first argument
- // -- rdi : constructor function
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmpp(rdx, rdi);
+ __ j(not_equal, &new_object);
- Label allocate, done_allocate;
- __ Allocate(JSValue::kSize, rax, rcx, no_reg, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in rax.
- __ LoadGlobalFunctionInitialMap(rdi, rcx);
- __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
- __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rbx);
- __ Push(rdi);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(rdi);
- __ Pop(rbx);
- }
- __ jmp(&done_allocate);
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx); // the first argument
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(rax, JSValue::kValueOffset));
}
+ __ Ret();
}
@@ -1421,23 +1692,24 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rdi: function (passed through to callee)
+ // -- rdx : new target (passed through to callee)
+ // -- rdi : function (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here, which will cause rcx to become negative.
- __ subp(rcx, rdx);
- // Make rdx the space we need for the array when it is unrolled onto the
+ __ subp(rcx, r8);
+ // Make r8 the space we need for the array when it is unrolled onto the
// stack.
- __ movp(rdx, rbx);
- __ shlp(rdx, Immediate(kPointerSizeLog2));
+ __ movp(r8, rbx);
+ __ shlp(r8, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(rcx, rdx);
+ __ cmpp(rcx, r8);
__ j(less_equal, stack_overflow); // Signed comparison.
}
@@ -1480,18 +1752,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rdi: function (passed through to callee)
+ // -- rdx : new target (passed through to callee)
+ // -- rdi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->arguments_adaptors(), 1);
- Label stack_overflow;
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
Label enough, too_few;
- __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpp(rax, rbx);
__ j(less, &too_few);
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1500,6 +1769,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1549,11 +1819,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1585,8 +1856,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
__ movp(rax, rbx);
// rax : expected number of arguments
- // rdi: function (passed through to callee)
- __ call(rdx);
+ // rdx : new target (passed through to callee)
+ // rdi : function (passed through to callee)
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ call(rcx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1599,88 +1872,232 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(rdx);
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ jmp(rcx);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
}
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argumentsList
+ // -- rdi : target
+ // -- rdx : new.target (checked to be constructor or undefined)
+ // -- rsp[0] : return address.
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
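+
+ // Overview (sketch): unpack argumentsList into a FixedArray of elements
+ // (fast paths for unmodified arguments objects and fast-elements JSArrays,
+ // runtime fallback otherwise), check that they fit on the stack, push them,
+ // and dispatch to Call or Construct depending on new.target.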
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(rax, &create_runtime);
+
+ // Load the map of argumentsList into rcx.
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // Load native context into rbx.
+ __ movp(rbx, NativeContextOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ cmpp(rcx, ContextOperand(rbx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+ __ cmpp(rcx, ContextOperand(rbx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+ __ j(equal, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ Push(rdx);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ SmiToInteger32(rbx, FieldOperand(rax, FixedArray::kLengthOffset));
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ movp(rbx,
+ FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
+ __ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(not_equal, &create_runtime);
+ __ SmiToInteger32(rbx, rbx);
+ __ movp(rax, rcx);
+ __ jmp(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(rcx);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(above, &create_runtime);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+ __ j(equal, &create_runtime);
+ __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here, which will cause rcx to become negative.
+ __ subp(rcx, kScratchRegister);
+ __ sarp(rcx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rbx);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- rdi : target
+ // -- rax : args (a FixedArray built from argumentsList)
+ // -- rbx : len (number of elements to push from args)
+ // -- rdx : new.target (checked to be constructor or undefined)
+ // -- rsp[0] : return address.
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ PopReturnAddressTo(r8);
+ __ Set(rcx, 0);
+ Label done, loop;
+ __ bind(&loop);
+ __ cmpl(rcx, rbx);
+ __ j(equal, &done, Label::kNear);
+ __ Push(
+ FieldOperand(rax, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ incl(rcx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(r8);
+ __ Move(rax, rcx);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
StackArgumentsAccessor args(rsp, rax);
__ AssertFunction(rdi);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kFunctionKindByteOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ j(not_zero, &class_constructor);
+
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the shared function info.
+ // -- rdi : the function to call (checked to be a JSFunction)
+ // -----------------------------------
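+
+ // ConvertReceiverMode sketch: kNullOrUndefined means the receiver is known
+ // to be null or undefined, so it is replaced with the global proxy without
+ // any checks; kNotNullOrUndefined skips the null/undefined tests; kAny
+ // generates the full conversion path.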
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ j(not_zero, &done_convert);
{
- __ movp(rcx, args.GetReceiverOperand());
-
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rcx : the receiver
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
- __ j(above_equal, &done_convert);
- __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(rcx);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ movp(rcx, args.GetReceiverOperand());
+ __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
+ __ j(above_equal, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy, Label::kNear);
+ __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(rcx);
+ }
+ __ jmp(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ movp(rax, rcx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ movp(rcx, rax);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ jmp(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
- __ Push(rdi);
- __ movp(rax, rcx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ movp(rcx, rax);
- __ Pop(rdi);
- __ Pop(rax);
- __ SmiToInteger32(rax, rax);
- }
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ movp(args.GetReceiverOperand(), rcx);
}
__ bind(&done_convert);
@@ -1694,15 +2111,134 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ LoadSharedFunctionInfoSpecialField(
rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount actual(rax);
ParameterCount expected(rbx);
- __ InvokeCode(rdx, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ InvokeFunctionCode(rdi, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : new.target (only in case of [[Construct]])
+ // -- rdi : target (checked to be a JSBoundFunction)
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into rcx and length of that into rbx.
+ Label no_bound_arguments;
+ __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ testl(rbx, rbx);
+ __ j(zero, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : new.target (only in case of [[Construct]])
+ // -- rdi : target (checked to be a JSBoundFunction)
+ // -- rcx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
+ // -----------------------------------
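+
+ // Sketch of the stack transformation below: grow the frame by len slots
+ // (len = number of [[BoundArguments]]), relocate the return address and
+ // the call arguments down by len slots, and copy the bound arguments into
+ // the gap above the receiver, so the target receives (b1..blen, a1..an).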
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ leap(kScratchRegister, Operand(rbx, times_pointer_size, 0));
+ __ subp(rsp, kScratchRegister);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ // Restore the stack pointer.
+ __ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Adjust effective number of arguments to include return address.
+ __ incl(rax);
+
+ // Relocate arguments and return address down the stack.
+ {
+ Label loop;
+ __ Set(rcx, 0);
+ __ leap(rbx, Operand(rsp, rbx, times_pointer_size, 0));
+ __ bind(&loop);
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ movp(Operand(rsp, rcx, times_pointer_size, 0), kScratchRegister);
+ __ incl(rcx);
+ __ cmpl(rcx, rax);
+ __ j(less, &loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ __ decl(rbx);
+ __ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ movp(Operand(rsp, rax, times_pointer_size, 0), kScratchRegister);
+ __ leal(rax, Operand(rax, 1));
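+ // Note: leal (unlike incl) leaves the flags untouched, so the j(greater)
+ // below still tests the flags set by the decl above.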
+ __ j(greater, &loop);
+ }
+
+ // Adjust effective number of arguments (rax contains the number of
+ // arguments from the call plus the return address plus the number of
+ // [[BoundArguments]]), so we need to subtract one for the return address.
+ __ decl(rax);
+ }
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdi : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(rdi);
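+
+ // Sketch of [[Call]] on a bound function (ES6 9.4.1.1): invoke the
+ // [[BoundTargetFunction]] with [[BoundThis]] as receiver and the argument
+ // list ([[BoundArguments]] followed by the call's own arguments), which is
+ // what the three steps below implement.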
+
+ // Patch the receiver to [[BoundThis]].
+ StackArgumentsAccessor args(rsp, rax);
+ __ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+ __ movp(args.GetReceiverOperand(), rbx);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Load(rcx,
+ ExternalReference(Builtins::kCall_ReceiverIsAny, masm->isolate()));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
@@ -1713,16 +2249,24 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(rdi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ movp(rdi, FieldOperand(rdi, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(rdi);
- __ jmp(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(rdi);
+ __ PushReturnAddressFrom(kScratchRegister);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ addp(rax, Immediate(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1734,15 +2278,17 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ movp(args.GetReceiverOperand(), rdi);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rdi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1751,10 +2297,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (checked to be a JSFunction)
+ // -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(rdx);
__ AssertFunction(rdi);
// Calling convention for function specific ConstructStubs require
@@ -1771,17 +2316,53 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (checked to be a constructor)
+ // -- rdi : the constructor to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(rdi);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ cmpp(rdi, rdx);
+ __ j(not_equal, &done, Label::kNear);
+ __ movp(rdx,
+ FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Load(rcx, ExternalReference(Builtins::kConstruct, masm->isolate()));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (either the same as the constructor or
+ // -- rdi : the constructor to call (checked to be a JSProxy)
+ // -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
- // -- rdi : the constructor to call (checked to be a JSFunctionProxy)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ movp(rdi, FieldOperand(rdi, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(rdi);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(kScratchRegister);
+ // Include the pushed new_target, constructor and the receiver.
+ __ addp(rax, Immediate(3));
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1789,25 +2370,34 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (either the same as the constructor or
+ // -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
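+ // Dispatch sketch: JSFunction -> ConstructFunction; otherwise require the
+ // map's IsConstructor bit, then JSBoundFunction -> ConstructBoundFunction
+ // and JSProxy -> ConstructProxy; any other constructor goes through the
+ // call-as-constructor delegate; non-constructors throw.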
StackArgumentsAccessor args(rsp, rax);
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(rdi, &non_constructor, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET);
+
+ // Check if target has a [[Construct]] internal method.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsConstructor));
__ j(zero, &non_constructor, Label::kNear);
- // Dispatch based on instance type.
- __ CmpInstanceType(rcx, JS_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(equal, masm->isolate()->builtins()->ConstructProxy(),
RelocInfo::CODE_TARGET);
@@ -1816,7 +2406,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ movp(args.GetReceiverOperand(), rdi);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1824,46 +2414,121 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Register scratch2,
+ Label* receiver_check_failed) {
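+ // Sketch of the check: with no signature on the FunctionTemplateInfo every
+ // receiver is compatible. Otherwise walk the receiver's hidden-prototype
+ // chain; for each prototype constructed by a JSFunction, follow that
+ // function's chain of parent FunctionTemplateInfos looking for one that
+ // matches the signature. Fail once a prototype is null or not hidden.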
+ Register signature = scratch0;
+ Register map = scratch1;
+ Register constructor = scratch2;
+
+ // If there is no signature, return the holder.
+ __ movp(signature, FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // Walk the prototype chain.
+ __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, kScratchRegister);
+ __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
+ Label next_prototype;
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Get the constructor's signature.
+ Register type = constructor;
+ __ movp(type,
+ FieldOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(type, FieldOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmpp(signature, type);
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype, Label::kNear);
+ __ CmpObjectType(type, FUNCTION_TEMPLATE_INFO_TYPE, kScratchRegister);
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Otherwise load the parent function template and iterate.
+ __ movp(type,
+ FieldOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ jmp(&function_template_loop, Label::kNear);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, receiver_check_failed);
+ __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ testq(FieldOperand(map, Map::kBitField3Offset),
+ Immediate(Map::IsHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+ // Iterate.
+ __ jmp(&prototype_loop_start, Label::kNear);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
- // -- rbx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- rdi : the target to call (can be any Object).
+ // -- rax : number of arguments (not including the receiver)
+ // -- rdi : callee
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[rax * 8] : first argument
+ // -- rsp[(rax + 1) * 8] : receiver
+ // -----------------------------------
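+
+ // Sketch: check that the receiver is compatible with the callee's
+ // FunctionTemplateInfo signature, then jump straight into the template's
+ // fast API call handler; if the check fails, drop all arguments and throw.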
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(rdx);
+ StackArgumentsAccessor args(rsp, rax);
- // Find the address of the last argument.
- __ movp(rcx, rax);
- __ addp(rcx, Immediate(1)); // Add one for receiver.
- __ shlp(rcx, Immediate(kPointerSizeLog2));
- __ negp(rcx);
- __ addp(rcx, rbx);
+ // Load the FunctionTemplateInfo.
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
- // Push the arguments.
- Label loop_header, loop_check;
- __ j(always, &loop_check);
- __ bind(&loop_header);
- __ Push(Operand(rbx, 0));
- __ subp(rbx, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmpp(rbx, rcx);
- __ j(greater, &loop_header, Label::kNear);
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ movp(rcx, args.GetReceiverOperand());
+ CompatibleReceiverCheck(masm, rcx, rbx, rdx, r8, r9, &receiver_check_failed);
- // Call the target.
- __ Push(rdx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ movp(rdx, FieldOperand(rbx, FunctionTemplateInfo::kCallCodeOffset));
+ __ movp(rdx, FieldOperand(rdx, CallHandlerInfo::kFastHandlerOffset));
+ __ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rdx);
+
+ // Compatible receiver check failed: pop the return address, arguments, and
+ // receiver, and throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ __ PopReturnAddressTo(rbx);
+ __ leap(rax, Operand(rax, times_pointer_size, 1 * kPointerSize));
+ __ addp(rsp, rax);
+ __ PushReturnAddressFrom(rbx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+ }
}
@@ -1874,7 +2539,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ Push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
Label skip;
@@ -1910,7 +2575,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/chromium/v8/src/x64/code-stubs-x64.cc b/chromium/v8/src/x64/code-stubs-x64.cc
index 0942b2fb3c1..1e14f83d9b2 100644
--- a/chromium/v8/src/x64/code-stubs-x64.cc
+++ b/chromium/v8/src/x64/code-stubs-x64.cc
@@ -180,7 +180,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
- __ movsd(xmm0, mantissa_operand);
+ __ Movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
if (stash_exponent_copy) __ pushq(rcx);
@@ -200,7 +200,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ jmp(&check_negative);
__ bind(&process_64_bits);
- __ cvttsd2siq(result_reg, xmm0);
+ __ Cvttsd2siq(result_reg, xmm0);
__ jmp(&done, Label::kNear);
// If the double was negative, negate the integer result.
@@ -237,14 +237,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
__ JumpIfSmi(rdx, &load_smi_rdx);
__ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
__ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_rdx);
@@ -288,7 +288,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
- __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ Movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
@@ -304,14 +304,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type() == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiToInteger32(exponent, exponent);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
}
if (exponent_type() != INTEGER) {
@@ -324,7 +324,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&try_arithmetic_simplification);
- __ cvttsd2si(exponent, double_exponent);
+ __ Cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x1));
__ j(overflow, &call_runtime);
@@ -337,9 +337,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Test for 0.5.
// Load double_scratch with 0.5.
__ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
- __ movq(double_scratch, scratch);
+ __ Movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
+ __ Ucomisd(double_scratch, double_exponent);
__ j(not_equal, &not_plus_half, Label::kNear);
// Calculates square root of base. Check for the special case of
@@ -347,31 +347,31 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
+ __ Movq(double_scratch, scratch);
+ __ Ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_sqrt, Label::kNear);
__ j(carry, &continue_sqrt, Label::kNear);
// Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
+ __ Xorpd(double_result, double_result);
+ __ Subsd(double_result, double_scratch);
__ jmp(&done);
__ bind(&continue_sqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to 0.
- __ sqrtsd(double_result, double_scratch);
+ __ Xorpd(double_scratch, double_scratch);
+ __ Addsd(double_scratch, double_base); // Convert -0 to 0.
+ __ Sqrtsd(double_result, double_scratch);
__ jmp(&done);
// Test for -0.5.
__ bind(&not_plus_half);
// Load double_scratch with -0.5 by subtracting 1.
- __ subsd(double_scratch, double_result);
+ __ Subsd(double_scratch, double_result);
// Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
+ __ Ucomisd(double_scratch, double_exponent);
__ j(not_equal, &fast_power, Label::kNear);
// Calculates reciprocal of square root of base. Check for the special
@@ -379,23 +379,23 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
+ __ Movq(double_scratch, scratch);
+ __ Ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_rsqrt, Label::kNear);
__ j(carry, &continue_rsqrt, Label::kNear);
// Set result to 0 in the special case.
- __ xorps(double_result, double_result);
+ __ Xorpd(double_result, double_result);
__ jmp(&done);
__ bind(&continue_rsqrt);
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
+ __ Xorpd(double_exponent, double_exponent);
+ __ Addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ Sqrtsd(double_exponent, double_exponent);
+ __ Divsd(double_result, double_exponent);
__ jmp(&done);
}
@@ -405,9 +405,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ subp(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), double_exponent);
+ __ Movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
- __ movsd(Operand(rsp, 0), double_base);
+ __ Movsd(Operand(rsp, 0), double_base);
__ fld_d(Operand(rsp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -430,7 +430,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
- __ movsd(double_result, Operand(rsp, 0));
+ __ Movsd(double_result, Operand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
@@ -445,8 +445,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const XMMRegister double_scratch2 = double_exponent;
// Back up exponent as we need to check if exponent is negative later.
__ movp(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
+ __ Movsd(double_scratch, double_base); // Back up base.
+ __ Movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
Label no_neg, while_true, while_false;
@@ -460,26 +460,26 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Above condition means CF==0 && ZF==0. This means that the
// bit that has been shifted out is 0 and the result is not 0.
__ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
+ __ Movsd(double_result, double_scratch);
__ j(zero, &while_false, Label::kNear);
__ bind(&while_true);
__ shrl(scratch, Immediate(1));
- __ mulsd(double_scratch, double_scratch);
+ __ Mulsd(double_scratch, double_scratch);
__ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
+ __ Mulsd(double_result, double_scratch);
__ j(not_zero, &while_true);
__ bind(&while_false);
// If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
+ __ Divsd(double_scratch2, double_result);
+ __ Movsd(double_result, double_scratch2);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result);
+ __ Xorpd(double_scratch2, double_scratch2);
+ __ Ucomisd(double_scratch2, double_result);
// double_exponent aliased as double_scratch2 has already been overwritten
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with the exponent value before bailing out.
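The loop above is exponentiation by squaring over the integer exponent: double_scratch carries the running square base^(2^i) and double_result the accumulated product, with the reciprocal taken afterwards for negative exponents (hence the subnormal-underflow bailout, since 1/x^y can round to zero). The same algorithm in plain C++, as a sketch rather than the stub's exact register dance:

    double PowInt(double base, int exponent) {
      unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double square = base;   // role of double_scratch: base^(2^i)
      double result = 1.0;    // role of double_result: accumulated product
      while (e != 0) {
        if (e & 1u) result *= square;  // low bit set: fold in this square
        square *= square;
        e >>= 1u;
      }
      return exponent < 0 ? 1.0 / result : result;
    }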
@@ -491,19 +491,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in rax.
__ bind(&done);
__ AllocateHeapNumber(rax, rcx, &call_runtime);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+ __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
__ bind(&call_runtime);
// Move base to the correct argument register. Exponent is already in xmm1.
- __ movsd(xmm0, double_base);
+ __ Movsd(xmm0, double_base);
DCHECK(double_exponent.is(xmm1));
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -512,7 +512,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
ExternalReference::power_double_double_function(isolate()), 2);
}
// Return value is in xmm0.
- __ movsd(double_result, xmm0);
+ __ Movsd(double_result, xmm0);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
@@ -591,7 +591,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ PopReturnAddressTo(rbx);
__ Push(rdx);
__ PushReturnAddressFrom(rbx);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
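This is the first of many TailCallRuntime call sites in this patch that drop the explicit argument-count and result-size operands. Presumably the one-argument overload now reads both from the runtime function table, roughly like this (a sketch, not this revision's exact code):

    void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
      const Runtime::Function* function = Runtime::FunctionForId(fid);
      DCHECK_EQ(1, function->result_size);
      if (function->nargs >= 0) {
        // Most runtime functions declare a fixed argument count.
        Set(rax, function->nargs);
      }
      JumpToExternalReference(ExternalReference(fid, isolate()));
    }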
@@ -663,8 +663,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r11 = argument count (untagged)
// Get the arguments map from the current native context into r9.
Label has_mapped_parameters, instantiate;
- __ movp(r9, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(r9, FieldOperand(r9, GlobalObject::kNativeContextOffset));
+ __ movp(r9, NativeContextOperand());
__ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -804,7 +803,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Push(rdx); // Push parameters pointer.
__ Push(r11); // Push parameter count.
__ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -838,7 +837,37 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ Push(rdx); // Push parameters pointer.
__ Push(rcx); // Push parameter count.
__ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // rcx : number of parameters (tagged)
+ // rdx : parameters pointer
+ // rbx : rest parameter index (tagged)
+ // rsp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movp(r8, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(r8, StandardFrameConstants::kContextOffset));
+ __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(rcx, Operand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger64(rax, rcx);
+ __ leap(rdx, Operand(r8, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ PopReturnAddressTo(rax);
+ __ Push(rcx); // Push number of parameters.
+ __ Push(rdx); // Push parameters pointer.
+ __ Push(rbx); // Push rest parameter index.
+ __ PushReturnAddressFrom(rax);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -862,7 +891,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ PushReturnAddressFrom(scratch);
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -945,10 +974,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
- __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
- __ movp(rdi, Operand(rdi, offset));
+ __ movp(rdi, NativeContextOperand());
+ __ movp(rdi, ContextOperand(rdi, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
@@ -998,7 +1025,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Push(rdx); // Push parameters pointer.
__ Push(rcx); // Push parameter count.
__ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -1007,7 +1034,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time, or if regexp entry in generated code is turned off by a runtime
// switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1390,11 +1417,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(equal, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1541,7 +1568,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
// Call runtime on identical objects. Otherwise return equal.
- __ cmpb(rcx, Immediate(static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE)));
+ __ cmpb(rcx, Immediate(static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE)));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
@@ -1565,8 +1592,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// greater-equal. Return -1 for them, so the comparison yields
// false for all conditions except not-equal.
__ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Ucomisd(xmm0, xmm0);
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
@@ -1608,9 +1635,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (rax is not zero)
Label return_not_equal;
@@ -1623,7 +1650,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpInstanceType(rcx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1641,7 +1668,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
__ xorl(rax, rax);
__ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
+ __ Ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear);
@@ -1710,9 +1737,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(below, &runtime_call, Label::kNear);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -1738,14 +1765,12 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
if (cc == equal) {
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
__ Push(Smi::FromInt(NegativeComparisonResult(cc)));
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -1753,11 +1778,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// rax : number of arguments to the construct function
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1769,15 +1792,9 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
__ Push(rbx);
- if (is_super) {
- __ Push(rcx);
- }
__ CallStub(stub);
- if (is_super) {
- __ Pop(rcx);
- }
__ Pop(rbx);
__ Pop(rdx);
__ Pop(rdi);
@@ -1786,13 +1803,12 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
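A compilable toy of the three-state transition the comment above describes (real slots store a WeakCell for the function, or an AllocationSite when the target is the Array constructor; this only models the states):

    enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

    SlotState RecordCall(SlotState state, bool same_target_as_recorded) {
      switch (state) {
        case SlotState::kUninitialized:
          return SlotState::kMonomorphic;  // remember the first target
        case SlotState::kMonomorphic:
          return same_target_as_recorded ? SlotState::kMonomorphic
                                         : SlotState::kMegamorphic;
        case SlotState::kMegamorphic:
        default:
          return SlotState::kMegamorphic;  // generic from here on
      }
    }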
@@ -1832,7 +1848,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ j(not_equal, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
__ jmp(&done);
@@ -1855,17 +1871,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &not_array_function);
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done_no_smi_convert);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ jmp(&done_no_smi_convert);
__ bind(&done);
@@ -1875,109 +1891,10 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
}
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rcx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, cont);
-
- // Do not transform the receiver for natives.
- // SharedFunctionInfo is already loaded into rcx.
- __ testb(FieldOperand(rcx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(MacroAssembler* masm, StackArgumentsAccessor* args,
- int argc) {
- __ Set(rax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm,
- StackArgumentsAccessor* args,
- Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Pop(rdi);
- }
- __ movp(args->GetReceiverOperand(), rax);
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // rdi : the function to call
-
- // wrap_and_call can only be true if we are compiling a monomorphic method.
- Label slow, wrap, cont;
- StackArgumentsAccessor args(rsp, argc);
-
- if (needs_checks) {
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Load the receiver from the stack.
- __ movp(rax, args.GetReceiverOperand());
-
- if (needs_checks) {
- __ JumpIfSmi(rax, &wrap);
-
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm, &args, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, &args, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
- // rdx : slot in feedback vector (Smi, for RecordCallTarget)
+ // rdx : slot in feedback vector (Smi)
// rdi : constructor function
Label non_function;
@@ -1987,28 +1904,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
__ j(not_equal, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ SmiToInteger32(rdx, rdx);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into rbx, or undefined.
- __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(equal, &feedback_register_initialized);
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ SmiToInteger32(rdx, rdx);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized, Label::kNear);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(rbx);
- }
+ __ AssertUndefinedOrAllocationSite(rbx);
- // Pass original constructor to construct stub.
- if (IsSuperConstructorCall()) {
- __ movp(rdx, rcx);
- } else {
- __ movp(rdx, rdi);
- }
+ // Pass new target to construct stub.
+ __ movp(rdx, rdi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2028,7 +1939,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// rdx - slot id
// rbx - vector
// rcx - allocation site (loaded from vector[slot]).
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmpp(rdi, r8);
__ j(not_equal, miss);
@@ -2047,17 +1958,13 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
void CallICStub::Generate(MacroAssembler* masm) {
- // rdi - function
- // rdx - slot id
- // rbx - vector
+ // ----------- S t a t e -------------
+ // -- rdi - function
+ // -- rdx - slot id
+ // -- rbx - vector
+ // -----------------------------------
Isolate* isolate = masm->isolate();
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
StackArgumentsAccessor args(rsp, argc);
ParameterCount actual(argc);
@@ -2093,36 +2000,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Smi::FromInt(CallICNexus::kCallCountIncrement));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Load the receiver from the stack.
- __ movp(rax, args.GetReceiverOperand());
-
- __ JumpIfSmi(rax, &wrap);
-
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(masm, &args, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, &args, &cont);
- }
+ __ bind(&call_function);
+ __ Set(rax, argc);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
- __ j(equal, &slow_start);
+ __ j(equal, &call);
// Check if we have an allocation site.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
@@ -2150,10 +2037,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
TypeFeedbackVector::MegamorphicSentinel(isolate));
- // We have to update statistics for runtime profiling.
- __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
- __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
- __ jmp(&slow_start);
+
+ __ bind(&call);
+ __ Set(rax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2166,12 +2054,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rcx);
__ cmpp(rdi, rcx);
__ j(equal, &miss);
- // Update stats.
- __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
+ // Make sure the function belongs to the same native context.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rcx, ContextOperand(rcx, Context::NATIVE_CONTEXT_INDEX));
+ __ cmpp(rcx, NativeContextOperand());
+ __ j(not_equal, &miss);
// Initialize the call counter.
__ Move(FieldOperand(rbx, rdx, times_pointer_size,
@@ -2192,21 +2083,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(rdi);
}
- __ jmp(&have_js_function);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &slow);
- // Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
// Unreachable
__ int3();
@@ -2223,7 +2107,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rdx);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ movp(rdi, rax);
@@ -2268,6 +2152,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// rbp: frame pointer of calling JS frame (restored after C call)
// rsp: stack pointer (restored after C call)
// rsi: current context (restored)
+ //
+ // If argv_in_register():
+ // r15: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -2277,7 +2164,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
#else // _WIN64
int arg_stack_space = 0;
#endif // _WIN64
- __ EnterExitFrame(arg_stack_space, save_doubles());
+ if (argv_in_register()) {
+ DCHECK(!save_doubles());
+ __ EnterApiExitFrame(arg_stack_space);
+ // Move argc into r14 (argv is already in r15).
+ __ movp(r14, rax);
+ } else {
+ __ EnterExitFrame(arg_stack_space, save_doubles());
+ }
// rbx: pointer to builtin function (C callee-saved).
// rbp: frame pointer of exit frame (restored after C call).
@@ -2357,7 +2251,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles());
+ __ LeaveExitFrame(save_doubles(), !argv_in_register());
__ ret(0);
// Handling of exception.
@@ -2617,15 +2511,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
Immediate(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = kScratchRegister;
- __ movp(shared_info,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ TestBitSharedFunctionInfoSpecialField(
- shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- __ j(not_zero, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ movp(function_prototype,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2651,28 +2536,45 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
- Label done, loop;
+ Label done, loop, fast_runtime_fallback;
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
__ bind(&loop);
- __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmpp(object_prototype, function_prototype);
+
+ __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ __ j(equal, &fast_runtime_fallback, Label::kNear);
+
+ __ movp(object, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmpp(object, function_prototype);
__ j(equal, &done, Label::kNear);
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ CompareRoot(object, Heap::kNullValueRootIndex);
+ __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
__ j(not_equal, &loop);
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
__ bind(&done);
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(0);
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(object);
+ __ Push(function_prototype);
+ __ PushReturnAddressFrom(kScratchRegister);
+ // Invalidate the instanceof cache.
+ __ Move(rax, Smi::FromInt(0));
+ __ StoreRoot(rax, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ PopReturnAddressTo(kScratchRegister);
__ Push(object);
__ Push(function);
__ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
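The rewritten loop still walks the prototype chain inline, but now defers to the runtime as soon as it meets a proxy or an access-checked object instead of assuming a plain pointer walk suffices. As a toy model (illustrative types, not V8's):

    struct Obj {
      Obj* prototype;           // null at the end of the chain
      bool needs_access_check;
      bool is_proxy;
    };

    // 1 = found, 0 = not found, -1 = let the runtime decide.
    int HasInPrototypeChain(Obj* object, Obj* function_prototype) {
      for (Obj* o = object;;) {
        if (o->needs_access_check || o->is_proxy) return -1;
        o = o->prototype;
        if (o == function_prototype) return 1;
        if (o == nullptr) return 0;
      }
    }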
@@ -2731,11 +2633,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_);
__ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
@@ -2764,7 +2666,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_);
__ Integer32ToSmi(index_, index_);
__ Push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -2802,7 +2704,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -3049,7 +2951,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// rax: string
@@ -3095,7 +2997,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3108,7 +3010,26 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+ // The ToLength stub takes one argument in rax.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ testp(rax, rax);
+ __ j(greater_equal, &positive_smi, Label::kNear);
+ __ xorl(rax, rax);
+ __ bind(&positive_smi);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ PopReturnAddressTo(rcx); // Pop return address.
+ __ Push(rax); // Push argument.
+ __ PushReturnAddressFrom(rcx); // Push return address.
+ __ TailCallRuntime(Runtime::kToLength);
}
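The new stub handles only the Smi fast path inline: a Smi payload is at most 2^31 - 1, comfortably below ToLength's 2^53 - 1 cap, so clamping negatives to zero is all that is needed before Runtime::kToLength takes over. The fast path in C++ terms:

    int32_t ToLengthSmiFastPath(int32_t smi_value) {
      return smi_value < 0 ? 0 : smi_value;  // negative lengths clamp to +0
    }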
@@ -3143,7 +3064,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3318,7 +3239,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Push(rdx);
__ Push(rax);
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3363,14 +3284,16 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
__ AssertSmi(rax);
__ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
__ AssertSmi(rdx);
- __ xchgp(rax, rdx);
+ __ pushq(rax);
+ __ movq(rax, rdx);
+ __ popq(rdx);
}
__ subp(rax, rdx);
__ Ret();
@@ -3424,7 +3347,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfSmi(rax, &right_smi, Label::kNear);
__ CompareMap(rax, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
@@ -3434,7 +3357,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
__ CompareMap(rdx, isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
@@ -3442,7 +3365,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&done);
// Compare operands
- __ ucomisd(xmm0, xmm1);
+ __ Ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear);
@@ -3645,9 +3568,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3655,18 +3578,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(below, &miss, Label::kNear);
+ __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(below, &miss, Label::kNear);
- DCHECK(GetCondition() == equal);
+ DCHECK_EQ(equal, GetCondition());
__ subp(rax, rdx);
__ ret(0);
@@ -3675,7 +3599,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
@@ -3691,14 +3615,14 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ subp(rax, rdx);
__ ret(0);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(rax);
__ Push(Smi::FromInt(NegativeComparisonResult(GetCondition())));
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3715,7 +3639,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rdx);
__ Push(rax);
__ Push(Smi::FromInt(op()));
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -4100,11 +4024,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
__ Push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object, Label::kNear);
__ Pop(regs_.object());
regs_.Restore(masm);
@@ -4124,85 +4047,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : element value to store
- // -- rcx : element index as smi
- // -- rsp[0] : return address
- // -- rsp[8] : array literal index in function
- // -- rsp[16] : array literal
- // clobbers rbx, rdx, rdi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rdx, args.GetArgumentOperand(1));
- __ movp(rbx, args.GetArgumentOperand(0));
- __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
-
- __ CheckFastElements(rdi, &double_elements);
-
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiElements(rdi, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ PopReturnAddressTo(rdi);
- __ Push(rbx);
- __ Push(rcx);
- __ Push(rax);
- __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(rdx);
- __ PushReturnAddressFrom(rdi);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ movp(Operand(rcx, 0), rax);
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, rcx, rax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
- // FAST_*_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize), rax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
- __ SmiToInteger32(r11, rcx);
- __ StoreNumberToDoubleElements(rax,
- r9,
- r11,
- xmm0,
- &slow_elements);
- __ ret(0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4860,7 +4704,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- rax : argc
// -- rbx : AllocationSite or undefined
// -- rdi : constructor
- // -- rdx : original constructor
+ // -- rdx : new target
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -4881,6 +4725,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(rbx);
}
+ // Enter the context of the Array function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
Label subclassing;
__ cmpp(rdi, rdx);
__ j(not_equal, &subclassing);
@@ -4903,28 +4750,32 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing
__ bind(&subclassing);
- __ Pop(rcx); // return address.
- __ Push(rdi);
- __ Push(rdx);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
- case MORE_THAN_ONE:
- __ addp(rax, Immediate(2));
+ case MORE_THAN_ONE: {
+ StackArgumentsAccessor args(rsp, rax);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ addp(rax, Immediate(3));
break;
- case NONE:
- __ movp(rax, Immediate(2));
+ }
+ case NONE: {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ Set(rax, 3);
break;
- case ONE:
- __ movp(rax, Immediate(3));
+ }
+ case ONE: {
+ StackArgumentsAccessor args(rsp, 1);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ Set(rax, 4);
break;
+ }
}
-
- __ Push(rcx);
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()),
- 1);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ Push(rbx);
+ __ PushReturnAddressFrom(rcx);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5043,7 +4894,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(kScratchRegister);
__ Push(slot_reg);
__ Push(kScratchRegister);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5166,8 +5017,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(kScratchRegister);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5307,7 +5157,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -5341,7 +5191,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/chromium/v8/src/x64/code-stubs-x64.h b/chromium/v8/src/x64/code-stubs-x64.h
index 1344400d48e..d4f8b29dbc0 100644
--- a/chromium/v8/src/x64/code-stubs-x64.h
+++ b/chromium/v8/src/x64/code-stubs-x64.h
@@ -294,13 +294,15 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(rcx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ Register candidate = Register::from_code(i);
+ if (candidate.IsAllocatable()) {
+ if (candidate.is(rcx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
}
UNREACHABLE();
return no_reg;
@@ -360,6 +362,7 @@ class RecordWriteStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/chromium/v8/src/x64/codegen-x64.cc b/chromium/v8/src/x64/codegen-x64.cc
index 4f08c7e7a64..81c1a69aa8e 100644
--- a/chromium/v8/src/x64/codegen-x64.cc
+++ b/chromium/v8/src/x64/codegen-x64.cc
@@ -32,15 +32,15 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
@@ -51,131 +51,42 @@ UnaryMathFunction CreateExpFunction() {
__ popq(rbx);
__ popq(rax);
- __ movsd(xmm0, result);
+ __ Movsd(xmm0, result);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// xmm0: raw double input.
// Move double input into registers.
- __ sqrtsd(xmm0, xmm0);
+ __ Sqrtsd(xmm0, xmm0);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
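Both generators now return nullptr on allocation failure instead of handing back the libm function themselves, so the fallback moves to the caller. A hypothetical call-site pattern (the names and the double(double, Isolate*) signature are assumptions here, not taken from this patch):

    static UnaryMathFunctionWithIsolate fast_sqrt_function = nullptr;

    double FastSqrt(double x, Isolate* isolate) {
      if (fast_sqrt_function == nullptr) {
        fast_sqrt_function = CreateSqrtFunction(isolate);  // may stay nullptr
      }
      return fast_sqrt_function != nullptr ? fast_sqrt_function(x, isolate)
                                           : std::sqrt(x);  // libm fallback
    }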
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-// Define custom fmod implementation.
-ModuloFunction CreateModuloFunction() {
- size_t actual_size;
- byte* buffer = static_cast<byte*>(
- base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Assembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable, buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // Windows 64 ABI passes double arguments in xmm0, xmm1 and
- // returns result in xmm0.
- // Argument backing space is allocated on the stack above
- // the return address.
-
- // Compute x mod y.
- // Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
- __ movsd(Operand(rsp, kRegisterSize), xmm0);
- __ fld_d(Operand(rsp, kRegisterSize * 2));
- __ fld_d(Operand(rsp, kRegisterSize));
-
- // Clear exception flags before operation.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testb(rax, Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
-
- Label valid_result;
- Label return_result;
- // If Invalid Operand or Zero Division exceptions are set,
- // return NaN.
- __ testb(rax, Immediate(5));
- __ j(zero, &valid_result);
- __ fstp(0); // Drop result in st(0).
- int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue);
- __ movq(Operand(rsp, kRegisterSize), rcx);
- __ movsd(xmm0, Operand(rsp, kRegisterSize));
- __ jmp(&return_result);
-
- // If result is valid, return that.
- __ bind(&valid_result);
- __ fstp_d(Operand(rsp, kRegisterSize));
- __ movsd(xmm0, Operand(rsp, kRegisterSize));
-
- // Clean up FPU stack and exceptions and return xmm0
- __ bind(&return_result);
- __ fstp(0); // Unload y.
-
- Label clear_exceptions;
- __ testb(rax, Immediate(0x3f /* Any Exception*/));
- __ j(not_zero, &clear_exceptions);
- __ ret(0);
- __ bind(&clear_exceptions);
- __ fnclex();
- __ ret(0);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- base::OS::ProtectCode(buffer, actual_size);
- // Call the function from C++ through this pointer.
- return FUNCTION_CAST<ModuloFunction>(buffer);
-}
-
-#endif
-
#undef __
// -------------------------------------------------------------------------
@@ -333,8 +244,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
__ Cvtlsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
- xmm0);
+ __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
__ jmp(&entry);
__ bind(&convert_hole);
@@ -604,38 +514,38 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
+ __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
+ __ Xorpd(result, result);
+ __ Ucomisd(double_scratch, input);
__ j(above_equal, &done);
- __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
+ __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
+ __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
__ j(above_equal, &done);
- __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movq(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
+ __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
+ __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
+ __ Mulsd(double_scratch, input);
+ __ Addsd(double_scratch, result);
+ __ Movq(temp2, double_scratch);
+ __ Subsd(double_scratch, result);
+ __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
__ leaq(temp1, Operand(temp2, 0x1ff800));
__ andq(temp2, Immediate(0x7ff));
__ shrq(temp1, Immediate(11));
- __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
+ __ Mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shlq(temp1, Immediate(52));
__ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ subsd(double_scratch, input);
- __ movsd(input, double_scratch);
- __ subsd(result, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ movq(input, temp1);
- __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
- __ subsd(result, double_scratch);
- __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
- __ mulsd(result, input);
+ __ Subsd(double_scratch, input);
+ __ Movsd(input, double_scratch);
+ __ Subsd(result, double_scratch);
+ __ Mulsd(input, double_scratch);
+ __ Mulsd(result, input);
+ __ Movq(input, temp1);
+ __ Mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
+ __ Subsd(result, double_scratch);
+ __ Addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
+ __ Mulsd(result, input);
__ bind(&done);
}
@@ -643,12 +553,14 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ CodePatcher patcher(isolate, young_sequence_.start(),
+ young_sequence_.length());
patcher.masm()->pushq(rbp);
patcher.masm()->movp(rbp, rsp);
patcher.masm()->Push(rsi);
@@ -695,7 +607,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length);
+ CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start());
patcher.masm()->Nop(
kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
diff --git a/chromium/v8/src/x64/codegen-x64.h b/chromium/v8/src/x64/codegen-x64.h
index 728d04048e8..1403781c67b 100644
--- a/chromium/v8/src/x64/codegen-x64.h
+++ b/chromium/v8/src/x64/codegen-x64.h
@@ -5,7 +5,7 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -108,6 +108,7 @@ class StackArgumentsAccessor BASE_EMBEDDED {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/chromium/v8/src/x64/deoptimizer-x64.cc b/chromium/v8/src/x64/deoptimizer-x64.cc
index 72c92f0a39b..c2fd970c67f 100644
--- a/chromium/v8/src/x64/deoptimizer-x64.cc
+++ b/chromium/v8/src/x64/deoptimizer-x64.cc
@@ -7,6 +7,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -40,14 +41,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->int3();
}
}
@@ -73,7 +75,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address call_address = instruction_start + deopt_data->Pc(i)->value();
// There is room enough to write a long call instruction because we pad
// LLazyBailout instructions with nops if necessary.
- CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
+ CodePatcher patcher(isolate, call_address, Assembler::kCallSequenceLength);
patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
Assembler::RelocInfoNone());
DCHECK(prev_call_address == NULL ||
@@ -95,7 +97,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -117,7 +119,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -138,14 +140,16 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::NumAllocatableRegisters();
+ const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Movsd(Operand(rsp, offset), xmm_reg);
}
// We push all registers onto the stack, even though we do not need
@@ -210,7 +214,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ popq(Operand(rbx, dst_offset));
}
@@ -274,10 +278,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ XMMRegister xmm_reg = XMMRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Movsd(xmm_reg, Operand(rbx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
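
Note on the deoptimizer hunks above: the loops switch from allocation-index iteration to walking the allocatable set of a RegisterConfiguration and keying each stack slot by register *code* (offset = code * kDoubleSize), so unallocatable codes simply leave gaps. A minimal standalone sketch of that indexing scheme; the struct and the register set below are illustrative stand-ins modeled on the diff, not V8's real tables:

#include <cstdio>

constexpr int kDoubleSize = 8;

// Hypothetical stand-in for v8's RegisterConfiguration (illustration only).
struct RegisterConfiguration {
  int num_allocatable_double_registers;
  const int* allocatable_double_codes;
  int GetAllocatableDoubleCode(int i) const {
    return allocatable_double_codes[i];
  }
};

int main() {
  // Suppose xmm7 is reserved: the allocatable set is sparse in register codes.
  static const int codes[] = {0, 1, 2, 3, 4, 5, 6, 8};
  RegisterConfiguration config{8, codes};
  for (int i = 0; i < config.num_allocatable_double_registers; ++i) {
    int code = config.GetAllocatableDoubleCode(i);
    // Slot keyed by code (not by i): the frame layout stays stable even if
    // the allocatable list changes, at the cost of a few unused slots.
    std::printf("xmm%d -> [rsp+%d]\n", code, code * kDoubleSize);
  }
  return 0;
}
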
diff --git a/chromium/v8/src/x64/disasm-x64.cc b/chromium/v8/src/x64/disasm-x64.cc
index 5534887f5ac..05b199d558d 100644
--- a/chromium/v8/src/x64/disasm-x64.cc
+++ b/chromium/v8/src/x64/disasm-x64.cc
@@ -351,6 +351,11 @@ class DisassemblerX64 {
bool rex_w() { return (rex_ & 0x08) != 0; }
+ bool vex_w() {
+ DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+ return vex_byte0_ == VEX3_PREFIX ? (vex_byte2_ & 0x80) != 0 : false;
+ }
+
bool vex_128() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
@@ -947,10 +952,48 @@ int DisassemblerX64::AVXInstruction(byte* data) {
default:
UnimplementedInstruction();
}
+ } else if (vex_66() && vex_0f3a()) {
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x0b:
+ AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
+ default:
+ UnimplementedInstruction();
+ }
} else if (vex_f3() && vex_0f()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x10:
+ AppendToBuffer("vmovss %s,", NameOfXMMRegister(regop));
+ if (mod == 3) {
+ AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ }
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x11:
+ AppendToBuffer("vmovss ");
+ current += PrintRightXMMOperand(current);
+ if (mod == 3) {
+ AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ }
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0x2a:
+ AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ break;
+ case 0x2c:
+ AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -961,6 +1004,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5a:
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5c:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -988,6 +1036,41 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x10:
+ AppendToBuffer("vmovsd %s,", NameOfXMMRegister(regop));
+ if (mod == 3) {
+ AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ }
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x11:
+ AppendToBuffer("vmovsd ");
+ current += PrintRightXMMOperand(current);
+ if (mod == 3) {
+ AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ }
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
+ case 0x2a:
+ AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ current += PrintRightOperand(current);
+ break;
+ case 0x2c:
+ AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x2d:
+ AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x51:
+ AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -998,6 +1081,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5a:
+ AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5c:
AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1133,6 +1221,15 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x29:
+ AppendToBuffer("vmovaps ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x2e:
AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1154,20 +1251,59 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x28:
+ AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x29:
+ AppendToBuffer("vmovapd ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x2e:
AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x50:
+ AppendToBuffer("vmovmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x54:
AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x56:
+ AppendToBuffer("vorpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x57:
AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x6e:
+ AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
+ NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ break;
+ case 0x73:
+ AppendToBuffer("%s %s,", regop == 6 ? "vpsllq" : "vpsrlq",
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%u", *current++);
+ break;
+ case 0x76:
+ AppendToBuffer("vpcmpeqd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x7e:
+ AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
default:
UnimplementedInstruction();
}
@@ -1385,7 +1521,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// roundsd xmm, xmm/m64, imm8
AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", (*current) & 3);
+ AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &rm, &regop);
@@ -1726,7 +1862,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
- } else if (opcode == 0xBD) {
+ } else if (opcode == 0xB8 || opcode == 0xBC || opcode == 0xBD) {
+ // POPCNT, CTZ, CLZ.
AppendToBuffer("%s%c ", mnemonic, operand_size_code());
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
@@ -1780,6 +1917,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBC:
+ return "bsf";
case 0xBD:
return "bsr";
case 0xBE:
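
A note on the vex_w() helper added in this file: the W bit exists only in the 3-byte VEX prefix (0xC4), as bit 7 of the prefix's third byte; the 2-byte form (0xC5) implies W = 0. The disassembler uses it to pick 64-bit mnemonics (vcvtqsi2sd) over 32-bit ones (vcvtlsi2sd). A self-contained sketch of that extraction, my own model rather than the V8 decoder:

#include <cassert>
#include <cstdint>

constexpr uint8_t VEX3_PREFIX = 0xC4;
constexpr uint8_t VEX2_PREFIX = 0xC5;

bool vex_w(uint8_t byte0, uint8_t byte2) {
  assert(byte0 == VEX3_PREFIX || byte0 == VEX2_PREFIX);
  // Only the 3-byte prefix carries W, in bit 7 of its third byte.
  return byte0 == VEX3_PREFIX ? (byte2 & 0x80) != 0 : false;
}

int main() {
  assert(vex_w(VEX3_PREFIX, 0x80));   // W=1: 64-bit operand forms (vcvtqsi2sd)
  assert(!vex_w(VEX3_PREFIX, 0x00));  // W=0: 32-bit forms (vcvtlsi2sd)
  assert(!vex_w(VEX2_PREFIX, 0xFF));  // 2-byte VEX never encodes W
  return 0;
}
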
diff --git a/chromium/v8/src/x64/frames-x64.h b/chromium/v8/src/x64/frames-x64.h
index 1d9cf1ec13a..d213ecb7dcb 100644
--- a/chromium/v8/src/x64/frames-x64.h
+++ b/chromium/v8/src/x64/frames-x64.h
@@ -71,6 +71,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_FRAMES_X64_H_
diff --git a/chromium/v8/src/x64/interface-descriptors-x64.cc b/chromium/v8/src/x64/interface-descriptors-x64.cc
index a062df590fd..79315c70a0a 100644
--- a/chromium/v8/src/x64/interface-descriptors-x64.cc
+++ b/chromium/v8/src/x64/interface-descriptors-x64.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return rcx; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return rdx; }
+const Register RestParamAccessDescriptor::parameter_count() { return rcx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return rdx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return rbx; }
+
+
const Register ApiGetterDescriptor::function_address() { return r8; }
@@ -78,14 +83,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister(), MapRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
@@ -116,6 +113,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return rax; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return rax; }
@@ -130,6 +131,13 @@ void NumberToStringDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi, rax, rcx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rax, rbx, rcx};
@@ -190,12 +198,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {rax, rdi, rcx, rbx};
+ Register registers[] = {rax, rdi, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -209,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rdx : the new target
+ // rdi : the target to call
+ // rbx : allocation site or undefined
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rdx : the new target
+ // rdi : the target to call
+ Register registers[] = {rdi, rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rcx, rbx, rax};
@@ -229,6 +257,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -339,6 +374,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rdi, // JSFunction
+ rdx, // the new target
rax, // actual number of arguments
rbx, // expected number of arguments
};
@@ -371,33 +407,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rdi, // math rounding function
- rdx, // vector slot id
+ rax, // argument count (not including receiver)
+ rbx, // address of first argument
+ rdi // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- rdi, // math rounding function
- rdx, // vector slot id
- rbx // type vector
+ rax, // argument count (not including receiver)
+ rdx, // new target
+ rdi, // constructor
+ rbx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- rax, // argument count (including receiver)
- rbx, // address of first argument
- rdi // the target callable to be call
+ rax, // argument count (argc)
+ r15, // address of first argument (argv)
+ rbx // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
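
Each InitializePlatformSpecific body in this file is a fixed register-assignment table: it pins which physical register carries each logical parameter of one stub's calling convention. A rough standalone model (my types, not V8's API), using the InterpreterPushArgsAndCall assignment from the hunk above:

#include <cstddef>

enum Register { rax, rbx, rcx, rdx, rdi, r15 };

struct CallInterfaceDescriptorData {
  const Register* registers = nullptr;
  std::size_t count = 0;
  void InitializePlatformSpecific(std::size_t n, const Register* regs) {
    registers = regs;
    count = n;
  }
};

// The InterpreterPushArgsAndCall assignment, per the hunk above:
static const Register kPushArgsAndCall[] = {
    rax,  // argument count (not including receiver)
    rbx,  // address of first argument
    rdi,  // the target callable to be called
};

int main() {
  CallInterfaceDescriptorData data;
  data.InitializePlatformSpecific(sizeof(kPushArgsAndCall) / sizeof(Register),
                                  kPushArgsAndCall);
  return data.count == 3 ? 0 : 1;
}
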
diff --git a/chromium/v8/src/x64/macro-assembler-x64.cc b/chromium/v8/src/x64/macro-assembler-x64.cc
index ea837dca4b0..9952eb3b659 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/x64/macro-assembler-x64.cc
@@ -10,20 +10,22 @@
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/heap/heap.h"
+#include "src/register-configuration.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
root_array_available_(true) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -660,39 +662,30 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : argument num_arguments - 1
// ...
// -- rsp[8 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- rax : number of arguments
// -----------------------------------
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- JumpToExternalReference(ext, result_size);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ Set(rax, function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- int result_size) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(isolate(), result_size);
+ CEntryStub ces(isolate(), 1);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -702,34 +695,15 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinEntry(rdx, native_context_index);
- InvokeCode(rdx, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
- movp(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(rdi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, native_context_index);
- movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ LoadNativeContextSlot(native_context_index, rdi);
+ InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
}
-#define REG(Name) { kRegister_ ## Name ## _Code }
+#define REG(Name) \
+ { Register::kCode_##Name }
static const Register saved_regs[] = {
REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
@@ -759,7 +733,7 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
+ Movsd(Operand(rsp, i * kDoubleSize), reg);
}
}
}
@@ -772,7 +746,7 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
+ Movsd(reg, Operand(rsp, i * kDoubleSize));
}
addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
@@ -785,15 +759,222 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
}
+void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtss2sd(dst, src, src);
+ } else {
+ cvtss2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtss2sd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtss2sd(dst, dst, src);
+ } else {
+ cvtss2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtsd2ss(dst, src, src);
+ } else {
+ cvtsd2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtsd2ss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtsd2ss(dst, dst, src);
+ } else {
+ cvtsd2ss(dst, src);
+ }
+}
+
+
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
- xorps(dst, dst);
- cvtlsi2sd(dst, src);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtlsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtlsi2sd(dst, src);
+ }
}
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
- xorps(dst, dst);
- cvtlsi2sd(dst, src);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtlsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtlsi2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, dst);
+ vcvtqsi2ss(dst, dst, src);
+ } else {
+ xorps(dst, dst);
+ cvtqsi2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2ss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, dst);
+ vcvtqsi2ss(dst, dst, src);
+ } else {
+ xorps(dst, dst);
+ cvtqsi2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtqsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtqsi2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorpd(dst, dst, dst);
+ vcvtqsi2sd(dst, dst, src);
+ } else {
+ xorpd(dst, dst);
+ cvtqsi2sd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ testq(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ Cvtqsi2ss(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ movq(tmp, src);
+ shrq(src, Immediate(1));
+ // Recover the least significant bit to avoid rounding errors.
+ andq(tmp, Immediate(1));
+ orq(src, tmp);
+ Cvtqsi2ss(dst, src);
+ addss(dst, dst);
+ bind(&jmp_return);
+}
+
+
+void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ testq(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ Cvtqsi2sd(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ movq(tmp, src);
+ shrq(src, Immediate(1));
+ andq(tmp, Immediate(1));
+ orq(src, tmp);
+ Cvtqsi2sd(dst, src);
+ addsd(dst, dst);
+ bind(&jmp_return);
+}
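
A note on Cvtqui2ss/Cvtqui2sd above: x64 provides only a *signed* 64-bit integer-to-float conversion, so when the top bit is set the code converts (src >> 1) | (src & 1) and doubles the result; OR-ing the shifted-out bit back in keeps round-to-nearest-even unbiased. A standalone check of the same arithmetic, a sketch rather than V8 code:

#include <cassert>
#include <cstdint>

float uint64_to_float(uint64_t x) {
  if ((x >> 63) == 0) {
    // Top bit clear: the value fits the signed range, convert directly.
    return static_cast<float>(static_cast<int64_t>(x));
  }
  // Halve, but keep the low bit so rounding is not biased, then double.
  uint64_t halved = (x >> 1) | (x & 1);
  float f = static_cast<float>(static_cast<int64_t>(halved));
  return f + f;
}

int main() {
  assert(uint64_to_float(0) == 0.0f);
  assert(uint64_to_float(1ull << 63) == 9223372036854775808.0f);   // 2^63
  assert(uint64_to_float(UINT64_MAX) == 18446744073709551616.0f);  // rounds to 2^64
  return 0;
}
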
+
+
+void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvtsd2si(dst, src);
+ } else {
+ cvtsd2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2si(dst, src);
+ } else {
+ cvttsd2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2si(dst, src);
+ } else {
+ cvttsd2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2siq(dst, src);
+ } else {
+ cvttss2siq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2siq(dst, src);
+ } else {
+ cvttss2siq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2siq(dst, src);
+ } else {
+ cvttsd2siq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttsd2siq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttsd2siq(dst, src);
+ } else {
+ cvttsd2siq(dst, src);
+ }
}
@@ -910,6 +1091,7 @@ void MacroAssembler::SafePush(Smi* src) {
Register MacroAssembler::GetSmiConstant(Smi* source) {
+ STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
xorl(kScratchRegister, kScratchRegister);
@@ -921,9 +1103,13 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- // Special-casing 0 here to use xorl seems to make things slower, so we don't
- // do it.
- Move(dst, source, Assembler::RelocInfoNone());
+ STATIC_ASSERT(kSmiTag == 0);
+ int value = source->value();
+ if (value == 0) {
+ xorl(dst, dst);
+ } else {
+ Move(dst, source, Assembler::RelocInfoNone());
+ }
}
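
Aside: with kSmiTag == 0, Smi zero is the all-zero word, so LoadSmiConstant can now use "xorl dst,dst", the canonical x86 zeroing idiom: two bytes, no immediate, and dependency-breaking, versus ten bytes for a movabs of a 64-bit immediate (this reinstates a special case an earlier comment here had judged not worthwhile). Encodings for comparison, from the x86 manual:

  31 db                            xorl ebx,ebx   ; 2 bytes; also clears the upper half of rbx
  48 bb 00 00 00 00 00 00 00 00    movq rbx,0x0   ; 10-byte movabs alternative
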
@@ -2391,15 +2577,15 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
- xorps(dst, dst);
+ Xorpd(dst, dst);
} else {
unsigned pop = base::bits::CountPopulation32(src);
DCHECK_NE(0u, pop);
if (pop == 32) {
- pcmpeqd(dst, dst);
+ Pcmpeqd(dst, dst);
} else {
movl(kScratchRegister, Immediate(src));
- movq(dst, kScratchRegister);
+ Movq(dst, kScratchRegister);
}
}
}
@@ -2407,20 +2593,20 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
if (src == 0) {
- xorps(dst, dst);
+ Xorpd(dst, dst);
} else {
unsigned nlz = base::bits::CountLeadingZeros64(src);
unsigned ntz = base::bits::CountTrailingZeros64(src);
unsigned pop = base::bits::CountPopulation64(src);
DCHECK_NE(0u, pop);
if (pop == 64) {
- pcmpeqd(dst, dst);
+ Pcmpeqd(dst, dst);
} else if (pop + ntz == 64) {
- pcmpeqd(dst, dst);
- psllq(dst, ntz);
+ Pcmpeqd(dst, dst);
+ Psllq(dst, ntz);
} else if (pop + nlz == 64) {
- pcmpeqd(dst, dst);
- psrlq(dst, nlz);
+ Pcmpeqd(dst, dst);
+ Psrlq(dst, nlz);
} else {
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
@@ -2428,13 +2614,235 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
Move(dst, lower);
} else {
movq(kScratchRegister, src);
- movq(dst, kScratchRegister);
+ Movq(dst, kScratchRegister);
}
}
}
}
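
A sketch of the constant-shaping logic in Move(XMMRegister, uint64_t): pcmpeqd sets all bits, and a single shift then carves a contiguous run of ones, so any constant of the form ones-then-zeros (pop + ntz == 64) or zeros-then-ones (pop + nlz == 64) needs no scratch GPR. A standalone model of the selection (not V8 code; <bit> requires C++20):

#include <bit>
#include <cassert>
#include <cstdint>

// Which value each emitted instruction sequence would leave in the register.
uint64_t materialize(uint64_t src) {
  if (src == 0) return 0;  // handled earlier by Xorpd(dst, dst)
  int pop = std::popcount(src);
  int ntz = std::countr_zero(src);
  int nlz = std::countl_zero(src);
  if (pop == 64) return ~0ull;               // pcmpeqd dst,dst
  if (pop + ntz == 64) return ~0ull << ntz;  // pcmpeqd; psllq ntz
  if (pop + nlz == 64) return ~0ull >> nlz;  // pcmpeqd; psrlq nlz
  return src;  // general path: movq through kScratchRegister
}

int main() {
  assert(materialize(0xFFFFFF0000000000ull) == 0xFFFFFF0000000000ull);  // psllq
  assert(materialize(0x00000000FFFFFFFFull) == 0x00000000FFFFFFFFull);  // psrlq
  assert(materialize(0x123456789ABCDEF0ull) == 0x123456789ABCDEF0ull);  // movq
  return 0;
}
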
+void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovaps(dst, src);
+ } else {
+ movaps(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovapd(dst, src);
+ } else {
+ movapd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovsd(dst, dst, src);
+ } else {
+ movsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movsd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovsd(dst, src);
+ } else {
+ movsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movsd(const Operand& dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovsd(dst, src);
+ } else {
+ movsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movss(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovss(dst, dst, src);
+ } else {
+ movss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovss(dst, src);
+ } else {
+ movss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movss(const Operand& dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovss(dst, src);
+ } else {
+ movss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movd(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovd(dst, src);
+ } else {
+ movd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovd(dst, src);
+ } else {
+ movd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movd(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovd(dst, src);
+ } else {
+ movd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movq(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovmskpd(dst, src);
+ } else {
+ movmskpd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundss(dst, dst, src, mode);
+ } else {
+ roundss(dst, src, mode);
+ }
+}
+
+
+void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundsd(dst, dst, src, mode);
+ } else {
+ roundsd(dst, src, mode);
+ }
+}
+
+
+void MacroAssembler::Sqrtsd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsqrtsd(dst, dst, src);
+ } else {
+ sqrtsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Sqrtsd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsqrtsd(dst, dst, src);
+ } else {
+ sqrtsd(dst, src);
+ }
+}
+
+
+void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomiss(src1, src2);
+ } else {
+ ucomiss(src1, src2);
+ }
+}
+
+
+void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomiss(src1, src2);
+ } else {
+ ucomiss(src1, src2);
+ }
+}
+
+
+void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomisd(src1, src2);
+ } else {
+ ucomisd(src1, src2);
+ }
+}
+
+
+void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vucomisd(src1, src2);
+ } else {
+ ucomisd(src1, src2);
+ }
+}
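
The pattern behind this whole block of capitalized wrappers (Movsd, Ucomisd, and the rest): the AVX-versus-SSE choice is made once, at code-generation time, and once AVX code is being emitted even plain moves use VEX encodings, since mixing legacy-SSE and VEX instructions can incur transition penalties on some Intel microarchitectures. A rough standalone sketch of the dispatch shape; the names here are illustrative, not V8's:

#include <cstdio>

// Stand-in for CpuFeatures::IsSupported(AVX); assume a real CPUID probe.
bool cpu_supports_avx() { return false; }

void EmitMovsd(const char* dst, const char* src) {
  if (cpu_supports_avx()) {
    std::printf("vmovsd %s,%s\n", dst, src);  // VEX encoding: no SSE/AVX
                                              // transition penalty
  } else {
    std::printf("movsd %s,%s\n", dst, src);   // legacy SSE encoding
  }
}

int main() {
  EmitMovsd("xmm0", "[rsp+0x8]");
  return 0;
}
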
+
+
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
@@ -2743,7 +3151,7 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
if (imm8 == 0) {
- movd(dst, src);
+ Movd(dst, src);
return;
}
DCHECK_EQ(1, imm8);
@@ -2763,14 +3171,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
pinsrd(dst, src, imm8);
return;
}
- movd(xmm0, src);
+ Movd(xmm0, src);
if (imm8 == 1) {
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
- psrlq(dst, 32);
- punpckldq(xmm0, dst);
- movaps(dst, xmm0);
+ Movss(dst, xmm0);
}
}
@@ -2782,14 +3188,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
pinsrd(dst, src, imm8);
return;
}
- movd(xmm0, src);
+ Movd(xmm0, src);
if (imm8 == 1) {
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
- psrlq(dst, 32);
- punpckldq(xmm0, dst);
- movaps(dst, xmm0);
+ Movss(dst, xmm0);
}
}
@@ -2824,6 +3228,134 @@ void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
}
+void MacroAssembler::Lzcntq(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(LZCNT)) {
+ CpuFeatureScope scope(this, LZCNT);
+ lzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsrq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 127); // 127^63 == 64
+ bind(&not_zero_src);
+ xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
+}
+
+
+void MacroAssembler::Lzcntq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(LZCNT)) {
+ CpuFeatureScope scope(this, LZCNT);
+ lzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsrq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 127); // 127^63 == 64
+ bind(&not_zero_src);
+ xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
+}
+
+
+void MacroAssembler::Tzcntq(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
+ Set(dst, 64);
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Tzcntq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntq(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfq(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
+ Set(dst, 64);
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Tzcntl(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntl(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 32); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Tzcntl(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(BMI1)) {
+ CpuFeatureScope scope(this, BMI1);
+ tzcntl(dst, src);
+ return;
+ }
+ Label not_zero_src;
+ bsfl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 32); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
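
A check of the BSR/BSF fallbacks above: for nonzero input, bsr yields the MSB index i and lzcnt == 63 - i, which the code computes as i ^ 63 (valid because i is in [0,63]); the zero case pre-loads 127, since 127 ^ 63 == 64, lzcnt's defined result. tzcnt similarly patches the bsf-undefined zero case to the operand width. A standalone model, mine rather than V8 code (<bit> is C++20):

#include <bit>
#include <cassert>
#include <cstdint>

int emulated_lzcntq(uint64_t src) {
  int dst;
  if (src == 0) {
    dst = 127;  // Set(dst, 127): bsr leaves dst undefined for zero input
  } else {
    dst = 63 - std::countl_zero(src);  // bsr result: index of highest set bit
  }
  return dst ^ 63;  // xorl(dst, Immediate(63)): 63 ^ i == 63 - i for i in [0,63]
}

int emulated_tzcntq(uint64_t src) {
  return (src == 0) ? 64 : std::countr_zero(src);  // bsf index; 64 patched in for 0
}

int main() {
  assert(emulated_lzcntq(0) == 64 && emulated_tzcntq(0) == 64);
  assert(emulated_lzcntq(1) == 63 && emulated_tzcntq(1) == 0);
  assert(emulated_lzcntq(1ull << 63) == 0 && emulated_tzcntq(1ull << 63) == 63);
  return 0;
}
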
+
+
+void MacroAssembler::Popcntl(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntl(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void MacroAssembler::Popcntl(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntl(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void MacroAssembler::Popcntq(Register dst, Register src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntq(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+void MacroAssembler::Popcntq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(POPCNT)) {
+ CpuFeatureScope scope(this, POPCNT);
+ popcntq(dst, src);
+ return;
+ }
+ UNREACHABLE();
+}
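
Unlike the Lzcnt/Tzcnt wrappers, the Popcnt wrappers provide no software fallback: reaching them without the feature is a codegen bug, so call sites must gate on the CPU feature first. A sketch of that call-site discipline under stated assumptions (the builtin is a GCC/Clang intrinsic used here only for illustration):

#include <cstdint>
#include <cstdlib>

// Stand-in for CpuFeatures::IsSupported(POPCNT).
bool cpu_supports_popcnt() { return true; }

int popcnt_or_die(uint64_t x) {
  if (!cpu_supports_popcnt()) std::abort();  // mirrors the wrapper's UNREACHABLE()
  return __builtin_popcountll(x);            // illustrative lowering for the demo
}

int main() { return popcnt_or_die(0xFFull) == 8 ? 0 : 1; }
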
+
+
void MacroAssembler::Pushad() {
Push(rax);
Push(rcx);
@@ -3043,7 +3575,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiToInteger32(kScratchRegister, maybe_number);
Cvtlsi2sd(xmm_scratch, kScratchRegister);
bind(&done);
- movsd(FieldOperand(elements, index, times_8,
+ Movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
}
@@ -3082,8 +3614,8 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
Register result_reg) {
Label done;
Label conv_failure;
- xorps(temp_xmm_reg, temp_xmm_reg);
- cvtsd2si(result_reg, input_reg);
+ Xorpd(temp_xmm_reg, temp_xmm_reg);
+ Cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
cmpl(result_reg, Immediate(1));
@@ -3095,7 +3627,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
jmp(&done, Label::kNear);
bind(&conv_failure);
Set(result_reg, 0);
- ucomisd(input_reg, temp_xmm_reg);
+ Ucomisd(input_reg, temp_xmm_reg);
j(below, &done, Label::kNear);
Set(result_reg, 255);
bind(&done);
@@ -3108,7 +3640,7 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
cmpq(src, Immediate(0xffffffff));
Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
}
- cvtqsi2sd(dst, src);
+ Cvtqsi2sd(dst, src);
}
@@ -3123,15 +3655,15 @@ void MacroAssembler::SlowTruncateToI(Register result_reg,
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Register input_reg) {
Label done;
- movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- cvttsd2siq(result_reg, xmm0);
+ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ Cvttsd2siq(result_reg, xmm0);
cmpq(result_reg, Immediate(1));
j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
subp(rsp, Immediate(kDoubleSize));
- movsd(MemOperand(rsp, 0), xmm0);
+ Movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
addp(rsp, Immediate(kDoubleSize));
} else {
@@ -3147,12 +3679,12 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
- cvttsd2siq(result_reg, input_reg);
+ Cvttsd2siq(result_reg, input_reg);
cmpq(result_reg, Immediate(1));
j(no_overflow, &done, Label::kNear);
subp(rsp, Immediate(kDoubleSize));
- movsd(MemOperand(rsp, 0), input_reg);
+ Movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
addp(rsp, Immediate(kDoubleSize));
@@ -3167,9 +3699,9 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
MinusZeroMode minus_zero_mode,
Label* lost_precision, Label* is_nan,
Label* minus_zero, Label::Distance dst) {
- cvttsd2si(result_reg, input_reg);
+ Cvttsd2si(result_reg, input_reg);
Cvtlsi2sd(xmm0, result_reg);
- ucomisd(xmm0, input_reg);
+ Ucomisd(xmm0, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst); // NaN.
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
@@ -3178,7 +3710,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
// only have to test if we got -0 as an input.
testl(result_reg, result_reg);
j(not_zero, &done, Label::kNear);
- movmskpd(result_reg, input_reg);
+ Movmskpd(result_reg, input_reg);
// Bit 0 contains the sign of the double in input_reg.
// If input was positive, we are ok and return 0, otherwise
// jump to minus_zero.
@@ -3321,6 +3853,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -3451,26 +3995,75 @@ void MacroAssembler::DebugBreak() {
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadSharedFunctionInfoSpecialField(
+ rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
+
+ ParameterCount expected(rbx);
+ InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ Move(rdi, function);
+ InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ DCHECK(function.is(rdi));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(rdi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected,
actual,
- Handle<Code>::null(),
- code,
&done,
&definitely_mismatches,
flag,
Label::kNear,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -3484,59 +4077,8 @@ void MacroAssembler::InvokeCode(Register code,
}
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(function.is(rdi));
- movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- LoadSharedFunctionInfoSpecialField(rbx, rdx,
- SharedFunctionInfo::kFormalParameterCountOffset);
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- DCHECK(function.is(rdi));
- movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- Move(rdi, function);
- InvokeFunction(rdi, expected, actual, flag, call_wrapper);
-}
-
-
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -3586,13 +4128,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movp(rdx, code_register);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
Call(adaptor, RelocInfo::CODE_TARGET);
@@ -3608,6 +4143,49 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
+ cmpb(step_in_enabled_operand, Immediate(0));
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ Integer32ToSmi(expected.reg(), expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ Integer32ToSmi(actual.reg(), actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiToInteger64(actual.reg(), actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiToInteger64(expected.reg(), expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
void MacroAssembler::StubPrologue() {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
@@ -3710,13 +4288,16 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
- arg_stack_space * kRegisterSize;
+ int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ DoubleRegister reg =
+ DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
+ Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
subp(rsp, Immediate(arg_stack_space * kRegisterSize));
@@ -3753,25 +4334,34 @@ void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Registers:
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ DoubleRegister reg =
+ DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
+ Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
}
- // Get the return address from the stack and restore the frame pointer.
- movp(rcx, Operand(rbp, kFPOnStackSize));
- movp(rbp, Operand(rbp, 0 * kPointerSize));
- // Drop everything up to and including the arguments and the receiver
- // from the caller stack.
- leap(rsp, Operand(r15, 1 * kPointerSize));
+ if (pop_arguments) {
+ // Get the return address from the stack and restore the frame pointer.
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
- PushReturnAddressFrom(rcx);
+ // Drop everything up to and including the arguments and the receiver
+ // from the caller stack.
+ leap(rsp, Operand(r15, 1 * kPointerSize));
+
+ PushReturnAddressFrom(rcx);
+ } else {
+ // Otherwise just leave the exit frame.
+ leave();
+ }
LeaveExitFrameEpilogue(true);
}
@@ -3820,10 +4410,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movp(scratch, FieldOperand(scratch, offset));
- movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -4353,6 +4940,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ movp(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length, source, length and scratch are
@@ -4439,16 +5047,16 @@ void MacroAssembler::CopyBytes(Register destination,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- movp(Operand(start_offset, 0), filler);
- addp(start_offset, Immediate(kPointerSize));
+ movp(Operand(current_address, 0), filler);
+ addp(current_address, Immediate(kPointerSize));
bind(&entry);
- cmpp(start_offset, end_offset);
+ cmpp(current_address, end_address);
j(below, &loop);
}
@@ -4479,36 +5087,24 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- movp(dst, GlobalObjectOperand());
- movp(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- movp(scratch,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- movp(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- int offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmpp(map_in_out, FieldOperand(scratch, offset));
+ movp(scratch, NativeContextOperand());
+ cmpp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- movp(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
@@ -4518,14 +5114,10 @@ static const int kRegisterPassedArguments = 4;
static const int kRegisterPassedArguments = 6;
#endif
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- movp(function, Operand(function, Context::SlotOffset(index)));
+
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ movp(dst, NativeContextOperand());
+ movp(dst, ContextOperand(dst, index));
}
@@ -4664,10 +5256,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -4677,7 +5269,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -4715,45 +5307,19 @@ void MacroAssembler::JumpIfBlack(Register object,
Label* on_black,
Label::Distance on_black_distance) {
DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+
GetMarkBits(object, bitmap_scratch, mask_scratch);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
- // and a 0 at all other positions, including the position of the second bit.
+ // and a 1 at the position of the second bit. All other positions are zero.
movp(rcx, mask_scratch);
- // Make rcx into a mask that covers both marking bits using the operation
- // rcx = mask | (mask << 1).
- leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
- // Note that we are using a 4-byte aligned 8-byte load.
andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
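
This hunk tracks the mark-bit scheme change: black is now both mark bits set ("11" in the DCHECKed, low-bit-first pattern strings), so the mask is seeded with 3 before the shift and blackness is exactly (bitmap & mask) == mask; JumpIfWhite below still needs only the first bit, since grey and black both set it. A small standalone model of the encoding, a sketch under those assumptions:

#include <cassert>
#include <cstdint>

// Two mark bits per object; bit 0 of each pair is the "first" bit of the
// pattern strings (white "00", grey "10", black "11").
enum Color : uint64_t { kWhite = 0b00, kGrey = 0b01, kBlack = 0b11 };

bool is_black(uint64_t bitmap, int pos) {
  uint64_t mask = 3ull << pos;     // movl(mask_reg, Immediate(3)); shlp_cl(mask_reg)
  return (bitmap & mask) == mask;  // andp + cmpp in JumpIfBlack
}

bool is_white(uint64_t bitmap, int pos) {
  return (bitmap & (1ull << pos)) == 0;  // single-bit test in JumpIfWhite
}

int main() {
  uint64_t cell = (uint64_t{kBlack} << 0) | (uint64_t{kGrey} << 2) |
                  (uint64_t{kWhite} << 4);
  assert(is_black(cell, 0) && !is_white(cell, 0));
  assert(!is_black(cell, 2) && !is_white(cell, 2));  // grey: first bit only
  assert(!is_black(cell, 4) && is_white(cell, 4));
  return 0;
}
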
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
- Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
- Label is_data_object;
- movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- j(equal, &is_data_object, Label::kNear);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, not_data_object, not_data_object_distance);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -4773,104 +5339,27 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(1));
+ movl(mask_reg, Immediate(3));
shlp_cl(mask_reg);
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- Push(mask_scratch);
- // shl. May overflow making the check conservative.
- addp(mask_scratch, mask_scratch);
- testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- Pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = rcx; // Holds map while checking type.
- Register length = rcx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- movp(map, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(not_equal, &not_heap_number, Label::kNear);
- movp(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = rcx;
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- testb(instance_type, Immediate(kExternalStringTag));
- j(zero, &not_external, Label::kNear);
- movp(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- andp(length, Immediate(kStringEncodingMask));
- xorp(length, Immediate(kStringEncodingMask));
- addp(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
- imulp(length, FieldOperand(value, String::kLengthOffset));
- shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- andp(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
- bind(&done);
+ j(zero, value_is_white, distance);
}
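These hunks switch the incremental-marking color encoding: black is now "11" (was "10") and grey is "10" (was "11"), which is why GetMarkBits loads Immediate(3) so the shifted mask covers both mark bits. A minimal C++ sketch of the predicates the assembly implements, with helper names invented for illustration:

#include <cstdint>

// Bit patterns asserted above: white "00", impossible "01",
// grey "10", black "11" (two adjacent bits per object in a bitmap cell).
// mask has two adjacent bits set (3 << index), as GetMarkBits now builds it.

inline bool IsBlack(uint32_t cell, uint32_t mask) {
  // JumpIfBlack: (cell & mask) == mask, i.e. both mark bits set ("11").
  return (cell & mask) == mask;
}

inline bool IsWhite(uint32_t cell, uint32_t mask) {
  // JumpIfWhite: the test is zero only when both bits are clear ("00");
  // grey and black each set at least the first bit.
  return (cell & mask) == 0;
}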
diff --git a/chromium/v8/src/x64/macro-assembler-x64.h b/chromium/v8/src/x64/macro-assembler-x64.h
index 1fca0e3594e..1aa2c74f228 100644
--- a/chromium/v8/src/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/x64/macro-assembler-x64.h
@@ -16,17 +16,19 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_rax_Code};
-const Register kReturnRegister1 = {kRegister_rdx_Code};
-const Register kJSFunctionRegister = {kRegister_rdi_Code};
-const Register kContextRegister = {kRegister_rsi_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_rax_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_r11_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_r12_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_r14_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_r15_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_rbx_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_rax_Code};
+const Register kReturnRegister0 = {Register::kCode_rax};
+const Register kReturnRegister1 = {Register::kCode_rdx};
+const Register kJSFunctionRegister = {Register::kCode_rdi};
+const Register kContextRegister = {Register::kCode_rsi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r11};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_rdx};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
@@ -83,11 +85,8 @@ struct SmiIndex {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Prevent the use of the RootArray during the lifetime of this
// scope object.
@@ -150,7 +149,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kNear) {
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
@@ -158,7 +163,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kNear) {
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
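The default jump distance for JumpIfRoot/JumpIfNotRoot flips from Label::kNear to Label::kFar, the safe choice when the branch target may lie more than 127 bytes away; callers that know the target is close can still request the short encoding. A hypothetical call site (using the usual __ ACCESS_MASM shorthand):

Label not_undefined;
// Explicit short encoding where the target is known to be within range;
// omitting the last argument now gets the safe Label::kFar default.
__ JumpIfNotRoot(rax, Heap::kUndefinedValueRootIndex, &not_undefined,
                 Label::kNear);
// ... undefined case ...
__ bind(&not_undefined);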
@@ -219,29 +230,14 @@ class MacroAssembler: public Assembler {
}
// Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ void JumpIfBlack(Register object, Register bitmap_scratch,
+ Register mask_scratch, Label* on_black,
+ Label::Distance on_black_distance);
+
+ // Checks the color of an object. If the object is white, we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -342,8 +338,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi.
- void LeaveExitFrame(bool save_doubles = false);
+ // argument in register rsi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
@@ -369,20 +365,25 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -398,13 +399,6 @@ class MacroAssembler: public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, int native_context_index);
-
-
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
@@ -806,12 +800,35 @@ class MacroAssembler: public Assembler {
void Set(Register dst, int64_t x);
void Set(const Operand& dst, intptr_t x);
+ void Cvtss2sd(XMMRegister dst, XMMRegister src);
+ void Cvtss2sd(XMMRegister dst, const Operand& src);
+ void Cvtsd2ss(XMMRegister dst, XMMRegister src);
+ void Cvtsd2ss(XMMRegister dst, const Operand& src);
+
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
- // xorps to clear the dst register before cvtsi2sd to solve this issue.
+ // xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtqsi2ss(XMMRegister dst, Register src);
+ void Cvtqsi2ss(XMMRegister dst, const Operand& src);
+
+ void Cvtqsi2sd(XMMRegister dst, Register src);
+ void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+
+ void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
+ void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
+
+ void Cvtsd2si(Register dst, XMMRegister src);
+
+ void Cvttsd2si(Register dst, XMMRegister src);
+ void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttss2siq(Register dst, XMMRegister src);
+ void Cvttss2siq(Register dst, const Operand& src);
+ void Cvttsd2siq(Register dst, XMMRegister src);
+ void Cvttsd2siq(Register dst, const Operand& src);
+
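The comment above Cvtlsi2sd explains the false-dependence hazard; a plausible shape for the wrapper's body (the real definition lives in macro-assembler-x64.cc, so treat this as an assumed sketch):

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  xorpd(dst, dst);      // clear dst to break the dependence on its old value
  cvtlsi2sd(dst, src);  // only writes the low 64 bits of dst
}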
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -894,6 +911,66 @@ class MacroAssembler: public Assembler {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
+ void macro_name(XMMRegister dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ } else { \
+ name(dst, src); \
+ } \
+ }
+#define AVX_OP2_X(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
+#define AVX_OP2_O(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
+#define AVX_OP2_XO(macro_name, name) \
+ AVX_OP2_X(macro_name, name) \
+ AVX_OP2_O(macro_name, name)
+
+ AVX_OP2_XO(Addsd, addsd)
+ AVX_OP2_XO(Subsd, subsd)
+ AVX_OP2_XO(Mulsd, mulsd)
+ AVX_OP2_XO(Divsd, divsd)
+ AVX_OP2_X(Andpd, andpd)
+ AVX_OP2_X(Orpd, orpd)
+ AVX_OP2_X(Xorpd, xorpd)
+ AVX_OP2_X(Pcmpeqd, pcmpeqd)
+ AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
+ AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
+
+#undef AVX_OP2_O
+#undef AVX_OP2_X
+#undef AVX_OP2_XO
+#undef AVX_OP2_WITH_TYPE
+
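The AVX_OP2_* macros above, and the Movsd/Movss-style wrappers that follow, share one dispatch pattern: use the non-destructive three-operand AVX form when available, otherwise fall back to the SSE2 instruction. What AVX_OP2_XO(Addsd, addsd) expands to, approximately:

void Addsd(XMMRegister dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vaddsd(dst, dst, src);  // AVX: three-operand encoding, dst untouched as input
  } else {
    addsd(dst, src);        // SSE2: two-operand form, dst is also a source
  }
}
void Addsd(XMMRegister dst, const Operand& src);  // same body, Operand source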
+ void Movsd(XMMRegister dst, XMMRegister src);
+ void Movsd(XMMRegister dst, const Operand& src);
+ void Movsd(const Operand& dst, XMMRegister src);
+ void Movss(XMMRegister dst, XMMRegister src);
+ void Movss(XMMRegister dst, const Operand& src);
+ void Movss(const Operand& dst, XMMRegister src);
+
+ void Movd(XMMRegister dst, Register src);
+ void Movd(XMMRegister dst, const Operand& src);
+ void Movd(Register dst, XMMRegister src);
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
+
+ void Movaps(XMMRegister dst, XMMRegister src);
+ void Movapd(XMMRegister dst, XMMRegister src);
+ void Movmskpd(Register dst, XMMRegister src);
+
+ void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Sqrtsd(XMMRegister dst, XMMRegister src);
+ void Sqrtsd(XMMRegister dst, const Operand& src);
+
+ void Ucomiss(XMMRegister src1, XMMRegister src2);
+ void Ucomiss(XMMRegister src1, const Operand& src2);
+ void Ucomisd(XMMRegister src1, XMMRegister src2);
+ void Ucomisd(XMMRegister src1, const Operand& src2);
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
@@ -936,9 +1013,24 @@ class MacroAssembler: public Assembler {
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void Lzcntq(Register dst, Register src);
+ void Lzcntq(Register dst, const Operand& src);
+
void Lzcntl(Register dst, Register src);
void Lzcntl(Register dst, const Operand& src);
+ void Tzcntq(Register dst, Register src);
+ void Tzcntq(Register dst, const Operand& src);
+
+ void Tzcntl(Register dst, Register src);
+ void Tzcntl(Register dst, const Operand& src);
+
+ void Popcntl(Register dst, Register src);
+ void Popcntl(Register dst, const Operand& src);
+
+ void Popcntq(Register dst, Register src);
+ void Popcntq(Register dst, const Operand& src);
+
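These wrappers presumably guard the LZCNT/TZCNT/POPCNT CPU features the same way the AVX wrappers above guard AVX. A sketch of a possible Lzcntl fallback via BSR (assumed for illustration, not taken from this diff):

void MacroAssembler::Lzcntl(Register dst, Register src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcntl(dst, src);
    return;
  }
  Label not_zero_src;
  bsrl(dst, src);            // index of highest set bit; undefined if src == 0
  j(not_zero, &not_zero_src, Label::kNear);
  Set(dst, 63);              // 63 ^ 31 == 32, the lzcnt result for zero input
  bind(&not_zero_src);
  xorl(dst, Immediate(31));  // convert bit index into a leading-zero count
}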
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
@@ -1108,6 +1200,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -1223,6 +1319,11 @@ class MacroAssembler: public Assembler {
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -1260,8 +1361,15 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -1274,8 +1382,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
+ // Load the native context slot with the current index.
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1299,36 +1407,33 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
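The new single-argument CallRuntime overload reads the argument count from the Runtime::Function descriptor instead of trusting the caller; later hunks in this diff already use it. A before/after sketch, with the runtime id taken from this diff:

// Old style: the count is passed by hand and can drift out of sync.
__ CallRuntime(Runtime::kNewObject, 2);
// New style: nargs comes from Runtime::FunctionForId(fid)->nargs.
__ CallRuntime(Runtime::kNewObject);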
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext, int result_size);
+ // Jump to a runtime routine.
+ void JumpToExternalReference(const ExternalReference& ext);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -1377,12 +1482,11 @@ class MacroAssembler: public Assembler {
int min_length = 0,
Register scratch = kScratchRegister);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// Emit code for a truncating division by a constant. The dividend register is
@@ -1489,13 +1593,11 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ Label::Distance near_jump,
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue(bool save_rax);
@@ -1558,7 +1660,7 @@ class MacroAssembler: public Assembler {
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
+ CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
@@ -1599,8 +1701,8 @@ inline Operand ContextOperand(Register context, Register index) {
}
-inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand NativeContextOperand() {
+ return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
}
@@ -1640,6 +1742,7 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/chromium/v8/src/x64/simulator-x64.h b/chromium/v8/src/x64/simulator-x64.h
index 35cbdc78884..f1351c88cfe 100644
--- a/chromium/v8/src/x64/simulator-x64.h
+++ b/chromium/v8/src/x64/simulator-x64.h
@@ -13,7 +13,7 @@ namespace internal {
// Since there is no simulator for the x64 architecture the only thing we can
// do is to call the entry directly.
// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
@@ -21,7 +21,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
@@ -34,13 +35,16 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/chromium/v8/src/x87/assembler-x87-inl.h b/chromium/v8/src/x87/assembler-x87-inl.h
index 45430470802..0e529c7ab62 100644
--- a/chromium/v8/src/x87/assembler-x87-inl.h
+++ b/chromium/v8/src/x87/assembler-x87-inl.h
@@ -104,8 +104,9 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
@@ -134,7 +135,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -200,7 +201,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -230,8 +231,8 @@ void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
}
@@ -245,7 +246,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
- Assembler::set_target_address_at(location, host_, target);
+ Assembler::set_target_address_at(isolate_, location, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -260,7 +261,8 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -281,7 +283,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -306,7 +308,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
@@ -454,13 +456,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, sizeof(int32_t));
+ Assembler::FlushICache(isolate, p, sizeof(int32_t));
}
}
@@ -500,7 +502,7 @@ void Assembler::emit_near_disp(Label* L) {
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -554,6 +556,7 @@ Operand::Operand(Immediate imm) {
set_modrm(0, ebp);
set_dispr(imm.x_, imm.rmode_);
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_ASSEMBLER_X87_INL_H_
diff --git a/chromium/v8/src/x87/assembler-x87.cc b/chromium/v8/src/x87/assembler-x87.cc
index 323d2434f67..53919486d66 100644
--- a/chromium/v8/src/x87/assembler-x87.cc
+++ b/chromium/v8/src/x87/assembler-x87.cc
@@ -102,37 +102,6 @@ bool RelocInfo::IsInConstantPool() {
}
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
-// Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- DCHECK_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -253,6 +222,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -1178,6 +1148,14 @@ void Assembler::bsr(Register dst, const Operand& src) {
}
+void Assembler::bsf(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xBC);
+ emit_operand(dst, src);
+}
+
+
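bsf (bit scan forward, opcode 0F BC) complements the existing bsr and returns the index of the least significant set bit; a hypothetical use:

// edx = index of the lowest 1 bit in eax (result undefined if eax == 0,
// so real callers must test for zero first).
__ bsf(edx, eax);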
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
@@ -1967,6 +1945,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2102,7 +2081,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/chromium/v8/src/x87/assembler-x87.h b/chromium/v8/src/x87/assembler-x87.h
index 1f454bcd902..668dc7bb40d 100644
--- a/chromium/v8/src/x87/assembler-x87.h
+++ b/chromium/v8/src/x87/assembler-x87.h
@@ -40,12 +40,48 @@
#include <deque>
#include "src/assembler.h"
-#include "src/compiler.h"
#include "src/isolate.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
+#define GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esp) \
+ V(ebp) \
+ V(esi) \
+ V(edi)
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(eax) \
+ V(ecx) \
+ V(edx) \
+ V(ebx) \
+ V(esi) \
+ V(edi)
+
+#define DOUBLE_REGISTERS(V) \
+ V(stX_0) \
+ V(stX_1) \
+ V(stX_2) \
+ V(stX_3) \
+ V(stX_4) \
+ V(stX_5) \
+ V(stX_6) \
+ V(stX_7)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(stX_0) \
+ V(stX_1) \
+ V(stX_2) \
+ V(stX_3) \
+ V(stX_4) \
+ V(stX_5)
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -68,145 +104,87 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- static const int kMaxNumAllocatableRegisters = 6;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
+ static const int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
DCHECK(code >= 0);
DCHECK(code < kNumRegisters);
- Register r = { code };
+ Register r = {code};
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
+ const char* ToString();
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
+ bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
- return code_;
+ return reg_code;
}
int bit() const {
DCHECK(is_valid());
- return 1 << code_;
+ return 1 << reg_code;
}
+ bool is_byte_register() const { return reg_code <= 3; }
+
// Unfortunately we can't make this private in a struct.
- int code_;
+ int reg_code;
};
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
-
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
+#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const Register no_reg = {Register::kCode_no_reg};
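The X-macro pattern above replaces the hand-written kRegister_*_Code constants; expanding GENERAL_REGISTERS through REGISTER_CODE and DECLARE_REGISTER yields, approximately:

struct Register {
  enum Code {
    kCode_eax, kCode_ecx, kCode_edx, kCode_ebx,
    kCode_esp, kCode_ebp, kCode_esi, kCode_edi,
    kAfterLast,            // == 8, so kNumRegisters stays 8
    kCode_no_reg = -1
  };
  // ...
};
const Register eax = {Register::kCode_eax};  // one per GENERAL_REGISTERS entry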
-inline int Register::ToAllocationIndex(Register reg) {
- DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
+struct DoubleRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
-struct X87Register {
+ static const int kMaxNumRegisters = Code::kAfterLast;
static const int kMaxNumAllocatableRegisters = 6;
- static const int kMaxNumRegisters = 8;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
-
- // TODO(turbofan): Proper support for float32.
- static int NumAllocatableAliasedRegisters() {
- return NumAllocatableRegisters();
- }
-
-
- static int ToAllocationIndex(X87Register reg) {
- return reg.code_;
- }
-
- static const char* AllocationIndexToString(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "stX_0", "stX_1", "stX_2", "stX_3", "stX_4",
- "stX_5", "stX_6", "stX_7"
- };
- return names[index];
- }
-
- static X87Register FromAllocationIndex(int index) {
- DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
- X87Register result;
- result.code_ = index;
+ static DoubleRegister from_code(int code) {
+ DoubleRegister result = {code};
return result;
}
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
+ bool IsAllocatable() const;
+ bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
int code() const {
DCHECK(is_valid());
- return code_;
- }
-
- bool is(X87Register reg) const {
- return code_ == reg.code_;
+ return reg_code;
}
- int code_;
-};
-
+ bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-typedef X87Register DoubleRegister;
+ const char* ToString();
+ int reg_code;
+};
-const X87Register stX_0 = { 0 };
-const X87Register stX_1 = { 1 };
-const X87Register stX_2 = { 2 };
-const X87Register stX_3 = { 3 };
-const X87Register stX_4 = { 4 };
-const X87Register stX_5 = { 5 };
-const X87Register stX_6 = { 6 };
-const X87Register stX_7 = { 7 };
+#define DECLARE_REGISTER(R) \
+ const DoubleRegister R = {DoubleRegister::kCode_##R};
+DOUBLE_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
+typedef DoubleRegister X87Register;
enum Condition {
// any value < 0 is considered no_condition
@@ -505,19 +483,17 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) {
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target);
+ set_target_address_at(isolate, pc, constant_pool, target);
}
// Return the code target address at a call site from the return address
@@ -527,13 +503,14 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static const int kSpecialTargetSize = kPointerSize;
@@ -793,6 +770,8 @@ class Assembler : public AssemblerBase {
void bts(const Operand& dst, Register src);
void bsr(Register dst, Register src) { bsr(dst, Operand(src)); }
void bsr(Register dst, const Operand& src);
+ void bsf(Register dst, Register src) { bsf(dst, Operand(src)); }
+ void bsf(Register dst, const Operand& src);
// Miscellaneous
void hlt();
@@ -948,7 +927,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1097,6 +1076,7 @@ class EnsureSpace BASE_EMBEDDED {
#endif
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_ASSEMBLER_X87_H_
diff --git a/chromium/v8/src/x87/builtins-x87.cc b/chromium/v8/src/x87/builtins-x87.cc
index bb9829be342..55ec55fc6f8 100644
--- a/chromium/v8/src/x87/builtins-x87.cc
+++ b/chromium/v8/src/x87/builtins-x87.cc
@@ -22,12 +22,12 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
+ // -- edi : target
+ // -- edx : new.target
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
+ // -- esp[4 * argc] : first argument
// -- esp[4 * (argc +1)] : receiver
// -----------------------------------
__ AssertFunction(edi);
@@ -36,38 +36,48 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ if (extra_args != BuiltinExtraArguments::kNone) {
+ __ PopReturnAddressTo(ecx);
+ if (extra_args & BuiltinExtraArguments::kTarget) {
+ ++num_extra_args;
+ __ Push(edi);
+ }
+ if (extra_args & BuiltinExtraArguments::kNewTarget) {
+ ++num_extra_args;
+ __ Push(edx);
+ }
+ __ PushReturnAddressFrom(ecx);
}
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
__ add(eax, Immediate(num_extra_args + 1));
+
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
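The bit tests on extra_args above assume BuiltinExtraArguments is now a flag set rather than the old two-value enum. A sketch of the shape it likely has (it is declared elsewhere, so the underlying type and operator are assumptions):

enum class BuiltinExtraArguments : uint8_t {
  kNone = 0u,
  kTarget = 1u << 0,     // push edi (the target function)
  kNewTarget = 1u << 1,  // push edx (new.target)
  kTargetAndNewTarget = kTarget | kNewTarget
};

inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
  return static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs);
}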
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
+ // Push a copy of the target function and the new target.
__ push(edi);
+ __ push(edx);
// Function is also the parameter to the runtime call.
__ push(edi);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ pop(edx);
__ pop(edi);
}
@@ -107,12 +117,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
// -- ebx: allocation site or undefined
- // -- edx: original constructor
+ // -- edx: new target
// -----------------------------------
// Enter a construct frame.
@@ -124,173 +135,166 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
__ SmiTag(eax);
__ push(eax);
- __ push(edi);
- __ push(edx);
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(edx, edi);
- __ j(not_equal, &rt_call);
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (!is_api_function) {
- Label allocate;
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
- // Check if slack tracking is enabled.
- __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
- __ shr(esi, Map::Counter::kShift);
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &allocate);
- // Decrease generous allocation count.
- __ sub(FieldOperand(eax, Map::kBitField3Offset),
- Immediate(1 << Map::Counter::kShift));
-
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(not_equal, &allocate);
-
- __ push(eax);
- __ push(edx);
- __ push(edi);
-
- __ push(edi); // constructor
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(edx);
- __ pop(eax);
- __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
-
- __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
- Factory* factory = masm->isolate()->factory();
-
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // esi: slack tracking counter (non-API function case)
- __ mov(edx, factory->undefined_value());
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ movzx_b(
- esi,
- FieldOperand(
- eax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ sub(esi, eax);
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
+ if (create_implicit_receiver) {
+ __ push(edi);
+ __ push(edx);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // edx: new target
+ __ mov(eax,
+ FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will indicate both a NULL and a Smi.
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Fall back to the runtime if the expected base constructor and the
+ // actual base constructor differ.
+ __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject (not HeapObject tagged - the actual address).
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // ebx: JSObject (tagged)
+ // ecx: First in-object property of JSObject (not tagged)
+ __ mov(edx, factory->undefined_value());
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCounter::kShift);
+ __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
+ __ push(esi); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Allocate object with a slack.
+ __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ neg(esi);
+ __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(ecx, esi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ mov(edx, factory->one_pointer_filler_map());
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+ __ pop(esi); // Restore allocation count value before decreasing.
+ __ cmp(esi, Map::kSlackTrackingCounterEnd);
+ __ j(not_equal, &allocated);
+
+ // Push the object to the stack, and then the initial map as
+ // an argument to the runtime call.
+ __ push(ebx);
+ __ push(eax); // initial map
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ pop(ebx);
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(ecx, esi, edx);
- __ mov(edx, factory->one_pointer_filler_map());
- // Fill the remaining fields with one pointer filler map.
-
- __ bind(&no_inobject_slack_tracking);
- }
- __ InitializeFieldsWithFiller(ecx, edi, edx);
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- // ebx: JSObject (untagged)
- __ or_(ebx, Immediate(kHeapObjectTag));
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+ }
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
+ // Allocate the new receiver object using the runtime call.
+ // edx: new target
+ __ bind(&rt_call);
+ int offset = kPointerSize;
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi); // constructor function
+ __ push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(ebx, eax); // store result in ebx
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+
+ // Restore the parameters.
+ __ pop(edx); // new.target
+ __ pop(edi); // Constructor function.
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
}
- // Allocate the new receiver object using the runtime call.
- // edx: original constructor
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi); // argument 2/1: constructor function
- __ push(edx); // argument 3/2: original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
-
- // Restore the parameters.
- __ pop(edx); // new.target
- __ pop(edi); // Constructor function.
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
__ SmiUntag(eax);
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ push(edx);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -313,40 +317,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The arguments
- // count is stored below the reciever and the new.target.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and leave the construct frame. The
+ // arguments count is stored below the receiver.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ } else {
+ __ mov(ebx, Operand(esp, 0));
+ }
// Leave construct frame.
}
@@ -356,91 +364,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ }
__ ret(0);
}
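
The create_implicit_receiver path above implements the ECMA-262 [[Construct]] rule that a constructor's return value only replaces the implicit receiver when it is an object. A minimal C++ sketch of that selection, using stand-in types rather than V8's object model:

    #include <cstdio>

    // Stand-in for a tagged value: only whether it is a JS receiver matters here.
    struct Value {
      bool is_js_receiver;  // maps to the CmpObjectType(FIRST_JS_RECEIVER_TYPE) check
      const char* repr;
    };

    // Mirrors the use_receiver/exit control flow in the stub.
    static Value SelectConstructResult(Value call_result, Value implicit_receiver) {
      if (!call_result.is_js_receiver) return implicit_receiver;  // smi or primitive
      return call_result;  // object result wins (ECMA-262 13.2.2-7)
    }

    int main() {
      Value receiver{true, "implicit receiver"};
      std::printf("%s\n", SelectConstructResult({false, "42"}, receiver).repr);
      std::printf("%s\n", SelectConstructResult({true, "{}"}, receiver).repr);
      return 0;
    }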
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -- ebx: allocation site or undefined
- // -- edx: original constructor
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve allocation site.
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(ebx);
-
- // Preserve actual arguments count.
- __ SmiTag(eax);
- __ push(eax);
- __ SmiUntag(eax);
-
- // Push new.target.
- __ push(edx);
-
- // receiver is the hole.
- __ push(Immediate(masm->isolate()->factory()->the_hole_value()));
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(equal, &skip_step_in);
-
- __ push(eax);
- __ push(edi);
- __ push(edi);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&skip_step_in);
-
- // Invoke function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Get arguments count, skipping over new.target.
- __ mov(ebx, Operand(esp, kPointerSize));
- }
- __ pop(ecx); // Return address.
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -473,7 +422,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -559,6 +508,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o edi: the JS function object being called
+// o edx: the new target
// o esi: our context
// o ebp: the caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -576,6 +526,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ __ push(edx); // Callee's new target.
+
+ // Push zero for bytecode array offset.
+ __ push(Immediate(0));
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
@@ -605,7 +559,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -627,21 +581,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
- // - Deal with sloppy mode functions which need to replace the
- // receiver with the global proxy when called as functions (without an
- // explicit receiver object).
// - Code aging of the BytecodeArray object.
- // - Supporting FLAG_trace.
- //
- // The following items are also not done here, and will probably be done using
- // explicit bytecodes instead:
- // - Allocating a new local context if applicable.
- // - Setting up a local binding to the this function, which is used in
- // derived constructors with super calls.
- // - Setting new.target if required.
- // - Dealing with REST parameters (only if
- // https://codereview.chromium.org/1235153006 doesn't land by then).
- // - Dealing with argument objects.
// Perform stack guard check.
{
@@ -650,7 +590,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -658,31 +600,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ mov(kInterpreterRegisterFileRegister, ebp);
- __ sub(
- kInterpreterRegisterFileRegister,
- Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
// load directly from the roots table.
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ add(kInterpreterDispatchTableRegister,
- Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // Push context as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterContextSpillSlot);
- __ push(esi);
+ // Push dispatch table as a stack located parameter to the bytecode handler.
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ push(ebx);
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(esi, Operand(kInterpreterDispatchTableRegister, esi,
- times_pointer_size, 0));
+ __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
+ // Restore undefined_value in accumulator (eax)
+ // TODO(rmcilroy): Remove this once we move the dispatch table back into a
+ // register.
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
- __ add(esi, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(esi);
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(ebx);
+ __ nop(); // Ensure that return address still counts as interpreter entry
+ // trampoline.
}
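
The final sequence indexes the dispatch table with the current bytecode and jumps past the Code object's header. A rough C++ model of that lookup; the handler table and handler below are illustrative, not V8 types:

    #include <cstdint>
    #include <cstdio>

    using Handler = void (*)();

    // movzx_b loads the bytecode at the current offset; the dispatch table is
    // then indexed by that byte to find the handler to invoke.
    static Handler Dispatch(const uint8_t* bytecode_array, int offset,
                            Handler const* dispatch_table) {
      uint8_t bytecode = bytecode_array[offset];
      return dispatch_table[bytecode];
    }

    static void LdaZeroHandler() { std::puts("LdaZero"); }

    int main() {
      Handler table[256] = {};
      table[0] = &LdaZeroHandler;
      const uint8_t bytecodes[] = {0};
      Dispatch(bytecodes, 0, table)();  // prints "LdaZero"
      return 0;
    }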
@@ -708,36 +652,191 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterPushArgs(MacroAssembler* masm,
+ Register array_limit) {
+ // ----------- S t a t e -------------
+ // -- ebx : Pointer to the last argument in the args array.
+ // -- array_limit : Pointer to one before the first argument in the
+ // args array.
+ // -----------------------------------
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(ebx, 0));
+ __ sub(ebx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmp(ebx, array_limit);
+ __ j(greater, &loop_header, Label::kNear);
}
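
The loop walks from the last argument down to (but not including) array_limit, pushing one word per iteration. A small C++ model of the same copy, using indices instead of raw pointer arithmetic; names are illustrative:

    #include <cstdio>
    #include <vector>

    // Pushing from the highest-addressed argument downwards reproduces the
    // argument order the callee expects on a downward-growing stack.
    static void PushArgs(const int* args, int count, std::vector<int>* stack) {
      for (int i = count - 1; i >= 0; --i) {
        stack->push_back(args[i]);  // __ Push(Operand(ebx, 0)); __ sub(ebx, kPointerSize);
      }
    }

    int main() {
      const int args[] = {11, 22, 33};  // receiver plus two arguments
      std::vector<int> stack;
      PushArgs(args, 3, &stack);
      for (int v : stack) std::printf("%d ", v);  // 33 22 11
      return 0;
    }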
+// static
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- edi : the target to call (can be any Object).
+ // -----------------------------------
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
- __ push(edi);
- // Function is also the parameter to the runtime call.
- __ push(edi);
- // Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(edx);
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(edi);
+ // Find the address of the last argument.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+ __ shl(ecx, kPointerSizeLog2);
+ __ neg(ecx);
+ __ add(ecx, ebx);
+
+ Generate_InterpreterPushArgs(masm, ecx);
+
+ // Call the target.
+ __ Push(edx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target
+ // -- edi : the constructor
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+
+ // Save number of arguments on the stack below where arguments are going
+ // to be pushed.
+ __ mov(ecx, eax);
+ __ neg(ecx);
+ __ mov(Operand(esp, ecx, times_pointer_size, -kPointerSize), eax);
+ __ mov(eax, ecx);
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(ecx);
+
+ // Find the address of the last argument.
+ __ shl(eax, kPointerSizeLog2);
+ __ add(eax, ebx);
+
+ // Push padding for receiver.
+ __ Push(Immediate(0));
+
+ Generate_InterpreterPushArgs(masm, eax);
+
+ // Restore number of arguments from slot on stack.
+ __ mov(eax, Operand(esp, -kPointerSize));
+
+ // Re-push return address.
+ __ Push(ecx);
+
+  // Call the constructor with unmodified eax, edi, edx values.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Initialize register file register.
+ __ mov(kInterpreterRegisterFileRegister, ebp);
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+
+ // Get the bytecode array pointer from the frame.
+ __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ ebx);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ mov(
+ kInterpreterBytecodeOffsetRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Push dispatch table as a stack located parameter to the bytecode handler -
+ // overwrite the state slot (we don't use these for interpreter deopts).
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ mov(Operand(esp, kPointerSize), ebx);
+
+ // Dispatch to the target bytecode.
+ __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ mov(kContextRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
+ // and header removal.
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(ebx);
+}
+
+
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -833,7 +932,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -860,7 +959,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
@@ -902,7 +1001,136 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into eax and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ JumpIfSmi(eax, &receiver_not_date);
+ __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+ __ j(not_equal, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(edx, Operand::StaticVariable(
+ ExternalReference::date_cache_stamp(masm->isolate())));
+ __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+ __ j(not_equal, &stamp_mismatch, Label::kNear);
+ __ mov(eax, FieldOperand(
+ eax, JSDate::kValueOffset + field_index * kPointerSize));
+ __ ret(1 * kPointerSize);
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 0), eax);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ ret(1 * kPointerSize);
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowNotDateError);
+ }
+}
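
The fast path above is only valid while the stamp recorded on the JSDate matches the isolate-wide date cache stamp; any mismatch (e.g. after a timezone change invalidates the cache) falls through to the C function. A hypothetical model of that guard, with stand-in field names and slow path:

    #include <cstdio>

    static int isolate_date_cache_stamp = 7;  // bumped when the cache is invalidated

    struct DateObject {
      int cache_stamp;    // stamp recorded when the fields were computed
      double fields[10];  // cached year, month, day, ...
    };

    static double GetDateFieldSlow(const DateObject&, int) { return -1; }  // stand-in

    static double GetDateField(const DateObject& date, int field_index) {
      if (date.cache_stamp == isolate_date_cache_stamp) {
        return date.fields[field_index];           // stamp match: cached value valid
      }
      return GetDateFieldSlow(date, field_index);  // stamp mismatch: recompute
    }

    int main() {
      DateObject d{7, {2016, 1, 15}};
      std::printf("%g\n", GetDateField(d, 0));  // 2016, via the fast path
      isolate_date_cache_stamp = 8;
      std::printf("%g\n", GetDateField(d, 0));  // -1, via the slow path
      return 0;
    }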
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argArray
+ // -- esp[8] : thisArg
+ // -- esp[12] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into edi, argArray into eax (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg_array, no_this_arg;
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ mov(ebx, edx);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ {
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+ __ cmp(eax, Immediate(1));
+ __ j(equal, &no_arg_array, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+ __ bind(&no_arg_array);
+ }
+ __ bind(&no_this_arg);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
+
+ // ----------- S t a t e -------------
+ // -- eax : argArray
+ // -- edi : receiver
+ // -- esp[0] : return address
+ // -- esp[4] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &receiver_not_callable, Label::kNear);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ Label::kNear);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Set(eax, 0);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
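
Step 1 above is the argc-dependent defaulting Function.prototype.apply requires: missing thisArg and argArray behave as undefined. A minimal sketch of that selection, pure modelling with no V8 types:

    #include <cstdio>

    struct ApplyInputs {
      const char* this_arg;
      const char* arg_array;
    };

    // argc counts arguments excluding the receiver, as in the builtin.
    static ApplyInputs ReadApplyArguments(int argc, const char* a0, const char* a1) {
      ApplyInputs in{"undefined", "undefined"};
      if (argc >= 1) in.this_arg = a0;   // the no_this_arg path skips both loads
      if (argc >= 2) in.arg_array = a1;  // the no_arg_array path skips this load
      return in;
    }

    int main() {
      ApplyInputs in = ReadApplyArguments(1, "thisValue", nullptr);
      std::printf("this=%s args=%s\n", in.this_arg, in.arg_array);
      // this=thisValue args=undefined
      return 0;
    }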
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// esp[0] : Return address
// esp[8] : Argument n
@@ -948,201 +1176,142 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ mov(key, Operand(ebp, indexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ mov(slot, Immediate(Smi::FromInt(slot_index)));
- __ mov(vector, Operand(ebp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register key.
- __ mov(key, Operand(ebp, indexOffset));
- __ add(key, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, indexOffset), key);
-
- __ bind(&entry);
- __ cmp(key, Operand(ebp, limitOffset));
- __ j(not_equal, &loop);
-
- // On exit, the pushed arguments count is in eax, untagged
- __ Move(eax, key);
- __ SmiUntag(eax);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
-
- // Stack at entry:
- // esp : return address
- // esp[4] : arguments
- // esp[8] : receiver ("this")
- // esp[12] : function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : function arguments
- // ebp[12] : receiver
- // ebp[16] : function
- static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argumentsList
+ // -- esp[8] : thisArgument
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ j(equal, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ Push(Immediate(0)); // index
- __ Push(Operand(ebp, kReceiverOffset)); // receiver
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : thisArgument
+ // -----------------------------------
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &target_not_callable, Label::kNear);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Leave internal frame.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
}
-// Used by ReflectConstruct
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : new.target (optional)
+ // -- esp[8] : argumentsList
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- // Stack at entry:
- // esp : return address
- // esp[4] : original constructor (new.target)
- // esp[8] : arguments
- // esp[16] : constructor
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // new.target into edx (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : original constructor (new.target)
- // ebp[12] : arguments
- // ebp[16] : constructor
- static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ mov(eax, Operand(ebp, kNewTargetOffset));
- __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &validate_arguments, Label::kNear);
- __ mov(eax, Operand(ebp, kFunctionOffset));
- __ mov(Operand(ebp, kNewTargetOffset), eax);
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ push(Operand(ebp, kFunctionOffset));
- __ push(Operand(ebp, kArgumentsOffset));
- __ push(Operand(ebp, kNewTargetOffset));
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
-
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ push(Immediate(0)); // index
- // Push the constructor function as callee.
- __ push(Operand(ebp, kFunctionOffset));
-
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ mov(ecx, Operand(ebp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ mov(edx, edi);
+ __ j(equal, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
}
- // remove this, target, arguments, and newTarget
- __ ret(kStackSize * kPointerSize);
-}
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edx : new.target
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &target_not_constructor, Label::kNear);
+  // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &new_target_not_constructor, Label::kNear);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edx);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
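
Step 1 of Reflect.construct mirrors the same defaulting, with the extra rule that new.target falls back to target when only two arguments are supplied (the `mov edx, edi` above). A small model of that, with stand-in strings instead of tagged values:

    #include <cstdio>

    struct ConstructInputs {
      const char* target;
      const char* arguments_list;
      const char* new_target;
    };

    static ConstructInputs ReadReflectConstructArguments(int argc, const char* a0,
                                                         const char* a1,
                                                         const char* a2) {
      ConstructInputs in{"undefined", "undefined", "undefined"};
      if (argc >= 1) { in.target = a0; in.new_target = a0; }  // new.target := target
      if (argc >= 2) in.arguments_list = a1;
      if (argc >= 3) in.new_target = a2;
      return in;
    }

    int main() {
      ConstructInputs in = ReadReflectConstructArguments(2, "C", "[1, 2]", nullptr);
      std::printf("target=%s args=%s new.target=%s\n", in.target, in.arguments_list,
                  in.new_target);  // new.target defaults to target
      return 0;
    }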
@@ -1206,6 +1375,113 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into eax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, ebx);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in eax).
+ __ bind(&no_arguments);
+ __ ret(1 * kPointerSize);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- edx : new target
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ Move(ebx, Smi::FromInt(0));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ }
+
+ // 3. Make sure ebx is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(ebx, &done_convert);
+ __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &done_convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Move(eax, ebx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(ebx, eax);
+ __ Pop(edx);
+ __ Pop(edi);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ }
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
@@ -1260,7 +1536,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -1270,12 +1546,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- edx : new target
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into ebx and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -1291,60 +1571,47 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ PushReturnAddressFrom(ecx);
}
- // 2. Make sure ebx is a string.
+ // 3. Make sure ebx is a string.
{
Label convert, done_convert;
__ JumpIfSmi(ebx, &convert, Label::kNear);
- __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, edx);
+ __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
__ j(below, &done_convert);
__ bind(&convert);
{
FrameScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
__ Push(edi);
+ __ Push(edx);
__ Move(eax, ebx);
__ CallStub(&stub);
__ Move(ebx, eax);
+ __ Pop(edx);
__ Pop(edi);
}
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- ebx : the first argument
- // -- edi : constructor function
- // -----------------------------------
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
- Label allocate, done_allocate;
- __ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edi);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(edi);
- __ Pop(ebx);
- }
- __ jmp(&done_allocate);
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
+ __ Ret();
}
@@ -1353,24 +1620,24 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
- // -- edi : function (passed through to callee)
+ // -- edx : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
__ mov(ecx, esp);
- __ sub(ecx, edx);
- // Make edx the space we need for the array when it is unrolled onto the
+ __ sub(ecx, edi);
+ // Make edi the space we need for the array when it is unrolled onto the
// stack.
- __ mov(edx, ebx);
- __ shl(edx, kPointerSizeLog2);
+ __ mov(edi, ebx);
+ __ shl(edi, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
+ __ cmp(ecx, edi);
__ j(less_equal, stack_overflow); // Signed comparison.
}
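
The check computes the headroom between esp and the real stack limit and compares it against the space the expected arguments will occupy once unrolled. In C++ terms (signed arithmetic matters, since esp may already be below the limit):

    #include <cstdint>
    #include <cstdio>

    // Returns true when pushing `expected_args` pointer-sized slots would cross
    // the real stack limit; mirrors the signed j(less_equal) in the builtin.
    static bool AdaptorWouldOverflow(uintptr_t esp, uintptr_t real_stack_limit,
                                     int expected_args) {
      const int kPointerSize = 4;  // ia32
      intptr_t space_left = static_cast<intptr_t>(esp - real_stack_limit);
      intptr_t needed = static_cast<intptr_t>(expected_args) * kPointerSize;
      return space_left <= needed;
    }

    int main() {
      std::printf("%d\n", AdaptorWouldOverflow(0x1000, 0x0F00, 8));   // 0: fits
      std::printf("%d\n", AdaptorWouldOverflow(0x1000, 0x0F00, 64));  // 1: overflow
      return 0;
    }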
@@ -1410,74 +1677,225 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(eax, &create_runtime);
+
+ // Load the map of argumentsList into ecx.
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Load native context into ebx.
+ __ mov(ebx, NativeContextOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+ __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+ __ j(equal, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(edx);
+ __ Pop(edi);
+ __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ mov(ebx,
+ FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+ __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ j(not_equal, &create_runtime);
+ __ SmiUntag(ebx);
+ __ mov(eax, ecx);
+ __ jmp(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(ecx);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(above, &create_runtime);
+ __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+ __ j(equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ neg(ecx);
+ __ add(ecx, esp);
+ __ sar(ecx, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, ebx);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- edi : target
+ // -- eax : args (a FixedArray built from argumentsList)
+ // -- ebx : len (number of elements to push from args)
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ push(edx);
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+
+ __ PopReturnAddressTo(edx);
+ __ Move(ecx, Immediate(0));
+ Label done, loop;
+ __ bind(&loop);
+ __ cmp(ecx, ebx);
+ __ j(equal, &done, Label::kNear);
+ __ Push(
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ inc(ecx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(edx);
+
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ pop(edx);
+
+ __ Move(eax, ebx);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
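
The list-creation step recognises only two shapes inline — unmodified sloppy/strict arguments objects whose length still matches their elements array, and JSArrays with sufficiently fast elements — and sends everything else to the runtime. A sketch of that classification; the enums below are an illustrative subset, not V8's definitions:

    #include <cstdio>

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
    };

    enum class ListKind { kSloppyArguments, kStrictArguments, kJSArray, kOther };

    static const char* CreateListPath(ListKind kind, ElementsKind elements) {
      switch (kind) {
        case ListKind::kSloppyArguments:
        case ListKind::kStrictArguments:
          return "inline: reuse the arguments object's elements";
        case ListKind::kJSArray:
          if (elements > FAST_ELEMENTS || elements == FAST_HOLEY_SMI_ELEMENTS)
            return "runtime: CreateListFromArrayLike";  // the create_runtime path
          return "inline: reuse the JSArray's elements";
        default:
          return "runtime: CreateListFromArrayLike";
      }
    }

    int main() {
      std::printf("%s\n", CreateListPath(ListKind::kJSArray, FAST_ELEMENTS));
      std::printf("%s\n", CreateListPath(ListKind::kJSArray, FAST_HOLEY_ELEMENTS));
      return 0;
    }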
+
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
-
- Label convert, convert_global_proxy, convert_to_object, done_convert;
__ AssertFunction(edi);
- // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
- // slot is "classConstructor".
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that the function is not a "classConstructor".
+ Label class_constructor;
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(edx, SharedFunctionInfo::kFunctionKindByteOffset),
+ SharedFunctionInfo::kClassConstructorBitsWithinByte);
+ __ j(not_zero, &class_constructor);
+
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
- // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
// We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
__ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
(1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_zero, &done_convert);
{
- __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
-
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- ecx : the receiver
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
// -----------------------------------
- Label convert_receiver;
- __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
- __ j(above_equal, &done_convert);
- __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
- Label::kNear);
- __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
- Label::kNear);
- __ bind(&convert_global_proxy);
- {
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(ecx);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ j(above_equal, &done_convert);
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex,
+ &convert_global_proxy, Label::kNear);
+ __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(ecx);
+ }
+ __ jmp(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ Push(edi);
+ __ mov(eax, ecx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ecx, eax);
+ __ Pop(edi);
+ __ Pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
}
- __ jmp(&convert_receiver);
- __ bind(&convert_to_object);
- {
- // Convert receiver using ToObject.
- // TODO(bmeurer): Inline the allocation here to avoid building the frame
- // in the fast case? (fall back to AllocateInNewSpace?)
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ Push(eax);
- __ Push(edi);
- __ mov(eax, ecx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ecx, eax);
- __ Pop(edi);
- __ Pop(eax);
- __ SmiUntag(eax);
- }
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ bind(&convert_receiver);
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
}
__ bind(&done_convert);
@@ -1494,13 +1912,131 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm) {
__ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
- actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
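The mode parameter lets call sites that statically know the receiver skip work: kNullOrUndefined patches in the global proxy unconditionally, kNotNullOrUndefined omits the null/undefined tests, and kAny handles everything. A compact model of the conversion decision for sloppy-mode, non-native targets; the types are stand-ins:

    #include <cstdio>

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    enum class ReceiverKind { kNullOrUndefined, kJSReceiver, kOtherPrimitive };

    // What a sloppy-mode, non-native callee ends up seeing as its receiver.
    static const char* ConvertReceiver(ConvertReceiverMode mode, ReceiverKind r) {
      if (mode == ConvertReceiverMode::kNullOrUndefined) return "global proxy";
      if (r == ReceiverKind::kJSReceiver) return "unchanged";
      if (r == ReceiverKind::kNullOrUndefined &&
          mode != ConvertReceiverMode::kNotNullOrUndefined) {
        return "global proxy";
      }
      return "ToObject(receiver)";
    }

    int main() {
      std::printf("%s\n", ConvertReceiver(ConvertReceiverMode::kAny,
                                          ReceiverKind::kNullOrUndefined));
      std::printf("%s\n", ConvertReceiver(ConvertReceiverMode::kAny,
                                          ReceiverKind::kOtherPrimitive));
      return 0;
    }
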
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into ecx and length of that into ebx.
+ Label no_bound_arguments;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ test(ebx, ebx);
+ __ j(zero, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- ebx : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+ __ sub(esp, ecx);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ // Restore the stack pointer.
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Adjust effective number of arguments to include return address.
+ __ inc(eax);
+
+ // Relocate arguments and return address down the stack.
+ {
+ Label loop;
+ __ Set(ecx, 0);
+ __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+ __ bind(&loop);
+ __ fld_s(Operand(ebx, ecx, times_pointer_size, 0));
+ __ fstp_s(Operand(esp, ecx, times_pointer_size, 0));
+ __ inc(ecx);
+ __ cmp(ecx, eax);
+ __ j(less, &loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ bind(&loop);
+ __ dec(ebx);
+ __ fld_s(
+ FieldOperand(ecx, ebx, times_pointer_size, FixedArray::kHeaderSize));
+ __ fstp_s(Operand(esp, eax, times_pointer_size, 0));
+ __ lea(eax, Operand(eax, 1));
+ __ j(greater, &loop);
+ }
+
+ // Adjust effective number of arguments (eax contains the number of
+ // arguments from the call plus return address plus the number of
+ // [[BoundArguments]]), so we need to subtract one for the return address.
+ __ dec(eax);
+ }
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
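
In effect the helper splices the [[BoundArguments]] between the receiver and the call-site arguments, after sliding the arguments and return address down to make room. Modelled with a vector whose back() is the stack top — a sketch, not V8 code:

    #include <cstdio>
    #include <vector>

    // stack holds, bottom to top: receiver, arg1..argN, return address.
    // argc counts the call-site arguments (excluding the receiver).
    static void PushBoundArguments(std::vector<const char*>* stack, int argc,
                                   const std::vector<const char*>& bound) {
      // The assembly reserves bound.size() slots, relocates the top argc + 1
      // words (arguments plus return address) downwards, then copies the bound
      // arguments into the vacated slots just below the receiver.
      stack->insert(stack->end() - (argc + 1), bound.begin(), bound.end());
    }

    int main() {
      std::vector<const char*> stack = {"receiver", "arg1", "arg2", "ret"};
      PushBoundArguments(&stack, 2, {"bound1", "bound2"});
      for (const char* s : stack) std::printf("%s ", s);
      std::printf("\n");  // receiver bound1 bound2 arg1 arg2 ret
      return 0;
    }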
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Patch the receiver to [[BoundThis]].
+ __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(ExternalReference(
+ Builtins::kCall_ReceiverIsAny, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
// static
-void Builtins::Generate_Call(MacroAssembler* masm) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -1510,16 +2046,24 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(edi);
- __ jmp(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ PushReturnAddressFrom(ecx);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ add(eax, Immediate(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1531,14 +2075,16 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
- __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
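
The reworked Generate_Call replaces the old call-trap shortcut with a type dispatch: plain JSFunctions go to CallFunction with the requested receiver-conversion mode, bound functions to the builtin above, proxies to a spec-correct runtime fallback, and remaining callables through the call-as-function delegate. Sketched as a decision table (enum names are stand-ins, not V8's):

enum class InstanceType { kJSFunction, kJSBoundFunction, kJSProxy, kOther };
enum class CallPath {
  kCallFunction,       // direct JSFunction call (receiver conversion per mode)
  kCallBoundFunction,  // bound-function builtin above
  kProxyRuntime,       // Runtime::kJSProxyCall, the spec-correct [[Call]]
  kDelegate,           // call-as-function delegate, receiver already patched
  kThrow               // Runtime::kThrowCalledNonCallable
};

CallPath DispatchCall(InstanceType type, bool is_callable) {
  switch (type) {
    case InstanceType::kJSFunction:      return CallPath::kCallFunction;
    case InstanceType::kJSBoundFunction: return CallPath::kCallBoundFunction;
    case InstanceType::kJSProxy:         return CallPath::kProxyRuntime;
    case InstanceType::kOther:
      return is_callable ? CallPath::kDelegate : CallPath::kThrow;
  }
  return CallPath::kThrow;  // unreachable; silences -Wreturn-type
}
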
@@ -1547,10 +2093,9 @@ void Builtins::Generate_Call(MacroAssembler* masm) {
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (checked to be a JSFunction)
+ // -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(edx);
__ AssertFunction(edi);
// Calling convention for function specific ConstructStubs require
@@ -1567,17 +2112,54 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target (checked to be a constructor)
+ // -- edi : the constructor to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ cmp(edi, edx);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(
+ ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edi : the constructor to call (checked to be a JSProxy)
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (checked to be a JSFunctionProxy)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ // Include the pushed new_target, constructor and the receiver.
+ __ add(eax, Immediate(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
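
The runtime path this tail-calls implements the full ES2015 Proxy [[Construct]], which the removed trap-load shortcut did not: look up the construct trap, defer to the target when absent, and reject non-object trap results. Roughly, under illustrative types (OrdinaryConstruct is an assumed helper, not a real API):

#include <stdexcept>
#include <vector>

struct Object {};  // stand-in for a JS receiver

// Assumed: ordinary [[Construct]] on the proxy's target, provided elsewhere.
Object* OrdinaryConstruct(Object* target, const std::vector<Object*>& args,
                          Object* new_target);

struct Proxy {
  Object* target = nullptr;
  Object* (*construct_trap)(Object*, const std::vector<Object*>&,
                            Object*) = nullptr;  // null when no trap installed
};

Object* ProxyConstruct(Proxy& p, const std::vector<Object*>& args,
                       Object* new_target) {
  if (p.construct_trap == nullptr)  // no trap: construct the target directly
    return OrdinaryConstruct(p.target, args, new_target);
  Object* result = p.construct_trap(p.target, args, new_target);
  if (result == nullptr)  // the trap must produce an object
    throw std::runtime_error("proxy [[Construct]] trap returned a non-object");
  return result;
}
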
@@ -1585,23 +2167,32 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(edi, &non_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
- __ j(zero, &non_constructor, Label::kNear);
// Dispatch based on instance type.
- __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(equal, masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+
+ // Check if target has a [[Construct]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(equal, masm->isolate()->builtins()->ConstructProxy(),
RelocInfo::CODE_TARGET);
@@ -1618,46 +2209,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
-}
-
-
-// static
-void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
- // -- ebx : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- edi : the target to call (can be any Object).
-
- // Pop return address to allow tail-call after pushing arguments.
- __ Pop(edx);
-
- // Find the address of the last argument.
- __ mov(ecx, eax);
- __ add(ecx, Immediate(1)); // Add one for receiver.
- __ shl(ecx, kPointerSizeLog2);
- __ neg(ecx);
- __ add(ecx, ebx);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ jmp(&loop_check);
- __ bind(&loop_header);
- __ Push(Operand(ebx, 0));
- __ sub(ebx, Immediate(kPointerSize));
- __ bind(&loop_check);
- __ cmp(ebx, ecx);
- __ j(greater, &loop_header, Label::kNear);
-
- // Call the target.
- __ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
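
Worth noting in the reordering above: the Map::kIsConstructor bit is now tested before the bound-function and proxy dispatch, so a non-constructor bound function or revoked proxy falls through to ConstructedNonConstructable instead of being constructed. As a decision table (names illustrative, not V8's):

enum class TargetKind { kJSFunction, kJSBoundFunction, kJSProxy, kOther };
enum class ConstructPath {
  kConstructFunction,
  kConstructBoundFunction,
  kConstructProxy,
  kDelegate,         // call-as-constructor delegate (tail of the builtin)
  kNonConstructable  // ConstructedNonConstructable builtin
};

// JSFunctions skip the bit test entirely; everything else is only
// dispatched once Map::kIsConstructor is known to be set.
ConstructPath DispatchConstruct(TargetKind kind, bool is_constructor) {
  if (kind == TargetKind::kJSFunction) return ConstructPath::kConstructFunction;
  if (!is_constructor) return ConstructPath::kNonConstructable;
  switch (kind) {
    case TargetKind::kJSBoundFunction:
      return ConstructPath::kConstructBoundFunction;
    case TargetKind::kJSProxy:
      return ConstructPath::kConstructProxy;
    default:
      return ConstructPath::kDelegate;
  }
}
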
@@ -1665,17 +2218,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
+ // -- edx : new target (passed through to callee)
// -- edi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
- Label stack_overflow;
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
Label enough, too_few;
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1684,6 +2234,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1720,11 +2271,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Remember expected arguments in ecx.
__ mov(ecx, ebx);
@@ -1763,8 +2315,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// eax : expected number of arguments
+ // edx : new target (passed through to callee)
// edi : function (passed through to callee)
- __ call(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ call(ecx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1777,18 +2331,128 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ jmp(ecx);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
}
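
Besides moving the stack check after frame entry and reloading the code entry into ecx (edx now carries the new target), the trampoline's job is unchanged. The adaptation it performs can be stated in a few lines of ordinary C++ (a sketch, not V8 code):

#include <vector>

struct Value {
  bool is_undefined = true;  // default-constructed stands in for undefined
};

// What the adaptor frame presents to the callee: exactly `expected` slots,
// copied from the actuals where available and padded with undefined where
// not. (Surplus actuals stay reachable below the adaptor frame.)
std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                  size_t expected) {
  std::vector<Value> adapted;
  adapted.reserve(expected);
  for (size_t i = 0; i < expected; ++i) {
    adapted.push_back(i < actual.size() ? actual[i] : Value{});
  }
  return adapted;
}
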
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Label* receiver_check_failed) {
+ // If there is no signature, return the holder.
+ __ CompareRoot(FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset),
+ Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // Walk the prototype chain.
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(scratch0, scratch0, scratch1);
+ __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+ Label next_prototype;
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Get the constructor's signature.
+ __ mov(scratch0,
+ FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(scratch0,
+ FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(scratch0, FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+ __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Otherwise load the parent function template and iterate.
+ __ mov(scratch0,
+ FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+ __ jmp(&function_template_loop, Label::kNear);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, receiver_check_failed);
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ test(FieldOperand(scratch0, Map::kBitField3Offset),
+ Immediate(Map::IsHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+ // Iterate.
+ __ jmp(&prototype_loop_start, Label::kNear);
+
+ __ bind(&receiver_check_passed);
+}
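+
CompatibleReceiverCheck walks two chains at once: the receiver's hidden-prototype chain, and for each prototype's constructor, the FunctionTemplateInfo parent chain, looking for the callee's signature. A pointer-chasing sketch with illustrative structs (not V8's types):

struct TemplateInfo {
  const TemplateInfo* signature = nullptr;  // required receiver template
  const TemplateInfo* parent = nullptr;     // next template up the chain
};

struct ObjectShape {
  const TemplateInfo* constructor_template = nullptr;  // via map -> constructor
  const ObjectShape* hidden_prototype = nullptr;       // null terminates walk
};

// Accept the receiver if any template inherited by the constructor of any
// hidden prototype matches the callee's signature; otherwise reject.
bool IsCompatibleReceiver(const ObjectShape* receiver,
                          const TemplateInfo& callee) {
  if (callee.signature == nullptr) return true;  // unsignatured: accept all
  for (const ObjectShape* o = receiver; o != nullptr; o = o->hidden_prototype) {
    for (const TemplateInfo* t = o->constructor_template; t != nullptr;
         t = t->parent) {
      if (t == callee.signature) return true;
    }
  }
  return false;
}
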
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments (not including the receiver)
+ // -- edi : callee
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[eax * 4] : first argument
+ // -- esp[(eax + 1) * 4] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+ __ Push(eax);
+ CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+ __ Pop(eax);
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+ __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(edx);
+
+  // Compatible receiver check failed: pop the return address, arguments,
+  // and receiver, then throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ __ Pop(eax);
+ __ PopReturnAddressTo(ebx);
+ __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+ __ add(esp, eax);
+ __ PushReturnAddressFrom(ebx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+ }
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1796,7 +2460,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
Label skip;
@@ -1835,7 +2499,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/chromium/v8/src/x87/code-stubs-x87.cc b/chromium/v8/src/x87/code-stubs-x87.cc
index 0d59b180680..1da5f41a886 100644
--- a/chromium/v8/src/x87/code-stubs-x87.cc
+++ b/chromium/v8/src/x87/code-stubs-x87.cc
@@ -334,7 +334,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -388,7 +388,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ push(scratch); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -488,7 +488,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -520,7 +520,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -593,8 +593,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// esp[8] = parameter count (tagged)
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
+ __ mov(edi, NativeContextOperand());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -756,7 +755,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -797,10 +796,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
- const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
- __ mov(edi, Operand(edi, offset));
+ __ mov(edi, NativeContextOperand());
+ __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -852,7 +849,35 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // ebx : rest parameter index (tagged)
+ // esp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ pop(eax); // Save return address.
+ __ push(ecx); // Push number of parameters.
+ __ push(edx); // Push parameters pointer.
+ __ push(ebx); // Push rest parameter index.
+ __ push(eax); // Push return address.
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
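
The stub only patches the count and parameters pointer when the caller sits behind an arguments adaptor frame; the rest-array construction itself happens in Runtime::kNewRestParam. What that runtime call produces, sketched in plain C++ (illustrative, not the runtime's code):

#include <vector>

struct Value {};  // stand-in for a tagged JS value

// Every actual argument from rest_index onward becomes an element of the
// rest array; no extras means an empty array, never undefined.
std::vector<Value> NewRestParam(const std::vector<Value>& actual_args,
                                size_t rest_index) {
  if (rest_index >= actual_args.size()) return {};
  return std::vector<Value>(actual_args.begin() + rest_index,
                            actual_args.end());
}
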
@@ -861,7 +886,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1139,7 +1164,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(equal, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure to match, return null.
@@ -1225,7 +1250,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1380,7 +1405,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
@@ -1448,8 +1473,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
@@ -1463,7 +1488,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1559,9 +1584,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
__ j(below, &runtime_call, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
@@ -1590,8 +1615,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
if (cc == equal) {
__ push(ecx);
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
__ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
@@ -1600,9 +1624,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -1610,16 +1633,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
- if (is_super) {
- __ pop(ecx);
- }
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1630,29 +1648,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
__ push(edi);
__ push(edx);
__ push(ebx);
- if (is_super) {
- __ push(ecx);
- }
__ CallStub(stub);
- if (is_super) {
- __ pop(ecx);
- }
__ pop(ebx);
__ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
-
- if (is_super) {
- __ push(ecx);
- }
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -1660,7 +1668,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -1726,118 +1733,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
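
With the is_super plumbing gone, GenerateRecordCallTarget is a plain feedback-slot state machine. Its transitions, assuming the usual uninitialized/monomorphic/megamorphic reading of the slot (a sketch, not V8 code):

enum class FeedbackState { kUninitialized, kMonomorphic, kMegamorphic };

// Transitions only move "downward"; megamorphic is sticky.
FeedbackState RecordCallTarget(FeedbackState state, bool same_target) {
  switch (state) {
    case FeedbackState::kUninitialized:
      // First call: cache the target (a WeakCell, or an AllocationSite when
      // the target is the Array function).
      return FeedbackState::kMonomorphic;
    case FeedbackState::kMonomorphic:
      // A different target poisons the slot for good.
      return same_target ? FeedbackState::kMonomorphic
                         : FeedbackState::kMegamorphic;
    case FeedbackState::kMegamorphic:
      return FeedbackState::kMegamorphic;
  }
  return state;  // unreachable
}
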
-static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
- // Do not transform the receiver for strict mode functions.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, cont);
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, cont);
-}
-
-
-static void EmitSlowCase(Isolate* isolate, MacroAssembler* masm, int argc) {
- __ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
- // Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(edi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ pop(edi);
- }
- __ mov(Operand(esp, (argc + 1) * kPointerSize), eax);
- __ jmp(cont);
-}
-
-
-static void CallFunctionNoFeedback(MacroAssembler* masm,
- int argc, bool needs_checks,
- bool call_as_method) {
- // edi : the function to call
- Label slow, wrap, cont;
-
- if (needs_checks) {
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc);
-
- if (call_as_method) {
- if (needs_checks) {
- EmitContinueIfStrictOrNative(masm, &cont);
- }
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- if (needs_checks) {
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
- } else {
- __ jmp(&wrap);
- }
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- if (needs_checks) {
- // Slow-case: Non-function called.
- __ bind(&slow);
- EmitSlowCase(masm->isolate(), masm, argc);
- }
-
- if (call_as_method) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
-}
-
-
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
- if (IsSuperConstructorCall()) {
- __ push(ecx);
- }
-
Label non_function;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function);
@@ -1845,29 +1756,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map =
- isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(ebx);
- }
+ __ AssertUndefinedOrAllocationSite(ebx);
- if (IsSuperConstructorCall()) {
- __ pop(edx);
- } else {
- // Pass original constructor to construct stub.
- __ mov(edx, edi);
- }
+ // Pass new target to construct stub.
+ __ mov(edx, edi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1877,7 +1781,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ jmp(ecx);
__ bind(&non_function);
- if (IsSuperConstructorCall()) __ Drop(1);
__ mov(edx, edi);
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1915,13 +1818,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, slow_start;
- Label slow, wrap, cont;
- Label have_js_function;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1955,36 +1852,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
- __ bind(&have_js_function);
- if (CallAsMethod()) {
- EmitContinueIfStrictOrNative(masm, &cont);
-
- // Load the receiver from the stack.
- __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
-
- __ JumpIfSmi(eax, &wrap);
-
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &wrap);
-
- __ bind(&cont);
- }
-
- __ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
-
- __ bind(&slow);
- EmitSlowCase(isolate, masm, argc);
-
- if (CallAsMethod()) {
- __ bind(&wrap);
- EmitWrapCase(masm, argc, &cont);
- }
+ __ bind(&call_function);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- __ j(equal, &slow_start);
+ __ j(equal, &call);
// Check if we have an allocation site.
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -2013,10 +1890,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ mov(
FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- // We have to update statistics for runtime profiling.
- __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
- __ jmp(&slow_start);
+
+ __ bind(&call);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2033,8 +1911,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(edi, ecx);
__ j(equal, &miss);
- // Update stats.
- __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+ // Make sure the function belongs to the same native context.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
+ __ cmp(ecx, NativeContextOperand());
+ __ j(not_equal, &miss);
// Initialize the call counter.
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
@@ -2053,23 +1934,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
}
- __ jmp(&have_js_function);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
- // the slow case
- __ bind(&slow_start);
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &slow);
-
- // Goto slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
- __ jmp(&have_js_function);
+ __ jmp(&call);
// Unreachable
__ int3();
@@ -2085,7 +1957,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2137,11 +2009,23 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// esp: stack pointer (restored after C call)
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
+ //
+ // If argv_in_register():
+ // ecx: pointer to the first argument
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles());
+ if (argv_in_register()) {
+ DCHECK(!save_doubles());
+ __ EnterApiExitFrame(3);
+
+ // Move argc and argv into the correct registers.
+ __ mov(esi, ecx);
+ __ mov(edi, eax);
+ } else {
+ __ EnterExitFrame(save_doubles());
+ }
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
@@ -2186,7 +2070,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles());
+ __ LeaveExitFrame(save_doubles(), !argv_in_register());
__ ret(0);
// Handling of exception.
@@ -2385,14 +2269,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ mov(shared_info,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ BooleanBitTest(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- __ j(not_zero, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ mov(function_prototype,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2418,28 +2294,48 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
- Label done, loop;
+ Label done, loop, fast_runtime_fallback;
__ mov(eax, isolate()->factory()->true_value());
__ bind(&loop);
- __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+ // Check if the current object is a Proxy.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ __ j(equal, &fast_runtime_fallback, Label::kNear);
+
+ __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ j(equal, &done, Label::kNear);
- __ cmp(object_prototype, isolate()->factory()->null_value());
- __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ cmp(object, isolate()->factory()->null_value());
__ j(not_equal, &loop);
__ mov(eax, isolate()->factory()->false_value());
+
__ bind(&done);
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(0);
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function_prototype);
+ __ PushReturnAddressFrom(scratch);
+ // Invalidate the instanceof cache.
+ __ Move(eax, Immediate(Smi::FromInt(0)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
- __ pop(scratch); // Pop return address.
- __ push(object); // Push {object}.
- __ push(function); // Push {function}.
- __ push(scratch); // Push return address.
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function);
+ __ PushReturnAddressFrom(scratch);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
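
The rewritten fast path keeps walking the prototype chain inline but now bails out to Runtime::kHasInPrototypeChain for access-checked objects and proxies, whose getPrototypeOf trap must be honored; the instanceof cache is invalidated before the fallback. The walk, sketched with illustrative structs (not V8's types):

struct HeapObjectInfo {
  const HeapObjectInfo* prototype = nullptr;  // null ends the chain
  bool needs_access_check = false;
  bool is_proxy = false;
};

enum class ChainResult { kTrue, kFalse, kRuntime };

// The inline walk gives up as soon as an object could observe or alter it.
ChainResult HasInPrototypeChainFast(const HeapObjectInfo* object,
                                    const HeapObjectInfo* target_prototype) {
  for (const HeapObjectInfo* o = object; o != nullptr;) {
    if (o->needs_access_check || o->is_proxy) return ChainResult::kRuntime;
    o = o->prototype;
    if (o == target_prototype) return ChainResult::kTrue;
  }
  return ChainResult::kFalse;
}
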
@@ -2498,11 +2394,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -2532,7 +2428,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2578,7 +2474,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2828,7 +2724,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// eax: string
@@ -2873,7 +2769,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -2886,7 +2782,26 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
+}
+
+
+void ToLengthStub::Generate(MacroAssembler* masm) {
+  // The ToLength stub takes one argument in eax.
+ Label not_smi, positive_smi;
+ __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, eax);
+ __ j(greater_equal, &positive_smi, Label::kNear);
+ __ xor_(eax, eax);
+ __ bind(&positive_smi);
+ __ Ret();
+ __ bind(&not_smi);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToLength);
}
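
The new stub returns non-negative Smis as-is, clamps negative Smis to zero, and leaves everything else to Runtime::kToLength. The full numeric clamp that runtime implements, per ES2015 ToLength (a sketch of the semantics, not V8's code):

#include <cmath>

// NaN and non-positive values clamp to 0; the top end clamps to 2^53 - 1.
double ToLength(double value) {
  if (std::isnan(value) || value <= 0) return 0;
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  double integer = std::trunc(value);
  return integer < kMaxSafeInteger ? integer : kMaxSafeInteger;
}
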
@@ -2921,7 +2836,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3084,7 +2999,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Push(edx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3129,7 +3044,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
@@ -3397,9 +3312,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ push(right);
__ push(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3407,19 +3322,20 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
- DCHECK(GetCondition() == equal);
+ DCHECK_EQ(equal, GetCondition());
__ sub(eax, edx);
__ ret(0);
@@ -3428,7 +3344,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ mov(ecx, edx);
@@ -3445,14 +3361,14 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(eax, edx);
__ ret(0);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(eax);
__ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3469,7 +3385,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op())));
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
@@ -3860,11 +3776,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
__ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object, Label::kNear);
__ pop(regs_.object());
regs_.Restore(masm);
@@ -3884,89 +3799,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : element value to store
- // -- ecx : element index as smi
- // -- esp[0] : return address
- // -- esp[4] : array literal index in function
- // -- esp[8] : array literal
- // clobbers ebx, edx, edi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label slow_elements_from_double;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-
- __ CheckFastElements(edi, &double_elements);
-
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(edi); // Pop return address and remember to put back later for tail
- // call.
- __ push(ebx);
- __ push(ecx);
- __ push(eax);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(edx);
- __ push(edi); // Return return address so that tail call returns to right
- // place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- __ bind(&slow_elements_from_double);
- __ pop(edx);
- __ jmp(&slow_elements);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize), eax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ push(edx);
- __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(eax,
- edx,
- ecx,
- edi,
- &slow_elements_from_double,
- false);
- __ pop(edx);
- __ ret(0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4241,13 +4073,14 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register key, Register vector,
Register slot, Register feedback,
- Label* miss) {
+ bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next, next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
+ Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
__ push(receiver);
__ push(vector);
@@ -4279,16 +4112,18 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(Operand::StaticVariable(virtual_register));
// Polymorphic, we have to loop from 2 to N
-
- // TODO(mvstanton): I think there is a bug here, we are assuming the
- // array has more than one map/handler pair, but we call this function in the
- // keyed store with a string key case, where it might be just an array of two
- // elements.
-
__ bind(&start_polymorphic);
__ push(key);
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(2)));
+
+ if (!is_polymorphic) {
+ // If is_polymorphic is false, we may only have a two element array.
+ // Check against length now in that case.
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(greater_equal, &pop_and_miss);
+ }
+
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -4310,6 +4145,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
__ j(less, &next_loop);
// We exhausted our array of map handler pairs.
+ __ bind(&pop_and_miss);
__ pop(key);
__ pop(vector);
__ pop(receiver);
@@ -4328,7 +4164,7 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
// The store ic value is on the stack.
DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
// feedback initially contains the feedback array
Label compare_smi_map;
@@ -4390,7 +4226,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&try_array);
__ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
+ &miss);
__ bind(&not_array);
__ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
@@ -4435,13 +4272,16 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
Label transition_call;
Label pop_and_miss;
ExternalReference virtual_register =
- ExternalReference::vector_store_virtual_register(masm->isolate());
+ ExternalReference::virtual_handler_register(masm->isolate());
+ ExternalReference virtual_slot =
+ ExternalReference::virtual_slot_register(masm->isolate());
__ push(receiver);
__ push(vector);
Register receiver_map = receiver;
Register cached_map = vector;
+ Register value = StoreDescriptor::ValueRegister();
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
@@ -4450,11 +4290,17 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// Polymorphic, we have to loop from 0 to N - 1
__ push(key);
- // On the stack we have:
- // key (esp)
- // vector
- // receiver
- // value
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, vector, slot in registers.
+ // - handler in virtual register.
Register counter = key;
__ mov(counter, Immediate(Smi::FromInt(0)));
__ bind(&next_loop);
@@ -4473,32 +4319,39 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ pop(receiver);
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
__ mov(Operand::StaticVariable(virtual_register), feedback);
- __ pop(feedback); // Pop "value".
+ __ pop(value);
__ jmp(Operand::StaticVariable(virtual_register));
__ bind(&transition_call);
- // Oh holy hell this will be tough.
- // The map goes in vector register.
- __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(receiver, &pop_and_miss);
- // slot goes on the stack, and holds return address.
- __ xchg(slot, Operand(esp, 4 * kPointerSize));
- // Get the handler in value.
+ // Current stack layout:
+ // - esp[0] -- key
+ // - esp[4] -- vector
+ // - esp[8] -- receiver
+ // - esp[12] -- value
+ // - esp[16] -- return address
+ //
+ // Required stack layout for handler call:
+ // - esp[0] -- return address
+ // - receiver, key, value, map, vector in registers.
+ // - handler and slot in virtual registers.
+ __ mov(Operand::StaticVariable(virtual_slot), slot);
__ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize + 2 * kPointerSize));
__ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), feedback);
+
+ __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(cached_map, &pop_and_miss);
+ DCHECK(!cached_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(VectorStoreTransitionDescriptor::MapRegister(), cached_map);
+
// Pop key into place.
__ pop(key);
- // Put the return address on top of stack, vector goes in slot.
- __ xchg(slot, Operand(esp, 0));
- // put the map on the stack, receiver holds receiver.
- __ xchg(receiver, Operand(esp, 1 * kPointerSize));
- // put the vector on the stack, slot holds value.
- __ xchg(slot, Operand(esp, 2 * kPointerSize));
- // feedback (value) = value, slot = handler.
- __ xchg(feedback, slot);
- __ jmp(slot);
+ __ pop(vector);
+ __ pop(receiver);
+ __ pop(value);
+ __ jmp(Operand::StaticVariable(virtual_register));
__ bind(&prepare_next);
__ add(counter, Immediate(Smi::FromInt(3)));
@@ -4565,7 +4418,8 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// at least one map/handler pair.
__ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
+ &miss);
__ bind(&miss);
__ pop(value);
@@ -4826,6 +4680,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label subclassing;
+ // Enter the context of the Array function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
__ cmp(edx, edi);
__ j(not_equal, &subclassing);
@@ -4847,27 +4704,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ pop(ecx); // return address.
- __ push(edi);
- __ push(edx);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(eax, Immediate(2));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ add(eax, Immediate(3));
break;
case NONE:
- __ mov(eax, Immediate(2));
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ mov(eax, Immediate(3));
break;
case ONE:
- __ mov(eax, Immediate(3));
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ __ mov(eax, Immediate(4));
break;
}
-
- __ push(ecx);
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(ebx);
+ __ PushReturnAddressFrom(ecx);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4984,7 +4840,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Pop(result_reg); // Pop return address.
__ Push(slot_reg);
__ Push(result_reg); // Push return address.
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5107,8 +4963,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(cell_reg); // Push return address.
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5245,7 +5100,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
__ cmp(map, isolate->factory()->heap_number_map());
@@ -5279,7 +5134,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
diff --git a/chromium/v8/src/x87/code-stubs-x87.h b/chromium/v8/src/x87/code-stubs-x87.h
index 25fc4d77182..a6a2a130573 100644
--- a/chromium/v8/src/x87/code-stubs-x87.h
+++ b/chromium/v8/src/x87/code-stubs-x87.h
@@ -309,13 +309,15 @@ class RecordWriteStub: public PlatformCodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ Register candidate = Register::from_code(i);
+ if (candidate.IsAllocatable()) {
+ if (candidate.is(ecx)) continue;
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
}
UNREACHABLE();
return no_reg;
@@ -374,6 +376,7 @@ class RecordWriteStub: public PlatformCodeStub {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_CODE_STUBS_X87_H_
diff --git a/chromium/v8/src/x87/codegen-x87.cc b/chromium/v8/src/x87/codegen-x87.cc
index 5df3f1f0261..c66166f7f0e 100644
--- a/chromium/v8/src/x87/codegen-x87.cc
+++ b/chromium/v8/src/x87/codegen-x87.cc
@@ -33,16 +33,34 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-
-UnaryMathFunction CreateExpFunction() {
- // No SSE2 support
- return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
+ return nullptr;
}
-UnaryMathFunction CreateSqrtFunction() {
- // No SSE2 support
- return &std::sqrt;
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+ size_t actual_size;
+ // Allocate buffer in executable space.
+ byte* buffer =
+ static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == nullptr) return nullptr;
+
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
+ // Load double input into registers.
+ __ fld_d(MemOperand(esp, 4));
+ __ X87SetFPUCW(0x027F);
+ __ fsqrt();
+ __ X87SetFPUCW(0x037F);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+ Assembler::FlushICache(isolate, buffer, actual_size);
+ base::OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
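
Note that CreateExpFunction now returns nullptr while CreateSqrtFunction emits a real x87 stub. A stand-alone sketch of the fallback contract this implies, assuming callers use the portable C library routine when handed nullptr (the old stubs returned &std::exp / &std::sqrt directly):

    #include <cmath>
    using UnaryFn = double (*)(double);
    UnaryFn CreateFastExp() { return nullptr; }  // no fast path on this port
    double Exp(double x) {
      static UnaryFn fast = CreateFastExp();
      return fast != nullptr ? fast(x) : std::exp(x);
    }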
@@ -76,13 +94,14 @@ class LabelConverter {
};
-MemMoveFunction CreateMemMoveFunction() {
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return NULL;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ if (buffer == nullptr) return nullptr;
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
@@ -182,7 +201,7 @@ MemMoveFunction CreateMemMoveFunction() {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
@@ -571,9 +590,11 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
- CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ CodePatcher patcher(isolate, young_sequence_.start(),
+ young_sequence_.length());
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
@@ -620,7 +641,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length);
+ CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
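
CodeAgingHelper, CodePatcher and the patching sites above all gain an Isolate* so the final instruction-cache flush can be routed through it. A stand-alone model of that ownership (toy types, not V8's):

    #include <cstddef>
    struct Isolate {};
    void FlushICache(Isolate* isolate, void* start, size_t size) { /* ... */ }
    class CodePatcher {
     public:
      CodePatcher(Isolate* isolate, unsigned char* address, int size)
          : isolate_(isolate), address_(address), size_(size) {}
      ~CodePatcher() { FlushICache(isolate_, address_, size_); }  // on scope exit
     private:
      Isolate* isolate_;
      unsigned char* address_;
      int size_;
    };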
diff --git a/chromium/v8/src/x87/codegen-x87.h b/chromium/v8/src/x87/codegen-x87.h
index c23e8668dad..170b40397ab 100644
--- a/chromium/v8/src/x87/codegen-x87.h
+++ b/chromium/v8/src/x87/codegen-x87.h
@@ -5,7 +5,7 @@
#ifndef V8_X87_CODEGEN_X87_H_
#define V8_X87_CODEGEN_X87_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -28,6 +28,7 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_CODEGEN_X87_H_
diff --git a/chromium/v8/src/x87/deoptimizer-x87.cc b/chromium/v8/src/x87/deoptimizer-x87.cc
index 3a5d2640be8..5a1951a0ede 100644
--- a/chromium/v8/src/x87/deoptimizer-x87.cc
+++ b/chromium/v8/src/x87/deoptimizer-x87.cc
@@ -7,6 +7,7 @@
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
#include "src/safepoint-table.h"
#include "src/x87/frames-x87.h"
@@ -74,7 +75,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@@ -100,14 +101,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->int3();
}
}
@@ -136,14 +138,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(call_address, patch_size());
+ CodePatcher patcher(isolate, call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo rinfo(isolate, call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
+ reinterpret_cast<intptr_t>(deopt_entry), NULL);
reloc_info_writer.Write(&rinfo);
DCHECK_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@@ -156,18 +157,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
// Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- DCHECK(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
+ const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
+
+ // Right trim the relocation info to free up remaining space.
+ const int delta = reloc_info->length() - new_reloc_length;
+ if (delta > 0) {
+ isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ reloc_info, delta);
+ }
}
@@ -181,7 +179,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < X87Register::kMaxNumRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -203,7 +201,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < X87Register::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -233,8 +231,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize =
- kDoubleSize * X87Register::kMaxNumAllocatableRegisters;
+ const int kDoubleRegsSize = kDoubleSize * X87Register::kMaxNumRegisters;
// Reserve space for x87 fp registers.
__ sub(esp, Immediate(kDoubleRegsSize));
@@ -312,10 +309,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
// Fill in the double input registers.
for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize;
__ fld_d(Operand(esp, src_offset));
__ fstp_d(Operand(ebx, dst_offset));
}
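
The deoptimizer hunk above stops assuming double-register codes are dense 0..N-1 and instead asks the RegisterConfiguration for each allocatable code. A stand-alone model of that remapping (illustrative values, not the real table):

    struct RegisterConfigurationModel {
      int allocatable_double_codes[4] = {0, 2, 3, 5};  // may be sparse
      int num_allocatable_double_registers = 4;
      int GetAllocatableDoubleCode(int i) const {
        return allocatable_double_codes[i];
      }
    };
    void FillDoubleRegisters(const RegisterConfigurationModel& config,
                             double* frame, const double* stack) {
      for (int i = 0; i < config.num_allocatable_double_registers; ++i) {
        int code = config.GetAllocatableDoubleCode(i);
        frame[code] = stack[code];  // both offsets keyed by register code
      }
    }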
diff --git a/chromium/v8/src/x87/frames-x87.h b/chromium/v8/src/x87/frames-x87.h
index 89e6ebda8cc..1b900784cc1 100644
--- a/chromium/v8/src/x87/frames-x87.h
+++ b/chromium/v8/src/x87/frames-x87.h
@@ -80,6 +80,7 @@ class JavaScriptFrameConstants : public AllStatic {
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_FRAMES_X87_H_
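
This header's only change is the namespace-closing convention, which recurs throughout the patch. Shown stand-alone:

    namespace v8 {
    namespace internal {
    // ...
    }  // namespace internal
    }  // namespace v8
    // replaces the old single-line form: } }  // namespace v8::internal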
diff --git a/chromium/v8/src/x87/interface-descriptors-x87.cc b/chromium/v8/src/x87/interface-descriptors-x87.cc
index 36962351653..5bd84fc2988 100644
--- a/chromium/v8/src/x87/interface-descriptors-x87.cc
+++ b/chromium/v8/src/x87/interface-descriptors-x87.cc
@@ -35,12 +35,10 @@ const Register VectorStoreTransitionDescriptor::SlotRegister() {
}
-const Register VectorStoreTransitionDescriptor::VectorRegister() {
- return no_reg;
-}
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return ebx; }
-const Register VectorStoreTransitionDescriptor::MapRegister() { return no_reg; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
@@ -70,6 +68,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
+
+
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -85,14 +88,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
- // The other three parameters are on the stack in ia32.
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ebx};
@@ -116,6 +111,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToLengthDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToStringDescriptor::ReceiverRegister() { return eax; }
@@ -137,6 +136,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi, eax, ecx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx, ecx};
@@ -197,7 +203,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
+ // ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
@@ -216,6 +222,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ // ebx : allocation site or undefined
+ Register registers[] = {edi, edx, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ Register registers[] = {edi, edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, ebx, eax};
@@ -237,6 +264,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
}
+void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
@@ -347,6 +381,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // JSFunction
+ edx, // the new target
eax, // actual number of arguments
ebx, // expected number of arguments
};
@@ -379,33 +414,35 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
+ eax, // argument count (not including receiver)
+ ebx, // address of first argument
+ edi // the target callable to be called
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
- ebx // type vector
+ eax, // argument count (not including receiver)
+ edx, // new target
+ edi, // constructor
+ ebx, // address of first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- eax, // argument count (including receiver)
- ebx, // address of first argument
- edi // the target callable to be call
+ eax, // argument count (argc)
+ ecx, // address of first argument (argv)
+ ebx // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
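
Each InitializePlatformSpecific above follows one pattern: hand the platform-independent descriptor layer an ordered register list. A stand-alone model (toy types; the real CallInterfaceDescriptorData is a V8-internal class):

    #include <cstddef>
    struct Reg { int code; };
    struct DescriptorData {
      void InitializePlatformSpecific(size_t count, const Reg* regs) {
        // record the calling convention for this descriptor
        (void)count; (void)regs;
      }
    };
    void InitInterpreterCEntry(DescriptorData* data) {
      const Reg eax{0}, ecx{1}, ebx{3};
      const Reg registers[] = {
          eax,  // argument count (argc)
          ecx,  // address of first argument (argv)
          ebx   // the runtime function to call
      };
      data->InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                       registers);
    }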
diff --git a/chromium/v8/src/x87/macro-assembler-x87.cc b/chromium/v8/src/x87/macro-assembler-x87.cc
index c34a47a2517..7a0beb57bc7 100644
--- a/chromium/v8/src/x87/macro-assembler-x87.cc
+++ b/chromium/v8/src/x87/macro-assembler-x87.cc
@@ -19,14 +19,14 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
- // TODO(titzer): should we just use a null handle here instead?
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
+ if (create_code_object == CodeObjectRequired::kYes) {
+ code_object_ =
+ Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
}
@@ -793,6 +793,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -943,22 +955,27 @@ void MacroAssembler::EnterApiExitFrame(int argc) {
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore FPU state.
if (save_doubles) {
const int offset = -2 * kPointerSize;
frstor(MemOperand(ebp, offset - 108));
}
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
+ if (pop_arguments) {
+ // Get the return address from the stack and restore the frame pointer.
+ mov(ecx, Operand(ebp, 1 * kPointerSize));
+ mov(ebp, Operand(ebp, 0 * kPointerSize));
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
+ // Pop the arguments and the receiver from the caller stack.
+ lea(esp, Operand(esi, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(ecx);
+ // Push the return address to get ready to return.
+ push(ecx);
+ } else {
+ // Otherwise just leave the exit frame.
+ leave();
+ }
LeaveExitFrameEpilogue(true);
}
@@ -1030,10 +1047,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch1, FieldOperand(scratch1, offset));
- mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
+ mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1628,6 +1642,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ mov(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
@@ -1695,16 +1730,16 @@ void MacroAssembler::CopyBytes(Register source,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
+ mov(Operand(current_address, 0), filler);
+ add(current_address, Immediate(kPointerSize));
bind(&entry);
- cmp(start_offset, end_offset);
+ cmp(current_address, end_address);
j(below, &loop);
}
@@ -1851,24 +1886,27 @@ void MacroAssembler::CallExternalReference(ExternalReference ref,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Move(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : argument num_arguments - 1
+ // ...
+ // -- esp[4 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- eax : number of arguments
+ // -----------------------------------
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(eax, Immediate(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
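
A stand-alone model of the new TailCallRuntime dispatch above: fixed-arity runtime functions (nargs >= 0) get their count materialized from the function table, so call sites drop the explicit counts (as in the hunks earlier, TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1) becomes TailCallRuntime(Runtime::kLoadGlobalViaContext)); variadic ones leave eax untouched.

    struct RuntimeFunctionModel { int nargs; int result_size; };
    RuntimeFunctionModel FunctionForId(int fid) { return {1, 1}; }  // illustrative
    void TailCallRuntimeModel(int fid, int* eax) {
      RuntimeFunctionModel f = FunctionForId(fid);
      // result_size is asserted to be 1 in the real code (DCHECK_EQ above).
      if (f.nargs >= 0) *eax = f.nargs;  // constant arity: set the count here
      // ... then jump to the runtime entry, as JumpToExternalReference does
    }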
@@ -1882,8 +1920,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1934,13 +1970,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call(adaptor, RelocInfo::CODE_TARGET);
@@ -1956,20 +1985,76 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ cmpb(Operand::StaticVariable(step_in_enabled), 0);
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(edi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ mov(edx, isolate()->factory()->undefined_value());
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ Label::kNear, call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -1983,7 +2068,7 @@ void MacroAssembler::InvokeCode(const Operand& code,
}
-void MacroAssembler::InvokeFunction(Register fun,
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -1991,14 +2076,13 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
}
@@ -2013,8 +2097,7 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
}
@@ -2033,35 +2116,21 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinFunction(edi, native_context_index);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ mov(target, NativeContextOperand());
mov(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, native_context_index);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -2089,8 +2158,8 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
- mov(dst, GlobalObjectOperand());
- mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+ mov(dst, NativeContextOperand());
+ mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}
@@ -2100,35 +2169,26 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
+ mov(scratch, NativeContextOperand());
+ cmp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
+ mov(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(function,
- FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the native context from the current context.
+ mov(function, NativeContextOperand());
// Load the function from the native context.
- mov(function, Operand(function, Context::SlotOffset(index)));
+ mov(function, ContextOperand(function, index));
}
@@ -2312,6 +2372,27 @@ void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
}
+void MacroAssembler::Tzcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for TZCNT (with ABM/BMI1).
+ Label not_zero_src;
+ bsf(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
+ bind(&not_zero_src);
+}
+
+
+void MacroAssembler::Popcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for POPCNT (with POPCNT)
+ // if (CpuFeatures::IsSupported(POPCNT)) {
+ // CpuFeatureScope scope(this, POPCNT);
+ // popcnt(dst, src);
+ // return;
+ // }
+ UNREACHABLE();
+}
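
Reference semantics for the Tzcnt macro above, stand-alone: bsf leaves its destination undefined for a zero source, so the macro branches and materializes 32 explicitly, matching the real TZCNT instruction.

    #include <cstdint>
    int TzcntReference(uint32_t src) {
      if (src == 0) return 32;  // the case the explicit Move() handles
      int n = 0;
      while ((src & 1u) == 0) {
        src >>= 1;
        ++n;
      }
      return n;  // index of the lowest set bit, as bsf computes
    }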
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2639,10 +2720,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -2652,7 +2733,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -2712,10 +2793,9 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch1,
Label* on_black,
Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
+ 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -2769,110 +2849,22 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, isolate()->factory()->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- DCHECK(SeqOneByteString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (emit_debug_code()) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, kLiveBytesCountOverflowChunkSize);
- }
-
- bind(&done);
+ j(zero, value_is_white, Label::kNear);
}
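
With the new mark-bit patterns (white "00", grey "10", black "11"), both live colors share a set first bit, so JumpIfWhite reduces to the single mask test above. Stand-alone:

    #include <cstdint>
    bool IsWhite(uint32_t bitmap_cell, uint32_t mask) {
      return (bitmap_cell & mask) == 0;  // first mark bit clear means white
    }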
diff --git a/chromium/v8/src/x87/macro-assembler-x87.h b/chromium/v8/src/x87/macro-assembler-x87.h
index f1a8f82fe84..9b6c5e8a0ab 100644
--- a/chromium/v8/src/x87/macro-assembler-x87.h
+++ b/chromium/v8/src/x87/macro-assembler-x87.h
@@ -14,20 +14,21 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-const Register kReturnRegister0 = {kRegister_eax_Code};
-const Register kReturnRegister1 = {kRegister_edx_Code};
-const Register kJSFunctionRegister = {kRegister_edi_Code};
-const Register kContextRegister = {kRegister_esi_Code};
-const Register kInterpreterAccumulatorRegister = {kRegister_eax_Code};
-const Register kInterpreterRegisterFileRegister = {kRegister_edx_Code};
-const Register kInterpreterBytecodeOffsetRegister = {kRegister_ecx_Code};
-const Register kInterpreterBytecodeArrayRegister = {kRegister_edi_Code};
-const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
-const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+const Register kReturnRegister0 = {Register::kCode_eax};
+const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kJSFunctionRegister = {Register::kCode_edi};
+const Register kContextRegister = {Register::kCode_esi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterContextSpillSlot = -1;
+const int kInterpreterDispatchTableSpillSlot = -1;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
@@ -40,33 +41,20 @@ enum PointersToHereCheck {
kPointersToHereAreAlwaysInteresting
};
-
-enum RegisterValueType {
- REGISTER_VALUE_IS_SMI,
- REGISTER_VALUE_IS_INT32
-};
-
+enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
@@ -93,7 +81,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kNear) {
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
@@ -101,17 +95,20 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kNear) {
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
@@ -121,63 +118,41 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
+ Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, Label::Distance has_color_distance,
+ int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -256,8 +231,8 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
+ // argument in register esi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
@@ -273,12 +248,11 @@ class MacroAssembler: public Assembler {
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
+ void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -326,37 +300,29 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
- }
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function.
@@ -366,8 +332,6 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, int native_context_index);
// Expression support
// Support for constant splitting.
@@ -384,30 +348,24 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
+ void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
+ void CheckFastObjectElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
+ void CheckFastSmiElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch,
- Label* fail,
+ void StoreNumberToDoubleElements(Register maybe_number, Register elements,
+ Register key, Register scratch, Label* fail,
int offset = 0);
// Compare an object's map with the specified map.
@@ -417,9 +375,7 @@ class MacroAssembler: public Assembler {
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
+ void CheckMap(Register obj, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
@@ -434,8 +390,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
+ Condition IsObjectStringType(Register heap_object, Register map,
Register instance_type);
// Check if the object in register heap_object is a name. Afterwards the
@@ -443,8 +398,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectNameType(Register heap_object,
- Register map,
+ Condition IsObjectNameType(Register heap_object, Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
@@ -493,22 +447,19 @@ class MacroAssembler: public Assembler {
void LoadUint32NoSSE2(const Operand& src);
// Jump if the register contains a smi.
- inline void JumpIfSmi(Register value,
- Label* smi_label,
+ inline void JumpIfSmi(Register value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value,
- Label* smi_label,
+ inline void JumpIfSmi(Operand value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value,
- Label* not_smi_label,
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
@@ -562,6 +513,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -581,22 +536,15 @@ class MacroAssembler: public Assembler {
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss);
+ void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
+ Register scratch2, Label* miss);
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
+ void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+ Register r0, Register r1, Register r2,
Register result);
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -610,48 +558,29 @@ class MacroAssembler: public Assembler {
// result is known to be the allocation top on entry (could be result_end
// from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
+
+ void Allocate(int header_size, ScaleFactor element_size,
+ Register element_count, RegisterValueType element_count_type,
+ Register result, Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags);
+
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Label* gc_required, MutableMode mode = IMMUTABLE);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -660,36 +589,34 @@ class MacroAssembler: public Assembler {
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteConsString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
+ void CopyBytes(Register source, Register destination, Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end of the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -738,31 +665,29 @@ class MacroAssembler: public Assembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -804,12 +729,19 @@ class MacroAssembler: public Assembler {
void Push(const Operand& src) { push(src); }
void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
+ void Pop(const Operand& dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);
+ void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
+ void Tzcnt(Register dst, const Operand& src);
+
+ void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
+ void Popcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
@@ -823,9 +755,11 @@ class MacroAssembler: public Assembler {
void Move(Register dst, const Immediate& x);
void Move(const Operand& dst, const Immediate& x);
+ void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+ void Push(Smi* smi) { Push(Immediate(smi)); }
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -848,7 +782,6 @@ class MacroAssembler: public Assembler {
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
// ---------------------------------------------------------------------------
// Debugging
@@ -899,10 +832,8 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
+ void EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value, uint32_t encoding_mask);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
@@ -952,14 +883,10 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
+ const ParameterCount& actual, Label* done,
+ bool* definitely_mismatches, InvokeFlag flag,
Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -967,18 +894,14 @@ class MacroAssembler: public Assembler {
void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
+ void LoadAllocationTopHelper(Register result, Register scratch,
AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end,
- Register scratch,
+ void UpdateAllocationTopHelper(Register result_end, Register scratch,
AllocationFlags flags);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
+ void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
@@ -986,8 +909,7 @@ class MacroAssembler: public Assembler {
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
// unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
+ inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
@@ -999,7 +921,6 @@ class MacroAssembler: public Assembler {
friend class StandardFrame;
};
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1007,19 +928,18 @@ class MacroAssembler: public Assembler {
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
+ CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1028,39 +948,30 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
-
// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
+inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-
-inline Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
+inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
-
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand NativeContextOperand() {
+ return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
-
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1082,7 +993,7 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_MACRO_ASSEMBLER_X87_H_
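
The reworked CallRuntime family above is worth pausing on: the new single-argument overload fetches the expected argument count from the runtime function descriptor instead of trusting the caller to repeat it. A minimal standalone sketch of that dispatch pattern (the table, enum, and printf body are simplified stand-ins, not V8's real declarations):

    #include <cassert>
    #include <cstdio>

    // Simplified stand-in for V8's runtime function descriptor table.
    struct RuntimeFunction {
      const char* name;
      int nargs;  // Expected argument count.
    };

    enum FunctionId { kAdd, kNumFunctions };

    static const RuntimeFunction kFunctions[kNumFunctions] = {{"Add", 2}};

    const RuntimeFunction* FunctionForId(FunctionId id) {
      return &kFunctions[id];
    }

    // Base overload: the caller supplies the argument count explicitly.
    void CallRuntime(const RuntimeFunction* f, int num_arguments) {
      assert(f->nargs == num_arguments);
      std::printf("call %s with %d args\n", f->name, num_arguments);
    }

    // Convenience overload mirroring the patch: the count comes from the
    // descriptor table, so call sites cannot pass a stale or wrong number.
    void CallRuntime(FunctionId id) {
      const RuntimeFunction* f = FunctionForId(id);
      CallRuntime(f, f->nargs);
    }

    int main() {
      CallRuntime(kAdd);  // Same as CallRuntime(FunctionForId(kAdd), 2).
    }
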
diff --git a/chromium/v8/src/x87/simulator-x87.h b/chromium/v8/src/x87/simulator-x87.h
index a780e839d2d..667f0fd6d7c 100644
--- a/chromium/v8/src/x87/simulator-x87.h
+++ b/chromium/v8/src/x87/simulator-x87.h
@@ -12,7 +12,7 @@ namespace internal {
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
@@ -21,7 +21,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
@@ -36,13 +37,16 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X87_SIMULATOR_X87_H_
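
The simulator macros gain an isolate parameter purely so that every port shares one signature; on ia32/x87, where generated code runs natively, the argument is swallowed with USE(). A reduced sketch of the idiom (Isolate and USE here are stand-ins for the V8 originals):

    #include <cstdio>

    struct Isolate {};  // Stand-in; only the parameter threading matters.

    // Marks a value as intentionally unused without a compiler warning.
    template <typename T>
    void USE(const T&) {}

    // On platforms without a simulator the extra isolate argument is
    // accepted for signature compatibility and then ignored.
    #define CALL_GENERATED_CODE(isolate, entry, p0, p1) \
      (USE(isolate), (entry)(p0, p1))

    int Add(int a, int b) { return a + b; }

    int main() {
      Isolate isolate;
      int r = CALL_GENERATED_CODE(&isolate, Add, 2, 3);
      std::printf("%d\n", r);  // Prints 5.
    }
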
diff --git a/chromium/v8/src/zone-allocator.h b/chromium/v8/src/zone-allocator.h
index 30abe218045..f46151ebc3d 100644
--- a/chromium/v8/src/zone-allocator.h
+++ b/chromium/v8/src/zone-allocator.h
@@ -66,6 +66,7 @@ class zone_allocator {
typedef zone_allocator<bool> ZoneBoolAllocator;
typedef zone_allocator<int> ZoneIntAllocator;
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_ZONE_ALLOCATOR_H_
diff --git a/chromium/v8/src/zone-containers.h b/chromium/v8/src/zone-containers.h
index 8daf0dd657f..79b168c37ea 100644
--- a/chromium/v8/src/zone-containers.h
+++ b/chromium/v8/src/zone-containers.h
@@ -114,12 +114,12 @@ class ZoneSet : public std::set<K, Compare, zone_allocator<K>> {
// a zone allocator.
template <typename K, typename V, typename Compare = std::less<K>>
class ZoneMap
- : public std::map<K, V, Compare, zone_allocator<std::pair<K, V>>> {
+ : public std::map<K, V, Compare, zone_allocator<std::pair<const K, V>>> {
public:
// Constructs an empty map.
explicit ZoneMap(Zone* zone)
- : std::map<K, V, Compare, zone_allocator<std::pair<K, V>>>(
- Compare(), zone_allocator<std::pair<K, V>>(zone)) {}
+ : std::map<K, V, Compare, zone_allocator<std::pair<const K, V>>>(
+ Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
};
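
The ZoneMap change is subtle: std::map's value_type is std::pair<const K, V>, so an allocator instantiated with std::pair<K, V> names the wrong type and strict standard libraries reject it. A minimal sketch of the corrected shape, with a trivial malloc-backed allocator standing in for zone_allocator:

    #include <cstddef>
    #include <cstdlib>
    #include <functional>
    #include <map>

    // Trivial malloc-backed allocator standing in for zone_allocator; a
    // real zone allocator would carve memory out of an arena instead.
    template <typename T>
    struct TrivialAllocator {
      using value_type = T;
      TrivialAllocator() = default;
      template <typename U>
      TrivialAllocator(const TrivialAllocator<U>&) {}
      T* allocate(std::size_t n) {
        return static_cast<T*>(std::malloc(n * sizeof(T)));
      }
      void deallocate(T* p, std::size_t) { std::free(p); }
    };

    template <typename T, typename U>
    bool operator==(const TrivialAllocator<T>&, const TrivialAllocator<U>&) {
      return true;
    }
    template <typename T, typename U>
    bool operator!=(const TrivialAllocator<T>&, const TrivialAllocator<U>&) {
      return false;
    }

    // Note the const K: std::map's value_type is std::pair<const K, V>,
    // so the allocator must be instantiated with exactly that type.
    template <typename K, typename V, typename Compare = std::less<K>>
    using Map =
        std::map<K, V, Compare, TrivialAllocator<std::pair<const K, V>>>;

    int main() {
      Map<int, int> m;
      m[1] = 2;
    }
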
diff --git a/chromium/v8/src/zone-type-cache.h b/chromium/v8/src/zone-type-cache.h
deleted file mode 100644
index bdc4388009a..00000000000
--- a/chromium/v8/src/zone-type-cache.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ZONE_TYPE_CACHE_H_
-#define V8_ZONE_TYPE_CACHE_H_
-
-
-#include "src/types.h"
-
-namespace v8 {
-namespace internal {
-
-class ZoneTypeCache final {
- private:
- // This has to be first for the initialization magic to work.
- Zone zone_;
-
- public:
- ZoneTypeCache() = default;
-
- Type* const kInt8 =
- CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
- Type* const kUint8 =
- CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
- Type* const kUint8Clamped = kUint8;
- Type* const kInt16 =
- CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
- Type* const kUint16 =
- CreateNative(CreateRange<uint16_t>(), Type::UntaggedUnsigned16());
- Type* const kInt32 = CreateNative(Type::Signed32(), Type::UntaggedSigned32());
- Type* const kUint32 =
- CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
- Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
- Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
-
- Type* const kSingletonZero = CreateRange(0.0, 0.0);
- Type* const kSingletonOne = CreateRange(1.0, 1.0);
- Type* const kZeroOrOne = CreateRange(0.0, 1.0);
- Type* const kZeroish =
- Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
- Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
- Type* const kWeakint = Type::Union(kInteger, Type::MinusZeroOrNaN(), zone());
- Type* const kWeakintFunc1 = Type::Function(kWeakint, Type::Number(), zone());
-
- Type* const kRandomFunc0 = Type::Function(Type::OrderedNumber(), zone());
- Type* const kAnyFunc0 = Type::Function(Type::Any(), zone());
- Type* const kAnyFunc1 = Type::Function(Type::Any(), Type::Any(), zone());
- Type* const kAnyFunc2 =
- Type::Function(Type::Any(), Type::Any(), Type::Any(), zone());
- Type* const kAnyFunc3 = Type::Function(Type::Any(), Type::Any(), Type::Any(),
- Type::Any(), zone());
- Type* const kNumberFunc0 = Type::Function(Type::Number(), zone());
- Type* const kNumberFunc1 =
- Type::Function(Type::Number(), Type::Number(), zone());
- Type* const kNumberFunc2 =
- Type::Function(Type::Number(), Type::Number(), Type::Number(), zone());
- Type* const kImulFunc = Type::Function(Type::Signed32(), Type::Integral32(),
- Type::Integral32(), zone());
- Type* const kClz32Func =
- Type::Function(CreateRange(0, 32), Type::Number(), zone());
-
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- Type* const k##TypeName##Array = CreateArray(k##TypeName);
- TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-
- private:
- Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
-
- Type* CreateArrayFunction(Type* array) {
- Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
- Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
- Type* arg3 = arg2;
- return Type::Function(array, arg1, arg2, arg3, zone());
- }
-
- Type* CreateNative(Type* semantic, Type* representation) {
- return Type::Intersect(semantic, representation, zone());
- }
-
- template <typename T>
- Type* CreateRange() {
- return CreateRange(std::numeric_limits<T>::min(),
- std::numeric_limits<T>::max());
- }
-
- Type* CreateRange(double min, double max) {
- return Type::Range(min, max, zone());
- }
-
- Zone* zone() { return &zone_; }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ZONE_TYPE_CACHE_H_
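
The deleted ZoneTypeCache derived integral ranges from std::numeric_limits rather than hard-coding bounds, a pattern worth keeping in mind. A small self-contained illustration (Range is a plain struct here, not V8's Type):

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Plain stand-in for a range type; V8's Type::Range is far richer.
    struct Range {
      double min;
      double max;
    };

    // Derive the range from the C type itself, as the deleted cache did,
    // so int8_t/uint16_t/etc. bounds can never drift out of sync.
    template <typename T>
    Range CreateRange() {
      return Range{static_cast<double>(std::numeric_limits<T>::min()),
                   static_cast<double>(std::numeric_limits<T>::max())};
    }

    int main() {
      Range r = CreateRange<int8_t>();
      std::printf("[%g, %g]\n", r.min, r.max);  // [-128, 127]
    }
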
diff --git a/chromium/v8/test/benchmarks/benchmarks.gyp b/chromium/v8/test/benchmarks/benchmarks.gyp
new file mode 100644
index 00000000000..3884b0901f8
--- /dev/null
+++ b/chromium/v8/test/benchmarks/benchmarks.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'benchmarks_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'benchmarks.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/benchmarks/benchmarks.isolate b/chromium/v8/test/benchmarks/benchmarks.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/benchmarks/benchmarks.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/bot_default.gyp b/chromium/v8/test/bot_default.gyp
new file mode 100644
index 00000000000..ccdf42a7d74
--- /dev/null
+++ b/chromium/v8/test/bot_default.gyp
@@ -0,0 +1,32 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'bot_default_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest/cctest.gyp:cctest_run',
+ 'intl/intl.gyp:intl_run',
+ 'message/message.gyp:message_run',
+ 'mjsunit/mjsunit.gyp:mjsunit_run',
+ 'preparser/preparser.gyp:preparser_run',
+ 'unittests/unittests.gyp:unittests_run',
+ 'webkit/webkit.gyp:webkit_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'bot_default.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/bot_default.isolate b/chromium/v8/test/bot_default.isolate
new file mode 100644
index 00000000000..32773587c2d
--- /dev/null
+++ b/chromium/v8/test/bot_default.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ 'cctest/cctest.isolate',
+ 'intl/intl.isolate',
+ 'message/message.isolate',
+ 'mjsunit/mjsunit.isolate',
+ 'preparser/preparser.isolate',
+ 'unittests/unittests.isolate',
+ 'webkit/webkit.isolate',
+ ],
+}
diff --git a/chromium/v8/test/cctest/cctest.gyp b/chromium/v8/test/cctest/cctest.gyp
index a7092a8bdc4..9ef2d9bfb28 100644
--- a/chromium/v8/test/cctest/cctest.gyp
+++ b/chromium/v8/test/cctest/cctest.gyp
@@ -52,6 +52,7 @@
'compiler/test-basic-block-profiler.cc',
'compiler/test-branch-combine.cc',
'compiler/test-changes-lowering.cc',
+ 'compiler/test-code-stub-assembler.cc',
'compiler/test-gap-resolver.cc',
'compiler/test-graph-visualizer.cc',
'compiler/test-instruction.cc',
@@ -63,6 +64,7 @@
'compiler/test-loop-assignment-analysis.cc',
'compiler/test-loop-analysis.cc',
'compiler/test-machine-operator-reducer.cc',
+ 'compiler/test-multiple-return.cc',
'compiler/test-node.cc',
'compiler/test-operator.cc',
'compiler/test-osr.cc',
@@ -92,15 +94,23 @@
'gay-fixed.cc',
'gay-precision.cc',
'gay-shortest.cc',
- 'heap-tester.h',
+ 'heap/heap-tester.h',
+ 'heap/test-alloc.cc',
+ 'heap/test-compaction.cc',
+ 'heap/test-heap.cc',
+ 'heap/test-incremental-marking.cc',
+ 'heap/test-lab.cc',
+ 'heap/test-mark-compact.cc',
+ 'heap/test-spaces.cc',
+ 'heap/utils-inl.h',
'print-extension.cc',
'profiler-extension.cc',
'test-accessors.cc',
- 'test-alloc.cc',
'test-api.cc',
'test-api.h',
'test-api-accessors.cc',
'test-api-interceptors.cc',
+ 'test-api-fast-accessor-builder.cc',
'test-array-list.cc',
'test-ast.cc',
'test-ast-expression-visitor.cc',
@@ -122,8 +132,10 @@
'test-diy-fp.cc',
'test-double.cc',
'test-dtoa.cc',
+ 'test-elements-kind.cc',
'test-fast-dtoa.cc',
'test-feedback-vector.cc',
+ 'test-field-type-tracking.cc',
'test-fixed-dtoa.cc',
'test-flags.cc',
'test-func-name-inference.cc',
@@ -132,24 +144,22 @@
'test-global-object.cc',
'test-hashing.cc',
'test-hashmap.cc',
- 'test-heap.cc',
'test-heap-profiler.cc',
'test-hydrogen-types.cc',
'test-identity-map.cc',
- 'test-incremental-marking.cc',
+ 'test-inobject-slack-tracking.cc',
'test-list.cc',
'test-liveedit.cc',
'test-lockers.cc',
'test-log.cc',
'test-microtask-delivery.cc',
- 'test-mark-compact.cc',
'test-mementos.cc',
- 'test-migrations.cc',
'test-object-observe.cc',
'test-parsing.cc',
'test-platform.cc',
'test-profile-generator.cc',
'test-random-number-generator.cc',
+ 'test-receiver-check-hidden-prototype.cc',
'test-regexp.cc',
'test-reloc-info.cc',
'test-representation.cc',
@@ -157,12 +167,12 @@
'test-serialize.cc',
'test-simd.cc',
'test-slots-buffer.cc',
- 'test-spaces.cc',
'test-strings.cc',
'test-symbols.cc',
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
+ 'test-trace-event.cc',
'test-transitions.cc',
'test-typedarrays.cc',
'test-types.cc',
@@ -176,6 +186,11 @@
'test-weakmaps.cc',
'test-weaksets.cc',
'trace-extension.cc',
+ 'wasm/test-run-wasm.cc',
+ 'wasm/test-run-wasm-js.cc',
+ 'wasm/test-run-wasm-module.cc',
+ 'wasm/test-signatures.h',
+ 'wasm/wasm-run-utils.h',
],
'conditions': [
['v8_target_arch=="ia32"', {
@@ -277,6 +292,11 @@
},
},
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ # disable fmadd/fmsub so that expected results match generated code in
+ # RunFloat64MulAndFloat64Add1 and friends.
+ 'cflags': ['-ffp-contract=off'],
+ }],
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
}],
@@ -287,11 +307,6 @@
}, {
'dependencies': ['../../tools/gyp/v8.gyp:v8'],
}],
- ['v8_wasm!=0', {
- 'dependencies': [
- '../../third_party/wasm/test/cctest/wasm/wasm.gyp:wasm_cctest'
- ],
- }],
],
},
{
@@ -330,4 +345,23 @@
],
},
],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'cctest_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'cctest.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
}
diff --git a/chromium/v8/test/cctest/cctest.isolate b/chromium/v8/test/cctest/cctest.isolate
new file mode 100644
index 00000000000..aee8d83f850
--- /dev/null
+++ b/chromium/v8/test/cctest/cctest.isolate
@@ -0,0 +1,16 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/cctest<(EXECUTABLE_SUFFIX)',
+ './cctest.status',
+ './testcfg.py',
+ ],
+ },
+ 'includes': [
+ '../../src/base.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/default.gyp b/chromium/v8/test/default.gyp
new file mode 100644
index 00000000000..53a8d7d4a2b
--- /dev/null
+++ b/chromium/v8/test/default.gyp
@@ -0,0 +1,31 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'default_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest/cctest.gyp:cctest_run',
+ 'intl/intl.gyp:intl_run',
+ 'message/message.gyp:message_run',
+ 'mjsunit/mjsunit.gyp:mjsunit_run',
+ 'preparser/preparser.gyp:preparser_run',
+ 'unittests/unittests.gyp:unittests_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'default.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/default.isolate b/chromium/v8/test/default.isolate
new file mode 100644
index 00000000000..68044cf15b7
--- /dev/null
+++ b/chromium/v8/test/default.isolate
@@ -0,0 +1,13 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ 'cctest/cctest.isolate',
+ 'intl/intl.isolate',
+ 'message/message.isolate',
+ 'mjsunit/mjsunit.isolate',
+ 'preparser/preparser.isolate',
+ 'unittests/unittests.isolate',
+ ],
+}
diff --git a/chromium/v8/test/ignition.gyp b/chromium/v8/test/ignition.gyp
new file mode 100644
index 00000000000..6aebec9e194
--- /dev/null
+++ b/chromium/v8/test/ignition.gyp
@@ -0,0 +1,27 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'ignition_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest/cctest.gyp:cctest_run',
+ 'mjsunit/mjsunit.gyp:mjsunit_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'ignition.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/ignition.isolate b/chromium/v8/test/ignition.isolate
new file mode 100644
index 00000000000..9604a694b2d
--- /dev/null
+++ b/chromium/v8/test/ignition.isolate
@@ -0,0 +1,9 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ 'cctest/cctest.isolate',
+ 'mjsunit/mjsunit.isolate',
+ ],
+}
diff --git a/chromium/v8/test/intl/intl.gyp b/chromium/v8/test/intl/intl.gyp
new file mode 100644
index 00000000000..8fa7f0674f2
--- /dev/null
+++ b/chromium/v8/test/intl/intl.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'intl_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'intl.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/intl/intl.isolate b/chromium/v8/test/intl/intl.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/intl/intl.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/message/message.gyp b/chromium/v8/test/message/message.gyp
new file mode 100644
index 00000000000..dac6d9ffffa
--- /dev/null
+++ b/chromium/v8/test/message/message.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'message_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'message.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/message/message.isolate b/chromium/v8/test/message/message.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/message/message.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/mjsunit/mjsunit.gyp b/chromium/v8/test/mjsunit/mjsunit.gyp
new file mode 100644
index 00000000000..35ce2ffdee6
--- /dev/null
+++ b/chromium/v8/test/mjsunit/mjsunit.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'mjsunit_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'mjsunit.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/mjsunit/mjsunit.isolate b/chromium/v8/test/mjsunit/mjsunit.isolate
new file mode 100644
index 00000000000..18b73c2a14c
--- /dev/null
+++ b/chromium/v8/test/mjsunit/mjsunit.isolate
@@ -0,0 +1,23 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ '../../tools/codemap.js',
+ '../../tools/consarray.js',
+ '../../tools/csvparser.js',
+ '../../tools/logreader.js',
+ '../../tools/profile.js',
+ '../../tools/profile_view.js',
+ '../../tools/profviz/composer.js',
+ '../../tools/splaytree.js',
+ '../../tools/tickprocessor.js'
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/mozilla/mozilla.gyp b/chromium/v8/test/mozilla/mozilla.gyp
new file mode 100644
index 00000000000..1202d28c0f6
--- /dev/null
+++ b/chromium/v8/test/mozilla/mozilla.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'mozilla_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'mozilla.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/mozilla/mozilla.isolate b/chromium/v8/test/mozilla/mozilla.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/mozilla/mozilla.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/optimize_for_size.gyp b/chromium/v8/test/optimize_for_size.gyp
new file mode 100644
index 00000000000..ac40ba8499d
--- /dev/null
+++ b/chromium/v8/test/optimize_for_size.gyp
@@ -0,0 +1,29 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'optimize_for_size_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest/cctest.gyp:cctest_run',
+ 'intl/intl.gyp:intl_run',
+ 'mjsunit/mjsunit.gyp:mjsunit_run',
+ 'webkit/webkit.gyp:webkit_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'optimize_for_size.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/optimize_for_size.isolate b/chromium/v8/test/optimize_for_size.isolate
new file mode 100644
index 00000000000..aa7f57036ef
--- /dev/null
+++ b/chromium/v8/test/optimize_for_size.isolate
@@ -0,0 +1,11 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ 'cctest/cctest.isolate',
+ 'intl/intl.isolate',
+ 'mjsunit/mjsunit.isolate',
+ 'webkit/webkit.isolate',
+ ],
+}
diff --git a/chromium/v8/test/preparser/preparser.gyp b/chromium/v8/test/preparser/preparser.gyp
new file mode 100644
index 00000000000..cb763d6dc59
--- /dev/null
+++ b/chromium/v8/test/preparser/preparser.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'preparser_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'preparser.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/preparser/preparser.isolate b/chromium/v8/test/preparser/preparser.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/preparser/preparser.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/simdjs/simdjs.gyp b/chromium/v8/test/simdjs/simdjs.gyp
new file mode 100644
index 00000000000..df0aa5e972e
--- /dev/null
+++ b/chromium/v8/test/simdjs/simdjs.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'simdjs_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'simdjs.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/simdjs/simdjs.isolate b/chromium/v8/test/simdjs/simdjs.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/simdjs/simdjs.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/test262/test262.gyp b/chromium/v8/test/test262/test262.gyp
new file mode 100644
index 00000000000..45e6bc7271d
--- /dev/null
+++ b/chromium/v8/test/test262/test262.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'test262_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'test262.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/test262/test262.isolate b/chromium/v8/test/test262/test262.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/test262/test262.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/unittests/unittests.gyp b/chromium/v8/test/unittests/unittests.gyp
index 663ae372c58..5339da35fdd 100644
--- a/chromium/v8/test/unittests/unittests.gyp
+++ b/chromium/v8/test/unittests/unittests.gyp
@@ -22,10 +22,6 @@
'include_dirs': [
'../..',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'sources': [ ### gcmole(all) ###
'atomic-utils-unittest.cc',
'base/bits-unittest.cc',
@@ -42,8 +38,9 @@
'base/platform/time-unittest.cc',
'base/sys-info-unittest.cc',
'base/utils/random-number-generator-unittest.cc',
+ 'cancelable-tasks-unittest.cc',
'char-predicates-unittest.cc',
- 'compiler/bytecode-graph-builder-unittest.cc',
+ 'compiler/branch-elimination-unittest.cc',
'compiler/change-lowering-unittest.cc',
'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
@@ -53,6 +50,7 @@
'compiler/control-flow-optimizer-unittest.cc',
'compiler/dead-code-elimination-unittest.cc',
'compiler/diamond-unittest.cc',
+ 'compiler/escape-analysis-unittest.cc',
'compiler/graph-reducer-unittest.cc',
'compiler/graph-reducer-unittest.h',
'compiler/graph-trimmer-unittest.cc',
@@ -69,7 +67,6 @@
'compiler/js-intrinsic-lowering-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
- 'compiler/js-type-feedback-unittest.cc',
'compiler/linkage-tail-call-unittest.cc',
'compiler/liveness-analyzer-unittest.cc',
'compiler/live-range-unittest.cc',
@@ -100,6 +97,8 @@
'interpreter/bytecodes-unittest.cc',
'interpreter/bytecode-array-builder-unittest.cc',
'interpreter/bytecode-array-iterator-unittest.cc',
+ 'interpreter/bytecode-register-allocator-unittest.cc',
+ 'interpreter/constant-array-builder-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
@@ -108,10 +107,15 @@
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
+ 'locked-queue-unittest.cc',
'run-all-unittests.cc',
'runtime/runtime-interpreter-unittest.cc',
'test-utils.h',
'test-utils.cc',
+ 'wasm/ast-decoder-unittest.cc',
+ 'wasm/encoder-unittest.cc',
+ 'wasm/module-decoder-unittest.cc',
+ 'wasm/wasm-macro-gen-unittest.cc',
],
'conditions': [
['v8_target_arch=="arm"', {
@@ -171,12 +175,26 @@
],
},
}],
- ['v8_wasm!=0', {
+ ],
+ },
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'unittests_run',
+ 'type': 'none',
'dependencies': [
- '../../third_party/wasm/test/unittests/wasm/wasm.gyp:wasm_unittests',
+ 'unittests',
],
- }],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'unittests.isolate',
+ ],
+ },
],
- },
+ }],
],
}
diff --git a/chromium/v8/test/unittests/unittests.isolate b/chromium/v8/test/unittests/unittests.isolate
new file mode 100644
index 00000000000..ae503bf9bf3
--- /dev/null
+++ b/chromium/v8/test/unittests/unittests.isolate
@@ -0,0 +1,15 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/unittests<(EXECUTABLE_SUFFIX)',
+ './unittests.status',
+ ],
+ },
+ 'includes': [
+ '../../src/base.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/test/webkit/webkit.gyp b/chromium/v8/test/webkit/webkit.gyp
new file mode 100644
index 00000000000..8d655feb227
--- /dev/null
+++ b/chromium/v8/test/webkit/webkit.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'webkit_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'webkit.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/test/webkit/webkit.isolate b/chromium/v8/test/webkit/webkit.isolate
new file mode 100644
index 00000000000..dbeca5e55cf
--- /dev/null
+++ b/chromium/v8/test/webkit/webkit.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/chromium/v8/tools/bash-completion.sh b/chromium/v8/tools/bash-completion.sh
index 6e324246d69..5b9f7f5073e 100755
--- a/chromium/v8/tools/bash-completion.sh
+++ b/chromium/v8/tools/bash-completion.sh
@@ -39,12 +39,16 @@ _v8_flag() {
cur="${COMP_WORDS[COMP_CWORD]}"
defines=$(cat $v8_source/src/flag-definitions.h \
| grep "^DEFINE" \
- | grep -v "DEFINE_implication" \
+ | grep -v "DEFINE_IMPLICATION" \
+ | sed -e 's/_/-/g'; \
+ cat $v8_source/src/flag-definitions.h \
+ | grep "^ V(harmony_" \
+ | sed -e 's/^ V/DEFINE-BOOL/' \
| sed -e 's/_/-/g')
targets=$(echo "$defines" \
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
- | sed -ne 's/^DEFINE-bool(\([^,]*\).*/--no\1/p'; \
+ | sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
cat $v8_source/src/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
diff --git a/chromium/v8/tools/check-static-initializers.gyp b/chromium/v8/tools/check-static-initializers.gyp
new file mode 100644
index 00000000000..547a6c873bb
--- /dev/null
+++ b/chromium/v8/tools/check-static-initializers.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'check_static_initializers_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'check-static-initializers.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/v8/tools/check-static-initializers.isolate b/chromium/v8/tools/check-static-initializers.isolate
new file mode 100644
index 00000000000..d1197d3d6c9
--- /dev/null
+++ b/chromium/v8/tools/check-static-initializers.isolate
@@ -0,0 +1,16 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'check-static-initializers.sh',
+ ],
+ 'files': [
+ 'check-static-initializers.sh',
+ ],
+ },
+ 'includes': [
+ '../src/d8.isolate',
+ ],
+}
diff --git a/chromium/v8/tools/cpu.sh b/chromium/v8/tools/cpu.sh
index 0597d09ea99..5634cac9395 100755
--- a/chromium/v8/tools/cpu.sh
+++ b/chromium/v8/tools/cpu.sh
@@ -14,26 +14,38 @@ set_governor() {
done
}
+enable_cores() {
+ # $1: How many cores to enable.
+ for (( i=1; i<=$MAXID; i++ )); do
+ if [ "$i" -lt "$1" ]; then
+ echo 1 > $CPUPATH/cpu$i/online
+ else
+ echo 0 > $CPUPATH/cpu$i/online
+ fi
+ done
+}
+
dual_core() {
echo "Switching to dual-core mode"
- for (( i=2; i<=$MAXID; i++ )); do
- echo 0 > $CPUPATH/cpu$i/online
- done
+ enable_cores 2
}
single_core() {
echo "Switching to single-core mode"
- for (( i=1; i<=$MAXID; i++ )); do
- echo 0 > $CPUPATH/cpu$i/online
- done
+ enable_cores 1
}
all_cores() {
echo "Reactivating all CPU cores"
- for (( i=1; i<=$MAXID; i++ )); do
- echo 1 > $CPUPATH/cpu$i/online
- done
+ enable_cores $((MAXID+1))
+}
+
+
+limit_cores() {
+ # $1: How many cores to enable.
+ echo "Limiting to $1 cores"
+ enable_cores $1
}
case "$1" in
@@ -55,8 +67,15 @@ case "$1" in
allcores | all)
all_cores
;;
+ limit_cores)
+ if [ $# -ne 2 ]; then
+ echo "Usage $0 limit_cores <num>"
+ exit 1
+ fi
+ limit_cores $2
+ ;;
*)
- echo "Usage: $0 fast|slow|default|singlecore|dualcore|all"
+ echo "Usage: $0 fast|slow|default|singlecore|dualcore|all|limit_cores"
exit 1
;;
esac
diff --git a/chromium/v8/tools/disasm.py b/chromium/v8/tools/disasm.py
index cc7ef0621a3..f409cb003e8 100644
--- a/chromium/v8/tools/disasm.py
+++ b/chromium/v8/tools/disasm.py
@@ -60,7 +60,9 @@ def GetDisasmLines(filename, offset, size, arch, inplace, arch_flags=""):
# Create a temporary file containing a copy of the code.
assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch
arch_flags = arch_flags + " " + _ARCH_MAP[arch]
- tmp_name = tempfile.mktemp(".v8code")
+ tmp_file = tempfile.NamedTemporaryFile(prefix=".v8code", delete=False)
+ tmp_name = tmp_file.name
+ tmp_file.close()
command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \
"%s %s -D -b binary %s %s" % (
filename, tmp_name, size, offset,
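
The disasm.py change replaces mktemp, which only invents a name and leaves a window in which another process can create the file first, with NamedTemporaryFile, which creates the file atomically. The closest C/C++ equivalent is POSIX mkstemp, sketched below (POSIX-only; error handling trimmed to the essentials):

    #include <cstdio>
    #include <cstdlib>
    #include <unistd.h>

    int main() {
      // The trailing Xs are replaced in place; the file is created
      // atomically together with the open descriptor, so no other
      // process can claim the name first.
      char name[] = "/tmp/v8code-XXXXXX";
      int fd = mkstemp(name);
      if (fd < 0) {
        std::perror("mkstemp");
        return 1;
      }
      std::printf("temp file: %s\n", name);
      close(fd);
      unlink(name);  // Callers handing the name to another tool unlink later.
      return 0;
    }
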
diff --git a/chromium/v8/tools/eval_gc_nvp.py b/chromium/v8/tools/eval_gc_nvp.py
index 8a9b8e70728..f18a579391c 100755
--- a/chromium/v8/tools/eval_gc_nvp.py
+++ b/chromium/v8/tools/eval_gc_nvp.py
@@ -85,6 +85,15 @@ class Category:
if self.histogram:
self.histogram.add(float(entry[self.key]))
+ def min(self):
+ return min(self.values)
+
+ def max(self):
+ return max(self.values)
+
+ def avg(self):
+ return sum(self.values) / len(self.values)
+
def __str__(self):
ret = [self.key]
ret.append(" len: {0}".format(len(self.values)))
@@ -96,6 +105,15 @@ class Category:
ret.append(str(self.histogram))
return "\n".join(ret)
+ def __repr__(self):
+ return "<Category: {0}>".format(self.key)
+
+
+def make_key_func(cmp_metric):
+ def key_func(a):
+ return getattr(a, cmp_metric)()
+ return key_func
+
def main():
parser = ArgumentParser(description="Process GCTracer's NVP output")
@@ -121,6 +139,10 @@ def main():
action='store_false', help='do not print histogram')
parser.set_defaults(histogram=True)
parser.set_defaults(histogram_omit_empty=False)
+ parser.add_argument('--rank', metavar='<no|min|max|avg>',
+ type=str, nargs='?',
+ default="no",
+ help="rank keys by metric (default: no)")
args = parser.parse_args()
histogram = None
@@ -143,6 +165,9 @@ def main():
for category in categories:
category.process_entry(obj)
+ if args.rank != "no":
+ categories = sorted(categories, key=make_key_func(args.rank), reverse=True)
+
for category in categories:
print(category)
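
The new --rank option builds a key function from the metric name and sorts categories by it, descending. The same select-a-metric-then-sort shape, transliterated into a self-contained C++ sketch (all types invented for illustration):

    #include <algorithm>
    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    struct Category {
      std::string key;
      std::vector<double> values;
      double min() const {
        return *std::min_element(values.begin(), values.end());
      }
      double max() const {
        return *std::max_element(values.begin(), values.end());
      }
      double avg() const {
        double sum = 0;
        for (double v : values) sum += v;
        return sum / values.size();
      }
    };

    // Pick the aggregate by name, mirroring make_key_func in the patch.
    std::function<double(const Category&)> MakeKeyFunc(
        const std::string& metric) {
      if (metric == "min") return [](const Category& c) { return c.min(); };
      if (metric == "max") return [](const Category& c) { return c.max(); };
      return [](const Category& c) { return c.avg(); };
    }

    int main() {
      std::vector<Category> categories = {{"scavenge", {1, 2, 9}},
                                          {"mark", {4, 5, 6}}};
      auto key = MakeKeyFunc("max");
      std::sort(categories.begin(), categories.end(),
                [&](const Category& a, const Category& b) {
                  return key(a) > key(b);  // Descending, like reverse=True.
                });
      for (const auto& c : categories) std::printf("%s\n", c.key.c_str());
    }
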
diff --git a/chromium/v8/tools/eval_gc_time.sh b/chromium/v8/tools/eval_gc_time.sh
new file mode 100755
index 00000000000..92246d38661
--- /dev/null
+++ b/chromium/v8/tools/eval_gc_time.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Convenience script used to rank GC NVP output.
+
+print_usage_and_die() {
+ echo "Usage: $0 new-gen-rank|old-gen-rank max|avg logfile"
+ exit 1
+}
+
+if [ $# -ne 3 ]; then
+ print_usage_and_die
+fi
+
+case $1 in
+ new-gen-rank|old-gen-rank)
+ OP=$1
+ ;;
+ *)
+ print_usage_and_die
+esac
+
+case $2 in
+ max|avg)
+ RANK_MODE=$2
+ ;;
+ *)
+ print_usage_and_die
+esac
+
+LOGFILE=$3
+
+GENERAL_INTERESTING_KEYS="\
+ pause \
+"
+
+INTERESTING_NEW_GEN_KEYS="\
+ ${GENERAL_INTERESTING_KEYS} \
+ scavenge \
+ weak \
+ roots \
+ old_new \
+ code \
+ semispace \
+ object_groups \
+"
+
+INTERESTING_OLD_GEN_KEYS="\
+ ${GENERAL_INTERESTING_KEYS} \
+ external \
+ clear \
+ clear.code_flush \
+ clear.dependent_code \
+ clear.global_handles \
+ clear.maps \
+ clear.slots_buffer \
+ clear.store_buffer \
+ clear.string_table \
+ clear.weak_cells \
+ clear.weak_collections \
+ clear.weak_lists \
+ finish \
+ evacuate \
+ evacuate.candidates \
+ evacuate.clean_up \
+ evacuate.new_space \
+ evacuate.update_pointers \
+ evacuate.update_pointers.between_evacuated \
+ evacuate.update_pointers.to_evacuated \
+ evacuate.update_pointers.to_new \
+ evacuate.update_pointers.weak \
+ mark \
+ mark.finish_incremental \
+ mark.prepare_code_flush \
+ mark.roots \
+ mark.weak_closure \
+ sweep \
+ sweep.code \
+ sweep.map \
+ sweep.old \
+ incremental_finalize \
+"
+
+BASE_DIR=$(dirname $0)
+
+case $OP in
+ new-gen-rank)
+ cat $LOGFILE | grep "gc=s" \
+ | $BASE_DIR/eval_gc_nvp.py \
+ --no-histogram \
+ --rank $RANK_MODE \
+ ${INTERESTING_NEW_GEN_KEYS}
+ ;;
+ old-gen-rank)
+ cat $LOGFILE | grep "gc=ms" | grep "reduce_memory=0" | grep -v "steps=0" \
+ | $BASE_DIR/eval_gc_nvp.py \
+ --no-histogram \
+ --rank $RANK_MODE \
+ ${INTERESTING_OLD_GEN_KEYS}
+ ;;
+ *)
+ ;;
+esac
+
diff --git a/chromium/v8/tools/gc_nvp_common.py b/chromium/v8/tools/gc_nvp_common.py
index 7a9ad1c4bc8..3b51731399d 100644
--- a/chromium/v8/tools/gc_nvp_common.py
+++ b/chromium/v8/tools/gc_nvp_common.py
@@ -12,7 +12,7 @@ import re
def split_nvp(s):
t = {}
- for (name, value) in re.findall(r"(\w+)=([-\w]+)", s):
+ for (name, value) in re.findall(r"([._\w]+)=([-\w]+(?:\.[0-9]+)?)", s):
try:
t[name] = float(value)
except ValueError:
diff --git a/chromium/v8/tools/gen-postmortem-metadata.py b/chromium/v8/tools/gen-postmortem-metadata.py
index 1b6a6bbcf05..516f8e74906 100644
--- a/chromium/v8/tools/gen-postmortem-metadata.py
+++ b/chromium/v8/tools/gen-postmortem-metadata.py
@@ -98,6 +98,30 @@ consts_misc = [
'value': 'PropertyDetails::FieldIndexField::kMask' },
{ 'name': 'prop_index_shift',
'value': 'PropertyDetails::FieldIndexField::kShift' },
+ { 'name': 'prop_representation_mask',
+ 'value': 'PropertyDetails::RepresentationField::kMask' },
+ { 'name': 'prop_representation_shift',
+ 'value': 'PropertyDetails::RepresentationField::kShift' },
+ { 'name': 'prop_representation_integer8',
+ 'value': 'Representation::Kind::kInteger8' },
+ { 'name': 'prop_representation_uinteger8',
+ 'value': 'Representation::Kind::kUInteger8' },
+ { 'name': 'prop_representation_integer16',
+ 'value': 'Representation::Kind::kInteger16' },
+ { 'name': 'prop_representation_uinteger16',
+ 'value': 'Representation::Kind::kUInteger16' },
+ { 'name': 'prop_representation_smi',
+ 'value': 'Representation::Kind::kSmi' },
+ { 'name': 'prop_representation_integer32',
+ 'value': 'Representation::Kind::kInteger32' },
+ { 'name': 'prop_representation_double',
+ 'value': 'Representation::Kind::kDouble' },
+ { 'name': 'prop_representation_heapobject',
+ 'value': 'Representation::Kind::kHeapObject' },
+ { 'name': 'prop_representation_tagged',
+ 'value': 'Representation::Kind::kTagged' },
+ { 'name': 'prop_representation_external',
+ 'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
@@ -121,6 +145,10 @@ consts_misc = [
'value': 'Map::ElementsKindBits::kShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
+ { 'name': 'bit_field3_number_of_own_descriptors_mask',
+ 'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
+ { 'name': 'bit_field3_number_of_own_descriptors_shift',
+ 'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
@@ -139,14 +167,31 @@ consts_misc = [
'value': 'ScopeInfo::kStackLocalCount' },
{ 'name': 'scopeinfo_idx_ncontextlocals',
'value': 'ScopeInfo::kContextLocalCount' },
+ { 'name': 'scopeinfo_idx_ncontextglobals',
+ 'value': 'ScopeInfo::kContextGlobalCount' },
{ 'name': 'scopeinfo_idx_first_vars',
'value': 'ScopeInfo::kVariablePartIndex' },
+
+ { 'name': 'sharedfunctioninfo_start_position_mask',
+ 'value': 'SharedFunctionInfo::kStartPositionMask' },
+ { 'name': 'sharedfunctioninfo_start_position_shift',
+ 'value': 'SharedFunctionInfo::kStartPositionShift' },
+
+ { 'name': 'jsarray_buffer_was_neutered_mask',
+ 'value': 'JSArrayBuffer::WasNeutered::kMask' },
+ { 'name': 'jsarray_buffer_was_neutered_shift',
+ 'value': 'JSArrayBuffer::WasNeutered::kShift' },
];
#
# The following useful fields are missing accessors, so we define fake ones.
#
extras_accessors = [
+ 'JSFunction, context, Context, kContextOffset',
+ 'Context, closure_index, int, CLOSURE_INDEX',
+ 'Context, native_context_index, int, NATIVE_CONTEXT_INDEX',
+ 'Context, previous_index, int, PREVIOUS_INDEX',
+ 'Context, min_context_slots, int, MIN_CONTEXT_SLOTS',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
@@ -162,6 +207,7 @@ extras_accessors = [
'Map, prototype, Object, kPrototypeOffset',
'NameDictionaryShape, prefix_size, int, kPrefixSize',
'NameDictionaryShape, entry_size, int, kEntrySize',
+ 'NameDictionary, prefix_start_index, int, kPrefixStartIndex',
'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'NumberDictionaryShape, entry_size, int, kEntrySize',
@@ -173,6 +219,7 @@ extras_accessors = [
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
+ 'SharedFunctionInfo, scope_info, ScopeInfo, kScopeInfoOffset',
'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
@@ -227,6 +274,20 @@ footer = '''
'''
#
+# Get the base class
+#
+def get_base_class(klass):
+ if (klass == 'Object'):
+ return klass;
+
+ if (not (klass in klasses)):
+ return None;
+
+ k = klasses[klass];
+
+ return get_base_class(k['parent']);
+
+#
# Loads class hierarchy and type information from "objects.h".
#
def load_objects():
@@ -258,18 +319,20 @@ def load_objects():
in_insttype = False;
continue;
- line = re.sub('//.*', '', line.rstrip().lstrip());
+ line = re.sub('//.*', '', line.strip());
if (in_insttype):
typestr += line;
continue;
- match = re.match('class (\w[^\s:]*)(: public (\w[^\s{]*))?\s*{',
+ match = re.match('class (\w[^:]*)(: public (\w[^{]*))?\s*{\s*',
line);
if (match):
- klass = match.group(1);
+ klass = match.group(1).strip();
pklass = match.group(3);
+ if (pklass):
+ pklass = pklass.strip();
klasses[klass] = { 'parent': pklass };
#
@@ -520,6 +583,9 @@ def emit_config():
keys.sort();
for klassname in keys:
pklass = klasses[klassname]['parent'];
+ bklass = get_base_class(klassname);
+ if (bklass != 'Object'):
+ continue;
if (pklass == None):
continue;
diff --git a/chromium/v8/tools/gyp/v8.gyp b/chromium/v8/tools/gyp/v8.gyp
index bcb580167e4..ca5fb0902b8 100644
--- a/chromium/v8/tools/gyp/v8.gyp
+++ b/chromium/v8/tools/gyp/v8.gyp
@@ -181,7 +181,6 @@
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
@@ -231,7 +230,6 @@
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
@@ -369,6 +367,8 @@
},
'include_dirs+': [
'../..',
+ # To be able to find base/trace_event/common/trace_event_common.h
+ '../../..',
],
'defines': [
# TODO(jochen): Remove again after this is globally turned on.
@@ -376,6 +376,7 @@
],
'sources': [ ### gcmole(all) ###
'../../include/v8-debug.h',
+ '../../include/v8-experimental.h',
'../../include/v8-platform.h',
'../../include/v8-profiler.h',
'../../include/v8-testing.h',
@@ -385,10 +386,14 @@
'../../include/v8config.h',
'../../src/accessors.cc',
'../../src/accessors.h',
+ '../../src/address-map.cc',
+ '../../src/address-map.h',
'../../src/allocation.cc',
'../../src/allocation.h',
'../../src/allocation-site-scopes.cc',
'../../src/allocation-site-scopes.h',
+ '../../src/api-experimental.cc',
+ '../../src/api-experimental.h',
'../../src/api.cc',
'../../src/api.h',
'../../src/api-natives.cc',
@@ -399,16 +404,28 @@
'../../src/assembler.h',
'../../src/assert-scope.h',
'../../src/assert-scope.cc',
- '../../src/ast-expression-visitor.cc',
- '../../src/ast-expression-visitor.h',
- '../../src/ast-literal-reindexer.cc',
- '../../src/ast-literal-reindexer.h',
- '../../src/ast-numbering.cc',
- '../../src/ast-numbering.h',
- '../../src/ast-value-factory.cc',
- '../../src/ast-value-factory.h',
- '../../src/ast.cc',
- '../../src/ast.h',
+ '../../src/ast/ast-expression-rewriter.cc',
+ '../../src/ast/ast-expression-rewriter.h',
+ '../../src/ast/ast-expression-visitor.cc',
+ '../../src/ast/ast-expression-visitor.h',
+ '../../src/ast/ast-literal-reindexer.cc',
+ '../../src/ast/ast-literal-reindexer.h',
+ '../../src/ast/ast-numbering.cc',
+ '../../src/ast/ast-numbering.h',
+ '../../src/ast/ast-value-factory.cc',
+ '../../src/ast/ast-value-factory.h',
+ '../../src/ast/ast.cc',
+ '../../src/ast/ast.h',
+ '../../src/ast/modules.cc',
+ '../../src/ast/modules.h',
+ '../../src/ast/prettyprinter.cc',
+ '../../src/ast/prettyprinter.h',
+ '../../src/ast/scopeinfo.cc',
+ '../../src/ast/scopeinfo.h',
+ '../../src/ast/scopes.cc',
+ '../../src/ast/scopes.h',
+ '../../src/ast/variables.cc',
+ '../../src/ast/variables.h',
'../../src/atomic-utils.h',
'../../src/background-parsing-task.cc',
'../../src/background-parsing-task.h',
@@ -449,6 +466,8 @@
'../../src/compilation-statistics.h',
'../../src/compiler/access-builder.cc',
'../../src/compiler/access-builder.h',
+ '../../src/compiler/access-info.cc',
+ '../../src/compiler/access-info.h',
'../../src/compiler/all-nodes.cc',
'../../src/compiler/all-nodes.h',
'../../src/compiler/ast-graph-builder.cc',
@@ -457,6 +476,10 @@
'../../src/compiler/ast-loop-assignment-analyzer.h',
'../../src/compiler/basic-block-instrumentor.cc',
'../../src/compiler/basic-block-instrumentor.h',
+ '../../src/compiler/branch-elimination.cc',
+ '../../src/compiler/branch-elimination.h',
+ '../../src/compiler/bytecode-branch-analysis.cc',
+ '../../src/compiler/bytecode-branch-analysis.h',
'../../src/compiler/bytecode-graph-builder.cc',
'../../src/compiler/bytecode-graph-builder.h',
'../../src/compiler/change-lowering.cc',
@@ -467,6 +490,8 @@
'../../src/compiler/code-generator-impl.h',
'../../src/compiler/code-generator.cc',
'../../src/compiler/code-generator.h',
+ '../../src/compiler/code-stub-assembler.cc',
+ '../../src/compiler/code-stub-assembler.h',
'../../src/compiler/common-node-cache.cc',
'../../src/compiler/common-node-cache.h',
'../../src/compiler/common-operator-reducer.cc',
@@ -482,6 +507,12 @@
'../../src/compiler/dead-code-elimination.cc',
'../../src/compiler/dead-code-elimination.h',
'../../src/compiler/diamond.h',
+ '../../src/compiler/escape-analysis.cc',
+ '../../src/compiler/escape-analysis.h',
+ "../../src/compiler/escape-analysis-reducer.cc",
+ "../../src/compiler/escape-analysis-reducer.h",
+ '../../src/compiler/fast-accessor-assembler.cc',
+ '../../src/compiler/fast-accessor-assembler.h',
'../../src/compiler/frame.cc',
'../../src/compiler/frame.h',
'../../src/compiler/frame-elider.cc',
@@ -506,12 +537,16 @@
'../../src/compiler/instruction-selector-impl.h',
'../../src/compiler/instruction-selector.cc',
'../../src/compiler/instruction-selector.h',
+ '../../src/compiler/instruction-scheduler.cc',
+ '../../src/compiler/instruction-scheduler.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
'../../src/compiler/interpreter-assembler.cc',
'../../src/compiler/interpreter-assembler.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
+ '../../src/compiler/js-call-reducer.cc',
+ '../../src/compiler/js-call-reducer.h',
'../../src/compiler/js-context-relaxation.cc',
'../../src/compiler/js-context-relaxation.h',
'../../src/compiler/js-context-specialization.cc',
@@ -520,18 +555,20 @@
'../../src/compiler/js-frame-specialization.h',
'../../src/compiler/js-generic-lowering.cc',
'../../src/compiler/js-generic-lowering.h',
+ '../../src/compiler/js-global-object-specialization.cc',
+ '../../src/compiler/js-global-object-specialization.h',
'../../src/compiler/js-graph.cc',
'../../src/compiler/js-graph.h',
'../../src/compiler/js-inlining.cc',
'../../src/compiler/js-inlining.h',
+ '../../src/compiler/js-inlining-heuristic.cc',
+ '../../src/compiler/js-inlining-heuristic.h',
'../../src/compiler/js-intrinsic-lowering.cc',
'../../src/compiler/js-intrinsic-lowering.h',
+ '../../src/compiler/js-native-context-specialization.cc',
+ '../../src/compiler/js-native-context-specialization.h',
'../../src/compiler/js-operator.cc',
'../../src/compiler/js-operator.h',
- '../../src/compiler/js-type-feedback.cc',
- '../../src/compiler/js-type-feedback.h',
- '../../src/compiler/js-type-feedback-lowering.cc',
- '../../src/compiler/js-type-feedback-lowering.h',
'../../src/compiler/js-typed-lowering.cc',
'../../src/compiler/js-typed-lowering.h',
'../../src/compiler/jump-threading.cc',
@@ -552,8 +589,6 @@
'../../src/compiler/machine-operator-reducer.h',
'../../src/compiler/machine-operator.cc',
'../../src/compiler/machine-operator.h',
- '../../src/compiler/machine-type.cc',
- '../../src/compiler/machine-type.h',
'../../src/compiler/move-optimizer.cc',
'../../src/compiler/move-optimizer.h',
'../../src/compiler/node-aux-data.h',
@@ -585,8 +620,7 @@
'../../src/compiler/register-allocator.h',
'../../src/compiler/register-allocator-verifier.cc',
'../../src/compiler/register-allocator-verifier.h',
- '../../src/compiler/register-configuration.cc',
- '../../src/compiler/register-configuration.h',
+ '../../src/compiler/representation-change.cc',
'../../src/compiler/representation-change.h',
'../../src/compiler/schedule.cc',
'../../src/compiler/schedule.h',
@@ -606,12 +640,19 @@
'../../src/compiler/state-values-utils.h',
'../../src/compiler/tail-call-optimization.cc',
'../../src/compiler/tail-call-optimization.h',
+ '../../src/compiler/type-hint-analyzer.cc',
+ '../../src/compiler/type-hint-analyzer.h',
+ '../../src/compiler/type-hints.cc',
+ '../../src/compiler/type-hints.h',
'../../src/compiler/typer.cc',
'../../src/compiler/typer.h',
'../../src/compiler/value-numbering-reducer.cc',
'../../src/compiler/value-numbering-reducer.h',
'../../src/compiler/verifier.cc',
'../../src/compiler/verifier.h',
+ '../../src/compiler/wasm-compiler.cc',
+ '../../src/compiler/wasm-compiler.h',
+ '../../src/compiler/wasm-linkage.cc',
'../../src/compiler/zone-pool.cc',
'../../src/compiler/zone-pool.h',
'../../src/compiler.cc',
@@ -626,6 +667,69 @@
'../../src/conversions.h',
'../../src/counters.cc',
'../../src/counters.h',
+ '../../src/crankshaft/hydrogen-alias-analysis.h',
+ '../../src/crankshaft/hydrogen-bce.cc',
+ '../../src/crankshaft/hydrogen-bce.h',
+ '../../src/crankshaft/hydrogen-bch.cc',
+ '../../src/crankshaft/hydrogen-bch.h',
+ '../../src/crankshaft/hydrogen-canonicalize.cc',
+ '../../src/crankshaft/hydrogen-canonicalize.h',
+ '../../src/crankshaft/hydrogen-check-elimination.cc',
+ '../../src/crankshaft/hydrogen-check-elimination.h',
+ '../../src/crankshaft/hydrogen-dce.cc',
+ '../../src/crankshaft/hydrogen-dce.h',
+ '../../src/crankshaft/hydrogen-dehoist.cc',
+ '../../src/crankshaft/hydrogen-dehoist.h',
+ '../../src/crankshaft/hydrogen-environment-liveness.cc',
+ '../../src/crankshaft/hydrogen-environment-liveness.h',
+ '../../src/crankshaft/hydrogen-escape-analysis.cc',
+ '../../src/crankshaft/hydrogen-escape-analysis.h',
+ '../../src/crankshaft/hydrogen-flow-engine.h',
+ '../../src/crankshaft/hydrogen-gvn.cc',
+ '../../src/crankshaft/hydrogen-gvn.h',
+ '../../src/crankshaft/hydrogen-infer-representation.cc',
+ '../../src/crankshaft/hydrogen-infer-representation.h',
+ '../../src/crankshaft/hydrogen-infer-types.cc',
+ '../../src/crankshaft/hydrogen-infer-types.h',
+ '../../src/crankshaft/hydrogen-instructions.cc',
+ '../../src/crankshaft/hydrogen-instructions.h',
+ '../../src/crankshaft/hydrogen-load-elimination.cc',
+ '../../src/crankshaft/hydrogen-load-elimination.h',
+ '../../src/crankshaft/hydrogen-mark-deoptimize.cc',
+ '../../src/crankshaft/hydrogen-mark-deoptimize.h',
+ '../../src/crankshaft/hydrogen-mark-unreachable.cc',
+ '../../src/crankshaft/hydrogen-mark-unreachable.h',
+ '../../src/crankshaft/hydrogen-osr.cc',
+ '../../src/crankshaft/hydrogen-osr.h',
+ '../../src/crankshaft/hydrogen-range-analysis.cc',
+ '../../src/crankshaft/hydrogen-range-analysis.h',
+ '../../src/crankshaft/hydrogen-redundant-phi.cc',
+ '../../src/crankshaft/hydrogen-redundant-phi.h',
+ '../../src/crankshaft/hydrogen-removable-simulates.cc',
+ '../../src/crankshaft/hydrogen-removable-simulates.h',
+ '../../src/crankshaft/hydrogen-representation-changes.cc',
+ '../../src/crankshaft/hydrogen-representation-changes.h',
+ '../../src/crankshaft/hydrogen-sce.cc',
+ '../../src/crankshaft/hydrogen-sce.h',
+ '../../src/crankshaft/hydrogen-store-elimination.cc',
+ '../../src/crankshaft/hydrogen-store-elimination.h',
+ '../../src/crankshaft/hydrogen-types.cc',
+ '../../src/crankshaft/hydrogen-types.h',
+ '../../src/crankshaft/hydrogen-uint32-analysis.cc',
+ '../../src/crankshaft/hydrogen-uint32-analysis.h',
+ '../../src/crankshaft/hydrogen.cc',
+ '../../src/crankshaft/hydrogen.h',
+ '../../src/crankshaft/lithium-allocator-inl.h',
+ '../../src/crankshaft/lithium-allocator.cc',
+ '../../src/crankshaft/lithium-allocator.h',
+ '../../src/crankshaft/lithium-codegen.cc',
+ '../../src/crankshaft/lithium-codegen.h',
+ '../../src/crankshaft/lithium.cc',
+ '../../src/crankshaft/lithium.h',
+ '../../src/crankshaft/lithium-inl.h',
+ '../../src/crankshaft/typing.cc',
+ '../../src/crankshaft/typing.h',
+ '../../src/crankshaft/unique.h',
'../../src/date.cc',
'../../src/date.h',
'../../src/dateparser-inl.h',
@@ -658,7 +762,6 @@
'../../src/elements.h',
'../../src/execution.cc',
'../../src/execution.h',
- '../../src/expression-classifier.h',
'../../src/extensions/externalize-string-extension.cc',
'../../src/extensions/externalize-string-extension.h',
'../../src/extensions/free-buffer-extension.cc',
@@ -685,8 +788,6 @@
'../../src/frames.h',
'../../src/full-codegen/full-codegen.cc',
'../../src/full-codegen/full-codegen.h',
- '../../src/func-name-inferrer.cc',
- '../../src/func-name-inferrer.h',
'../../src/futex-emulation.cc',
'../../src/futex-emulation.h',
'../../src/gdb-jit.cc',
@@ -735,58 +836,6 @@
'../../src/heap/store-buffer-inl.h',
'../../src/heap/store-buffer.cc',
'../../src/heap/store-buffer.h',
- '../../src/hydrogen-alias-analysis.h',
- '../../src/hydrogen-bce.cc',
- '../../src/hydrogen-bce.h',
- '../../src/hydrogen-bch.cc',
- '../../src/hydrogen-bch.h',
- '../../src/hydrogen-canonicalize.cc',
- '../../src/hydrogen-canonicalize.h',
- '../../src/hydrogen-check-elimination.cc',
- '../../src/hydrogen-check-elimination.h',
- '../../src/hydrogen-dce.cc',
- '../../src/hydrogen-dce.h',
- '../../src/hydrogen-dehoist.cc',
- '../../src/hydrogen-dehoist.h',
- '../../src/hydrogen-environment-liveness.cc',
- '../../src/hydrogen-environment-liveness.h',
- '../../src/hydrogen-escape-analysis.cc',
- '../../src/hydrogen-escape-analysis.h',
- '../../src/hydrogen-flow-engine.h',
- '../../src/hydrogen-instructions.cc',
- '../../src/hydrogen-instructions.h',
- '../../src/hydrogen.cc',
- '../../src/hydrogen.h',
- '../../src/hydrogen-gvn.cc',
- '../../src/hydrogen-gvn.h',
- '../../src/hydrogen-infer-representation.cc',
- '../../src/hydrogen-infer-representation.h',
- '../../src/hydrogen-infer-types.cc',
- '../../src/hydrogen-infer-types.h',
- '../../src/hydrogen-load-elimination.cc',
- '../../src/hydrogen-load-elimination.h',
- '../../src/hydrogen-mark-deoptimize.cc',
- '../../src/hydrogen-mark-deoptimize.h',
- '../../src/hydrogen-mark-unreachable.cc',
- '../../src/hydrogen-mark-unreachable.h',
- '../../src/hydrogen-osr.cc',
- '../../src/hydrogen-osr.h',
- '../../src/hydrogen-range-analysis.cc',
- '../../src/hydrogen-range-analysis.h',
- '../../src/hydrogen-redundant-phi.cc',
- '../../src/hydrogen-redundant-phi.h',
- '../../src/hydrogen-removable-simulates.cc',
- '../../src/hydrogen-removable-simulates.h',
- '../../src/hydrogen-representation-changes.cc',
- '../../src/hydrogen-representation-changes.h',
- '../../src/hydrogen-sce.cc',
- '../../src/hydrogen-sce.h',
- '../../src/hydrogen-store-elimination.cc',
- '../../src/hydrogen-store-elimination.h',
- '../../src/hydrogen-types.cc',
- '../../src/hydrogen-types.h',
- '../../src/hydrogen-uint32-analysis.cc',
- '../../src/hydrogen-uint32-analysis.h',
'../../src/i18n.cc',
'../../src/i18n.h',
'../../src/icu_util.cc',
@@ -810,46 +859,49 @@
'../../src/interface-descriptors.h',
'../../src/interpreter/bytecodes.cc',
'../../src/interpreter/bytecodes.h',
- '../../src/interpreter/bytecode-generator.cc',
- '../../src/interpreter/bytecode-generator.h',
'../../src/interpreter/bytecode-array-builder.cc',
'../../src/interpreter/bytecode-array-builder.h',
'../../src/interpreter/bytecode-array-iterator.cc',
'../../src/interpreter/bytecode-array-iterator.h',
+ '../../src/interpreter/bytecode-generator.cc',
+ '../../src/interpreter/bytecode-generator.h',
+ '../../src/interpreter/bytecode-register-allocator.cc',
+ '../../src/interpreter/bytecode-register-allocator.h',
+ '../../src/interpreter/bytecode-traits.h',
+ '../../src/interpreter/constant-array-builder.cc',
+ '../../src/interpreter/constant-array-builder.h',
+ '../../src/interpreter/control-flow-builders.cc',
+ '../../src/interpreter/control-flow-builders.h',
'../../src/interpreter/interpreter.cc',
'../../src/interpreter/interpreter.h',
'../../src/isolate-inl.h',
'../../src/isolate.cc',
'../../src/isolate.h',
- '../../src/json-parser.h',
'../../src/json-stringifier.h',
+ '../../src/key-accumulator.cc',
+ '../../src/key-accumulator.h',
'../../src/layout-descriptor-inl.h',
'../../src/layout-descriptor.cc',
'../../src/layout-descriptor.h',
'../../src/list-inl.h',
'../../src/list.h',
- '../../src/lithium-allocator-inl.h',
- '../../src/lithium-allocator.cc',
- '../../src/lithium-allocator.h',
- '../../src/lithium-codegen.cc',
- '../../src/lithium-codegen.h',
- '../../src/lithium.cc',
- '../../src/lithium.h',
- '../../src/lithium-inl.h',
+ '../../src/locked-queue-inl.h',
+ '../../src/locked-queue.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
'../../src/log.cc',
'../../src/log.h',
- '../../src/lookup-inl.h',
'../../src/lookup.cc',
'../../src/lookup.h',
'../../src/macro-assembler.h',
+ '../../src/machine-type.cc',
+ '../../src/machine-type.h',
'../../src/messages.cc',
'../../src/messages.h',
- '../../src/modules.cc',
- '../../src/modules.h',
'../../src/msan.h',
+ '../../src/objects-body-descriptors-inl.h',
+ '../../src/objects-body-descriptors.h',
'../../src/objects-debug.cc',
'../../src/objects-inl.h',
'../../src/objects-printer.cc',
@@ -859,18 +911,31 @@
'../../src/optimizing-compile-dispatcher.h',
'../../src/ostreams.cc',
'../../src/ostreams.h',
- '../../src/pattern-rewriter.cc',
- '../../src/parser.cc',
- '../../src/parser.h',
+ '../../src/parsing/expression-classifier.h',
+ '../../src/parsing/func-name-inferrer.cc',
+ '../../src/parsing/func-name-inferrer.h',
+ '../../src/parsing/json-parser.h',
+ '../../src/parsing/parameter-initializer-rewriter.cc',
+ '../../src/parsing/parameter-initializer-rewriter.h',
+ '../../src/parsing/parser-base.h',
+ '../../src/parsing/parser.cc',
+ '../../src/parsing/parser.h',
+ '../../src/parsing/pattern-rewriter.cc',
+ '../../src/parsing/preparse-data-format.h',
+ '../../src/parsing/preparse-data.cc',
+ '../../src/parsing/preparse-data.h',
+ '../../src/parsing/preparser.cc',
+ '../../src/parsing/preparser.h',
+ '../../src/parsing/rewriter.cc',
+ '../../src/parsing/rewriter.h',
+ '../../src/parsing/scanner-character-streams.cc',
+ '../../src/parsing/scanner-character-streams.h',
+ '../../src/parsing/scanner.cc',
+ '../../src/parsing/scanner.h',
+ '../../src/parsing/token.cc',
+ '../../src/parsing/token.h',
'../../src/pending-compilation-error-handler.cc',
'../../src/pending-compilation-error-handler.h',
- '../../src/preparse-data-format.h',
- '../../src/preparse-data.cc',
- '../../src/preparse-data.h',
- '../../src/preparser.cc',
- '../../src/preparser.h',
- '../../src/prettyprinter.cc',
- '../../src/prettyprinter.h',
'../../src/profiler/allocation-tracker.cc',
'../../src/profiler/allocation-tracker.h',
'../../src/profiler/circular-queue-inl.h',
@@ -888,8 +953,12 @@
'../../src/profiler/profile-generator.h',
'../../src/profiler/sampler.cc',
'../../src/profiler/sampler.h',
+ '../../src/profiler/strings-storage.cc',
+ '../../src/profiler/strings-storage.h',
'../../src/profiler/unbound-queue-inl.h',
'../../src/profiler/unbound-queue.h',
+ '../../src/property-descriptor.cc',
+ '../../src/property-descriptor.h',
'../../src/property-details.h',
'../../src/property.cc',
'../../src/property.h',
@@ -900,6 +969,8 @@
'../../src/regexp/jsregexp-inl.h',
'../../src/regexp/jsregexp.cc',
'../../src/regexp/jsregexp.h',
+ '../../src/regexp/regexp-ast.cc',
+ '../../src/regexp/regexp-ast.h',
'../../src/regexp/regexp-macro-assembler-irregexp-inl.h',
'../../src/regexp/regexp-macro-assembler-irregexp.cc',
'../../src/regexp/regexp-macro-assembler-irregexp.h',
@@ -907,10 +978,12 @@
'../../src/regexp/regexp-macro-assembler-tracer.h',
'../../src/regexp/regexp-macro-assembler.cc',
'../../src/regexp/regexp-macro-assembler.h',
+ '../../src/regexp/regexp-parser.cc',
+ '../../src/regexp/regexp-parser.h',
'../../src/regexp/regexp-stack.cc',
'../../src/regexp/regexp-stack.h',
- '../../src/rewriter.cc',
- '../../src/rewriter.h',
+ '../../src/register-configuration.cc',
+ '../../src/register-configuration.h',
'../../src/runtime-profiler.cc',
'../../src/runtime-profiler.h',
'../../src/runtime/runtime-array.cc',
@@ -949,14 +1022,6 @@
'../../src/runtime/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
- '../../src/scanner-character-streams.cc',
- '../../src/scanner-character-streams.h',
- '../../src/scanner.cc',
- '../../src/scanner.h',
- '../../src/scopeinfo.cc',
- '../../src/scopeinfo.h',
- '../../src/scopes.cc',
- '../../src/scopes.h',
'../../src/signature.h',
'../../src/simulator.h',
'../../src/small-pointer-list.h',
@@ -977,17 +1042,17 @@
'../../src/string-search.h',
'../../src/string-stream.cc',
'../../src/string-stream.h',
- '../../src/strings-storage.cc',
- '../../src/strings-storage.h',
'../../src/strtod.cc',
'../../src/strtod.h',
'../../src/ic/stub-cache.cc',
'../../src/ic/stub-cache.h',
- '../../src/token.cc',
- '../../src/token.h',
+ '../../src/tracing/trace-event.cc',
+ '../../src/tracing/trace-event.h',
'../../src/transitions-inl.h',
'../../src/transitions.cc',
'../../src/transitions.h',
+ '../../src/type-cache.cc',
+ '../../src/type-cache.h',
'../../src/type-feedback-vector-inl.h',
'../../src/type-feedback-vector.cc',
'../../src/type-feedback-vector.h',
@@ -1000,8 +1065,6 @@
'../../src/typing-asm.h',
'../../src/typing-reset.cc',
'../../src/typing-reset.h',
- '../../src/typing.cc',
- '../../src/typing.h',
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
@@ -1009,7 +1072,6 @@
'../../src/unicode-cache.h',
'../../src/unicode-decoder.cc',
'../../src/unicode-decoder.h',
- '../../src/unique.h',
'../../src/utils.cc',
'../../src/utils.h',
'../../src/v8.cc',
@@ -1017,14 +1079,29 @@
'../../src/v8memory.h',
'../../src/v8threads.cc',
'../../src/v8threads.h',
- '../../src/variables.cc',
- '../../src/variables.h',
'../../src/vector.h',
'../../src/version.cc',
'../../src/version.h',
'../../src/vm-state-inl.h',
'../../src/vm-state.h',
- '../../src/zone-type-cache.h',
+ '../../src/wasm/asm-wasm-builder.cc',
+ '../../src/wasm/asm-wasm-builder.h',
+ '../../src/wasm/ast-decoder.cc',
+ '../../src/wasm/ast-decoder.h',
+ '../../src/wasm/decoder.h',
+ '../../src/wasm/encoder.cc',
+ '../../src/wasm/encoder.h',
+ '../../src/wasm/module-decoder.cc',
+ '../../src/wasm/module-decoder.h',
+ '../../src/wasm/wasm-js.cc',
+ '../../src/wasm/wasm-js.h',
+ '../../src/wasm/wasm-macro-gen.h',
+ '../../src/wasm/wasm-module.cc',
+ '../../src/wasm/wasm-module.h',
+ '../../src/wasm/wasm-opcodes.cc',
+ '../../src/wasm/wasm-opcodes.h',
+ '../../src/wasm/wasm-result.cc',
+ '../../src/wasm/wasm-result.h',
'../../src/zone.cc',
'../../src/zone.h',
'../../src/zone-allocator.h',
@@ -1057,19 +1134,20 @@
'../../src/arm/frames-arm.h',
'../../src/arm/interface-descriptors-arm.cc',
'../../src/arm/interface-descriptors-arm.h',
- '../../src/arm/lithium-arm.cc',
- '../../src/arm/lithium-arm.h',
- '../../src/arm/lithium-codegen-arm.cc',
- '../../src/arm/lithium-codegen-arm.h',
- '../../src/arm/lithium-gap-resolver-arm.cc',
- '../../src/arm/lithium-gap-resolver-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
'../../src/arm/simulator-arm.cc',
'../../src/arm/simulator-arm.h',
'../../src/compiler/arm/code-generator-arm.cc',
'../../src/compiler/arm/instruction-codes-arm.h',
+ '../../src/compiler/arm/instruction-scheduler-arm.cc',
'../../src/compiler/arm/instruction-selector-arm.cc',
+ '../../src/crankshaft/arm/lithium-arm.cc',
+ '../../src/crankshaft/arm/lithium-arm.h',
+ '../../src/crankshaft/arm/lithium-codegen-arm.cc',
+ '../../src/crankshaft/arm/lithium-codegen-arm.h',
+ '../../src/crankshaft/arm/lithium-gap-resolver-arm.cc',
+ '../../src/crankshaft/arm/lithium-gap-resolver-arm.h',
'../../src/debug/arm/debug-arm.cc',
'../../src/full-codegen/arm/full-codegen-arm.cc',
'../../src/ic/arm/access-compiler-arm.cc',
@@ -1096,9 +1174,6 @@
'../../src/arm64/decoder-arm64.cc',
'../../src/arm64/decoder-arm64.h',
'../../src/arm64/decoder-arm64-inl.h',
- '../../src/arm64/delayed-masm-arm64.cc',
- '../../src/arm64/delayed-masm-arm64.h',
- '../../src/arm64/delayed-masm-arm64-inl.h',
'../../src/arm64/deoptimizer-arm64.cc',
'../../src/arm64/disasm-arm64.cc',
'../../src/arm64/disasm-arm64.h',
@@ -1110,12 +1185,6 @@
'../../src/arm64/instrument-arm64.h',
'../../src/arm64/interface-descriptors-arm64.cc',
'../../src/arm64/interface-descriptors-arm64.h',
- '../../src/arm64/lithium-arm64.cc',
- '../../src/arm64/lithium-arm64.h',
- '../../src/arm64/lithium-codegen-arm64.cc',
- '../../src/arm64/lithium-codegen-arm64.h',
- '../../src/arm64/lithium-gap-resolver-arm64.cc',
- '../../src/arm64/lithium-gap-resolver-arm64.h',
'../../src/arm64/macro-assembler-arm64.cc',
'../../src/arm64/macro-assembler-arm64.h',
'../../src/arm64/macro-assembler-arm64-inl.h',
@@ -1125,7 +1194,17 @@
'../../src/arm64/utils-arm64.h',
'../../src/compiler/arm64/code-generator-arm64.cc',
'../../src/compiler/arm64/instruction-codes-arm64.h',
+ '../../src/compiler/arm64/instruction-scheduler-arm64.cc',
'../../src/compiler/arm64/instruction-selector-arm64.cc',
+ '../../src/crankshaft/arm64/delayed-masm-arm64.cc',
+ '../../src/crankshaft/arm64/delayed-masm-arm64.h',
+ '../../src/crankshaft/arm64/delayed-masm-arm64-inl.h',
+ '../../src/crankshaft/arm64/lithium-arm64.cc',
+ '../../src/crankshaft/arm64/lithium-arm64.h',
+ '../../src/crankshaft/arm64/lithium-codegen-arm64.cc',
+ '../../src/crankshaft/arm64/lithium-codegen-arm64.h',
+ '../../src/crankshaft/arm64/lithium-gap-resolver-arm64.cc',
+ '../../src/crankshaft/arm64/lithium-gap-resolver-arm64.h',
'../../src/debug/arm64/debug-arm64.cc',
'../../src/full-codegen/arm64/full-codegen-arm64.cc',
'../../src/ic/arm64/access-compiler-arm64.cc',
@@ -1153,17 +1232,18 @@
'../../src/ia32/frames-ia32.cc',
'../../src/ia32/frames-ia32.h',
'../../src/ia32/interface-descriptors-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.cc',
- '../../src/ia32/lithium-codegen-ia32.h',
- '../../src/ia32/lithium-gap-resolver-ia32.cc',
- '../../src/ia32/lithium-gap-resolver-ia32.h',
- '../../src/ia32/lithium-ia32.cc',
- '../../src/ia32/lithium-ia32.h',
'../../src/ia32/macro-assembler-ia32.cc',
'../../src/ia32/macro-assembler-ia32.h',
'../../src/compiler/ia32/code-generator-ia32.cc',
'../../src/compiler/ia32/instruction-codes-ia32.h',
+ '../../src/compiler/ia32/instruction-scheduler-ia32.cc',
'../../src/compiler/ia32/instruction-selector-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-codegen-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-codegen-ia32.h',
+ '../../src/crankshaft/ia32/lithium-gap-resolver-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-gap-resolver-ia32.h',
+ '../../src/crankshaft/ia32/lithium-ia32.cc',
+ '../../src/crankshaft/ia32/lithium-ia32.h',
'../../src/debug/ia32/debug-ia32.cc',
'../../src/full-codegen/ia32/full-codegen-ia32.cc',
'../../src/ic/ia32/access-compiler-ia32.cc',
@@ -1191,17 +1271,18 @@
'../../src/x87/frames-x87.cc',
'../../src/x87/frames-x87.h',
'../../src/x87/interface-descriptors-x87.cc',
- '../../src/x87/lithium-codegen-x87.cc',
- '../../src/x87/lithium-codegen-x87.h',
- '../../src/x87/lithium-gap-resolver-x87.cc',
- '../../src/x87/lithium-gap-resolver-x87.h',
- '../../src/x87/lithium-x87.cc',
- '../../src/x87/lithium-x87.h',
'../../src/x87/macro-assembler-x87.cc',
'../../src/x87/macro-assembler-x87.h',
'../../src/compiler/x87/code-generator-x87.cc',
'../../src/compiler/x87/instruction-codes-x87.h',
+ '../../src/compiler/x87/instruction-scheduler-x87.cc',
'../../src/compiler/x87/instruction-selector-x87.cc',
+ '../../src/crankshaft/x87/lithium-codegen-x87.cc',
+ '../../src/crankshaft/x87/lithium-codegen-x87.h',
+ '../../src/crankshaft/x87/lithium-gap-resolver-x87.cc',
+ '../../src/crankshaft/x87/lithium-gap-resolver-x87.h',
+ '../../src/crankshaft/x87/lithium-x87.cc',
+ '../../src/crankshaft/x87/lithium-x87.h',
'../../src/debug/x87/debug-x87.cc',
'../../src/full-codegen/x87/full-codegen-x87.cc',
'../../src/ic/x87/access-compiler-x87.cc',
@@ -1231,19 +1312,20 @@
'../../src/mips/frames-mips.cc',
'../../src/mips/frames-mips.h',
'../../src/mips/interface-descriptors-mips.cc',
- '../../src/mips/lithium-codegen-mips.cc',
- '../../src/mips/lithium-codegen-mips.h',
- '../../src/mips/lithium-gap-resolver-mips.cc',
- '../../src/mips/lithium-gap-resolver-mips.h',
- '../../src/mips/lithium-mips.cc',
- '../../src/mips/lithium-mips.h',
'../../src/mips/macro-assembler-mips.cc',
'../../src/mips/macro-assembler-mips.h',
'../../src/mips/simulator-mips.cc',
'../../src/mips/simulator-mips.h',
'../../src/compiler/mips/code-generator-mips.cc',
'../../src/compiler/mips/instruction-codes-mips.h',
+ '../../src/compiler/mips/instruction-scheduler-mips.cc',
'../../src/compiler/mips/instruction-selector-mips.cc',
+ '../../src/crankshaft/mips/lithium-codegen-mips.cc',
+ '../../src/crankshaft/mips/lithium-codegen-mips.h',
+ '../../src/crankshaft/mips/lithium-gap-resolver-mips.cc',
+ '../../src/crankshaft/mips/lithium-gap-resolver-mips.h',
+ '../../src/crankshaft/mips/lithium-mips.cc',
+ '../../src/crankshaft/mips/lithium-mips.h',
'../../src/full-codegen/mips/full-codegen-mips.cc',
'../../src/debug/mips/debug-mips.cc',
'../../src/ic/mips/access-compiler-mips.cc',
@@ -1273,19 +1355,20 @@
'../../src/mips64/frames-mips64.cc',
'../../src/mips64/frames-mips64.h',
'../../src/mips64/interface-descriptors-mips64.cc',
- '../../src/mips64/lithium-codegen-mips64.cc',
- '../../src/mips64/lithium-codegen-mips64.h',
- '../../src/mips64/lithium-gap-resolver-mips64.cc',
- '../../src/mips64/lithium-gap-resolver-mips64.h',
- '../../src/mips64/lithium-mips64.cc',
- '../../src/mips64/lithium-mips64.h',
'../../src/mips64/macro-assembler-mips64.cc',
'../../src/mips64/macro-assembler-mips64.h',
'../../src/mips64/simulator-mips64.cc',
'../../src/mips64/simulator-mips64.h',
'../../src/compiler/mips64/code-generator-mips64.cc',
'../../src/compiler/mips64/instruction-codes-mips64.h',
+ '../../src/compiler/mips64/instruction-scheduler-mips64.cc',
'../../src/compiler/mips64/instruction-selector-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-codegen-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-codegen-mips64.h',
+ '../../src/crankshaft/mips64/lithium-gap-resolver-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-gap-resolver-mips64.h',
+ '../../src/crankshaft/mips64/lithium-mips64.cc',
+ '../../src/crankshaft/mips64/lithium-mips64.h',
'../../src/debug/mips64/debug-mips64.cc',
'../../src/full-codegen/mips64/full-codegen-mips64.cc',
'../../src/ic/mips64/access-compiler-mips64.cc',
@@ -1299,6 +1382,12 @@
}],
['v8_target_arch=="x64" or v8_target_arch=="x32"', {
'sources': [ ### gcmole(arch:x64) ###
+ '../../src/crankshaft/x64/lithium-codegen-x64.cc',
+ '../../src/crankshaft/x64/lithium-codegen-x64.h',
+ '../../src/crankshaft/x64/lithium-gap-resolver-x64.cc',
+ '../../src/crankshaft/x64/lithium-gap-resolver-x64.h',
+ '../../src/crankshaft/x64/lithium-x64.cc',
+ '../../src/crankshaft/x64/lithium-x64.h',
'../../src/x64/assembler-x64-inl.h',
'../../src/x64/assembler-x64.cc',
'../../src/x64/assembler-x64.h',
@@ -1313,12 +1402,6 @@
'../../src/x64/frames-x64.cc',
'../../src/x64/frames-x64.h',
'../../src/x64/interface-descriptors-x64.cc',
- '../../src/x64/lithium-codegen-x64.cc',
- '../../src/x64/lithium-codegen-x64.h',
- '../../src/x64/lithium-gap-resolver-x64.cc',
- '../../src/x64/lithium-gap-resolver-x64.h',
- '../../src/x64/lithium-x64.cc',
- '../../src/x64/lithium-x64.h',
'../../src/x64/macro-assembler-x64.cc',
'../../src/x64/macro-assembler-x64.h',
'../../src/debug/x64/debug-x64.cc',
@@ -1336,11 +1419,29 @@
'sources': [
'../../src/compiler/x64/code-generator-x64.cc',
'../../src/compiler/x64/instruction-codes-x64.h',
+ '../../src/compiler/x64/instruction-scheduler-x64.cc',
'../../src/compiler/x64/instruction-selector-x64.cc',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'sources': [ ### gcmole(arch:ppc) ###
+ '../../src/compiler/ppc/code-generator-ppc.cc',
+ '../../src/compiler/ppc/instruction-codes-ppc.h',
+ '../../src/compiler/ppc/instruction-scheduler-ppc.cc',
+ '../../src/compiler/ppc/instruction-selector-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-ppc.h',
+ '../../src/crankshaft/ppc/lithium-codegen-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-codegen-ppc.h',
+ '../../src/crankshaft/ppc/lithium-gap-resolver-ppc.cc',
+ '../../src/crankshaft/ppc/lithium-gap-resolver-ppc.h',
+ '../../src/debug/ppc/debug-ppc.cc',
+ '../../src/full-codegen/ppc/full-codegen-ppc.cc',
+ '../../src/ic/ppc/access-compiler-ppc.cc',
+ '../../src/ic/ppc/handler-compiler-ppc.cc',
+ '../../src/ic/ppc/ic-ppc.cc',
+ '../../src/ic/ppc/ic-compiler-ppc.cc',
+ '../../src/ic/ppc/stub-cache-ppc.cc',
'../../src/ppc/assembler-ppc-inl.h',
'../../src/ppc/assembler-ppc.cc',
'../../src/ppc/assembler-ppc.h',
@@ -1358,26 +1459,10 @@
'../../src/ppc/frames-ppc.h',
'../../src/ppc/interface-descriptors-ppc.cc',
'../../src/ppc/interface-descriptors-ppc.h',
- '../../src/ppc/lithium-ppc.cc',
- '../../src/ppc/lithium-ppc.h',
- '../../src/ppc/lithium-codegen-ppc.cc',
- '../../src/ppc/lithium-codegen-ppc.h',
- '../../src/ppc/lithium-gap-resolver-ppc.cc',
- '../../src/ppc/lithium-gap-resolver-ppc.h',
'../../src/ppc/macro-assembler-ppc.cc',
'../../src/ppc/macro-assembler-ppc.h',
'../../src/ppc/simulator-ppc.cc',
'../../src/ppc/simulator-ppc.h',
- '../../src/compiler/ppc/code-generator-ppc.cc',
- '../../src/compiler/ppc/instruction-codes-ppc.h',
- '../../src/compiler/ppc/instruction-selector-ppc.cc',
- '../../src/debug/ppc/debug-ppc.cc',
- '../../src/full-codegen/ppc/full-codegen-ppc.cc',
- '../../src/ic/ppc/access-compiler-ppc.cc',
- '../../src/ic/ppc/handler-compiler-ppc.cc',
- '../../src/ic/ppc/ic-ppc.cc',
- '../../src/ic/ppc/ic-compiler-ppc.cc',
- '../../src/ic/ppc/stub-cache-ppc.cc',
'../../src/regexp/ppc/regexp-macro-assembler-ppc.cc',
'../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
],
@@ -1430,9 +1515,6 @@
}],
],
}],
- ['v8_wasm!=0', {
- 'dependencies': ['../../third_party/wasm/src/wasm/wasm.gyp:wasm'],
- }],
],
},
{
@@ -1745,7 +1827,6 @@
'inputs': [
'../../tools/concatenate-files.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
@@ -1799,7 +1880,7 @@
['v8_enable_i18n_support==1', {
'variables': {
'i18n_library_files': [
- '../../src/i18n.js',
+ '../../src/js/i18n.js',
],
},
}, {
@@ -1810,62 +1891,53 @@
],
'variables': {
'library_files': [
- '../../src/macros.py',
+ '../../src/js/macros.py',
'../../src/messages.h',
- '../../src/prologue.js',
- '../../src/runtime.js',
- '../../src/v8natives.js',
- '../../src/symbol.js',
- '../../src/array.js',
- '../../src/string.js',
- '../../src/uri.js',
- '../../src/math.js',
+ '../../src/js/prologue.js',
+ '../../src/js/runtime.js',
+ '../../src/js/v8natives.js',
+ '../../src/js/symbol.js',
+ '../../src/js/array.js',
+ '../../src/js/string.js',
+ '../../src/js/uri.js',
+ '../../src/js/math.js',
'../../src/third_party/fdlibm/fdlibm.js',
- '../../src/date.js',
- '../../src/regexp.js',
- '../../src/arraybuffer.js',
- '../../src/typedarray.js',
- '../../src/iterator-prototype.js',
- '../../src/generator.js',
- '../../src/object-observe.js',
- '../../src/collection.js',
- '../../src/weak-collection.js',
- '../../src/collection-iterator.js',
- '../../src/promise.js',
- '../../src/messages.js',
- '../../src/json.js',
- '../../src/array-iterator.js',
- '../../src/string-iterator.js',
- '../../src/templates.js',
- '../../src/harmony-array.js',
- '../../src/harmony-typedarray.js',
+ '../../src/js/regexp.js',
+ '../../src/js/arraybuffer.js',
+ '../../src/js/typedarray.js',
+ '../../src/js/iterator-prototype.js',
+ '../../src/js/generator.js',
+ '../../src/js/object-observe.js',
+ '../../src/js/collection.js',
+ '../../src/js/weak-collection.js',
+ '../../src/js/collection-iterator.js',
+ '../../src/js/promise.js',
+ '../../src/js/messages.js',
+ '../../src/js/json.js',
+ '../../src/js/array-iterator.js',
+ '../../src/js/string-iterator.js',
+ '../../src/js/templates.js',
+ '../../src/js/spread.js',
'../../src/debug/mirrors.js',
'../../src/debug/debug.js',
'../../src/debug/liveedit.js',
],
'experimental_library_files': [
- '../../src/macros.py',
- '../../src/messages.h',
- '../../src/proxy.js',
- '../../src/generator.js',
- '../../src/harmony-atomics.js',
- '../../src/harmony-array-includes.js',
- '../../src/harmony-concat-spreadable.js',
- '../../src/harmony-tostring.js',
- '../../src/harmony-regexp.js',
- '../../src/harmony-reflect.js',
- '../../src/harmony-spread.js',
- '../../src/harmony-object-observe.js',
- '../../src/harmony-sharedarraybuffer.js',
- '../../src/harmony-simd.js',
- ],
- 'code_stub_library_files': [
- '../../src/macros.py',
+ '../../src/js/macros.py',
'../../src/messages.h',
- '../../src/code-stubs.js',
+ '../../src/js/proxy.js',
+ '../../src/js/generator.js',
+ '../../src/js/harmony-atomics.js',
+ '../../src/js/harmony-regexp.js',
+ '../../src/js/harmony-reflect.js',
+ '../../src/js/harmony-object-observe.js',
+ '../../src/js/harmony-sharedarraybuffer.js',
+ '../../src/js/harmony-simd.js',
+ '../../src/js/harmony-species.js',
+ '../../src/js/harmony-unicode-regexps.js',
+ '../../src/js/promise-extra.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- 'libraries_code_stub_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
@@ -1940,38 +2012,6 @@
],
},
{
- 'action_name': 'js2c_code_stubs',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(code_stub_library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
- 'CODE_STUB',
- '<@(code_stub_library_files)'
- ],
- },
- {
- 'action_name': 'js2c_code_stubs_bin',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(code_stub_library_files)',
- ],
- 'outputs': ['<@(libraries_code_stub_bin_file)'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
- 'CODE_STUB',
- '<@(code_stub_library_files)',
- '--startup_blob', '<@(libraries_code_stub_bin_file)',
- '--nojs',
- ],
- },
- {
'action_name': 'js2c_extras',
'inputs': [
'../../tools/js2c.py',
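Each js2c entry above is a plain gyp action, so it maps directly to a command line. A sketch of what the removed js2c_code_stubs action amounted to, with SHARED_INTERMEDIATE_DIR spelled out as a hypothetical out/ path:

    import subprocess

    # Mirrors the removed 'js2c_code_stubs' action: output .cc file, native
    # type, then the input sources, exactly as listed in the gyp block above.
    subprocess.check_call([
        'python', '../../tools/js2c.py',
        'out/code-stub-libraries.cc',
        'CODE_STUB',
        '../../src/macros.py',
        '../../src/messages.h',
        '../../src/code-stubs.js',
    ])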
diff --git a/chromium/v8/tools/isolate_driver.py b/chromium/v8/tools/isolate_driver.py
new file mode 100644
index 00000000000..d1b39b09582
--- /dev/null
+++ b/chromium/v8/tools/isolate_driver.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Adaptor script called through build/isolate.gypi.
+
+Slimmed-down version of Chromium's isolate driver that doesn't process dynamic
+dependencies.
+"""
+
+import json
+import logging
+import os
+import subprocess
+import sys
+
+TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def prepare_isolate_call(args, output):
+ """Gathers all information required to run isolate.py later.
+
+ Dumps it as JSON to the |output| file.
+ """
+ with open(output, 'wb') as f:
+ json.dump({
+ 'args': args,
+ 'dir': os.getcwd(),
+ 'version': 1,
+ }, f, indent=2, sort_keys=True)
+
+
+def main():
+ logging.basicConfig(level=logging.ERROR, format='%(levelname)7s %(message)s')
+ if len(sys.argv) < 2:
+ print >> sys.stderr, 'Internal failure; mode required'
+ return 1
+ mode = sys.argv[1]
+ args = sys.argv[1:]
+ isolate = None
+ isolated = None
+ for i, arg in enumerate(args):
+ if arg == '--isolate':
+ isolate = i + 1
+ if arg == '--isolated':
+ isolated = i + 1
+ if not isolate or not isolated:
+ print >> sys.stderr, 'Internal failure'
+ return 1
+
+ # In 'prepare' mode, just collect all information required for a postponed
+ # isolate.py invocation and store it in a *.isolated.gen.json file.
+ if mode == 'prepare':
+ prepare_isolate_call(args[1:], args[isolated] + '.gen.json')
+ return 0
+
+ swarming_client = os.path.join(TOOLS_DIR, 'swarming_client')
+ sys.stdout.flush()
+ return subprocess.call(
+ [sys.executable, os.path.join(swarming_client, 'isolate.py')] + args)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
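A self-contained usage sketch (file names are hypothetical) of what 'prepare' mode leaves behind for the later isolate.py run:

    import json, os

    def prepare_isolate_call(args, output):
        # Same shape as the function above: record the pending call as JSON.
        with open(output, 'wb') as f:
            json.dump({'args': args, 'dir': os.getcwd(), 'version': 1},
                      f, indent=2, sort_keys=True)

    # The driver strips the leading mode argument before recording.
    prepare_isolate_call(
        ['--isolate', 'cctest.isolate', '--isolated', 'cctest.isolated'],
        'cctest.isolated.gen.json')

A later run of swarming_client/isolate.py can then replay the recorded arguments from the *.isolated.gen.json file.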
diff --git a/chromium/v8/tools/js2c.py b/chromium/v8/tools/js2c.py
index bd50692bb8d..d9151331142 100755
--- a/chromium/v8/tools/js2c.py
+++ b/chromium/v8/tools/js2c.py
@@ -242,7 +242,7 @@ def ExpandInlineMacros(lines):
lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
-INLINE_CONSTANT_PATTERN = re.compile(r'define\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+)[;\n]')
+INLINE_CONSTANT_PATTERN = re.compile(r'define\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+);\n')
def ExpandInlineConstants(lines):
pos = 0
@@ -348,8 +348,8 @@ def BuildFilterChain(macro_filename, message_template_file):
if macro_filename:
(consts, macros) = ReadMacros(ReadFile(macro_filename))
- filter_chain.append(lambda l: ExpandConstants(l, consts))
filter_chain.append(lambda l: ExpandMacros(l, macros))
+ filter_chain.append(lambda l: ExpandConstants(l, consts))
if message_template_file:
message_templates = ReadMessageTemplates(ReadFile(message_template_file))
@@ -417,7 +417,7 @@ def PrepareSources(source_files, native_type, emit_js):
message_template_file = message_template_files[0]
filters = None
- if native_type == "EXTRAS":
+ if native_type in ("EXTRAS", "EXPERIMENTAL_EXTRAS"):
filters = BuildExtraFilterChain()
else:
filters = BuildFilterChain(macro_file, message_template_file)
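The filter-chain reordering above runs ExpandMacros before ExpandConstants, so a constant mentioned inside a macro body is still rewritten after the macro expands; the tightened INLINE_CONSTANT_PATTERN likewise now insists that an inline define ends with ';' directly followed by a newline. A toy illustration of why the filter order matters (these lambdas are stand-ins, not the real js2c filters):

    expand_macros    = lambda l: l.replace('IS_NULL(x)', '(x === kNull)')
    expand_constants = lambda l: l.replace('kNull', 'null')

    line = 'if (IS_NULL(x)) return;'
    for f in [expand_macros, expand_constants]:  # macros first, then constants
        line = f(line)
    print line  # if ((x === null)) return;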
diff --git a/chromium/v8/tools/ll_prof.py b/chromium/v8/tools/ll_prof.py
index f9bea4a61f2..7dac2e05eba 100755
--- a/chromium/v8/tools/ll_prof.py
+++ b/chromium/v8/tools/ll_prof.py
@@ -568,7 +568,7 @@ PERF_EVENT_HEADER_DESC = Descriptor([
])
-# Reference: kernel/events/core.c
+# Reference: kernel/tools/perf/util/event.h
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
@@ -577,6 +577,20 @@ PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pgoff", "u64")
])
+# Reference: kernel/tools/perf/util/event.h
+PERF_MMAP2_EVENT_BODY_DESC = Descriptor([
+ ("pid", "u32"),
+ ("tid", "u32"),
+ ("addr", "u64"),
+ ("len", "u64"),
+ ("pgoff", "u64"),
+ ("maj", "u32"),
+ ("min", "u32"),
+ ("ino", "u64"),
+ ("ino_generation", "u64"),
+ ("prot", "u32"),
+ ("flags","u32")
+])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
@@ -616,6 +630,7 @@ PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
+PERF_RECORD_MMAP2 = 10
PERF_RECORD_SAMPLE = 9
@@ -664,6 +679,15 @@ class TraceReader(object):
mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
return mmap_info
+ def ReadMmap2(self, header, offset):
+ mmap_info = PERF_MMAP2_EVENT_BODY_DESC.Read(self.trace,
+ offset + self.header_size)
+ # Read null-terminated filename.
+ filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
+ offset + header.size]
+ mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
+ return mmap_info
+
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
@@ -973,6 +997,14 @@ if __name__ == "__main__":
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
+ elif header.type == PERF_RECORD_MMAP2:
+ start = time.time()
+ mmap_info = trace_reader.ReadMmap2(header, offset)
+ if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
+ log_reader.ReadUpToGC()
+ else:
+ library_repo.Load(mmap_info, code_map, options)
+ mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
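perf tags each record with header.type; newer kernels typically emit PERF_RECORD_MMAP2 (type 10), which extends MMAP with device, inode and protection fields ahead of the NUL-padded filename, so the reader now dispatches on both types. A struct-based sketch of the same fixed-size layout the new Descriptor declares:

    import struct

    # pid, tid: u32; addr, len, pgoff: u64; maj, min: u32; ino,
    # ino_generation: u64; prot, flags: u32. Little-endian, unpadded,
    # per tools/perf/util/event.h.
    MMAP2_HEAD = struct.Struct('<IIQQQIIQQII')

    def parse_mmap2_body(body):
        fields = MMAP2_HEAD.unpack_from(body)
        filename = body[MMAP2_HEAD.size:].split(chr(0), 1)[0]
        return fields, filename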
diff --git a/chromium/v8/tools/luci-go/linux64/isolate.sha1 b/chromium/v8/tools/luci-go/linux64/isolate.sha1
new file mode 100644
index 00000000000..c2821fca105
--- /dev/null
+++ b/chromium/v8/tools/luci-go/linux64/isolate.sha1
@@ -0,0 +1 @@
+32a3d49a4f7279ad022f346f7d960b2d58e2a0fe \ No newline at end of file
diff --git a/chromium/v8/tools/luci-go/mac64/isolate.sha1 b/chromium/v8/tools/luci-go/mac64/isolate.sha1
new file mode 100644
index 00000000000..fcb6c8fa9e2
--- /dev/null
+++ b/chromium/v8/tools/luci-go/mac64/isolate.sha1
@@ -0,0 +1 @@
+83306c575904ec92c1af9ccc67240d26069df337 \ No newline at end of file
diff --git a/chromium/v8/tools/luci-go/win64/isolate.exe.sha1 b/chromium/v8/tools/luci-go/win64/isolate.exe.sha1
new file mode 100644
index 00000000000..032483cba74
--- /dev/null
+++ b/chromium/v8/tools/luci-go/win64/isolate.exe.sha1
@@ -0,0 +1 @@
+da358c2666ef9b89022e0eadf363cc6e123384e2 \ No newline at end of file
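These one-line *.sha1 files pin the prebuilt isolate binaries by content hash; the binaries themselves are presumably fetched from cloud storage by a tooling hook and checked against the pin. A sketch of that verification step (paths hypothetical):

    import hashlib

    def matches_pin(binary_path, sha1_path):
        expected = open(sha1_path).read().strip()
        with open(binary_path, 'rb') as f:
            actual = hashlib.sha1(f.read()).hexdigest()
        return actual == expected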
diff --git a/chromium/v8/tools/parser-shell.cc b/chromium/v8/tools/parser-shell.cc
index da874595de4..5d4b0cc490d 100644
--- a/chromium/v8/tools/parser-shell.cc
+++ b/chromium/v8/tools/parser-shell.cc
@@ -36,12 +36,12 @@
#include "include/libplatform/libplatform.h"
#include "src/api.h"
#include "src/compiler.h"
-#include "src/scanner-character-streams.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparser.h"
#include "tools/shell-utils.h"
-#include "src/parser.h"
-#include "src/preparse-data-format.h"
-#include "src/preparse-data.h"
-#include "src/preparser.h"
using namespace v8::internal;
diff --git a/chromium/v8/tools/presubmit.py b/chromium/v8/tools/presubmit.py
index 338c7085485..998656908dd 100755
--- a/chromium/v8/tools/presubmit.py
+++ b/chromium/v8/tools/presubmit.py
@@ -35,6 +35,7 @@ except ImportError, e:
md5er = md5.new
+import json
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
@@ -45,12 +46,15 @@ import subprocess
import multiprocessing
from subprocess import PIPE
+from testrunner.local import statusfile
+from testrunner.local import testsuite
+from testrunner.local import utils
+
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
# TODO(bmeurer): Fix and re-enable readability/check
-# TODO(mstarzinger): Fix and re-enable readability/namespace
LINT_RULES = """
-build/header_guard
@@ -58,9 +62,6 @@ LINT_RULES = """
-build/include_what_you_use
-build/namespaces
-readability/check
--readability/inheritance
--readability/namespace
--readability/nolint
+readability/streams
-runtime/references
""".split()
@@ -406,6 +407,61 @@ def CheckExternalReferenceRegistration(workspace):
[sys.executable, join(workspace, "tools", "external-reference-check.py")])
return code == 0
+
+def _CheckStatusFileForDuplicateKeys(filepath):
+ comma_space_bracket = re.compile(", *]")
+ lines = []
+ with open(filepath) as f:
+ for line in f.readlines():
+ # Skip all-comment lines.
+ if line.lstrip().startswith("#"): continue
+ # Strip away comments at the end of the line.
+ comment_start = line.find("#")
+ if comment_start != -1:
+ line = line[:comment_start]
+ line = line.strip()
+ # Strip away trailing commas within the line.
+ line = comma_space_bracket.sub("]", line)
+ if len(line) > 0:
+ lines.append(line)
+
+ # Strip away trailing commas at line ends. Ugh.
+ for i in range(len(lines) - 1):
+ if (lines[i].endswith(",") and len(lines[i + 1]) > 0 and
+ lines[i + 1][0] in ("}", "]")):
+ lines[i] = lines[i][:-1]
+
+ contents = "\n".join(lines)
+ # JSON wants double-quotes.
+ contents = contents.replace("'", '"')
+ # Fill in keywords (like PASS, SKIP).
+ for key in statusfile.KEYWORDS:
+ contents = re.sub(r"\b%s\b" % key, "\"%s\"" % key, contents)
+
+ status = {"success": True}
+ def check_pairs(pairs):
+ keys = {}
+ for key, value in pairs:
+ if key in keys:
+ print("%s: Error: duplicate key %s" % (filepath, key))
+ status["success"] = False
+ keys[key] = True
+
+ json.loads(contents, object_pairs_hook=check_pairs)
+ return status["success"]
+
+def CheckStatusFiles(workspace):
+ success = True
+ suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ for root in suite_paths:
+ suite_path = join(workspace, "test", root)
+ status_file_path = join(suite_path, root + ".status")
+ suite = testsuite.TestSuite.LoadTestSuite(suite_path)
+ if suite and exists(status_file_path):
+ success &= statusfile.PresubmitCheck(status_file_path)
+ success &= _CheckStatusFileForDuplicateKeys(status_file_path)
+ return success
+
def CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
@@ -448,11 +504,12 @@ def Main():
success = True
print "Running C++ lint check..."
if not options.no_lint:
- success = CppLintProcessor().Run(workspace) and success
+ success &= CppLintProcessor().Run(workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
- success = SourceProcessor().Run(workspace) and success
- success = CheckExternalReferenceRegistration(workspace) and success
+ success &= SourceProcessor().Run(workspace)
+ success &= CheckExternalReferenceRegistration(workspace)
+ success &= CheckStatusFiles(workspace)
if success:
return 0
else:
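The duplicate-key detection above works because json.loads hands object_pairs_hook the raw key/value list of every JSON object before Python collapses it into a dict. A standalone sketch:

    import json

    def check_pairs(pairs):
        seen = set()
        for key, _ in pairs:
            if key in seen:
                print "duplicate key: %s" % key
            seen.add(key)
        return dict(pairs)

    json.loads('{"a": 1, "a": 2}', object_pairs_hook=check_pairs)
    # prints: duplicate key: a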
diff --git a/chromium/v8/tools/release/auto_push.py b/chromium/v8/tools/release/auto_push.py
index aba5cba72a7..ca9e5e87347 100755
--- a/chromium/v8/tools/release/auto_push.py
+++ b/chromium/v8/tools/release/auto_push.py
@@ -46,12 +46,12 @@ class Preparation(Step):
class FetchCandidate(Step):
- MESSAGE = "Fetching V8 roll ref."
+ MESSAGE = "Fetching V8 lkgr ref."
def RunStep(self):
- # The roll ref points to the candidate to be rolled.
+ # The lkgr ref points to the candidate to be rolled.
- self.Git("fetch origin +refs/heads/roll:refs/heads/roll")
- self["candidate"] = self.Git("show-ref -s refs/heads/roll").strip()
+ self.Git("fetch origin +refs/heads/lkgr:refs/heads/lkgr")
+ self["candidate"] = self.Git("show-ref -s refs/heads/lkgr").strip()
class LastReleaseBailout(Step):
diff --git a/chromium/v8/tools/release/auto_roll.py b/chromium/v8/tools/release/auto_roll.py
index f7692cf6f97..27fd3709716 100755
--- a/chromium/v8/tools/release/auto_roll.py
+++ b/chromium/v8/tools/release/auto_roll.py
@@ -4,61 +4,68 @@
# found in the LICENSE file.
import argparse
-import json
import os
import sys
-import urllib
from common_includes import *
-import chromium_roll
+ROLL_SUMMARY = ("Summary of changes available at:\n"
+ "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
-class CheckActiveRoll(Step):
- MESSAGE = "Check active roll."
+ISSUE_MSG = (
+"""Please follow these instructions for assigning/CC'ing issues:
+https://github.com/v8/v8/wiki/Triaging%20issues
- @staticmethod
- def ContainsChromiumRoll(changes):
- for change in changes:
- if change["subject"].startswith("Update V8 to"):
- return True
- return False
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.""")
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
def RunStep(self):
- params = {
- "closed": 3,
- "owner": self._options.author,
- "limit": 30,
- "format": "json",
- }
- params = urllib.urlencode(params)
- search_url = "https://codereview.chromium.org/search"
- result = self.ReadURL(search_url, params, wait_plan=[5, 20])
- if self.ContainsChromiumRoll(json.loads(result)["results"]):
- print "Stop due to existing Chromium roll."
- return True
+ self['json_output']['monitoring_state'] = 'preparation'
+ # Update v8 remote tracking branches.
+ self.GitFetchOrigin()
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
class DetectLastRoll(Step):
MESSAGE = "Detect commit ID of the last Chromium roll."
def RunStep(self):
+ self['json_output']['monitoring_state'] = 'detect_last_roll'
+ self["last_roll"] = self._options.last_roll
+ if not self["last_roll"]:
+ # Interpret the DEPS file to retrieve the v8 revision.
+ # TODO(machenbach): This should be part of the roll-deps API of
+ # depot_tools.
+ Var = lambda var: '%s'
+ exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
+
+ # The revision rolled last.
+ self["last_roll"] = vars['v8_revision']
+ self["last_version"] = self.GetVersionTag(self["last_roll"])
+ assert self["last_version"], "The last rolled v8 revision is not tagged."
+
+
+class DetectRevisionToRoll(Step):
+ MESSAGE = "Detect commit ID of the V8 revision to roll."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'detect_revision'
+ self["roll"] = self._options.revision
+ if self["roll"]:
+ # If the revision was passed on the cmd line, continue script execution
+ # in the next step.
+ return False
+
# The revision that should be rolled. Check for the latest of the most
# recent releases based on commit timestamp.
revisions = self.GetRecentReleases(
max_age=self._options.max_age * DAY_IN_SECONDS)
assert revisions, "Didn't find any recent release."
- # Interpret the DEPS file to retrieve the v8 revision.
- # TODO(machenbach): This should be part or the roll-deps api of
- # depot_tools.
- Var = lambda var: '%s'
- exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
-
- # The revision rolled last.
- self["last_roll"] = vars['v8_revision']
- last_version = self.GetVersionTag(self["last_roll"])
- assert last_version, "The last rolled v8 revision is not tagged."
-
# There must be some progress between the last roll and the new candidate
# revision (i.e. we don't go backwards). The revisions are ordered newest
# to oldest. It is possible that the newest timestamp has no progress
@@ -68,35 +75,107 @@ class DetectLastRoll(Step):
version = self.GetVersionTag(revision)
assert version, "Internal error. All recent releases should have a tag"
- if SortingKey(last_version) < SortingKey(version):
+ if SortingKey(self["last_version"]) < SortingKey(version):
self["roll"] = revision
break
else:
print("There is no newer v8 revision than the one in Chromium (%s)."
% self["last_roll"])
+ self['json_output']['monitoring_state'] = 'up_to_date'
return True
-class RollChromium(Step):
- MESSAGE = "Roll V8 into Chromium."
+class PrepareRollCandidate(Step):
+ MESSAGE = "Robustness checks of the roll candidate."
def RunStep(self):
- if self._options.roll:
- args = [
- "--author", self._options.author,
- "--reviewer", self._options.reviewer,
- "--chromium", self._options.chromium,
- "--last-roll", self["last_roll"],
- "--use-commit-queue",
- self["roll"],
- ]
- if self._options.sheriff:
- args.append("--sheriff")
- if self._options.dry_run:
- args.append("--dry-run")
- if self._options.work_dir:
- args.extend(["--work-dir", self._options.work_dir])
- self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
+ self['json_output']['monitoring_state'] = 'prepare_candidate'
+ self["roll_title"] = self.GitLog(n=1, format="%s",
+ git_hash=self["roll"])
+
+ # Make sure the last roll and the roll candidate are releases.
+ version = self.GetVersionTag(self["roll"])
+ assert version, "The revision to roll is not tagged."
+ version = self.GetVersionTag(self["last_roll"])
+ assert version, "The revision used as last roll is not tagged."
+
+
+class SwitchChromium(Step):
+ MESSAGE = "Switch to Chromium checkout."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'switch_chromium'
+ cwd = self._options.chromium
+ self.InitialEnvironmentChecks(cwd)
+ # Check for a clean workdir.
+ if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+ # Assert that the DEPS file is there.
+ if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
+ self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+ MESSAGE = "Update the checkout and create a new branch."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'update_chromium'
+ cwd = self._options.chromium
+ self.GitCheckout("master", cwd=cwd)
+ self.DeleteBranch("work-branch", cwd=cwd)
+ self.Command("gclient", "sync --nohooks", cwd=cwd)
+ self.GitPull(cwd=cwd)
+
+ # Update v8 remotes.
+ self.GitFetchOrigin()
+
+ self.GitCreateBranch("work-branch", cwd=cwd)
+
+
+class UploadCL(Step):
+ MESSAGE = "Create and upload CL."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'upload'
+ cwd = self._options.chromium
+ # Patch DEPS file.
+ if self.Command("roll-dep-svn", "v8 %s" %
+ self["roll"], cwd=cwd) is None:
+ self.Die("Failed to create deps for %s" % self["roll"])
+
+ message = []
+ message.append("Update V8 to %s." % self["roll_title"].lower())
+
+ message.append(
+ ROLL_SUMMARY % (self["last_roll"][:8], self["roll"][:8]))
+
+ message.append(ISSUE_MSG)
+
+ message.append("TBR=%s" % self._options.reviewer)
+ self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
+ if not self._options.dry_run:
+ self.GitUpload(author=self._options.author,
+ force=True,
+ cq=self._options.use_commit_queue,
+ cwd=cwd)
+ print "CL uploaded."
+ else:
+ print "Dry run - don't upload."
+
+ self.GitCheckout("master", cwd=cwd)
+ self.GitDeleteBranch("work-branch", cwd=cwd)
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'success'
+ print("Congratulations, you have successfully rolled %s into "
+ "Chromium."
+ % self["roll"])
+
+ # Clean up all temporary files.
+ Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
class AutoRoll(ScriptsBase):
@@ -104,30 +183,45 @@ class AutoRoll(ScriptsBase):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
- parser.add_argument("--max-age", default=3, type=int,
+ parser.add_argument("--last-roll",
+ help="The git commit ID of the last rolled version. "
+ "Auto-detected if not specified.")
+ parser.add_argument("--max-age", default=7, type=int,
help="Maximum age in days of the latest release.")
- parser.add_argument("--roll", help="Call Chromium roll script.",
- default=False, action="store_true")
+ parser.add_argument("--revision",
+ help="Revision to roll. Auto-detected if not "
+ "specified."),
+ parser.add_argument("--roll", help="Deprecated.",
+ default=True, action="store_true")
+ parser.add_argument("--use-commit-queue",
+ help="Check the CQ bit on upload.",
+ default=True, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
- if not options.reviewer:
- print "A reviewer (-r) is required."
- return False
- if not options.author:
- print "An author (-a) is required."
+ if not options.author or not options.reviewer:
+ print "A reviewer (-r) and an author (-a) are required."
return False
+
+ options.requires_editor = False
+ options.force = True
+ options.manual = False
return True
def _Config(self):
return {
- "PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
+ "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
}
def _Steps(self):
return [
- CheckActiveRoll,
+ Preparation,
DetectLastRoll,
- RollChromium,
+ DetectRevisionToRoll,
+ PrepareRollCandidate,
+ SwitchChromium,
+ UpdateChromiumCheckout,
+ UploadCL,
+ CleanUp,
]
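
Each step above stamps its progress into a shared `json_output` dict, and the runner writes that dict out even when a step aborts. A minimal sketch of the pattern (illustrative class and function names, not the actual recipe infrastructure):

    import json

    class Step(object):
      def __init__(self, state):
        self.state = state
      def run(self):
        raise NotImplementedError

    class DetectLastRoll(Step):
      def run(self):
        # Each step records where the roll got to before doing any work.
        self.state['json_output']['monitoring_state'] = 'detect_last_roll'

    def run_steps(step_classes, json_output_path):
      state = {'json_output': {}}
      try:
        for step_class in step_classes:
          step_class(state).run()
      finally:
        # As in ScriptsBase below: the summary is written even if a step
        # raised, so bots can always inspect the last monitoring_state.
        with open(json_output_path, 'w') as f:
          json.dump(state['json_output'], f)
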
diff --git a/chromium/v8/tools/release/chromium_roll.py b/chromium/v8/tools/release/chromium_roll.py
deleted file mode 100755
index a802bab1e32..00000000000
--- a/chromium/v8/tools/release/chromium_roll.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import argparse
-import os
-import sys
-
-from common_includes import *
-
-ROLL_SUMMARY = ("Summary of changes available at:\n"
- "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
-
-ISSUE_MSG = (
-"""Please follow these instructions for assigning/CC'ing issues:
-https://code.google.com/p/v8-wiki/wiki/TriagingIssues
-
-Please close rolling in case of a roll revert:
-https://v8-roll.appspot.com/""")
-
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- # Update v8 remote tracking branches.
- self.GitFetchOrigin()
- self.Git("fetch origin +refs/tags/*:refs/tags/*")
-
-
-class PrepareRollCandidate(Step):
- MESSAGE = "Robustness checks of the roll candidate."
-
- def RunStep(self):
- self["roll_title"] = self.GitLog(n=1, format="%s",
- git_hash=self._options.roll)
-
- # Make sure the last roll and the roll candidate are releases.
- version = self.GetVersionTag(self._options.roll)
- assert version, "The revision to roll is not tagged."
- version = self.GetVersionTag(self._options.last_roll)
- assert version, "The revision used as last roll is not tagged."
-
-
-class SwitchChromium(Step):
- MESSAGE = "Switch to Chromium checkout."
-
- def RunStep(self):
- cwd = self._options.chromium
- self.InitialEnvironmentChecks(cwd)
- # Check for a clean workdir.
- if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
- self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Assert that the DEPS file is there.
- if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
- self.Die("DEPS file not present.")
-
-
-class UpdateChromiumCheckout(Step):
- MESSAGE = "Update the checkout and create a new branch."
-
- def RunStep(self):
- cwd = self._options.chromium
- self.GitCheckout("master", cwd=cwd)
- self.DeleteBranch("work-branch", cwd=cwd)
- self.Command("gclient", "sync --nohooks", cwd=cwd)
- self.GitPull(cwd=cwd)
-
- # Update v8 remotes.
- self.GitFetchOrigin()
-
- self.GitCreateBranch("work-branch", cwd=cwd)
-
-
-class UploadCL(Step):
- MESSAGE = "Create and upload CL."
-
- def RunStep(self):
- cwd = self._options.chromium
- # Patch DEPS file.
- if self.Command("roll-dep-svn", "v8 %s" %
- self._options.roll, cwd=cwd) is None:
- self.Die("Failed to create deps for %s" % self._options.roll)
-
- message = []
- message.append("Update V8 to %s." % self["roll_title"].lower())
-
- message.append(
- ROLL_SUMMARY % (self._options.last_roll[:8], self._options.roll[:8]))
-
- message.append(ISSUE_MSG)
-
- message.append("TBR=%s" % self._options.reviewer)
- self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
- if not self._options.dry_run:
- self.GitUpload(author=self._options.author,
- force=True,
- cq=self._options.use_commit_queue,
- cwd=cwd)
- print "CL uploaded."
- else:
- print "Dry run - don't upload."
-
- self.GitCheckout("master", cwd=cwd)
- self.GitDeleteBranch("work-branch", cwd=cwd)
-
-class CleanUp(Step):
- MESSAGE = "Done!"
-
- def RunStep(self):
- print("Congratulations, you have successfully rolled %s into "
- "Chromium."
- % self._options.roll)
-
- # Clean up all temporary files.
- Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
-
-
-class ChromiumRoll(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("-c", "--chromium", required=True,
- help=("The path to your Chromium src/ "
- "directory to automate the V8 roll."))
- parser.add_argument("--last-roll", required=True,
- help="The git commit ID of the last rolled version.")
- parser.add_argument("roll", nargs=1, help="Revision to roll."),
- parser.add_argument("--use-commit-queue",
- help="Check the CQ bit on upload.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.author or not options.reviewer:
- print "A reviewer (-r) and an author (-a) are required."
- return False
-
- options.requires_editor = False
- options.force = True
- options.manual = False
- options.roll = options.roll[0]
- return True
-
- def _Config(self):
- return {
- "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
- }
-
- def _Steps(self):
- return [
- Preparation,
- PrepareRollCandidate,
- DetermineV8Sheriff,
- SwitchChromium,
- UpdateChromiumCheckout,
- UploadCL,
- CleanUp,
- ]
-
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(ChromiumRoll().Run())
diff --git a/chromium/v8/tools/release/common_includes.py b/chromium/v8/tools/release/common_includes.py
index 19841a34a66..c2b64c38ec8 100644
--- a/chromium/v8/tools/release/common_includes.py
+++ b/chromium/v8/tools/release/common_includes.py
@@ -50,7 +50,6 @@ DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
-VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
# V8 base directory.
V8_BASE = os.path.dirname(
@@ -206,6 +205,30 @@ def Command(cmd, args="", prefix="", pipe=True, cwd=None):
sys.stderr.flush()
+def SanitizeVersionTag(tag):
+ version_without_prefix = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
+ version_with_prefix = re.compile(r"^tags\/\d+\.\d+\.\d+(?:\.\d+)?$")
+
+ if version_without_prefix.match(tag):
+ return tag
+ elif version_with_prefix.match(tag):
+ return tag[len("tags/"):]
+ else:
+ return None
+
+
+def NormalizeVersionTags(version_tags):
+ normalized_version_tags = []
+
+ # Remove tags/ prefix because of packed refs.
+ for current_tag in version_tags:
+ version_tag = SanitizeVersionTag(current_tag)
+ if version_tag != None:
+ normalized_version_tags.append(version_tag)
+
+ return normalized_version_tags
+
+
# Wrapper for side effects.
class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
@@ -607,10 +630,7 @@ class Step(GitRecipesMixin):
def GetVersionTag(self, revision):
tag = self.Git("describe --tags %s" % revision).strip()
- if VERSION_RE.match(tag):
- return tag
- else:
- return None
+ return SanitizeVersionTag(tag)
def GetRecentReleases(self, max_age):
# Make sure tags are fetched.
@@ -633,7 +653,11 @@ class Step(GitRecipesMixin):
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
- version = sorted(filter(VERSION_RE.match, self.vc.GetTags()),
+
+ all_tags = self.vc.GetTags()
+ only_version_tags = NormalizeVersionTags(all_tags)
+
+ version = sorted(only_version_tags,
key=SortingKey, reverse=True)[0]
self["latest_version"] = version
return version
@@ -714,9 +738,12 @@ class Step(GitRecipesMixin):
class BootstrapStep(Step):
- MESSAGE = "Bootstapping v8 checkout."
+ MESSAGE = "Bootstrapping checkout and state."
def RunStep(self):
+ # Reserve state entry for json output.
+ self['json_output'] = {}
+
if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
self.Die("Can't use v8 checkout with calling script as work checkout.")
# Directory containing the working v8 checkout.
@@ -742,32 +769,6 @@ class UploadStep(Step):
cc=self._options.cc)
-class DetermineV8Sheriff(Step):
- MESSAGE = "Determine the V8 sheriff for code review."
-
- def RunStep(self):
- self["sheriff"] = None
- if not self._options.sheriff: # pragma: no cover
- return
-
- # The sheriff determined by the rotation on the waterfall has a
- # @google.com account.
- url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
- match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url))
-
- # If "channel is sheriff", we can't match an account.
- if match:
- g_name = match.group(1)
- # Optimistically assume that google and chromium account name are the
- # same.
- self["sheriff"] = g_name + "@chromium.org"
- self._options.reviewer = ("%s,%s" %
- (self["sheriff"], self._options.reviewer))
- print "Found active sheriff: %s" % self["sheriff"]
- else:
- print "No active sheriff found."
-
-
def MakeStep(step_class=Step, number=0, state=None, config=None,
options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
# Allow to pass in empty dictionaries.
@@ -814,12 +815,10 @@ class ScriptsBase(object):
help="The author email used for rietveld.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
+ parser.add_argument("--json-output",
+ help="File to write results summary to.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
- parser.add_argument("--sheriff", default=False, action="store_true",
- help=("Determine current sheriff to review CLs. On "
- "success, this will overwrite the reviewer "
- "option."))
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
@@ -872,9 +871,16 @@ class ScriptsBase(object):
for (number, step_class) in enumerate([BootstrapStep] + step_classes):
steps.append(MakeStep(step_class, number, self._state, self._config,
options, self._side_effect_handler))
- for step in steps[options.step:]:
- if step.Run():
- return 0
+
+ try:
+ for step in steps[options.step:]:
+ if step.Run():
+ return 0
+ finally:
+ if options.json_output:
+ with open(options.json_output, "w") as f:
+ json.dump(self._state['json_output'], f)
+
return 0
def Run(self, args=None):
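
The two tag helpers added to common_includes.py are small enough to show standalone. A self-contained rendition, using the same regexes as above, with the intended behavior pinned down by asserts:

    import re

    def SanitizeVersionTag(tag):
      version_without_prefix = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
      version_with_prefix = re.compile(r"^tags/\d+\.\d+\.\d+(?:\.\d+)?$")
      if version_without_prefix.match(tag):
        return tag
      elif version_with_prefix.match(tag):
        return tag[len("tags/"):]
      return None

    # Plain tags pass through, packed-ref tags lose their prefix, and
    # anything else (e.g. "candidate") is rejected.
    assert SanitizeVersionTag("4.8.230") == "4.8.230"
    assert SanitizeVersionTag("tags/4.8.224.1") == "4.8.224.1"
    assert SanitizeVersionTag("candidate") is None
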
diff --git a/chromium/v8/tools/release/merge_to_branch.py b/chromium/v8/tools/release/merge_to_branch.py
index 378a9fd1359..699fe1b3c66 100755
--- a/chromium/v8/tools/release/merge_to_branch.py
+++ b/chromium/v8/tools/release/merge_to_branch.py
@@ -47,10 +47,7 @@ class Preparation(Step):
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
- if self._options.revert_master:
- # FIXME(machenbach): Make revert master obsolete?
- self["merge_to_branch"] = "master"
- elif self._options.branch:
+ if self._options.branch:
self["merge_to_branch"] = self._options.branch
else: # pragma: no cover
self.Die("Please specify a branch to merge to")
@@ -110,10 +107,7 @@ class CreateCommitMessage(Step):
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
- if self._options.revert and not self._options.revert_master:
- action_text = "Rollback of %s"
- else:
- action_text = "Merged %s"
+ action_text = "Merged %s"
# The commit message title is added below after the version is specified.
msg_pieces = [
@@ -146,17 +140,15 @@ class ApplyPatches(Step):
% (commit_hash, self["merge_to_branch"]))
patch = self.GitGetPatch(commit_hash)
TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
- self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"), self._options.revert)
+ self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
if self._options.patch:
- self.ApplyPatch(self._options.patch, self._options.revert)
+ self.ApplyPatch(self._options.patch)
class PrepareVersion(Step):
MESSAGE = "Prepare version file."
def RunStep(self):
- if self._options.revert_master:
- return
# This is used to calculate the patch level increment.
self.ReadAndPersistVersion()
@@ -165,8 +157,6 @@ class IncrementVersion(Step):
MESSAGE = "Increment version number."
def RunStep(self):
- if self._options.revert_master:
- return
new_patch = str(int(self["patch"]) + 1)
if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
@@ -191,12 +181,7 @@ class CommitLocal(Step):
def RunStep(self):
# Add a commit message title.
- if self._options.revert and self._options.revert_master:
- # TODO(machenbach): Find a better convention if multiple patches are
- # reverted in one CL.
- self["commit_title"] = "Revert on master"
- else:
- self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
+ self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
self["new_commit_msg"])
TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
@@ -217,8 +202,6 @@ class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
- if self._options.revert_master:
- return
print "Creating tag %s" % self["version"]
self.vc.Tag(self["version"],
self.vc.RemoteBranch(self["merge_to_branch"]),
@@ -230,12 +213,11 @@ class CleanUp(Step):
def RunStep(self):
self.CommonCleanup()
- if not self._options.revert_master:
- print "*** SUMMARY ***"
- print "version: %s" % self["version"]
- print "branch: %s" % self["merge_to_branch"]
- if self["revision_list"]:
- print "patches: %s" % self["revision_list"]
+ print "*** SUMMARY ***"
+ print "version: %s" % self["version"]
+ print "branch: %s" % self["merge_to_branch"]
+ if self["revision_list"]:
+ print "patches: %s" % self["revision_list"]
class MergeToBranch(ScriptsBase):
@@ -246,9 +228,6 @@ class MergeToBranch(ScriptsBase):
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--branch", help="The branch to merge to.")
- group.add_argument("-R", "--revert-master",
- help="Revert specified patches from master.",
- default=False, action="store_true")
parser.add_argument("revisions", nargs="*",
help="The revisions to merge.")
parser.add_argument("-f", "--force",
@@ -256,14 +235,10 @@ class MergeToBranch(ScriptsBase):
default=False, action="store_true")
parser.add_argument("-m", "--message",
help="A commit message for the patch.")
- parser.add_argument("--revert",
- help="Revert specified patches.",
- default=False, action="store_true")
parser.add_argument("-p", "--patch",
help="A patch file to apply as part of the merge.")
def _ProcessOptions(self, options):
- # TODO(machenbach): Add a test that covers revert from master
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
diff --git a/chromium/v8/tools/release/mergeinfo.py b/chromium/v8/tools/release/mergeinfo.py
index bf07e9f94af..7f8b9cbaf4c 100755
--- a/chromium/v8/tools/release/mergeinfo.py
+++ b/chromium/v8/tools/release/mergeinfo.py
@@ -7,29 +7,105 @@ import argparse
import os
import sys
-from subprocess import call
-
-def print_analysis(gitWorkingDir, hashToSearch):
- print '1.) Info'
- git_execute(gitWorkingDir, ['status'])
- print '2.) Searching for "' + hashToSearch + '"'
- print '=====================ORIGINAL COMMIT START====================='
- git_execute(gitWorkingDir, ['show', hashToSearch])
+from search_related_commits import git_execute
+
+GIT_OPTION_HASH_ONLY = '--pretty=format:%H'
+GIT_OPTION_NO_DIFF = '--quiet'
+GIT_OPTION_ONELINE = '--oneline'
+
+def describe_commit(git_working_dir, hash_to_search, one_line=False):
+ if one_line:
+ return git_execute(git_working_dir, ['show',
+ GIT_OPTION_NO_DIFF,
+ GIT_OPTION_ONELINE,
+ hash_to_search]).strip()
+ return git_execute(git_working_dir, ['show',
+ GIT_OPTION_NO_DIFF,
+ hash_to_search]).strip()
+
+
+def get_followup_commits(git_working_dir, hash_to_search):
+ return git_execute(git_working_dir, ['log',
+ '--grep=' + hash_to_search,
+ GIT_OPTION_HASH_ONLY,
+ 'master']).strip().splitlines()
+
+def get_merge_commits(git_working_dir, hash_to_search):
+ merges = get_related_commits_not_on_master(git_working_dir, hash_to_search)
+ false_merges = get_related_commits_not_on_master(
+ git_working_dir, 'Cr-Branched-From: ' + hash_to_search)
+ false_merges = set(false_merges)
+ return ([merge_commit for merge_commit in merges
+ if merge_commit not in false_merges])
+
+def get_related_commits_not_on_master(git_working_dir, grep_command):
+ commits = git_execute(git_working_dir, ['log',
+ '--all',
+ '--grep=' + grep_command,
+ GIT_OPTION_ONELINE,
+ '--decorate',
+ '--not',
+ 'master',
+ GIT_OPTION_HASH_ONLY])
+ return commits.splitlines()
+
+def get_branches_for_commit(git_working_dir, hash_to_search):
+ branches = git_execute(git_working_dir, ['branch',
+ '--contains',
+ hash_to_search,
+ '-a']).strip()
+ branches = branches.splitlines()
+ return map(str.strip, branches)
+
+def is_lkgr(git_working_dir, hash_to_search):
+ branches = get_branches_for_commit(git_working_dir, hash_to_search)
+ return 'remotes/origin/lkgr' in branches
+
+def get_first_canary(git_working_dir, hash_to_search):
+ branches = get_branches_for_commit(git_working_dir, hash_to_search)
+ canaries = ([currentBranch for currentBranch in branches if
+ currentBranch.startswith('remotes/origin/chromium/')])
+ canaries.sort()
+ if len(canaries) == 0:
+ return 'No Canary coverage'
+ return canaries[0].split('/')[-1]
+
+def print_analysis(git_working_dir, hash_to_search):
+ print '1.) Searching for "' + hash_to_search + '"'
+ print '=====================ORIGINAL COMMIT START==================='
+ print describe_commit(git_working_dir, hash_to_search)
print '=====================ORIGINAL COMMIT END====================='
- print '#####################FOUND MERGES & REVERTS START#####################'
- git_execute(gitWorkingDir, ["log",'--all', '--grep='+hashToSearch])
- print '#####################FOUND MERGES & REVERTS END#####################'
+ print '2.) General information:'
+ print 'Is LKGR: ' + str(is_lkgr(git_working_dir, hash_to_search))
+ print 'Is on Canary: ' + (
+ str(get_first_canary(git_working_dir, hash_to_search)))
+ print '3.) Found follow-up commits, reverts and ports:'
+ followups = get_followup_commits(git_working_dir, hash_to_search)
+ for followup in followups:
+ print describe_commit(git_working_dir, followup, True)
+
+ print '4.) Found merges:'
+ merges = get_merge_commits(git_working_dir, hash_to_search)
+ for currentMerge in merges:
+ print describe_commit(git_working_dir, currentMerge, True)
+ print '---Merged to:'
+ mergeOutput = git_execute(git_working_dir, ['branch',
+ '--contains',
+ currentMerge,
+ '-r']).strip()
+ print mergeOutput
print 'Finished successfully'
-def git_execute(workingDir, commands):
- return call(["git", '-C', workingDir] + commands)
+if __name__ == '__main__': # pragma: no cover
+ parser = argparse.ArgumentParser('Tool to check where a git commit was'
+ ' merged and reverted.')
-if __name__ == "__main__": # pragma: no cover
- parser = argparse.ArgumentParser('Tool to check where a git commit was merged and reverted.')
- parser.add_argument("-g", "--git-dir", required=False, default='.',
- help="The path to your git working directory.")
+ parser.add_argument('-g', '--git-dir', required=False, default='.',
+ help='The path to your git working directory.')
- parser.add_argument('hash', nargs=1, help="Hash of the commit to be searched.")
+ parser.add_argument('hash',
+ nargs=1,
+ help='Hash of the commit to be searched.')
args = sys.argv[1:]
options = parser.parse_args(args)
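
The interesting wrinkle in the new mergeinfo.py is how get_merge_commits avoids false positives: a commit that mentions the hash only via a "Cr-Branched-From:" line marks a branch point, not a merge. Reduced to its core, the filter is a set difference; in this sketch plain hash lists stand in for the two git log queries:

    def filter_merges(cites_hash, cites_branched_from):
      # cites_hash: commits off master whose message contains the hash.
      # cites_branched_from: commits matching "Cr-Branched-From: <hash>".
      false_merges = set(cites_branched_from)
      return [c for c in cites_hash if c not in false_merges]

    assert filter_merges(['merge1', 'branchpoint'],
                         ['branchpoint']) == ['merge1']
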
diff --git a/chromium/v8/tools/release/releases.py b/chromium/v8/tools/release/releases.py
index 5b826fccba0..7b659ccb806 100755
--- a/chromium/v8/tools/release/releases.py
+++ b/chromium/v8/tools/release/releases.py
@@ -463,10 +463,15 @@ class RetrieveInformationOnChromeReleases(Step):
def _GetGitHashForV8Version(self, v8_version):
if v8_version == "N/A":
return ""
+
+ real_v8_version = v8_version
if v8_version.split(".")[3]== "0":
- return self.GitGetHashOfTag(v8_version[:-2])
+ real_v8_version = v8_version[:-2]
- return self.GitGetHashOfTag(v8_version)
+ try:
+ return self.GitGetHashOfTag(real_v8_version)
+ except GitFailedException:
+ return ""
def _CreateCandidate(self, current_version):
params = None
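
The releases.py change hardens tag lookup in two ways: a four-component version with patch level 0 is resolved via its three-component tag name, and a tag that cannot be resolved now yields an empty string instead of aborting the report. A sketch, with `git_get_hash_of_tag` passed in as a stand-in for the GitGetHashOfTag method:

    class GitFailedException(Exception):
      pass

    def get_hash_for_v8_version(v8_version, git_get_hash_of_tag):
      # As in the original, v8_version is assumed to be "N/A" or to have
      # four components; a trailing ".0" patch level means the release
      # was tagged under the three-component name.
      if v8_version == "N/A":
        return ""
      real_v8_version = v8_version
      if v8_version.split(".")[3] == "0":
        real_v8_version = v8_version[:-2]  # "4.8.230.0" -> "4.8.230"
      try:
        return git_get_hash_of_tag(real_v8_version)
      except GitFailedException:
        return ""  # a missing tag no longer aborts the whole run
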
diff --git a/chromium/v8/tools/release/search_related_commits.py b/chromium/v8/tools/release/search_related_commits.py
index aae258443b5..d27aa56f866 100755
--- a/chromium/v8/tools/release/search_related_commits.py
+++ b/chromium/v8/tools/release/search_related_commits.py
@@ -48,7 +48,7 @@ def _search_related_commits(
return []
# Extract commit position
- original_message = _git_execute(
+ original_message = git_execute(
git_working_dir,
["show", "-s", "--format=%B", start_hash],
verbose)
@@ -74,13 +74,13 @@ def _search_related_commits(
search_range,
]
- found_by_hash = _git_execute(
+ found_by_hash = git_execute(
git_working_dir, git_args(start_hash), verbose).strip()
if verbose:
print "2.) Found by hash: " + found_by_hash
- found_by_commit_pos = _git_execute(
+ found_by_commit_pos = git_execute(
git_working_dir, git_args(commit_position), verbose).strip()
if verbose:
@@ -90,7 +90,7 @@ def _search_related_commits(
title = title.replace("[", "\\[")
title = title.replace("]", "\\]")
- found_by_title = _git_execute(
+ found_by_title = git_execute(
git_working_dir, git_args(title), verbose).strip()
if verbose:
@@ -113,7 +113,7 @@ def _search_related_commits(
return hits
def _find_commits_inbetween(start_hash, end_hash, git_working_dir, verbose):
- commits_between = _git_execute(
+ commits_between = git_execute(
git_working_dir,
["rev-list", "--reverse", start_hash + ".." + end_hash],
verbose)
@@ -129,7 +129,7 @@ def _remove_duplicates(array):
no_duplicates.append(current)
return no_duplicates
-def _git_execute(working_dir, args, verbose=False):
+def git_execute(working_dir, args, verbose=False):
command = ["git", "-C", working_dir] + args
if verbose:
print "Git working dir: " + working_dir
@@ -145,7 +145,7 @@ def _git_execute(working_dir, args, verbose=False):
return output
def _pretty_print_entry(hash, git_dir, pre_text, verbose):
- text_to_print = _git_execute(
+ text_to_print = git_execute(
git_dir,
["show",
"--quiet",
@@ -164,7 +164,7 @@ def main(options):
options.verbose)
sort_key = lambda x: (
- _git_execute(
+ git_execute(
options.git_dir,
["show", "--quiet", "--date=iso", x, "--format=%ad"],
options.verbose)).strip()
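
The only change in search_related_commits.py is dropping the leading underscore so mergeinfo.py can import git_execute. The wrapper stays a thin subprocess shim; an equivalent minimal form (check_output in place of the verbose Popen plumbing):

    import subprocess

    def git_execute(working_dir, args):
      # Same contract as the renamed helper: run git inside working_dir
      # and return its stdout as a string.
      return subprocess.check_output(["git", "-C", working_dir] + args)
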
diff --git a/chromium/v8/tools/release/test_mergeinfo.py b/chromium/v8/tools/release/test_mergeinfo.py
new file mode 100755
index 00000000000..d455fa23748
--- /dev/null
+++ b/chromium/v8/tools/release/test_mergeinfo.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mergeinfo
+import shutil
+import unittest
+
+from collections import namedtuple
+from os import path
+from subprocess import Popen, PIPE, check_call
+
+TEST_CONFIG = {
+ "GIT_REPO": "/tmp/test-v8-search-related-commits",
+}
+
+class TestMergeInfo(unittest.TestCase):
+
+ base_dir = TEST_CONFIG["GIT_REPO"]
+
+ def _execute_git(self, git_args):
+
+ fullCommand = ["git", "-C", self.base_dir] + git_args
+ p = Popen(args=fullCommand, stdin=PIPE,
+ stdout=PIPE, stderr=PIPE)
+ output, err = p.communicate()
+ rc = p.returncode
+ if rc != 0:
+ raise Exception(err)
+ return output
+
+ def setUp(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ check_call(["git", "init", self.base_dir])
+
+ # Initial commit
+ message = '''Initial commit'''
+
+ self._make_empty_commit(message)
+
+ def tearDown(self):
+ if path.exists(self.base_dir):
+ shutil.rmtree(self.base_dir)
+
+ def _assert_correct_standard_result(
+ self, result, all_commits, hash_of_first_commit):
+ self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertTrue(
+ result.get(hash_of_first_commit),
+ "Master commit is wrong")
+
+ self.assertEqual(
+ len(result[hash_of_first_commit]),
+ 1,
+ "Child commit not found")
+ self.assertEqual(
+ all_commits[2],
+ result[hash_of_first_commit][0],
+ "Child commit wrong")
+
+ def _get_commits(self):
+ commits = self._execute_git(
+ ["log", "--format=%H", "--reverse"]).splitlines()
+ return commits
+
+ def _make_empty_commit(self, message):
+ self._execute_git(["commit", "--allow-empty", "-m", message])
+ return self._get_commits()[-1]
+
+ def testCanDescribeCommit(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = mergeinfo.describe_commit(
+ self.base_dir,
+ hash_of_first_commit).splitlines()
+
+ self.assertEqual(
+ result[0],
+ 'commit ' + hash_of_first_commit)
+
+ def testCanDescribeCommitSingleLine(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ result = mergeinfo.describe_commit(
+ self.base_dir,
+ hash_of_first_commit, True).splitlines()
+
+ self.assertEqual(
+ str(result[0]),
+ str(hash_of_first_commit[0:7]) + ' Initial commit')
+
+ def testSearchFollowUpCommits(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ message = 'Follow-up commit of ' + hash_of_first_commit
+ self._make_empty_commit(message)
+ self._make_empty_commit(message)
+ self._make_empty_commit(message)
+ commits = self._get_commits()
+ message = 'Not related commit'
+ self._make_empty_commit(message)
+
+ followups = mergeinfo.get_followup_commits(
+ self.base_dir,
+ hash_of_first_commit)
+ self.assertEqual(set(followups), set(commits[1:]))
+
+ def testSearchMerges(self):
+ self._execute_git(['branch', 'test'])
+ self._execute_git(['checkout', 'master'])
+ message = 'real initial commit'
+ self._make_empty_commit(message)
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ self._execute_git(['checkout', 'test'])
+ message = 'Not related commit'
+ self._make_empty_commit(message)
+
+ # This should be found
+ message = 'Merge ' + hash_of_first_commit
+ hash_of_hit = self._make_empty_commit(message)
+
+ # This should be ignored
+ message = 'Cr-Branched-From: ' + hash_of_first_commit
+ hash_of_ignored = self._make_empty_commit(message)
+
+ self._execute_git(['checkout', 'master'])
+
+ followups = mergeinfo.get_followup_commits(
+ self.base_dir,
+ hash_of_first_commit)
+
+    # Check that follow-ups and merges do not overlap.
+ self.assertEqual(len(followups), 0)
+
+ message = 'Follow-up commit of ' + hash_of_first_commit
+ hash_of_followup = self._make_empty_commit(message)
+
+ merges = mergeinfo.get_merge_commits(self.base_dir, hash_of_first_commit)
+    # Check that the follow-up commit is ignored.
+ self.assertTrue(hash_of_followup not in merges)
+
+ # Check for proper return of merges
+ self.assertTrue(hash_of_hit in merges)
+ self.assertTrue(hash_of_ignored not in merges)
+
+ def testIsLkgr(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+ self._make_empty_commit('This one is the lkgr head')
+ self._execute_git(['branch', 'remotes/origin/lkgr'])
+ hash_of_not_lkgr = self._make_empty_commit('This one is not yet lkgr')
+
+ self.assertTrue(mergeinfo.is_lkgr(
+ self.base_dir, hash_of_first_commit))
+ self.assertFalse(mergeinfo.is_lkgr(
+ self.base_dir, hash_of_not_lkgr))
+
+ def testShowFirstCanary(self):
+ commits = self._get_commits()
+ hash_of_first_commit = commits[0]
+
+ self.assertEqual(mergeinfo.get_first_canary(
+ self.base_dir, hash_of_first_commit), 'No Canary coverage')
+
+ self._execute_git(['branch', 'remotes/origin/chromium/2345'])
+ self._execute_git(['branch', 'remotes/origin/chromium/2346'])
+
+ self.assertEqual(mergeinfo.get_first_canary(
+ self.base_dir, hash_of_first_commit), '2345')
+
+if __name__ == "__main__":
+ unittest.main()
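
test_mergeinfo.py builds a real throwaway git repository per test instead of mocking git. The same fixture pattern in miniature; the `-c` identity flags are an addition here so the commit also works on machines without a configured git user:

    import shutil
    import subprocess
    import tempfile
    import unittest

    class GitFixtureTest(unittest.TestCase):
      def setUp(self):
        self.repo = tempfile.mkdtemp()
        subprocess.check_call(['git', 'init', self.repo])
        subprocess.check_call(
            ['git', '-C', self.repo,
             '-c', 'user.name=test', '-c', 'user.email=test@test',
             'commit', '--allow-empty', '-m', 'Initial commit'])

      def tearDown(self):
        shutil.rmtree(self.repo)

      def test_has_one_commit(self):
        out = subprocess.check_output(
            ['git', '-C', self.repo, 'rev-list', '--count', 'HEAD'])
        self.assertEqual(out.strip(), b'1')

    if __name__ == '__main__':
      unittest.main()
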
diff --git a/chromium/v8/tools/release/test_scripts.py b/chromium/v8/tools/release/test_scripts.py
index 32d0fb837c8..4a3cb5b24a1 100644
--- a/chromium/v8/tools/release/test_scripts.py
+++ b/chromium/v8/tools/release/test_scripts.py
@@ -74,6 +74,30 @@ AUTO_PUSH_ARGS = [
class ToplevelTest(unittest.TestCase):
+  def testSanitizeVersionTags(self):
+ self.assertEquals("4.8.230", SanitizeVersionTag("4.8.230"))
+ self.assertEquals("4.8.230", SanitizeVersionTag("tags/4.8.230"))
+ self.assertEquals(None, SanitizeVersionTag("candidate"))
+
+ def testNormalizeVersionTags(self):
+ input = ["4.8.230",
+ "tags/4.8.230",
+ "tags/4.8.224.1",
+ "4.8.224.1",
+ "4.8.223.1",
+ "tags/4.8.223",
+ "tags/4.8.231",
+ "candidates"]
+ expected = ["4.8.230",
+ "4.8.230",
+ "4.8.224.1",
+ "4.8.224.1",
+ "4.8.223.1",
+ "4.8.223",
+ "4.8.231",
+ ]
+ self.assertEquals(expected, NormalizeVersionTags(input))
+
def testSortBranches(self):
S = releases.SortBranches
self.assertEquals(["3.1", "2.25"], S(["2.25", "3.1"])[0:2])
@@ -979,26 +1003,68 @@ git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
- ROLL_COMMIT_MSG = """Update V8 to version 3.22.4 (based on abc).
+ ROLL_COMMIT_MSG = """Update V8 to version 3.22.4.
Summary of changes available at:
https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
Please follow these instructions for assigning/CC'ing issues:
-https://code.google.com/p/v8-wiki/wiki/TriagingIssues
+https://github.com/v8/v8/wiki/Triaging%20issues
+
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.
+
+TBR=reviewer@chromium.org"""
+
+ # Snippet from the original DEPS file.
+ FAKE_DEPS = """
+vars = {
+ "v8_revision": "last_roll_hsh",
+}
+deps = {
+ "src/v8":
+ (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
+ Var("v8_revision"),
+}
+"""
+
+ def testChromiumRollUpToDate(self):
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+ self.Expect([
+ Cmd("git fetch origin", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git describe --tags last_roll_hsh", "3.22.4"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nroll_hsh\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ ])
+
+ result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
+ AUTO_PUSH_ARGS + [
+ "-c", TEST_CONFIG["CHROMIUM"],
+ "--json-output", json_output_file])
+ self.assertEquals(0, result)
+ json_output = json.loads(FileToText(json_output_file))
+ self.assertEquals("up_to_date", json_output["monitoring_state"])
-TBR=g_name@chromium.org,reviewer@chromium.org"""
def testChromiumRoll(self):
# Setup fake directory structures.
TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
chrome_dir = TEST_CONFIG["CHROMIUM"]
os.makedirs(os.path.join(chrome_dir, "v8"))
- # Write fake deps file.
- TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
- os.path.join(chrome_dir, "DEPS"))
def WriteDeps():
TextToFile("Some line\n \"v8_revision\": \"22624\",\n some line",
os.path.join(chrome_dir, "DEPS"))
@@ -1006,12 +1072,17 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
expectations = [
Cmd("git fetch origin", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git log -1 --format=%s roll_hsh",
- "Version 3.22.4 (based on abc)\n"),
+ Cmd("git describe --tags last_roll_hsh", "3.22.3.1"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nroll_hsh\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git log -1 --format=%s roll_hsh", "Version 3.22.4\n"),
Cmd("git describe --tags roll_hsh", "3.22.4"),
Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
- URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
- "document.write('g_name')"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch", "", cwd=chrome_dir),
@@ -1024,23 +1095,23 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
"--author \"author@chromium.org <author@chromium.org>\"" %
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
- cwd=chrome_dir),
+ Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+ "--use-commit-queue", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
]
self.Expect(expectations)
args = ["-a", "author@chromium.org", "-c", chrome_dir,
- "--sheriff",
- "-r", "reviewer@chromium.org",
- "--last-roll", "last_roll_hsh",
- "roll_hsh"]
- ChromiumRoll(TEST_CONFIG, self).Run(args)
+ "-r", "reviewer@chromium.org", "--json-output", json_output_file]
+ auto_roll.AutoRoll(TEST_CONFIG, self).Run(args)
deps = FileToText(os.path.join(chrome_dir, "DEPS"))
self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
+ json_output = json.loads(FileToText(json_output_file))
+ self.assertEquals("success", json_output["monitoring_state"])
+
def testCheckLastPushRecently(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
@@ -1058,8 +1129,8 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
def testAutoPush(self):
self.Expect([
Cmd("git fetch", ""),
- Cmd("git fetch origin +refs/heads/roll:refs/heads/roll", ""),
- Cmd("git show-ref -s refs/heads/roll", "abc123\n"),
+ Cmd("git fetch origin +refs/heads/lkgr:refs/heads/lkgr", ""),
+ Cmd("git show-ref -s refs/heads/lkgr", "abc123\n"),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
@@ -1075,74 +1146,6 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
self.assertEquals("abc123", state["candidate"])
- def testAutoRollExistingRoll(self):
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"},"
- "{\"subject\": \"Update V8 to Version...\"}]}")),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
- self.assertEquals(0, result)
-
- # Snippet from the original DEPS file.
- FAKE_DEPS = """
-vars = {
- "v8_revision": "abcd123455",
-}
-deps = {
- "src/v8":
- (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
- Var("v8_revision"),
-}
-"""
-
- def testAutoRollUpToDate(self):
- TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
- TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git rev-list --max-age=740800 --tags",
- "bad_tag\nhash_234\nhash_123"),
- Cmd("git describe --tags bad_tag", ""),
- Cmd("git describe --tags hash_234", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- Cmd("git describe --tags abcd123455", "3.22.4"),
- Cmd("git describe --tags hash_234", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
- self.assertEquals(0, result)
-
- def testAutoRoll(self):
- TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
- TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
-
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git rev-list --max-age=740800 --tags",
- "bad_tag\nhash_234\nhash_123"),
- Cmd("git describe --tags bad_tag", ""),
- Cmd("git describe --tags hash_234", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- Cmd("git describe --tags abcd123455", "3.22.3.1"),
- Cmd("git describe --tags hash_234", "3.22.4"),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
- self.assertEquals(0, result)
-
def testMergeToBranch(self):
TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
diff --git a/chromium/v8/tools/run-deopt-fuzzer.py b/chromium/v8/tools/run-deopt-fuzzer.py
index 89474d81621..70e106ec1bf 100755
--- a/chromium/v8/tools/run-deopt-fuzzer.py
+++ b/chromium/v8/tools/run-deopt-fuzzer.py
@@ -316,6 +316,7 @@ def Main():
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data:
@@ -380,7 +381,8 @@ def Execute(arch, mode, args, options, suites, workspace):
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
False, # No predictable mode.
- False) # No no_harness mode.
+ False, # No no_harness mode.
+ False) # Don't use perf data.
# Find available test suites and read test cases from them.
variables = {
@@ -388,6 +390,8 @@ def Execute(arch, mode, args, options, suites, workspace):
"asan": options.asan,
"deopt_fuzzer": True,
"gc_stress": False,
+ "gcov_coverage": False,
+ "ignition": False,
"isolates": options.isolates,
"mode": mode,
"no_i18n": False,
diff --git a/chromium/v8/tools/run-tests.py b/chromium/v8/tools/run-tests.py
index 516582ef0cb..fe8091efb36 100755
--- a/chromium/v8/tools/run-tests.py
+++ b/chromium/v8/tools/run-tests.py
@@ -51,6 +51,9 @@ from testrunner.network import network_execution
from testrunner.objects import context
+# Base dir of the v8 checkout to be used as cwd.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
ARCH_GUESS = utils.DefaultArch()
# Map of test name synonyms to lists of test suites. Should be ordered by
@@ -74,6 +77,10 @@ TEST_MAP = {
"intl",
"unittests",
],
+ "ignition": [
+ "mjsunit",
+ "cctest",
+ ],
"optimize_for_size": [
"mjsunit",
"cctest",
@@ -87,10 +94,11 @@ TEST_MAP = {
TIMEOUT_DEFAULT = 60
-VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
+VARIANTS = ["default", "stress", "turbofan"]
EXHAUSTIVE_VARIANTS = VARIANTS + [
- # TODO(machenbach): Add always opt turbo variant.
+ "nocrankshaft",
+ "turbofan_opt",
]
DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
@@ -194,6 +202,9 @@ def BuildOptions():
result.add_option("--asan",
help="Regard test expectations for ASAN",
default=False, action="store_true")
+ result.add_option("--cfi-vptr",
+ help="Run tests with UBSAN cfi_vptr option.",
+ default=False, action="store_true")
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
@@ -217,6 +228,9 @@ def BuildOptions():
result.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
+ result.add_option("--gcov-coverage",
+ help="Uses executables instrumented for gcov coverage",
+ default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
@@ -228,6 +242,8 @@ def BuildOptions():
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
+ result.add_option("--ignition", help="Skip tests which don't run in ignition",
+ default=False, action="store_true")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
@@ -304,6 +320,9 @@ def BuildOptions():
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
+ result.add_option("--swarming",
+ help="Indicates running test driver on swarming.",
+ default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -348,6 +367,41 @@ def BuildbotToV8Mode(config):
mode = config[:-4] if config.endswith('_x64') else config
return mode.lower()
+def SetupEnvironment(options):
+ """Setup additional environment variables."""
+ symbolizer = 'external_symbolizer_path=%s' % (
+ os.path.join(
+ BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
+ 'llvm-symbolizer',
+ )
+ )
+
+ if options.asan:
+ os.environ['ASAN_OPTIONS'] = symbolizer
+
+ if options.cfi_vptr:
+ os.environ['UBSAN_OPTIONS'] = ":".join([
+ 'print_stacktrace=1',
+ 'print_summary=1',
+ 'symbolize=1',
+ symbolizer,
+ ])
+
+ if options.msan:
+ os.environ['MSAN_OPTIONS'] = symbolizer
+
+ if options.tsan:
+ suppressions_file = os.path.join(
+ BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
+ os.environ['TSAN_OPTIONS'] = " ".join([
+ symbolizer,
+ 'suppressions=%s' % suppressions_file,
+ 'exit_code=0',
+ 'report_thread_leaks=0',
+ 'history_size=7',
+ 'report_destroy_locked=0',
+ ])
+
def ProcessOptions(options):
global ALL_VARIANTS
global EXHAUSTIVE_VARIANTS
@@ -412,11 +466,6 @@ def ProcessOptions(options):
if options.tsan:
VARIANTS = ["default"]
- suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
- 'sanitizers', 'tsan_suppressions.txt')
- tsan_options = '%s suppressions=%s' % (
- os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
- os.environ['TSAN_OPTIONS'] = tsan_options
if options.j == 0:
options.j = multiprocessing.cpu_count()
@@ -476,11 +525,32 @@ def ProcessOptions(options):
if not CheckTestMode("pass|fail test", options.pass_fail_tests):
return False
if options.no_i18n:
+ TEST_MAP["bot_default"].remove("intl")
TEST_MAP["default"].remove("intl")
return True
-def ShardTests(tests, shard_count, shard_run):
+def ShardTests(tests, options):
+ # Read gtest shard configuration from environment (e.g. set by swarming).
+ # If none is present, use values passed on the command line.
+ shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+ shard_run = os.environ.get('GTEST_SHARD_INDEX')
+ if shard_run is not None:
+ # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+ shard_run = int(shard_run) + 1
+ else:
+ shard_run = options.shard_run
+
+ if options.shard_count > 1:
+ # Log if a value was passed on the cmd line and it differs from the
+ # environment variables.
+ if options.shard_count != shard_count:
+ print("shard_count from cmd line differs from environment variable "
+ "GTEST_TOTAL_SHARDS")
+ if options.shard_run > 1 and options.shard_run != shard_run:
+ print("shard_run from cmd line differs from environment variable "
+ "GTEST_SHARD_INDEX")
+
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
@@ -497,20 +567,23 @@ def ShardTests(tests, shard_count, shard_run):
def Main():
+ # Use the v8 root as cwd as some test cases use "load" with relative paths.
+ os.chdir(BASE_DIR)
+
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
+ SetupEnvironment(options)
exit_code = 0
- workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
if not options.no_presubmit:
print ">>> running presubmit tests"
exit_code = subprocess.call(
- [sys.executable, join(workspace, "tools", "presubmit.py")])
+ [sys.executable, join(BASE_DIR, "tools", "presubmit.py")])
- suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
# Use default tests if no test configuration was provided at the cmd line.
if len(args) == 0:
@@ -520,7 +593,7 @@ def Main():
# suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in TEST_MAP:
- return [suite for suite in TEST_MAP[arg]]
+ return [suite for suite in TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
@@ -535,8 +608,9 @@ def Main():
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(workspace, "test", root))
+ os.path.join(BASE_DIR, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data or options.download_data_only:
@@ -548,14 +622,14 @@ def Main():
for (arch, mode) in options.arch_and_mode:
try:
- code = Execute(arch, mode, args, options, suites, workspace)
+ code = Execute(arch, mode, args, options, suites)
except KeyboardInterrupt:
return 2
exit_code = exit_code or code
return exit_code
-def Execute(arch, mode, args, options, suites, workspace):
+def Execute(arch, mode, args, options, suites):
print(">>> Running tests for %s.%s" % (arch, mode))
shell_dir = options.shell_dir
@@ -563,15 +637,14 @@ def Execute(arch, mode, args, options, suites, workspace):
if options.buildbot:
# TODO(machenbach): Get rid of different output folder location on
# buildbot. Currently this is capitalized Release and Debug.
- shell_dir = os.path.join(workspace, options.outdir, mode)
+ shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
mode = BuildbotToV8Mode(mode)
else:
shell_dir = os.path.join(
- workspace,
+ BASE_DIR,
options.outdir,
"%s.%s" % (arch, MODES[mode]["output_folder"]),
)
- shell_dir = os.path.relpath(shell_dir)
if not os.path.exists(shell_dir):
raise Exception('Could not find shell_dir: "%s"' % shell_dir)
@@ -608,7 +681,8 @@ def Execute(arch, mode, args, options, suites, workspace):
options.rerun_failures_count,
options.rerun_failures_max,
options.predictable,
- options.no_harness)
+ options.no_harness,
+ use_perf_data=not options.swarming)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -621,6 +695,8 @@ def Execute(arch, mode, args, options, suites, workspace):
"asan": options.asan,
"deopt_fuzzer": False,
"gc_stress": options.gc_stress,
+ "gcov_coverage": options.gcov_coverage,
+ "ignition": options.ignition,
"isolates": options.isolates,
"mode": MODES[mode]["status_mode"],
"no_i18n": options.no_i18n,
@@ -672,7 +748,7 @@ def Execute(arch, mode, args, options, suites, workspace):
else:
s.tests = variant_tests
- s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
+ s.tests = ShardTests(s.tests, options)
num_tests += len(s.tests)
if options.cat:
@@ -715,7 +791,7 @@ def Execute(arch, mode, args, options, suites, workspace):
if run_networked:
runner = network_execution.NetworkedRunner(suites, progress_indicator,
- ctx, peers, workspace)
+ ctx, peers, BASE_DIR)
else:
runner = execution.Runner(suites, progress_indicator, ctx)
@@ -728,6 +804,11 @@ def Execute(arch, mode, args, options, suites, workspace):
if num_tests == 0:
print("Warning: no tests were run!")
+ if exit_code == 1 and options.json_test_results:
+ print("Force exit code 0 after failures. Json test results file generated "
+ "with failure information.")
+ exit_code = 0
+
return exit_code
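
The new ShardTests logic gives swarming's GTEST environment variables precedence over the command-line flags, converting the 0-based GTEST_SHARD_INDEX to v8's 1-based shard_run. The resolution step in isolation (helper name is illustrative):

    import os

    def resolve_sharding(flag_shard_count, flag_shard_run, env=None):
      env = env if env is not None else os.environ
      shard_count = int(env.get('GTEST_TOTAL_SHARDS', flag_shard_count))
      shard_run = env.get('GTEST_SHARD_INDEX')
      if shard_run is not None:
        shard_run = int(shard_run) + 1  # GTEST is 0-based, v8 is 1-based
      else:
        shard_run = flag_shard_run
      return shard_count, shard_run

    # The swarming environment wins over the flags:
    assert resolve_sharding(1, 1, {'GTEST_TOTAL_SHARDS': '4',
                                   'GTEST_SHARD_INDEX': '0'}) == (4, 1)
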
diff --git a/chromium/v8/tools/run-valgrind.py b/chromium/v8/tools/run-valgrind.py
index f25f7a113c5..e3f84f58fed 100755
--- a/chromium/v8/tools/run-valgrind.py
+++ b/chromium/v8/tools/run-valgrind.py
@@ -29,23 +29,47 @@
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
+# Uses valgrind from third_party/valgrind. Assumes the executable is passed
+# with a path relative to the v8 root.
+
+from os import path
+import platform
+import re
import subprocess
import sys
-import re
+
+V8_ROOT = path.dirname(path.dirname(path.abspath(__file__)))
+MACHINE = 'linux_x64' if platform.machine() == 'x86_64' else 'linux_x86'
+VALGRIND_ROOT = path.join(V8_ROOT, 'third_party', 'valgrind', MACHINE)
+VALGRIND_BIN = path.join(VALGRIND_ROOT, 'bin', 'valgrind')
+VALGRIND_LIB = path.join(VALGRIND_ROOT, 'lib', 'valgrind')
VALGRIND_ARGUMENTS = [
- 'valgrind',
+ VALGRIND_BIN,
'--error-exitcode=1',
'--leak-check=full',
- '--smc-check=all'
+ '--smc-check=all',
]
+if len(sys.argv) < 2:
+ print 'Please provide an executable to analyze.'
+ sys.exit(1)
+
+executable = path.join(V8_ROOT, sys.argv[1])
+if not path.exists(executable):
+ print 'Cannot find the file specified: %s' % executable
+ sys.exit(1)
+
# Compute the command line.
-command = VALGRIND_ARGUMENTS + sys.argv[1:]
+command = VALGRIND_ARGUMENTS + [executable] + sys.argv[2:]
# Run valgrind.
-process = subprocess.Popen(command, stderr=subprocess.PIPE)
+process = subprocess.Popen(
+ command,
+ stderr=subprocess.PIPE,
+ env={'VALGRIND_LIB': VALGRIND_LIB}
+)
code = process.wait();
errors = process.stderr.readlines();
@@ -74,4 +98,5 @@ if len(leaks) < 2 or len(leaks) > 3:
sys.exit(1)
# No leaks found.
+sys.stderr.writelines(errors)
sys.exit(0)
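
run-valgrind.py now uses the valgrind checked into third_party rather than whatever is on PATH; because a relocated valgrind binary cannot locate its tool libraries on its own, VALGRIND_LIB must point at the bundled lib directory. The essential invocation, with illustrative paths:

    import os
    import subprocess

    VALGRIND_ROOT = 'third_party/valgrind/linux_x64'  # assumed layout

    process = subprocess.Popen(
        [os.path.join(VALGRIND_ROOT, 'bin', 'valgrind'),
         '--error-exitcode=1', '--leak-check=full', 'out/Release/d8'],
        stderr=subprocess.PIPE,
        # Without this, the relocated binary fails to find its tools.
        env={'VALGRIND_LIB': os.path.join(VALGRIND_ROOT, 'lib',
                                          'valgrind')})
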
diff --git a/chromium/v8/tools/shell-utils.h b/chromium/v8/tools/shell-utils.h
index 31bd8ea8f68..bfd729d9b5f 100644
--- a/chromium/v8/tools/shell-utils.h
+++ b/chromium/v8/tools/shell-utils.h
@@ -66,4 +66,5 @@ const byte* ReadFileAndRepeat(const char* name, int* size, int repeat) {
return chars;
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/tools/testrunner/local/execution.py b/chromium/v8/tools/testrunner/local/execution.py
index 4c63fb6e63f..c9fe54175a3 100644
--- a/chromium/v8/tools/testrunner/local/execution.py
+++ b/chromium/v8/tools/testrunner/local/execution.py
@@ -26,6 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import collections
import os
import shutil
import sys
@@ -35,10 +36,17 @@ from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
+from . import testsuite
from . import utils
-class Job(object):
+# Base dir of the v8 checkout.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__)))))
+TEST_DIR = os.path.join(BASE_DIR, "test")
+
+
+class Instructions(object):
def __init__(self, command, dep_command, test_id, timeout, verbose):
self.command = command
self.dep_command = dep_command
@@ -47,24 +55,119 @@ class Job(object):
self.verbose = verbose
-def RunTest(job):
- start_time = time.time()
- if job.dep_command is not None:
- dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout)
- # TODO(jkummerow): We approximate the test suite specific function
- # IsFailureOutput() by just checking the exit code here. Currently
- # only cctests define dependencies, for which this simplification is
- # correct.
- if dep_output.exit_code != 0:
- return (job.id, dep_output, time.time() - start_time)
- output = commands.Execute(job.command, job.verbose, job.timeout)
- return (job.id, output, time.time() - start_time)
+# Structure that keeps global information per worker process.
+ProcessContext = collections.namedtuple(
+ "process_context", ["suites", "context"])
+
+
+def MakeProcessContext(context):
+ """Generate a process-local context.
+
+ This reloads all suites per process and stores the global context.
+
+ Args:
+ context: The global context from the test runner.
+ """
+ suite_paths = utils.GetSuitePaths(TEST_DIR)
+ suites = {}
+ for root in suite_paths:
+ # Don't reinitialize global state as this is concurrently called from
+ # different processes.
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(TEST_DIR, root), global_init=False)
+ if suite:
+ suites[suite.name] = suite
+ return ProcessContext(suites, context)
+
+
+def GetCommand(test, context):
+ d8testflag = []
+ shell = test.suite.shell()
+ if shell == "d8":
+ d8testflag = ["--test"]
+ if utils.IsWindows():
+ shell += ".exe"
+ if context.random_seed:
+ d8testflag += ["--random-seed=%s" % context.random_seed]
+ cmd = (context.command_prefix +
+ [os.path.abspath(os.path.join(context.shell_dir, shell))] +
+ d8testflag +
+ test.suite.GetFlagsForTestCase(test, context) +
+ context.extra_flags)
+ return cmd
+
+
+def _GetInstructions(test, context):
+ command = GetCommand(test, context)
+ timeout = context.timeout
+ if ("--stress-opt" in test.flags or
+ "--stress-opt" in context.mode_flags or
+ "--stress-opt" in context.extra_flags):
+ timeout *= 4
+ if "--noenable-vfp3" in context.extra_flags:
+ timeout *= 2
+ # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
+ # the like.
+ if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
+ timeout *= 2
+ if test.dependency is not None:
+ dep_command = [ c.replace(test.path, test.dependency) for c in command ]
+ else:
+ dep_command = None
+ return Instructions(
+ command, dep_command, test.id, timeout, context.verbose)
+
+
+class Job(object):
+ """Stores data to be sent over the multi-process boundary.
+
+ All contained fields will be pickled/unpickled.
+ """
+
+ def Run(self, process_context):
+ """Executes the job.
+
+ Args:
+ process_context: Process-local information that is initialized by the
+ executing worker.
+ """
+ raise NotImplementedError()
+
+
+class TestJob(Job):
+ def __init__(self, test):
+ self.test = test
+
+ def Run(self, process_context):
+ # Retrieve a new suite object on the worker-process side. The original
+ # suite object isn't pickled.
+ self.test.SetSuiteObject(process_context.suites)
+ instr = _GetInstructions(self.test, process_context.context)
+
+ start_time = time.time()
+ if instr.dep_command is not None:
+ dep_output = commands.Execute(
+ instr.dep_command, instr.verbose, instr.timeout)
+ # TODO(jkummerow): We approximate the test suite specific function
+ # IsFailureOutput() by just checking the exit code here. Currently
+ # only cctests define dependencies, for which this simplification is
+ # correct.
+ if dep_output.exit_code != 0:
+ return (instr.id, dep_output, time.time() - start_time)
+ output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+ return (instr.id, output, time.time() - start_time)
+
+
+def RunTest(job, process_context):
+ return job.Run(process_context)
+
class Runner(object):
def __init__(self, suites, progress_indicator, context):
self.datapath = os.path.join("out", "testrunner_data")
- self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
+ self.perf_data_manager = perfdata.GetPerfDataManager(
+ context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
@@ -72,6 +175,8 @@ class Runner(object):
if not context.no_sorting:
for t in self.tests:
t.duration = self.perfdata.FetchPerfData(t) or 1.0
+ slow_key = lambda t: statusfile.IsSlow(t.outcomes)
+ self.tests.sort(key=slow_key, reverse=True)
self.tests.sort(key=lambda t: t.duration, reverse=True)
self._CommonInit(suites, progress_indicator, context)
@@ -97,25 +202,6 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
- def _GetJob(self, test):
- command = self.GetCommand(test)
- timeout = self.context.timeout
- if ("--stress-opt" in test.flags or
- "--stress-opt" in self.context.mode_flags or
- "--stress-opt" in self.context.extra_flags):
- timeout *= 4
- if "--noenable-vfp3" in self.context.extra_flags:
- timeout *= 2
- # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
- # the like.
- if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
- timeout *= 2
- if test.dependency is not None:
- dep_command = [ c.replace(test.path, test.dependency) for c in command ]
- else:
- dep_command = None
- return Job(command, dep_command, test.id, timeout, self.context.verbose)
-
def _MaybeRerun(self, pool, test):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
@@ -136,7 +222,7 @@ class Runner(object):
test.duration = None
test.output = None
test.run += 1
- pool.add([self._GetJob(test)])
+ pool.add([TestJob(test)])
self.remaining += 1
self.total += 1
@@ -206,7 +292,7 @@ class Runner(object):
# remember the output for comparison.
test.run += 1
test.output = result[1]
- pool.add([self._GetJob(test)])
+ pool.add([TestJob(test)])
# Always update the perf database.
return True
@@ -214,8 +300,10 @@ class Runner(object):
self.indicator.Starting()
self._RunInternal(jobs)
self.indicator.Done()
- if self.failed or self.remaining:
+ if self.failed:
return 1
+ elif self.remaining:
+ return 2
return 0
def _RunInternal(self, jobs):
@@ -227,14 +315,19 @@ class Runner(object):
assert test.id >= 0
test_map[test.id] = test
try:
- yield [self._GetJob(test)]
+ yield [TestJob(test)]
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
queued_exception[0] = e
continue
try:
- it = pool.imap_unordered(RunTest, gen_tests())
+ it = pool.imap_unordered(
+ fn=RunTest,
+ gen=gen_tests(),
+ process_context_fn=MakeProcessContext,
+ process_context_args=[self.context],
+ )
for result in it:
if result.heartbeat:
self.indicator.Heartbeat()
@@ -272,22 +365,6 @@ class Runner(object):
print text
sys.stdout.flush()
- def GetCommand(self, test):
- d8testflag = []
- shell = test.suite.shell()
- if shell == "d8":
- d8testflag = ["--test"]
- if utils.IsWindows():
- shell += ".exe"
- if self.context.random_seed:
- d8testflag += ["--random-seed=%s" % self.context.random_seed]
- cmd = (self.context.command_prefix +
- [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
- d8testflag +
- test.suite.GetFlagsForTestCase(test, self.context) +
- self.context.extra_flags)
- return cmd
-
class BreakNowException(Exception):
def __init__(self, value):
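
For context on the execution.py changes above: multiprocessing can only pickle module-level functions, which is why RunTest is a free function rather than a Runner method, and why per-test state now travels in picklable TestJob objects. A minimal sketch of the per-worker flow; the contents of the process context are an assumption, since only MakeProcessContext's name appears in this patch:

    import time

    def MakeProcessContext(context):
        # Hypothetical: runs once per worker; the real code would load
        # process-local test suites here.
        return {"context": context, "suites": {}}

    class TestJob(object):
        def __init__(self, test_id):
            self.id = test_id

        def Run(self, process_context):
            start_time = time.time()
            # A real job executes the test command (and dependency) here.
            output = "fake-output"
            return (self.id, output, time.time() - start_time)

    def RunTest(job, process_context):
        return job.Run(process_context)

    ctx = MakeProcessContext(context=None)
    print(RunTest(TestJob(1), ctx))  # -> (1, 'fake-output', <duration>)
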
diff --git a/chromium/v8/tools/testrunner/local/perfdata.py b/chromium/v8/tools/testrunner/local/perfdata.py
index 2979dc48661..29ebff773a5 100644
--- a/chromium/v8/tools/testrunner/local/perfdata.py
+++ b/chromium/v8/tools/testrunner/local/perfdata.py
@@ -118,3 +118,29 @@ class PerfDataManager(object):
if not mode in modes:
modes[mode] = PerfDataStore(self.datadir, arch, mode)
return modes[mode]
+
+
+class NullPerfDataStore(object):
+ def UpdatePerfData(self, test):
+ pass
+
+ def FetchPerfData(self, test):
+ return None
+
+
+class NullPerfDataManager(object):
+ def __init__(self):
+ pass
+
+ def GetStore(self, *args, **kwargs):
+ return NullPerfDataStore()
+
+ def close(self):
+ pass
+
+
+def GetPerfDataManager(context, datadir):
+ if context.use_perf_data:
+ return PerfDataManager(datadir)
+ else:
+ return NullPerfDataManager()
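
The two Null* classes above are a textbook Null Object pattern: GetPerfDataManager hands callers a no-op store when perf data is disabled, so call sites need no branching. A usage sketch, assuming the patched perfdata module is importable:

    from testrunner.local import perfdata

    class FakeContext(object):      # hypothetical minimal context object
        use_perf_data = False       # perf data collection disabled

    manager = perfdata.GetPerfDataManager(FakeContext(), "out/testrunner_data")
    store = manager.GetStore("x64", "release")
    print(store.FetchPerfData(None))  # -> None; the null store never has data
    store.UpdatePerfData(None)        # silently ignored
    manager.close()
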
diff --git a/chromium/v8/tools/testrunner/local/pool.py b/chromium/v8/tools/testrunner/local/pool.py
index b933f735e55..6d123fd4e5e 100644
--- a/chromium/v8/tools/testrunner/local/pool.py
+++ b/chromium/v8/tools/testrunner/local/pool.py
@@ -5,6 +5,8 @@
from Queue import Empty
from multiprocessing import Event, Process, Queue
+import traceback
+
class NormalResult():
def __init__(self, result):
@@ -39,17 +41,22 @@ class MaybeResult():
return MaybeResult(False, value)
-def Worker(fn, work_queue, done_queue, done):
+def Worker(fn, work_queue, done_queue, done,
+ process_context_fn=None, process_context_args=None):
"""Worker to be run in a child process.
The worker stops under two conditions: 1. when the poison pill "STOP" is
received, or 2. when the "done" event is set."""
try:
+ kwargs = {}
+ if process_context_fn and process_context_args is not None:
+ kwargs.update(process_context=process_context_fn(*process_context_args))
for args in iter(work_queue.get, "STOP"):
if done.is_set():
break
try:
- done_queue.put(NormalResult(fn(*args)))
+ done_queue.put(NormalResult(fn(*args, **kwargs)))
except Exception, e:
+ traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
done_queue.put(ExceptionResult())
except KeyboardInterrupt:
@@ -84,13 +91,23 @@ class Pool():
self.done = Event()
self.heartbeat_timeout = heartbeat_timeout
- def imap_unordered(self, fn, gen):
+ def imap_unordered(self, fn, gen,
+ process_context_fn=None, process_context_args=None):
"""Maps function "fn" to items in generator "gen" on the worker processes
in an arbitrary order. The items are expected to be lists of arguments to
the function. Returns a results iterator. A result value of type
MaybeResult either indicates a heartbeat of the runner, i.e. indicating
that the runner is still waiting for the result to be computed, or it wraps
- the real result."""
+ the real result.
+
+ Args:
+ process_context_fn: Function executed once by each worker. Expected to
+ return a process-context object. If present, this object is passed
+ as an additional argument to each call to fn.
+ process_context_args: List of arguments for the invocation of
+ process_context_fn. All arguments will be pickled and sent across the
+ process boundary.
+ """
try:
gen = iter(gen)
self.advance = self._advance_more
@@ -99,7 +116,9 @@ class Pool():
p = Process(target=Worker, args=(fn,
self.work_queue,
self.done_queue,
- self.done))
+ self.done,
+ process_context_fn,
+ process_context_args))
self.processes.append(p)
p.start()
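
A usage sketch of the extended pool API: process_context_fn runs once in each worker process, and its return value is passed as the process_context keyword argument on every call to fn. The Pool constructor argument and the MaybeResult attributes here are assumptions based on the fields visible above:

    from testrunner.local.pool import Pool

    def make_context(tag):
        # Executed once per worker process, after the fork.
        return {"tag": tag}

    def work(item, process_context):
        # Called for each queued argument list, with the extra context.
        return (item * 2, process_context["tag"])

    pool = Pool(2)                    # assumed: number of worker processes
    gen = ([n] for n in range(5))     # items are lists of arguments to fn
    for result in pool.imap_unordered(work, gen,
                                      process_context_fn=make_context,
                                      process_context_args=["demo"]):
        if not result.heartbeat:      # MaybeResult: heartbeat or real value
            print(result.value)
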
diff --git a/chromium/v8/tools/testrunner/local/progress.py b/chromium/v8/tools/testrunner/local/progress.py
index 85d93285ebc..4e1be3e4cf6 100644
--- a/chromium/v8/tools/testrunner/local/progress.py
+++ b/chromium/v8/tools/testrunner/local/progress.py
@@ -32,24 +32,13 @@ import os
import sys
import time
+from . import execution
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
-def EscapeCommand(command):
- parts = []
- for part in command:
- if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
- else:
- parts.append(part)
- return " ".join(parts)
-
-
class ProgressIndicator(object):
def __init__(self):
@@ -83,6 +72,18 @@ class ProgressIndicator(object):
'negative': negative_marker
}
+ def _EscapeCommand(self, test):
+ command = execution.GetCommand(test, self.runner.context)
+ parts = []
+ for part in command:
+ if ' ' in part:
+ # Escape spaces. We may need to escape more characters for this
+ # to work properly.
+ parts.append('"%s"' % part)
+ else:
+ parts.append(part)
+ return " ".join(parts)
+
class IndicatorNotifier(object):
"""Holds a list of progress indicators and notifies them all on events."""
@@ -124,7 +125,7 @@ class SimpleProgressIndicator(ProgressIndicator):
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
- print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
+ print "Command: %s" % self._EscapeCommand(failed)
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
@@ -212,7 +213,7 @@ class CompactProgressIndicator(ProgressIndicator):
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
- print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ print "Command: %s" % self._EscapeCommand(test)
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
@@ -300,7 +301,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ fail_text += "Command: %s" % self._EscapeCommand(test)
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
@@ -335,8 +336,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
{
"name": test.GetLabel(),
"flags": test.flags,
- "command": EscapeCommand(self.runner.GetCommand(test)).replace(
- ABS_PATH_PREFIX, ""),
+ "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration,
} for test in timed_tests[:20]
]
@@ -362,8 +362,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
- "command": EscapeCommand(self.runner.GetCommand(test)).replace(
- ABS_PATH_PREFIX, ""),
+ "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
diff --git a/chromium/v8/tools/testrunner/local/statusfile.py b/chromium/v8/tools/testrunner/local/statusfile.py
index bfa53c53484..f86106b9d9c 100644
--- a/chromium/v8/tools/testrunner/local/statusfile.py
+++ b/chromium/v8/tools/testrunner/local/statusfile.py
@@ -25,6 +25,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import os
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
@@ -125,10 +126,14 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
target_dict[rule] = result
-def ReadStatusFile(path, variables):
+def ReadContent(path):
with open(path) as f:
global KEYWORDS
- contents = eval(f.read(), KEYWORDS)
+ return eval(f.read(), KEYWORDS)
+
+
+def ReadStatusFile(path, variables):
+ contents = ReadContent(path)
rules = {}
wildcards = {}
@@ -146,3 +151,30 @@ def ReadStatusFile(path, variables):
else:
_ParseOutcomeList(rule, section[rule], rules, variables)
return rules, wildcards
+
+
+def PresubmitCheck(path):
+ contents = ReadContent(path)
+ root_prefix = os.path.basename(os.path.dirname(path)) + "/"
+ status = {"success": True}
+ def _assert(check, message): # Like "assert", but doesn't throw.
+ if not check:
+ print("%s: Error: %s" % (path, message))
+ status["success"] = False
+ try:
+ for section in contents:
+ _assert(type(section) == list, "Section must be a list")
+ _assert(len(section) == 2, "Section list must have exactly 2 entries")
+ section = section[1]
+ _assert(type(section) == dict,
+ "Second entry of section must be a dictionary")
+ for rule in section:
+ _assert(type(rule) == str, "Rule key must be a string")
+ _assert(not rule.startswith(root_prefix),
+ "Suite name prefix must not be used in rule keys")
+ _assert(not rule.endswith('.js'),
+ ".js extension must not be used in rule keys.")
+ return status["success"]
+ except Exception as e:
+ print e
+ return False
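
For reference, a hypothetical status file that PresubmitCheck accepts: the eval'd content is a list of two-entry sections, each pairing a condition with a dictionary whose rule keys are plain strings carrying neither the suite-name prefix nor a .js extension:

    # mjsunit.status (hypothetical) -- evaluated with the KEYWORDS globals,
    # so PASS/SKIP/SLOW resolve to the outcome strings defined above.
    [
      ["system == windows", {
        "some-test": [SKIP],
        "regress/regress-1234": [PASS, SLOW],
      }],
    ]
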
diff --git a/chromium/v8/tools/testrunner/local/testsuite.py b/chromium/v8/tools/testrunner/local/testsuite.py
index e0fff0d11a3..e3d1e232e80 100644
--- a/chromium/v8/tools/testrunner/local/testsuite.py
+++ b/chromium/v8/tools/testrunner/local/testsuite.py
@@ -38,8 +38,12 @@ from ..objects import testcase
ALL_VARIANT_FLAGS = {
"default": [[]],
"stress": [["--stress-opt", "--always-opt"]],
- "turbofan": [["--turbo", "--always-opt"]],
+ "turbofan": [["--turbo"]],
+ "turbofan_opt": [["--turbo", "--always-opt"]],
"nocrankshaft": [["--nocrankshaft"]],
+ "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
+ "--ignition-fallback-on-eval-and-catch"]],
+ "preparser": [["--min-preparse-length=0"]],
}
# FAST_VARIANTS implies no --always-opt.
@@ -48,9 +52,13 @@ FAST_VARIANT_FLAGS = {
"stress": [["--stress-opt"]],
"turbofan": [["--turbo"]],
"nocrankshaft": [["--nocrankshaft"]],
+ "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
+ "--ignition-fallback-on-eval-and-catch"]],
+ "preparser": [["--min-preparse-length=0"]],
}
-ALL_VARIANTS = set(["default", "stress", "turbofan", "nocrankshaft"])
+ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
+ "nocrankshaft", "ignition", "preparser"])
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
@@ -80,14 +88,14 @@ class VariantGenerator(object):
class TestSuite(object):
@staticmethod
- def LoadTestSuite(root):
+ def LoadTestSuite(root, global_init=True):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module("testcfg", f, pathname, description)
return module.GetSuite(name, root)
- except:
+ except ImportError:
# Use default if no testcfg is present.
return GoogleTestSuite(name, root)
finally:
@@ -95,6 +103,8 @@ class TestSuite(object):
f.close()
def __init__(self, name, root):
+ # Note: This might be called concurrently from different processes.
+ # Changing on-disk state should be done in 'SetupWorkingDirectory' below.
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
@@ -102,6 +112,11 @@ class TestSuite(object):
self.wildcards = None # dictionary mapping test paths to list of outcomes
self.total_duration = None # float, assigned on demand
+ def SetupWorkingDirectory(self):
+ # This is called once per test suite object in a multi-process setting.
+ # Multi-process-unsafe work-directory setup can go here.
+ pass
+
def shell(self):
return "d8"
diff --git a/chromium/v8/tools/testrunner/network/endpoint.py b/chromium/v8/tools/testrunner/network/endpoint.py
index d0950cf5a6b..516578ace49 100644
--- a/chromium/v8/tools/testrunner/network/endpoint.py
+++ b/chromium/v8/tools/testrunner/network/endpoint.py
@@ -93,6 +93,7 @@ def Execute(workspace, ctx, tests, sock, server):
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
suites_dict = {}
diff --git a/chromium/v8/tools/testrunner/objects/context.py b/chromium/v8/tools/testrunner/objects/context.py
index b76e5628094..c9853d07cc7 100644
--- a/chromium/v8/tools/testrunner/objects/context.py
+++ b/chromium/v8/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@ class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max,
- predictable, no_harness):
+ predictable, no_harness, use_perf_data):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -47,12 +47,14 @@ class Context():
self.rerun_failures_max = rerun_failures_max
self.predictable = predictable
self.no_harness = no_harness
+ self.use_perf_data = use_perf_data
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
self.command_prefix, self.extra_flags, self.noi18n,
self.random_seed, self.no_sorting, self.rerun_failures_count,
- self.rerun_failures_max, self.predictable, self.no_harness]
+ self.rerun_failures_max, self.predictable, self.no_harness,
+ self.use_perf_data]
@staticmethod
def Unpack(packed):
@@ -60,4 +62,4 @@ class Context():
return Context(packed[0], packed[1], None, packed[2], False,
packed[3], packed[4], packed[5], packed[6], packed[7],
packed[8], packed[9], packed[10], packed[11], packed[12],
- packed[13])
+ packed[13], packed[14])
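
Adding use_perf_data shows the maintenance cost of this scheme: __init__, Pack and Unpack must change in lock-step, since Pack flattens the context into a positional list (shell_dir and verbose are deliberately dropped, and restored as None/False on the receiving side). A round-trip sketch with illustrative values:

    from testrunner.objects.context import Context

    ctx = Context("x64", "release", "out/x64.release/", [], True, 60,
                  False, [], [], False, None, False, 0, 0, False, False,
                  True)                      # use_perf_data, the new field
    packed = ctx.Pack()                      # 15 picklable entries
    clone = Context.Unpack(packed)
    assert clone.use_perf_data is True
    assert clone.shell_dir is None           # intentionally not transported
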
diff --git a/chromium/v8/tools/testrunner/objects/testcase.py b/chromium/v8/tools/testrunner/objects/testcase.py
index 0ab06361b17..fa2265c0703 100644
--- a/chromium/v8/tools/testrunner/objects/testcase.py
+++ b/chromium/v8/tools/testrunner/objects/testcase.py
@@ -86,3 +86,11 @@ class TestCase(object):
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
+
+ def __getstate__(self):
+ """Representation to pickle test cases.
+
+ The original suite won't be sent across process boundaries. Instead,
+ only its name is sent, and a process-local suite is retrieved later.
+ """
+ return dict(self.__dict__, suite=self.suite.name)
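
A self-contained sketch of the pickling trick in __getstate__: the heavyweight suite object is replaced by its name before pickling, and the receiving process is expected to swap a process-local suite back in (that restore step is outside this patch):

    import pickle

    class Suite(object):
        def __init__(self, name):
            self.name = name          # picklable stand-in for a real suite

    class Case(object):
        def __init__(self, suite, path):
            self.suite = suite
            self.path = path

        def __getstate__(self):
            # Ship the suite's name instead of the suite itself.
            return dict(self.__dict__, suite=self.suite.name)

    clone = pickle.loads(pickle.dumps(Case(Suite("mjsunit"), "regress-1")))
    print(clone.suite)  # -> 'mjsunit'; resolved to a local suite later
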
diff --git a/chromium/v8/tools/testrunner/testrunner.isolate b/chromium/v8/tools/testrunner/testrunner.isolate
new file mode 100644
index 00000000000..669614b2831
--- /dev/null
+++ b/chromium/v8/tools/testrunner/testrunner.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '../run-tests.py',
+ ],
+ 'files': [
+ '../run-tests.py',
+ './'
+ ],
+ },
+} \ No newline at end of file
diff --git a/chromium/v8/tools/try_perf.py b/chromium/v8/tools/try_perf.py
index 14b2329f742..2403f7d782d 100755
--- a/chromium/v8/tools/try_perf.py
+++ b/chromium/v8/tools/try_perf.py
@@ -12,6 +12,7 @@ BOTS = {
'--arm32': 'v8_arm32_perf_try',
'--linux32': 'v8_linux32_perf_try',
'--linux64': 'v8_linux64_perf_try',
+ '--linux64_atom': 'v8_linux64_atom_perf_try',
'--linux64_haswell': 'v8_linux64_haswell_perf_try',
'--nexus5': 'v8_nexus5_perf_try',
'--nexus7': 'v8_nexus7_perf_try',
@@ -26,6 +27,25 @@ DEFAULT_BOTS = [
'v8_nexus10_perf_try',
]
+PUBLIC_BENCHMARKS = [
+ 'arewefastyet',
+ 'embenchen',
+ 'emscripten',
+ 'compile',
+ 'jetstream',
+ 'jsbench',
+ 'jstests',
+ 'kraken_orig',
+ 'massive',
+ 'memory',
+ 'octane',
+ 'octane-pr',
+ 'octane-tf',
+ 'octane-tf-pr',
+ 'simdjs',
+ 'sunspider',
+]
+
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
@@ -46,6 +66,16 @@ def main():
print 'Please specify the benchmarks to run as arguments.'
return 1
+ for benchmark in options.benchmarks:
+ if benchmark not in PUBLIC_BENCHMARKS:
+ print ('%s not found in our benchmark list. The respective trybot might '
+ 'fail unless you run something this script isn\'t aware of. '
+ 'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
+ print 'Proceed anyway? [Y/n] ',
+ answer = sys.stdin.readline().strip()
+ if answer != "" and answer != "Y" and answer != "y":
+ return 1
+
assert '"' not in options.extra_flags and '\'' not in options.extra_flags, (
'Invalid flag specification.')
diff --git a/chromium/v8/tools/v8heapconst.py b/chromium/v8/tools/v8heapconst.py
index f3d5d15ab52..0461bcbb66f 100644
--- a/chromium/v8/tools/v8heapconst.py
+++ b/chromium/v8/tools/v8heapconst.py
@@ -51,259 +51,247 @@ INSTANCE_TYPES = {
22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
- 134: "FLOAT32X4_TYPE",
- 129: "MAP_TYPE",
- 130: "CODE_TYPE",
+ 130: "SIMD128_VALUE_TYPE",
+ 132: "MAP_TYPE",
+ 133: "CODE_TYPE",
131: "ODDBALL_TYPE",
- 182: "CELL_TYPE",
- 184: "PROPERTY_CELL_TYPE",
- 132: "HEAP_NUMBER_TYPE",
- 133: "MUTABLE_HEAP_NUMBER_TYPE",
+ 173: "CELL_TYPE",
+ 176: "PROPERTY_CELL_TYPE",
+ 129: "HEAP_NUMBER_TYPE",
+ 134: "MUTABLE_HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "BYTECODE_ARRAY_TYPE",
138: "FREE_SPACE_TYPE",
- 139: "EXTERNAL_INT8_ARRAY_TYPE",
- 140: "EXTERNAL_UINT8_ARRAY_TYPE",
- 141: "EXTERNAL_INT16_ARRAY_TYPE",
- 142: "EXTERNAL_UINT16_ARRAY_TYPE",
- 143: "EXTERNAL_INT32_ARRAY_TYPE",
- 144: "EXTERNAL_UINT32_ARRAY_TYPE",
- 145: "EXTERNAL_FLOAT32_ARRAY_TYPE",
- 146: "EXTERNAL_FLOAT64_ARRAY_TYPE",
- 147: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
- 148: "FIXED_INT8_ARRAY_TYPE",
- 149: "FIXED_UINT8_ARRAY_TYPE",
- 150: "FIXED_INT16_ARRAY_TYPE",
- 151: "FIXED_UINT16_ARRAY_TYPE",
- 152: "FIXED_INT32_ARRAY_TYPE",
- 153: "FIXED_UINT32_ARRAY_TYPE",
- 154: "FIXED_FLOAT32_ARRAY_TYPE",
- 155: "FIXED_FLOAT64_ARRAY_TYPE",
- 156: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 158: "FILLER_TYPE",
- 159: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 160: "DECLARED_ACCESSOR_INFO_TYPE",
- 161: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 162: "ACCESSOR_PAIR_TYPE",
- 163: "ACCESS_CHECK_INFO_TYPE",
- 164: "INTERCEPTOR_INFO_TYPE",
- 165: "CALL_HANDLER_INFO_TYPE",
- 166: "FUNCTION_TEMPLATE_INFO_TYPE",
- 167: "OBJECT_TEMPLATE_INFO_TYPE",
- 168: "SIGNATURE_INFO_TYPE",
- 169: "TYPE_SWITCH_INFO_TYPE",
- 171: "ALLOCATION_MEMENTO_TYPE",
- 170: "ALLOCATION_SITE_TYPE",
- 172: "SCRIPT_TYPE",
- 173: "CODE_CACHE_TYPE",
- 174: "POLYMORPHIC_CODE_CACHE_TYPE",
- 175: "TYPE_FEEDBACK_INFO_TYPE",
- 176: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 177: "BOX_TYPE",
- 185: "PROTOTYPE_INFO_TYPE",
- 180: "FIXED_ARRAY_TYPE",
- 157: "FIXED_DOUBLE_ARRAY_TYPE",
- 181: "SHARED_FUNCTION_INFO_TYPE",
- 183: "WEAK_CELL_TYPE",
- 189: "JS_MESSAGE_OBJECT_TYPE",
- 188: "JS_VALUE_TYPE",
- 190: "JS_DATE_TYPE",
- 191: "JS_OBJECT_TYPE",
- 192: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 193: "JS_GENERATOR_OBJECT_TYPE",
- 194: "JS_MODULE_TYPE",
- 195: "JS_GLOBAL_OBJECT_TYPE",
- 196: "JS_BUILTINS_OBJECT_TYPE",
- 197: "JS_GLOBAL_PROXY_TYPE",
- 198: "JS_ARRAY_TYPE",
- 199: "JS_ARRAY_BUFFER_TYPE",
- 200: "JS_TYPED_ARRAY_TYPE",
- 201: "JS_DATA_VIEW_TYPE",
- 187: "JS_PROXY_TYPE",
- 202: "JS_SET_TYPE",
- 203: "JS_MAP_TYPE",
- 204: "JS_SET_ITERATOR_TYPE",
- 205: "JS_MAP_ITERATOR_TYPE",
- 206: "JS_WEAK_MAP_TYPE",
- 207: "JS_WEAK_SET_TYPE",
- 208: "JS_REGEXP_TYPE",
- 209: "JS_FUNCTION_TYPE",
- 186: "JS_FUNCTION_PROXY_TYPE",
- 178: "DEBUG_INFO_TYPE",
- 179: "BREAK_POINT_INFO_TYPE",
+ 139: "FIXED_INT8_ARRAY_TYPE",
+ 140: "FIXED_UINT8_ARRAY_TYPE",
+ 141: "FIXED_INT16_ARRAY_TYPE",
+ 142: "FIXED_UINT16_ARRAY_TYPE",
+ 143: "FIXED_INT32_ARRAY_TYPE",
+ 144: "FIXED_UINT32_ARRAY_TYPE",
+ 145: "FIXED_FLOAT32_ARRAY_TYPE",
+ 146: "FIXED_FLOAT64_ARRAY_TYPE",
+ 147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 149: "FILLER_TYPE",
+ 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 151: "DECLARED_ACCESSOR_INFO_TYPE",
+ 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 153: "ACCESSOR_PAIR_TYPE",
+ 154: "ACCESS_CHECK_INFO_TYPE",
+ 155: "INTERCEPTOR_INFO_TYPE",
+ 156: "CALL_HANDLER_INFO_TYPE",
+ 157: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 158: "OBJECT_TEMPLATE_INFO_TYPE",
+ 159: "SIGNATURE_INFO_TYPE",
+ 160: "TYPE_SWITCH_INFO_TYPE",
+ 162: "ALLOCATION_MEMENTO_TYPE",
+ 161: "ALLOCATION_SITE_TYPE",
+ 163: "SCRIPT_TYPE",
+ 164: "CODE_CACHE_TYPE",
+ 165: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 166: "TYPE_FEEDBACK_INFO_TYPE",
+ 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 168: "BOX_TYPE",
+ 177: "PROTOTYPE_INFO_TYPE",
+ 178: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
+ 171: "FIXED_ARRAY_TYPE",
+ 148: "FIXED_DOUBLE_ARRAY_TYPE",
+ 172: "SHARED_FUNCTION_INFO_TYPE",
+ 174: "WEAK_CELL_TYPE",
+ 175: "TRANSITION_ARRAY_TYPE",
+ 181: "JS_MESSAGE_OBJECT_TYPE",
+ 180: "JS_VALUE_TYPE",
+ 182: "JS_DATE_TYPE",
+ 183: "JS_OBJECT_TYPE",
+ 184: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 185: "JS_GENERATOR_OBJECT_TYPE",
+ 186: "JS_MODULE_TYPE",
+ 187: "JS_GLOBAL_OBJECT_TYPE",
+ 188: "JS_GLOBAL_PROXY_TYPE",
+ 189: "JS_ARRAY_TYPE",
+ 190: "JS_ARRAY_BUFFER_TYPE",
+ 191: "JS_TYPED_ARRAY_TYPE",
+ 192: "JS_DATA_VIEW_TYPE",
+ 179: "JS_PROXY_TYPE",
+ 193: "JS_SET_TYPE",
+ 194: "JS_MAP_TYPE",
+ 195: "JS_SET_ITERATOR_TYPE",
+ 196: "JS_MAP_ITERATOR_TYPE",
+ 197: "JS_ITERATOR_RESULT_TYPE",
+ 198: "JS_WEAK_MAP_TYPE",
+ 199: "JS_WEAK_SET_TYPE",
+ 200: "JS_PROMISE_TYPE",
+ 201: "JS_REGEXP_TYPE",
+ 202: "JS_BOUND_FUNCTION_TYPE",
+ 203: "JS_FUNCTION_TYPE",
+ 169: "DEBUG_INFO_TYPE",
+ 170: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
- 0x080ad: (129, "MetaMap"),
+ 0x080ad: (132, "MetaMap"),
0x080d9: (131, "NullMap"),
- 0x08105: (180, "FixedArrayMap"),
+ 0x08105: (171, "FixedArrayMap"),
0x08131: (4, "OneByteInternalizedStringMap"),
- 0x0815d: (183, "WeakCellMap"),
- 0x08189: (131, "TheHoleMap"),
- 0x081b5: (138, "FreeSpaceMap"),
- 0x081e1: (158, "OnePointerFillerMap"),
- 0x0820d: (158, "TwoPointerFillerMap"),
- 0x08239: (131, "UndefinedMap"),
- 0x08265: (132, "HeapNumberMap"),
- 0x08291: (131, "BooleanMap"),
- 0x082bd: (131, "UninitializedMap"),
- 0x082e9: (182, "CellMap"),
- 0x08315: (184, "GlobalPropertyCellMap"),
- 0x08341: (181, "SharedFunctionInfoMap"),
- 0x0836d: (133, "MutableHeapNumberMap"),
- 0x08399: (134, "Float32x4Map"),
- 0x083c5: (180, "NativeContextMap"),
- 0x083f1: (130, "CodeMap"),
- 0x0841d: (180, "ScopeInfoMap"),
- 0x08449: (180, "FixedCOWArrayMap"),
- 0x08475: (157, "FixedDoubleArrayMap"),
- 0x084a1: (68, "OneByteStringMap"),
- 0x084cd: (180, "FunctionContextMap"),
- 0x084f9: (131, "NoInterceptorResultSentinelMap"),
- 0x08525: (131, "ArgumentsMarkerMap"),
- 0x08551: (131, "ExceptionMap"),
- 0x0857d: (131, "TerminationExceptionMap"),
- 0x085a9: (180, "HashTableMap"),
- 0x085d5: (180, "OrderedHashTableMap"),
- 0x08601: (128, "SymbolMap"),
- 0x0862d: (64, "StringMap"),
- 0x08659: (69, "ConsOneByteStringMap"),
- 0x08685: (65, "ConsStringMap"),
- 0x086b1: (67, "SlicedStringMap"),
- 0x086dd: (71, "SlicedOneByteStringMap"),
- 0x08709: (66, "ExternalStringMap"),
- 0x08735: (74, "ExternalStringWithOneByteDataMap"),
- 0x08761: (70, "ExternalOneByteStringMap"),
- 0x0878d: (70, "NativeSourceStringMap"),
- 0x087b9: (82, "ShortExternalStringMap"),
- 0x087e5: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x08811: (0, "InternalizedStringMap"),
- 0x0883d: (2, "ExternalInternalizedStringMap"),
- 0x08869: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x08895: (6, "ExternalOneByteInternalizedStringMap"),
- 0x088c1: (18, "ShortExternalInternalizedStringMap"),
- 0x088ed: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08919: (22, "ShortExternalOneByteInternalizedStringMap"),
- 0x08945: (86, "ShortExternalOneByteStringMap"),
- 0x08971: (139, "ExternalInt8ArrayMap"),
- 0x0899d: (140, "ExternalUint8ArrayMap"),
- 0x089c9: (141, "ExternalInt16ArrayMap"),
- 0x089f5: (142, "ExternalUint16ArrayMap"),
- 0x08a21: (143, "ExternalInt32ArrayMap"),
- 0x08a4d: (144, "ExternalUint32ArrayMap"),
- 0x08a79: (145, "ExternalFloat32ArrayMap"),
- 0x08aa5: (146, "ExternalFloat64ArrayMap"),
- 0x08ad1: (147, "ExternalUint8ClampedArrayMap"),
- 0x08afd: (149, "FixedUint8ArrayMap"),
- 0x08b29: (148, "FixedInt8ArrayMap"),
- 0x08b55: (151, "FixedUint16ArrayMap"),
- 0x08b81: (150, "FixedInt16ArrayMap"),
- 0x08bad: (153, "FixedUint32ArrayMap"),
- 0x08bd9: (152, "FixedInt32ArrayMap"),
- 0x08c05: (154, "FixedFloat32ArrayMap"),
- 0x08c31: (155, "FixedFloat64ArrayMap"),
- 0x08c5d: (156, "FixedUint8ClampedArrayMap"),
- 0x08c89: (180, "SloppyArgumentsElementsMap"),
- 0x08cb5: (180, "CatchContextMap"),
- 0x08ce1: (180, "WithContextMap"),
- 0x08d0d: (180, "BlockContextMap"),
- 0x08d39: (180, "ModuleContextMap"),
- 0x08d65: (180, "ScriptContextMap"),
- 0x08d91: (180, "ScriptContextTableMap"),
- 0x08dbd: (189, "JSMessageObjectMap"),
- 0x08de9: (135, "ForeignMap"),
- 0x08e15: (191, "NeanderMap"),
- 0x08e41: (191, "ExternalMap"),
- 0x08e6d: (171, "AllocationMementoMap"),
- 0x08e99: (170, "AllocationSiteMap"),
- 0x08ec5: (174, "PolymorphicCodeCacheMap"),
- 0x08ef1: (172, "ScriptMap"),
- 0x09101: (161, "ExecutableAccessorInfoMap"),
- 0x09159: (162, "AccessorPairMap"),
- 0x09209: (185, "PrototypeInfoMap"),
- 0x09839: (137, "BytecodeArrayMap"),
- 0x09865: (177, "BoxMap"),
- 0x09891: (163, "AccessCheckInfoMap"),
- 0x098bd: (164, "InterceptorInfoMap"),
- 0x098e9: (165, "CallHandlerInfoMap"),
- 0x09915: (166, "FunctionTemplateInfoMap"),
- 0x09941: (167, "ObjectTemplateInfoMap"),
- 0x0996d: (169, "TypeSwitchInfoMap"),
- 0x09999: (173, "CodeCacheMap"),
- 0x099c5: (175, "TypeFeedbackInfoMap"),
- 0x099f1: (176, "AliasedArgumentsEntryMap"),
- 0x09a1d: (178, "DebugInfoMap"),
- 0x09a49: (179, "BreakPointInfoMap"),
+ 0x0815d: (138, "FreeSpaceMap"),
+ 0x08189: (149, "OnePointerFillerMap"),
+ 0x081b5: (149, "TwoPointerFillerMap"),
+ 0x081e1: (131, "UndefinedMap"),
+ 0x0820d: (129, "HeapNumberMap"),
+ 0x08239: (131, "TheHoleMap"),
+ 0x08265: (131, "BooleanMap"),
+ 0x08291: (131, "UninitializedMap"),
+ 0x082bd: (173, "CellMap"),
+ 0x082e9: (176, "GlobalPropertyCellMap"),
+ 0x08315: (172, "SharedFunctionInfoMap"),
+ 0x08341: (134, "MutableHeapNumberMap"),
+ 0x0836d: (130, "Float32x4Map"),
+ 0x08399: (130, "Int32x4Map"),
+ 0x083c5: (130, "Uint32x4Map"),
+ 0x083f1: (130, "Bool32x4Map"),
+ 0x0841d: (130, "Int16x8Map"),
+ 0x08449: (130, "Uint16x8Map"),
+ 0x08475: (130, "Bool16x8Map"),
+ 0x084a1: (130, "Int8x16Map"),
+ 0x084cd: (130, "Uint8x16Map"),
+ 0x084f9: (130, "Bool8x16Map"),
+ 0x08525: (171, "NativeContextMap"),
+ 0x08551: (133, "CodeMap"),
+ 0x0857d: (171, "ScopeInfoMap"),
+ 0x085a9: (171, "FixedCOWArrayMap"),
+ 0x085d5: (148, "FixedDoubleArrayMap"),
+ 0x08601: (174, "WeakCellMap"),
+ 0x0862d: (175, "TransitionArrayMap"),
+ 0x08659: (68, "OneByteStringMap"),
+ 0x08685: (171, "FunctionContextMap"),
+ 0x086b1: (131, "NoInterceptorResultSentinelMap"),
+ 0x086dd: (131, "ArgumentsMarkerMap"),
+ 0x08709: (131, "ExceptionMap"),
+ 0x08735: (131, "TerminationExceptionMap"),
+ 0x08761: (171, "HashTableMap"),
+ 0x0878d: (171, "OrderedHashTableMap"),
+ 0x087b9: (128, "SymbolMap"),
+ 0x087e5: (64, "StringMap"),
+ 0x08811: (69, "ConsOneByteStringMap"),
+ 0x0883d: (65, "ConsStringMap"),
+ 0x08869: (67, "SlicedStringMap"),
+ 0x08895: (71, "SlicedOneByteStringMap"),
+ 0x088c1: (66, "ExternalStringMap"),
+ 0x088ed: (74, "ExternalStringWithOneByteDataMap"),
+ 0x08919: (70, "ExternalOneByteStringMap"),
+ 0x08945: (70, "NativeSourceStringMap"),
+ 0x08971: (82, "ShortExternalStringMap"),
+ 0x0899d: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x089c9: (0, "InternalizedStringMap"),
+ 0x089f5: (2, "ExternalInternalizedStringMap"),
+ 0x08a21: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x08a4d: (6, "ExternalOneByteInternalizedStringMap"),
+ 0x08a79: (18, "ShortExternalInternalizedStringMap"),
+ 0x08aa5: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08ad1: (22, "ShortExternalOneByteInternalizedStringMap"),
+ 0x08afd: (86, "ShortExternalOneByteStringMap"),
+ 0x08b29: (140, "FixedUint8ArrayMap"),
+ 0x08b55: (139, "FixedInt8ArrayMap"),
+ 0x08b81: (142, "FixedUint16ArrayMap"),
+ 0x08bad: (141, "FixedInt16ArrayMap"),
+ 0x08bd9: (144, "FixedUint32ArrayMap"),
+ 0x08c05: (143, "FixedInt32ArrayMap"),
+ 0x08c31: (145, "FixedFloat32ArrayMap"),
+ 0x08c5d: (146, "FixedFloat64ArrayMap"),
+ 0x08c89: (147, "FixedUint8ClampedArrayMap"),
+ 0x08cb5: (171, "SloppyArgumentsElementsMap"),
+ 0x08ce1: (171, "CatchContextMap"),
+ 0x08d0d: (171, "WithContextMap"),
+ 0x08d39: (171, "BlockContextMap"),
+ 0x08d65: (171, "ModuleContextMap"),
+ 0x08d91: (171, "ScriptContextMap"),
+ 0x08dbd: (171, "ScriptContextTableMap"),
+ 0x08de9: (181, "JSMessageObjectMap"),
+ 0x08e15: (135, "ForeignMap"),
+ 0x08e41: (183, "NeanderMap"),
+ 0x08e6d: (183, "ExternalMap"),
+ 0x08e99: (162, "AllocationMementoMap"),
+ 0x08ec5: (161, "AllocationSiteMap"),
+ 0x08ef1: (165, "PolymorphicCodeCacheMap"),
+ 0x08f1d: (163, "ScriptMap"),
+ 0x08f75: (137, "BytecodeArrayMap"),
+ 0x08fa1: (168, "BoxMap"),
+ 0x08fcd: (152, "ExecutableAccessorInfoMap"),
+ 0x08ff9: (153, "AccessorPairMap"),
+ 0x09025: (154, "AccessCheckInfoMap"),
+ 0x09051: (155, "InterceptorInfoMap"),
+ 0x0907d: (156, "CallHandlerInfoMap"),
+ 0x090a9: (157, "FunctionTemplateInfoMap"),
+ 0x090d5: (158, "ObjectTemplateInfoMap"),
+ 0x09101: (164, "CodeCacheMap"),
+ 0x0912d: (166, "TypeFeedbackInfoMap"),
+ 0x09159: (167, "AliasedArgumentsEntryMap"),
+ 0x09185: (169, "DebugInfoMap"),
+ 0x091b1: (170, "BreakPointInfoMap"),
+ 0x091dd: (177, "PrototypeInfoMap"),
+ 0x09209: (178, "SloppyBlockWithEvalContextExtensionMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
("OLD_SPACE", 0x08081): "NullValue",
- ("OLD_SPACE", 0x08091): "EmptyDescriptorArray",
- ("OLD_SPACE", 0x08099): "EmptyFixedArray",
- ("OLD_SPACE", 0x080bd): "TheHoleValue",
- ("OLD_SPACE", 0x080dd): "UndefinedValue",
- ("OLD_SPACE", 0x08105): "NanValue",
- ("OLD_SPACE", 0x08115): "TrueValue",
- ("OLD_SPACE", 0x08135): "FalseValue",
- ("OLD_SPACE", 0x08159): "empty_string",
- ("OLD_SPACE", 0x08165): "UninitializedValue",
- ("OLD_SPACE", 0x08191): "EmptyByteArray",
- ("OLD_SPACE", 0x08199): "NoInterceptorResultSentinel",
- ("OLD_SPACE", 0x081d5): "ArgumentsMarker",
- ("OLD_SPACE", 0x08201): "Exception",
- ("OLD_SPACE", 0x08229): "TerminationException",
- ("OLD_SPACE", 0x0825d): "NumberStringCache",
- ("OLD_SPACE", 0x08a65): "SingleCharacterStringCache",
- ("OLD_SPACE", 0x08efd): "StringSplitCache",
- ("OLD_SPACE", 0x09305): "RegExpMultipleCache",
- ("OLD_SPACE", 0x0970d): "EmptyExternalInt8Array",
- ("OLD_SPACE", 0x09719): "EmptyExternalUint8Array",
- ("OLD_SPACE", 0x09725): "EmptyExternalInt16Array",
- ("OLD_SPACE", 0x09731): "EmptyExternalUint16Array",
- ("OLD_SPACE", 0x0973d): "EmptyExternalInt32Array",
- ("OLD_SPACE", 0x09749): "EmptyExternalUint32Array",
- ("OLD_SPACE", 0x09755): "EmptyExternalFloat32Array",
- ("OLD_SPACE", 0x09761): "EmptyExternalFloat64Array",
- ("OLD_SPACE", 0x0976d): "EmptyExternalUint8ClampedArray",
- ("OLD_SPACE", 0x09779): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x09789): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x09799): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x097a9): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x097b9): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x097c9): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x097d9): "EmptyFixedFloat32Array",
- ("OLD_SPACE", 0x097e9): "EmptyFixedFloat64Array",
- ("OLD_SPACE", 0x097f9): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x0980d): "InfinityValue",
- ("OLD_SPACE", 0x0981d): "MinusZeroValue",
- ("OLD_SPACE", 0x0982d): "MinusInfinityValue",
- ("OLD_SPACE", 0x09839): "MessageListeners",
- ("OLD_SPACE", 0x09855): "CodeStubs",
- ("OLD_SPACE", 0x0e52d): "ArrayProtector",
- ("OLD_SPACE", 0x0e9a1): "KeyedLoadDummyVector",
- ("OLD_SPACE", 0x13ded): "NonMonomorphicCache",
- ("OLD_SPACE", 0x14131): "PolymorphicCodeCache",
- ("OLD_SPACE", 0x14139): "NativesSourceCache",
- ("OLD_SPACE", 0x14429): "ExperimentalNativesSourceCache",
- ("OLD_SPACE", 0x14461): "ExtraNativesSourceCache",
- ("OLD_SPACE", 0x1446d): "CodeStubNativesSourceCache",
- ("OLD_SPACE", 0x1448d): "EmptyScript",
- ("OLD_SPACE", 0x144cd): "IntrinsicFunctionNames",
- ("OLD_SPACE", 0x240e1): "UndefinedCell",
- ("OLD_SPACE", 0x240e9): "ObservationState",
- ("OLD_SPACE", 0x240f5): "SymbolRegistry",
- ("OLD_SPACE", 0x24f9d): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x24fc5): "AllocationSitesScratchpad",
- ("OLD_SPACE", 0x253cd): "WeakObjectToCodeTable",
- ("OLD_SPACE", 0x25461): "EmptyPropertyCell",
- ("OLD_SPACE", 0x25471): "CodeStubContext",
- ("OLD_SPACE", 0x2ba11): "CodeStubExportsObject",
- ("OLD_SPACE", 0x2be89): "EmptyBytecodeArray",
- ("OLD_SPACE", 0x594dd): "StringTable",
- ("CODE_SPACE", 0x16341): "JsEntryCode",
- ("CODE_SPACE", 0x26a61): "JsConstructEntryCode",
+ ("OLD_SPACE", 0x08095): "EmptyDescriptorArray",
+ ("OLD_SPACE", 0x0809d): "EmptyFixedArray",
+ ("OLD_SPACE", 0x080c9): "UndefinedValue",
+ ("OLD_SPACE", 0x080f5): "NanValue",
+ ("OLD_SPACE", 0x08105): "TheHoleValue",
+ ("OLD_SPACE", 0x08129): "TrueValue",
+ ("OLD_SPACE", 0x08161): "FalseValue",
+ ("OLD_SPACE", 0x08189): "empty_string",
+ ("OLD_SPACE", 0x08195): "hidden_string",
+ ("OLD_SPACE", 0x081a1): "UninitializedValue",
+ ("OLD_SPACE", 0x081d1): "EmptyByteArray",
+ ("OLD_SPACE", 0x081d9): "NoInterceptorResultSentinel",
+ ("OLD_SPACE", 0x08219): "ArgumentsMarker",
+ ("OLD_SPACE", 0x08249): "Exception",
+ ("OLD_SPACE", 0x08275): "TerminationException",
+ ("OLD_SPACE", 0x082ad): "NumberStringCache",
+ ("OLD_SPACE", 0x08ab5): "SingleCharacterStringCache",
+ ("OLD_SPACE", 0x08f4d): "StringSplitCache",
+ ("OLD_SPACE", 0x09355): "RegExpMultipleCache",
+ ("OLD_SPACE", 0x0975d): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x0976d): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x0977d): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x0978d): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x0979d): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x097ad): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x097bd): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x097cd): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x097dd): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x097ed): "InfinityValue",
+ ("OLD_SPACE", 0x097fd): "MinusZeroValue",
+ ("OLD_SPACE", 0x0980d): "MinusInfinityValue",
+ ("OLD_SPACE", 0x0981d): "MessageListeners",
+ ("OLD_SPACE", 0x09839): "CodeStubs",
+ ("OLD_SPACE", 0x10201): "DummyVector",
+ ("OLD_SPACE", 0x1403d): "NonMonomorphicCache",
+ ("OLD_SPACE", 0x14651): "PolymorphicCodeCache",
+ ("OLD_SPACE", 0x14659): "NativesSourceCache",
+ ("OLD_SPACE", 0x148f5): "ExperimentalNativesSourceCache",
+ ("OLD_SPACE", 0x14929): "ExtraNativesSourceCache",
+ ("OLD_SPACE", 0x14949): "ExperimentalExtraNativesSourceCache",
+ ("OLD_SPACE", 0x14955): "EmptyScript",
+ ("OLD_SPACE", 0x14995): "IntrinsicFunctionNames",
+ ("OLD_SPACE", 0x2e73d): "UndefinedCell",
+ ("OLD_SPACE", 0x2e745): "ObservationState",
+ ("OLD_SPACE", 0x2e751): "ScriptList",
+ ("OLD_SPACE", 0x2e8d9): "ClearedOptimizedCodeMap",
+ ("OLD_SPACE", 0x2e8e5): "EmptyWeakCell",
+ ("OLD_SPACE", 0x54715): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x54761): "WeakObjectToCodeTable",
+ ("OLD_SPACE", 0x54875): "ArrayProtector",
+ ("OLD_SPACE", 0x54885): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x54895): "NoScriptSharedFunctionInfos",
+ ("OLD_SPACE", 0x5711d): "InterpreterTable",
+ ("OLD_SPACE", 0x57325): "EmptyBytecodeArray",
+ ("OLD_SPACE", 0x5a2d1): "StringTable",
+ ("CODE_SPACE", 0x1a2a1): "JsEntryCode",
+ ("CODE_SPACE", 0x1f081): "JsConstructEntryCode",
}
diff --git a/chromium/v8/tools/whitespace.txt b/chromium/v8/tools/whitespace.txt
index 5a830c0e127..687be113dd4 100644
--- a/chromium/v8/tools/whitespace.txt
+++ b/chromium/v8/tools/whitespace.txt
@@ -5,4 +5,4 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up.
+The Smi looked at them when a crazy v8-autoroll account showed up.....